2 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
3 * Basically selected code segments from usb-cdc.c and usb-rndis.c
5 * Copyright (C) 1999-2017, Broadcom Corporation
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions of
17 * the license of that module. An independent module is a module which is not
18 * derived from this software. The special exception does not apply to any
19 * modifications of the software.
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
26 * <<Broadcom-WL-IPTag/Open:>>
28 * $Id: dhd_linux.c 710862 2017-07-14 07:43:59Z $
35 #include <linux/syscalls.h>
36 #include <event_log.h>
37 #endif /* SHOW_LOGTRACE */
39 #include <linux/init.h>
40 #include <linux/kernel.h>
41 #include <linux/slab.h>
42 #include <linux/skbuff.h>
43 #include <linux/netdevice.h>
44 #include <linux/inetdevice.h>
45 #include <linux/rtnetlink.h>
46 #include <linux/etherdevice.h>
47 #include <linux/random.h>
48 #include <linux/spinlock.h>
49 #include <linux/ethtool.h>
50 #include <linux/fcntl.h>
53 #include <linux/reboot.h>
54 #include <linux/notifier.h>
55 #include <net/addrconf.h>
56 #ifdef ENABLE_ADAPTIVE_SCHED
57 #include <linux/cpufreq.h>
58 #endif /* ENABLE_ADAPTIVE_SCHED */
60 #include <asm/uaccess.h>
61 #include <asm/unaligned.h>
65 #include <bcmendian.h>
74 #include <dngl_stats.h>
75 #include <dhd_linux_wq.h>
77 #include <dhd_linux.h>
81 #ifdef PCIE_FULL_DONGLE
82 #include <dhd_flowring.h>
85 #include <dhd_proto.h>
86 #include <dhd_config.h>
91 #include <dhd_debug.h>
92 #ifdef CONFIG_HAS_WAKELOCK
93 #include <linux/wakelock.h>
96 #include <wl_cfg80211.h>
105 #include <dhd_timesync.h>
106 #endif /* DHD_TIMESYNC */
109 #include <linux/compat.h>
112 #if defined(CONFIG_SOC_EXYNOS8895)
113 #include <linux/exynos-pci-ctrl.h>
114 #endif /* CONFIG_SOC_EXYNOS8895 */
117 #include <dhd_wmf_linux.h>
122 #include <bcm_l2_filter.h>
123 #include <dhd_l2_filter.h>
124 #endif /* DHD_L2_FILTER */
127 #include <dhd_psta.h>
128 #endif /* DHD_PSTA */
131 #ifdef DHDTCPACK_SUPPRESS
133 #endif /* DHDTCPACK_SUPPRESS */
134 #include <dhd_daemon.h>
135 #ifdef DHD_PKT_LOGGING
136 #include <dhd_pktlog.h>
137 #endif /* DHD_PKT_LOGGING */
138 #if defined(STAT_REPORT)
139 #include <wl_statreport.h>
140 #endif /* STAT_REPORT */
141 #ifdef DHD_DEBUG_PAGEALLOC
142 typedef void (*page_corrupt_cb_t
)(void *handle
, void *addr_corrupt
, size_t len
);
143 void dhd_page_corrupt_cb(void *handle
, void *addr_corrupt
, size_t len
);
144 extern void register_page_corrupt_cb(page_corrupt_cb_t cb
, void* handle
);
145 #endif /* DHD_DEBUG_PAGEALLOC */
149 #if !defined(PCIE_FULL_DONGLE)
150 #error "DHD Loadbalancing only supported on PCIE_FULL_DONGLE"
151 #endif /* !PCIE_FULL_DONGLE */
154 #if defined(DHD_LB_RXP) || defined(DHD_LB_RXC) || defined(DHD_LB_TXC) || \
155 defined(DHD_LB_STATS)
157 #error "DHD loadbalance derivatives are supported only if DHD_LB is defined"
159 #endif /* DHD_LB_RXP || DHD_LB_RXC || DHD_LB_TXC || DHD_LB_STATS */
162 /* Dynamic CPU selection for load balancing */
163 #include <linux/cpu.h>
164 #include <linux/cpumask.h>
165 #include <linux/notifier.h>
166 #include <linux/workqueue.h>
167 #include <asm/atomic.h>
169 #if !defined(DHD_LB_PRIMARY_CPUS)
170 #define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */
172 #if !defined(DHD_LB_SECONDARY_CPUS)
173 #define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */
176 #define HIST_BIN_SIZE 9
178 static void dhd_rx_napi_dispatcher_fn(struct work_struct
* work
);
180 #if defined(DHD_LB_TXP)
181 static void dhd_lb_tx_handler(unsigned long data
);
182 static void dhd_tx_dispatcher_work(struct work_struct
* work
);
183 static void dhd_tx_dispatcher_fn(dhd_pub_t
*dhdp
);
184 static void dhd_lb_tx_dispatch(dhd_pub_t
*dhdp
);
186 /* Pkttag not compatible with PROP_TXSTATUS or WLFC */
/* Pkttag not compatible with PROP_TXSTATUS or WLFC */
typedef struct dhd_tx_lb_pkttag_fr {
	struct net_device *net;	/* net_device the tx skb belongs to */
	int ifidx;		/* dongle interface index; read/written by the
				 * DHD_LB_TX_PKTTAG_{SET_,}IFIDX() accessors
				 */
} dhd_tx_lb_pkttag_fr_t;
192 #define DHD_LB_TX_PKTTAG_SET_NETDEV(tag, netdevp) ((tag)->net = netdevp)
193 #define DHD_LB_TX_PKTTAG_NETDEV(tag) ((tag)->net)
195 #define DHD_LB_TX_PKTTAG_SET_IFIDX(tag, ifidx) ((tag)->ifidx = ifidx)
196 #define DHD_LB_TX_PKTTAG_IFIDX(tag) ((tag)->ifidx)
197 #endif /* DHD_LB_TXP */
200 #ifdef HOFFLOAD_MODULES
201 #include <linux/firmware.h>
205 #include <linux/time.h>
208 #define HTSF_MINLEN 200 /* min. packet length to timestamp */
209 #define HTSF_BUS_DELAY 150 /* assume a fix propagation in us */
210 #define TSMAX 1000 /* max no. of timing record kept */
213 static uint32 tsidx
= 0;
214 static uint32 htsf_seqnum
= 0;
216 struct timeval tsync
;
217 static uint32 tsport
= 5010;
219 typedef struct histo_
{
223 #if !ISPOWEROF2(DHD_SDALIGN)
224 #error DHD_SDALIGN is not a power of 2!
227 static histo_t vi_d1
, vi_d2
, vi_d3
, vi_d4
;
228 #endif /* WLMEDIA_HTSF */
231 #include <bcmmsgbuf.h>
232 #include <bcmwifi_monitor.h>
235 #define htod32(i) (i)
236 #define htod16(i) (i)
237 #define dtoh32(i) (i)
238 #define dtoh16(i) (i)
239 #define htodchanspec(i) (i)
240 #define dtohchanspec(i) (i)
245 #endif /* quote_str */
248 #endif /* quote_str */
250 #define quote_str(s) to_str(s)
252 static char *driver_target
= "driver_target: "quote_str(BRCM_DRIVER_TARGET
);
253 #endif /* STBLINUX */
258 extern bool ap_cfg_running
;
259 extern bool ap_fw_loaded
;
262 #ifdef DHD_8021X_DUMP
263 extern void dhd_dump_eapol_4way_message(char *ifname
, char *dump_data
, bool direction
);
264 #endif /* DHD_8021X_DUMP */
266 #ifdef FIX_CPU_MIN_CLOCK
267 #include <linux/pm_qos.h>
268 #endif /* FIX_CPU_MIN_CLOCK */
270 #ifdef SET_RANDOM_MAC_SOFTAP
271 #ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
272 #define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11
274 static u32 vendor_oui
= CONFIG_DHD_SET_RANDOM_MAC_VAL
;
275 #endif /* SET_RANDOM_MAC_SOFTAP */
277 #ifdef ENABLE_ADAPTIVE_SCHED
278 #define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
279 #ifndef CUSTOM_CPUFREQ_THRESH
280 #define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
281 #endif /* CUSTOM_CPUFREQ_THRESH */
282 #endif /* ENABLE_ADAPTIVE_SCHED */
284 /* enable HOSTIP cache update from the host side when an eth0:N is up */
285 #define AOE_IP_ALIAS_SUPPORT 1
289 #include <bcm_rpc_tp.h>
292 #include <wlfc_proto.h>
293 #include <dhd_wlfc.h>
296 #include <wl_android.h>
298 /* Maximum STA per radio */
299 #define DHD_MAX_STA 32
301 #ifdef CUSTOMER_HW_AMLOGIC
302 #include <linux/amlogic/wifi_dt.h>
/* Map WME fifo index -> WME access category (AC) */
const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
/* Map 802.1D priority (0..7) -> WME fifo index; composed with the table
 * above by the WME_PRIO2AC() macro
 */
const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
308 #define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
310 #ifdef ARP_OFFLOAD_SUPPORT
311 void aoe_update_host_ipv4_table(dhd_pub_t
*dhd_pub
, u32 ipa
, bool add
, int idx
);
312 static int dhd_inetaddr_notifier_call(struct notifier_block
*this,
313 unsigned long event
, void *ptr
);
314 static struct notifier_block dhd_inetaddr_notifier
= {
315 .notifier_call
= dhd_inetaddr_notifier_call
317 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
318 * created in kernel notifier link list (with 'next' pointing to itself)
320 static bool dhd_inetaddr_notifier_registered
= FALSE
;
321 #endif /* ARP_OFFLOAD_SUPPORT */
323 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
324 int dhd_inet6addr_notifier_call(struct notifier_block
*this,
325 unsigned long event
, void *ptr
);
326 static struct notifier_block dhd_inet6addr_notifier
= {
327 .notifier_call
= dhd_inet6addr_notifier_call
329 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
330 * created in kernel notifier link list (with 'next' pointing to itself)
332 static bool dhd_inet6addr_notifier_registered
= FALSE
;
333 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
335 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
336 #include <linux/suspend.h>
337 volatile bool dhd_mmc_suspend
= FALSE
;
338 DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait
);
339 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
341 #if defined(OOB_INTR_ONLY) || defined(FORCE_WOWLAN)
342 extern void dhd_enable_oob_intr(struct dhd_bus
*bus
, bool enable
);
344 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
345 static void dhd_hang_process(void *dhd_info
, void *event_data
, u8 event
);
347 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
348 MODULE_LICENSE("GPL and additional rights");
349 #endif /* LinuxVer */
351 #if defined(MULTIPLE_SUPPLICANT)
352 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
353 DEFINE_MUTEX(_dhd_mutex_lock_
);
354 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
357 #ifdef CONFIG_BCM_DETECT_CONSECUTIVE_HANG
358 #define MAX_CONSECUTIVE_HANG_COUNTS 5
359 #endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
368 #define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
370 #ifndef PROP_TXSTATUS
371 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
373 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
375 #endif /* BCM_FD_AGGR */
378 extern bool dhd_wlfc_skip_fc(void * dhdp
, uint8 idx
);
379 extern void dhd_wlfc_plat_init(void *dhd
);
380 extern void dhd_wlfc_plat_deinit(void *dhd
);
381 #endif /* PROP_TXSTATUS */
382 extern uint sd_f2_blocksize
;
383 extern int dhdsdio_func_blocksize(dhd_pub_t
*dhd
, int function_num
, int block_size
);
385 #if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
391 #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
393 /* Linux wireless extension support */
394 #if defined(WL_WIRELESS_EXT)
396 extern wl_iw_extra_params_t g_wl_iw_params
;
397 #endif /* defined(WL_WIRELESS_EXT) */
399 #ifdef CONFIG_PARTIALSUSPEND_SLP
400 #include <linux/partialsuspend_slp.h>
401 #define CONFIG_HAS_EARLYSUSPEND
402 #define DHD_USE_EARLYSUSPEND
403 #define register_early_suspend register_pre_suspend
404 #define unregister_early_suspend unregister_pre_suspend
405 #define early_suspend pre_suspend
406 #define EARLY_SUSPEND_LEVEL_BLANK_SCREEN 50
408 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
409 #include <linux/earlysuspend.h>
410 #endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
411 #endif /* CONFIG_PARTIALSUSPEND_SLP */
413 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
414 #include <linux/nl80211.h>
415 #endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
418 extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t
*dhd
, int *dtim_period
, int *bcn_interval
);
420 extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t
*dhd
);
421 #endif /* OEM_ANDROID && BCMPCIE */
423 #ifdef PKT_FILTER_SUPPORT
424 extern void dhd_pktfilter_offload_set(dhd_pub_t
* dhd
, char *arg
);
425 extern void dhd_pktfilter_offload_enable(dhd_pub_t
* dhd
, char *arg
, int enable
, int master_mode
);
426 extern void dhd_pktfilter_offload_delete(dhd_pub_t
*dhd
, int id
);
429 #if defined(PKT_FILTER_SUPPORT) && defined(APF)
430 static int __dhd_apf_add_filter(struct net_device
*ndev
, uint32 filter_id
,
431 u8
* program
, uint32 program_len
);
432 static int __dhd_apf_config_filter(struct net_device
*ndev
, uint32 filter_id
,
433 uint32 mode
, uint32 enable
);
434 static int __dhd_apf_delete_filter(struct net_device
*ndev
, uint32 filter_id
);
435 #endif /* PKT_FILTER_SUPPORT && APF */
/* No-op stub that always reports success; @net is unused.
 * NOTE(review): presumably the real ARGOS notifier hooks are disabled in
 * this configuration — confirm against the surrounding #ifdefs.
 */
static INLINE int argos_register_notifier_init(struct net_device *net) { return 0;}
/* No-op stub counterpart to argos_register_notifier_init(); always succeeds. */
static INLINE int argos_register_notifier_deinit(void) { return 0;}
442 #if defined(BT_OVER_SDIO)
443 extern void wl_android_set_wifi_on_flag(bool enable
);
444 #endif /* BT_OVER_SDIO */
447 #if defined(TRAFFIC_MGMT_DWM)
448 void traffic_mgmt_pkt_set_prio(dhd_pub_t
*dhdp
, void * pktbuf
);
451 #ifdef DHD_FW_COREDUMP
452 static void dhd_mem_dump(void *dhd_info
, void *event_info
, u8 event
);
453 #endif /* DHD_FW_COREDUMP */
455 #define DLD_BUFFER_NUM 2
456 /* [0]: General, [1]: Special */
457 struct dhd_log_dump_buf g_dld_buf
[DLD_BUFFER_NUM
];
458 static const int dld_buf_size
[] = {
459 (1024 * 1024), /* DHD_LOG_DUMP_BUFFER_SIZE */
460 (8 * 1024) /* DHD_LOG_DUMP_BUFFER_EX_SIZE */
462 static void dhd_log_dump_init(dhd_pub_t
*dhd
);
463 static void dhd_log_dump_deinit(dhd_pub_t
*dhd
);
464 static void dhd_log_dump(void *handle
, void *event_info
, u8 event
);
465 void dhd_schedule_log_dump(dhd_pub_t
*dhdp
);
466 static int do_dhd_log_dump(dhd_pub_t
*dhdp
);
467 #endif /* DHD_LOG_DUMP */
469 #ifdef DHD_DEBUG_UART
470 #include <linux/kmod.h>
471 #define DHD_DEBUG_UART_EXEC_PATH "/system/bin/wldu"
472 static void dhd_debug_uart_exec_rd(void *handle
, void *event_info
, u8 event
);
473 static void dhd_debug_uart_exec(dhd_pub_t
*dhdp
, char *cmd
);
474 #endif /* DHD_DEBUG_UART */
476 static int dhd_reboot_callback(struct notifier_block
*this, unsigned long code
, void *unused
);
477 static struct notifier_block dhd_reboot_notifier
= {
478 .notifier_call
= dhd_reboot_callback
,
483 static int is_reboot
= 0;
486 #if defined(BT_OVER_SDIO)
487 #include "dhd_bt_interface.h"
488 dhd_pub_t
*g_dhd_pub
= NULL
;
489 #endif /* defined (BT_OVER_SDIO) */
491 atomic_t exit_in_progress
= ATOMIC_INIT(0);
493 typedef struct dhd_if_event
{
494 struct list_head list
;
495 wl_event_data_if_t event
;
496 char name
[IFNAMSIZ
+1];
497 uint8 mac
[ETHER_ADDR_LEN
];
500 /* Interface control information */
501 typedef struct dhd_if
{
502 struct dhd_info
*info
; /* back pointer to dhd_info */
503 /* OS/stack specifics */
504 struct net_device
*net
;
505 int idx
; /* iface idx in dongle */
506 uint subunit
; /* subunit */
507 uint8 mac_addr
[ETHER_ADDR_LEN
]; /* assigned MAC address */
510 uint8 bssidx
; /* bsscfg index for the interface */
511 bool attached
; /* Delayed attachment when unset */
512 bool txflowcontrol
; /* Per interface flow control indicator */
513 char name
[IFNAMSIZ
+1]; /* linux interface name */
514 char dngl_name
[IFNAMSIZ
+1]; /* corresponding dongle interface name */
515 struct net_device_stats stats
;
517 dhd_wmf_t wmf
; /* per bsscfg wmf setting */
518 bool wmf_psta_disable
; /* enable/disable MC pkt to each mac
519 * of MC group behind PSTA
522 #ifdef PCIE_FULL_DONGLE
523 struct list_head sta_list
; /* sll of associated stations */
524 #if !defined(BCM_GMAC3)
525 spinlock_t sta_list_lock
; /* lock for manipulating sll */
526 #endif /* ! BCM_GMAC3 */
527 #endif /* PCIE_FULL_DONGLE */
528 uint32 ap_isolate
; /* ap-isolation settings */
533 arp_table_t
*phnd_arp_table
;
534 /* for Per BSS modification */
538 #endif /* DHD_L2_FILTER */
539 #ifdef DHD_MCAST_REGEN
540 bool mcast_regen_bss_enable
;
542 bool rx_pkt_chainable
; /* set all rx packet to chainable config by default */
543 cumm_ctr_t cumm_ctr
; /* cumulative queue length of child flowrings */
556 uint32 coef
; /* scaling factor */
557 uint32 coefdec1
; /* first decimal */
558 uint32 coefdec2
; /* second decimal */
568 static tstamp_t ts
[TSMAX
];
569 static tstamp_t maxdelayts
;
570 static uint32 maxdelay
= 0, tspktcnt
= 0, maxdelaypktno
= 0;
572 #endif /* WLMEDIA_HTSF */
574 struct ipv6_work_info_t
{
576 char ipv6_addr
[IPV6_ADDR_LEN
];
579 static void dhd_process_daemon_msg(struct sk_buff
*skb
);
580 static void dhd_destroy_to_notifier_skt(void);
581 static int dhd_create_to_notifier_skt(void);
582 static struct sock
*nl_to_event_sk
= NULL
;
585 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
586 struct netlink_kernel_cfg g_cfg
= {
588 .input
= dhd_process_daemon_msg
,
590 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */
592 typedef struct dhd_dump
{
598 /* When Perimeter locks are deployed, any blocking calls must be preceded
599 * with a PERIM UNLOCK and followed by a PERIM LOCK.
600 * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
601 * wait_event_timeout().
604 /* Local private structure (extension of pub) */
605 typedef struct dhd_info
{
606 #if defined(WL_WIRELESS_EXT)
607 wl_iw_t iw
; /* wireless extensions state (must be first) */
608 #endif /* defined(WL_WIRELESS_EXT) */
610 dhd_if_t
*iflist
[DHD_MAX_IFS
]; /* for supporting multiple interfaces */
612 wifi_adapter_info_t
*adapter
; /* adapter information, interrupt, fw path etc. */
613 char fw_path
[PATH_MAX
]; /* path to firmware image */
614 char nv_path
[PATH_MAX
]; /* path to nvram vars file */
615 char clm_path
[PATH_MAX
]; /* path to clm vars file */
616 char conf_path
[PATH_MAX
]; /* path to config vars file */
617 #ifdef DHD_UCODE_DOWNLOAD
618 char uc_path
[PATH_MAX
]; /* path to ucode image */
619 #endif /* DHD_UCODE_DOWNLOAD */
621 /* serialize dhd iovars */
622 struct mutex dhd_iovar_mutex
;
624 struct semaphore proto_sem
;
626 spinlock_t wlfc_spinlock
;
629 ulong wlfc_lock_flags
;
630 ulong wlfc_pub_lock_flags
;
632 #endif /* PROP_TXSTATUS */
636 wait_queue_head_t ioctl_resp_wait
;
637 wait_queue_head_t d3ack_wait
;
638 wait_queue_head_t dhd_bus_busy_state_wait
;
639 uint32 default_wd_interval
;
641 struct timer_list timer
;
643 #ifdef DHD_PCIE_RUNTIMEPM
644 struct timer_list rpm_timer
;
645 bool rpm_timer_valid
;
646 tsk_ctl_t thr_rpm_ctl
;
647 #endif /* DHD_PCIE_RUNTIMEPM */
648 struct tasklet_struct tasklet
;
657 struct semaphore sdsem
;
658 tsk_ctl_t thr_dpc_ctl
;
659 tsk_ctl_t thr_wdt_ctl
;
662 tsk_ctl_t thr_rxf_ctl
;
664 bool rxthread_enabled
;
667 #if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
668 struct wake_lock wl_wifi
; /* Wifi wakelock */
669 struct wake_lock wl_rxwake
; /* Wifi rx wakelock */
670 struct wake_lock wl_ctrlwake
; /* Wifi ctrl wakelock */
671 struct wake_lock wl_wdwake
; /* Wifi wd wakelock */
672 struct wake_lock wl_evtwake
; /* Wifi event wakelock */
673 struct wake_lock wl_pmwake
; /* Wifi pm handler wakelock */
674 struct wake_lock wl_txflwake
; /* Wifi tx flow wakelock */
675 #ifdef BCMPCIE_OOB_HOST_WAKE
676 struct wake_lock wl_intrwake
; /* Host wakeup wakelock */
677 #endif /* BCMPCIE_OOB_HOST_WAKE */
678 #ifdef DHD_USE_SCAN_WAKELOCK
679 struct wake_lock wl_scanwake
; /* Wifi scan wakelock */
680 #endif /* DHD_USE_SCAN_WAKELOCK */
681 #endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
683 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
684 /* net_device interface lock, prevent race conditions among net_dev interface
685 * calls and wifi_on or wifi_off
687 struct mutex dhd_net_if_mutex
;
688 struct mutex dhd_suspend_mutex
;
689 #if defined(PKT_FILTER_SUPPORT) && defined(APF)
690 struct mutex dhd_apf_mutex
;
691 #endif /* PKT_FILTER_SUPPORT && APF */
693 spinlock_t wakelock_spinlock
;
694 spinlock_t wakelock_evt_spinlock
;
695 uint32 wakelock_counter
;
696 int wakelock_wd_counter
;
697 int wakelock_rx_timeout_enable
;
698 int wakelock_ctrl_timeout_enable
;
700 uint32 wakelock_before_waive
;
702 /* Thread to issue ioctl for multicast */
703 wait_queue_head_t ctrl_wait
;
704 atomic_t pend_8021x_cnt
;
705 dhd_attach_states_t dhd_state
;
707 dhd_event_log_t event_data
;
708 #endif /* SHOW_LOGTRACE */
710 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
711 struct early_suspend early_suspend
;
712 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
714 #ifdef ARP_OFFLOAD_SUPPORT
716 #endif /* ARP_OFFLOAD_SUPPORT */
720 struct timer_list rpcth_timer
;
721 bool rpcth_timer_active
;
724 #ifdef DHDTCPACK_SUPPRESS
725 spinlock_t tcpack_lock
;
726 #endif /* DHDTCPACK_SUPPRESS */
727 #ifdef FIX_CPU_MIN_CLOCK
728 bool cpufreq_fix_status
;
729 struct mutex cpufreq_fix
;
730 struct pm_qos_request dhd_cpu_qos
;
731 #ifdef FIX_BUS_MIN_CLOCK
732 struct pm_qos_request dhd_bus_qos
;
733 #endif /* FIX_BUS_MIN_CLOCK */
734 #endif /* FIX_CPU_MIN_CLOCK */
735 void *dhd_deferred_wq
;
736 #ifdef DEBUG_CPU_FREQ
737 struct notifier_block freq_trans
;
738 int __percpu
*new_freq
;
741 struct notifier_block pm_notifier
;
743 uint32 psta_mode
; /* PSTA or PSR */
744 #endif /* DHD_PSTA */
750 struct timer_list join_timer
;
751 u32 join_timeout_val
;
752 bool join_timer_active
;
753 uint scan_time_count
;
754 struct timer_list scan_timer
;
755 bool scan_timer_active
;
758 /* CPU Load Balance dynamic CPU selection */
760 /* Variable that tracks the current CPUs available for candidacy */
761 cpumask_var_t cpumask_curr_avail
;
763 /* Primary and secondary CPU mask */
764 cpumask_var_t cpumask_primary
, cpumask_secondary
; /* configuration */
765 cpumask_var_t cpumask_primary_new
, cpumask_secondary_new
; /* temp */
767 struct notifier_block cpu_notifier
;
769 /* Tasklet to handle Tx Completion packet freeing */
770 struct tasklet_struct tx_compl_tasklet
;
771 atomic_t tx_compl_cpu
;
773 /* Tasklet to handle RxBuf Post during Rx completion */
774 struct tasklet_struct rx_compl_tasklet
;
775 atomic_t rx_compl_cpu
;
777 /* Napi struct for handling rx packet sendup. Packets are removed from
778 * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
779 * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
780 * to run to rx_napi_cpu.
782 struct sk_buff_head rx_pend_queue ____cacheline_aligned
;
783 struct sk_buff_head rx_napi_queue ____cacheline_aligned
;
784 struct napi_struct rx_napi_struct ____cacheline_aligned
;
785 atomic_t rx_napi_cpu
; /* cpu on which the napi is dispatched */
786 struct net_device
*rx_napi_netdev
; /* netdev of primary interface */
788 struct work_struct rx_napi_dispatcher_work
;
789 struct work_struct tx_compl_dispatcher_work
;
790 struct work_struct tx_dispatcher_work
;
792 /* Number of times DPC Tasklet ran */
794 /* Number of times NAPI processing got scheduled */
795 uint32 napi_sched_cnt
;
796 /* Number of times NAPI processing ran on each available core */
797 uint32
*napi_percpu_run_cnt
;
798 /* Number of times RX Completions got scheduled */
799 uint32 rxc_sched_cnt
;
800 /* Number of times RX Completion ran on each available core */
801 uint32
*rxc_percpu_run_cnt
;
802 /* Number of times TX Completions got scheduled */
803 uint32 txc_sched_cnt
;
804 /* Number of times TX Completions ran on each available core */
805 uint32
*txc_percpu_run_cnt
;
807 /* Number of times each CPU came online */
808 uint32
*cpu_online_cnt
;
809 /* Number of times each CPU went offline */
810 uint32
*cpu_offline_cnt
;
812 /* Number of times TX processing run on each core */
813 uint32
*txp_percpu_run_cnt
;
814 /* Number of times TX start run on each core */
815 uint32
*tx_start_percpu_run_cnt
;
817 /* Tx load balancing */
819 /* TODO: Need to see if batch processing is really required in case of TX
820 * processing. In case of RX the Dongle can send a bunch of rx completions,
821 * hence we took a 3 queue approach
822 * enque - adds the skbs to rx_pend_queue
823 * dispatch - uses a lock and adds the list of skbs from pend queue to
825 * napi processing - copies the pend_queue into a local queue and works
827 * But for TX its going to be 1 skb at a time, so we are just thinking
828 * of using only one queue and use the lock supported skb queue functions
829 * to add and process it. If it's inefficient we'll revisit the queue
833 /* When the NET_TX tries to send a TX packet put it into tx_pend_queue */
834 /* struct sk_buff_head tx_pend_queue ____cacheline_aligned; */
836 * From the Tasklet that actually sends out data
837 * copy the list tx_pend_queue into tx_active_queue. There by we need
838 * to spinlock to only perform the copy the rest of the code ie to
839 * construct the tx_pend_queue and the code to process tx_active_queue
840 * can be lockless. The concept is borrowed as is from RX processing
842 /* struct sk_buff_head tx_active_queue ____cacheline_aligned; */
844 /* Control TXP in runtime, enable by default */
845 atomic_t lb_txp_active
;
848 * When the NET_TX tries to send a TX packet put it into tx_pend_queue
849 * For now, the processing tasklet will also directly operate on this
852 struct sk_buff_head tx_pend_queue ____cacheline_aligned
;
854 /* cpu on which the DHD Tx is happening */
857 /* CPU on which the Network stack is calling the DHD's xmit function */
860 /* Tasklet context from which the DHD's TX processing happens */
861 struct tasklet_struct tx_tasklet
;
864 * Consumer Histogram - NAPI RX Packet processing
865 * -----------------------------------------------
866 * On Each CPU, when the NAPI RX Packet processing call back was invoked
867 * how many packets were processed is captured in this data structure.
868 * Now it's difficult to capture the "exact" number of packets processed.
869 * So considering the packet counter to be a 32 bit one, we have a
870 * bucket with 8 bins (2^1, 2^2 ... 2^8). The "number" of packets
871 * processed is rounded off to the next power of 2 and put in the
872 * appropriate "bin"; the value in the bin gets incremented.
873 * For example, assume that in CPU 1 if NAPI Rx runs 3 times
874 * and the packet count processed is as follows (assume the bin counters are 0)
875 * iteration 1 - 10 (the bin counter 2^4 increments to 1)
876 * iteration 2 - 30 (the bin counter 2^5 increments to 1)
877 * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
879 uint32
*napi_rx_hist
[HIST_BIN_SIZE
];
880 uint32
*txc_hist
[HIST_BIN_SIZE
];
881 uint32
*rxc_hist
[HIST_BIN_SIZE
];
885 struct work_struct event_log_dispatcher_work
;
886 #endif /* SHOW_LOGTRACE */
888 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
889 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
890 struct kobject dhd_kobj
;
892 struct sk_buff_head evt_trace_queue ____cacheline_aligned
;
894 struct timer_list timesync_timer
;
895 #if defined(BT_OVER_SDIO)
896 char btfw_path
[PATH_MAX
];
897 #endif /* defined (BT_OVER_SDIO) */
900 struct net_device
*monitor_dev
; /* monitor pseudo device */
901 struct sk_buff
*monitor_skb
;
903 uint monitor_type
; /* monitor pseudo device */
904 monitor_info_t
*monitor_info
;
905 #endif /* WL_MONITOR */
907 #if defined(BT_OVER_SDIO)
908 struct mutex bus_user_lock
; /* lock for sdio bus apis shared between WLAN & BT */
909 int bus_user_count
; /* User counts of sdio bus shared between WLAN & BT */
910 #endif /* BT_OVER_SDIO */
911 #ifdef DHD_DEBUG_UART
915 wait_queue_head_t ds_exit_wait
;
916 #endif /* PCIE_INB_DW */
920 #define MONPKT_EXTRA_LEN 48
923 #define DHDIF_FWDER(dhdif) FALSE
925 #if defined(BT_OVER_SDIO)
926 /* Flag to indicate if driver is initialized */
927 uint dhd_driver_init_done
= TRUE
;
929 /* Flag to indicate if driver is initialized */
930 uint dhd_driver_init_done
= FALSE
;
932 /* Flag to indicate if we should download firmware on driver load */
933 uint dhd_download_fw_on_driverload
= TRUE
;
935 /* Definitions to provide path to the firmware and nvram
936 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
938 char firmware_path
[MOD_PARAM_PATHLEN
];
939 char nvram_path
[MOD_PARAM_PATHLEN
];
940 char clm_path
[MOD_PARAM_PATHLEN
];
941 char config_path
[MOD_PARAM_PATHLEN
];
942 #ifdef DHD_UCODE_DOWNLOAD
943 char ucode_path
[MOD_PARAM_PATHLEN
];
944 #endif /* DHD_UCODE_DOWNLOAD */
946 module_param_string(clm_path
, clm_path
, MOD_PARAM_PATHLEN
, 0660);
949 /* backup buffer for firmware and nvram path */
950 char fw_bak_path
[MOD_PARAM_PATHLEN
];
951 char nv_bak_path
[MOD_PARAM_PATHLEN
];
953 /* information string to keep firmware, chip, and chip ID version info visible in the log */
954 char info_string
[MOD_PARAM_INFOLEN
];
955 module_param_string(info_string
, info_string
, MOD_PARAM_INFOLEN
, 0444);
957 int disable_proptx
= 0;
958 module_param(op_mode
, int, 0644);
959 extern int wl_control_wl_start(struct net_device
*dev
);
960 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (defined(BCMLXSDMMC) || defined(BCMDBUS))
961 struct semaphore dhd_registration_sem
;
962 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
964 /* deferred handlers */
965 static void dhd_ifadd_event_handler(void *handle
, void *event_info
, u8 event
);
966 static void dhd_ifdel_event_handler(void *handle
, void *event_info
, u8 event
);
967 static void dhd_set_mac_addr_handler(void *handle
, void *event_info
, u8 event
);
968 static void dhd_set_mcast_list_handler(void *handle
, void *event_info
, u8 event
);
970 #ifdef DHD_UPDATE_INTF_MAC
971 static void dhd_ifupdate_event_handler(void *handle
, void *event_info
, u8 event
);
972 #endif /* DHD_UPDATE_INTF_MAC */
973 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
974 static void dhd_inet6_work_handler(void *dhd_info
, void *event_data
, u8 event
);
975 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
977 extern void dhd_netdev_free(struct net_device
*ndev
);
978 #endif /* WL_CFG80211 */
980 #if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
981 /* update rx_pkt_chainable state of dhd interface */
982 static void dhd_update_rx_pkt_chainable_state(dhd_pub_t
* dhdp
, uint32 idx
);
983 #endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */
985 #ifdef HOFFLOAD_MODULES
986 char dhd_hmem_module_string
[MOD_PARAM_SRLEN
];
987 module_param_string(dhd_hmem_module_string
, dhd_hmem_module_string
, MOD_PARAM_SRLEN
, 0660);
990 module_param(dhd_msg_level
, int, 0);
991 #if defined(WL_WIRELESS_EXT)
992 module_param(iw_msg_level
, int, 0);
995 module_param(wl_dbg_level
, int, 0);
997 module_param(android_msg_level
, int, 0);
998 module_param(config_msg_level
, int, 0);
1000 #ifdef ARP_OFFLOAD_SUPPORT
1001 /* ARP offload enable */
1002 uint dhd_arp_enable
= TRUE
;
1003 module_param(dhd_arp_enable
, uint
, 0);
1005 /* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
1007 #ifdef ENABLE_ARP_SNOOP_MODE
1008 uint dhd_arp_mode
= ARP_OL_AGENT
| ARP_OL_PEER_AUTO_REPLY
| ARP_OL_SNOOP
| ARP_OL_HOST_AUTO_REPLY
;
1010 uint dhd_arp_mode
= ARP_OL_AGENT
| ARP_OL_PEER_AUTO_REPLY
;
1011 #endif /* ENABLE_ARP_SNOOP_MODE */
1013 module_param(dhd_arp_mode
, uint
, 0);
1014 #endif /* ARP_OFFLOAD_SUPPORT */
1016 /* Disable Prop tx */
1017 module_param(disable_proptx
, int, 0644);
1018 /* load firmware and/or nvram values from the filesystem */
1019 module_param_string(firmware_path
, firmware_path
, MOD_PARAM_PATHLEN
, 0660);
1020 module_param_string(nvram_path
, nvram_path
, MOD_PARAM_PATHLEN
, 0660);
1021 module_param_string(config_path
, config_path
, MOD_PARAM_PATHLEN
, 0);
1022 #ifdef DHD_UCODE_DOWNLOAD
1023 module_param_string(ucode_path
, ucode_path
, MOD_PARAM_PATHLEN
, 0660);
1024 #endif /* DHD_UCODE_DOWNLOAD */
1026 /* Watchdog interval */
1028 /* extend watchdog expiration to 2 seconds when DPC is running */
1029 #define WATCHDOG_EXTEND_INTERVAL (2000)
1031 uint dhd_watchdog_ms
= CUSTOM_DHD_WATCHDOG_MS
;
1032 module_param(dhd_watchdog_ms
, uint
, 0);
1034 #ifdef DHD_PCIE_RUNTIMEPM
1035 uint dhd_runtimepm_ms
= CUSTOM_DHD_RUNTIME_MS
;
1036 #endif /* DHD_PCIE_RUNTIMEPMT */
1037 #if defined(DHD_DEBUG)
1038 /* Console poll interval */
1039 uint dhd_console_ms
= 0;
1040 module_param(dhd_console_ms
, uint
, 0644);
1042 uint dhd_console_ms
= 0;
1043 #endif /* DHD_DEBUG */
1045 uint dhd_slpauto
= TRUE
;
1046 module_param(dhd_slpauto
, uint
, 0);
1048 #ifdef PKT_FILTER_SUPPORT
1049 /* Global Pkt filter enable control */
1050 uint dhd_pkt_filter_enable
= TRUE
;
1051 module_param(dhd_pkt_filter_enable
, uint
, 0);
1054 /* Pkt filter init setup */
1055 uint dhd_pkt_filter_init
= 0;
1056 module_param(dhd_pkt_filter_init
, uint
, 0);
1058 /* Pkt filter mode control */
1059 #ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
1060 uint dhd_master_mode
= FALSE
;
1062 uint dhd_master_mode
= FALSE
;
1063 #endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
1064 module_param(dhd_master_mode
, uint
, 0);
1066 int dhd_watchdog_prio
= 0;
1067 module_param(dhd_watchdog_prio
, int, 0);
1069 /* DPC thread priority */
1070 int dhd_dpc_prio
= CUSTOM_DPC_PRIO_SETTING
;
1071 module_param(dhd_dpc_prio
, int, 0);
1073 /* RX frame thread priority */
1074 int dhd_rxf_prio
= CUSTOM_RXF_PRIO_SETTING
;
1075 module_param(dhd_rxf_prio
, int, 0);
1077 #if !defined(BCMDBUS)
1078 extern int dhd_dongle_ramsize
;
1079 module_param(dhd_dongle_ramsize
, int, 0);
1080 #endif /* !BCMDBUS */
1083 int passive_channel_skip
= 0;
1084 module_param(passive_channel_skip
, int, (S_IRUSR
|S_IWUSR
));
1085 #endif /* WL_CFG80211 */
1087 /* Keep track of number of instances */
1088 static int dhd_found
= 0;
1089 static int instance_base
= 0; /* Starting instance number */
1090 module_param(instance_base
, int, 0644);
1092 #if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
1093 static int dhd_napi_weight
= 32;
1094 module_param(dhd_napi_weight
, int, 0644);
1095 #endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
1097 #ifdef PCIE_FULL_DONGLE
1098 extern int h2d_max_txpost
;
1099 module_param(h2d_max_txpost
, int, 0644);
1100 #endif /* PCIE_FULL_DONGLE */
1102 #ifdef DHD_DHCP_DUMP
1104 struct iphdr ip_header
;
1105 struct udphdr udp_header
;
1110 uint32 transaction_id
;
1117 uint8 hw_address
[16];
1118 uint8 server_name
[64];
1119 uint8 file_name
[128];
1123 static const uint8 bootp_magic_cookie
[4] = { 99, 130, 83, 99 };
1124 static const char dhcp_ops
[][10] = {
1125 "NA", "REQUEST", "REPLY"
1127 static const char dhcp_types
[][10] = {
1128 "NA", "DISCOVER", "OFFER", "REQUEST", "DECLINE", "ACK", "NAK", "RELEASE", "INFORM"
1130 static void dhd_dhcp_dump(char *ifname
, uint8
*pktdata
, bool tx
);
1131 #endif /* DHD_DHCP_DUMP */
1133 #ifdef DHD_ICMP_DUMP
1134 #include <net/icmp.h>
1135 static void dhd_icmp_dump(char *ifname
, uint8
*pktdata
, bool tx
);
1136 #endif /* DHD_ICMP_DUMP */
1138 /* Functions to manage sysfs interface for dhd */
1139 static int dhd_sysfs_init(dhd_info_t
*dhd
);
1140 static void dhd_sysfs_exit(dhd_info_t
*dhd
);
1142 #ifdef SHOW_LOGTRACE
1143 #if defined(CUSTOMER_HW4_DEBUG)
1144 static char *logstrs_path
= PLATFORM_PATH
"logstrs.bin";
1145 static char *st_str_file_path
= PLATFORM_PATH
"rtecdc.bin";
1146 static char *map_file_path
= PLATFORM_PATH
"rtecdc.map";
1147 static char *rom_st_str_file_path
= PLATFORM_PATH
"roml.bin";
1148 static char *rom_map_file_path
= PLATFORM_PATH
"roml.map";
1149 #elif defined(CUSTOMER_HW2)
1150 static char *logstrs_path
= "/data/misc/wifi/logstrs.bin";
1151 static char *st_str_file_path
= "/data/misc/wifi/rtecdc.bin";
1152 static char *map_file_path
= "/data/misc/wifi/rtecdc.map";
1153 static char *rom_st_str_file_path
= "/data/misc/wifi/roml.bin";
1154 static char *rom_map_file_path
= "/data/misc/wifi/roml.map";
1156 static char *logstrs_path
= "/installmedia/logstrs.bin";
1157 static char *st_str_file_path
= "/installmedia/rtecdc.bin";
1158 static char *map_file_path
= "/installmedia/rtecdc.map";
1159 static char *rom_st_str_file_path
= "/installmedia/roml.bin";
1160 static char *rom_map_file_path
= "/installmedia/roml.map";
1161 #endif /* CUSTOMER_HW4_DEBUG || CUSTOMER_HW2 */
1162 static char *ram_file_str
= "rtecdc";
1163 static char *rom_file_str
= "roml";
1165 module_param(logstrs_path
, charp
, S_IRUGO
);
1166 module_param(st_str_file_path
, charp
, S_IRUGO
);
1167 module_param(map_file_path
, charp
, S_IRUGO
);
1168 module_param(rom_st_str_file_path
, charp
, S_IRUGO
);
1169 module_param(rom_map_file_path
, charp
, S_IRUGO
);
1171 static int dhd_init_logstrs_array(osl_t
*osh
, dhd_event_log_t
*temp
);
1172 static int dhd_read_map(osl_t
*osh
, char *fname
, uint32
*ramstart
, uint32
*rodata_start
,
1173 uint32
*rodata_end
);
1174 static int dhd_init_static_strs_array(osl_t
*osh
, dhd_event_log_t
*temp
, char *str_file
,
1176 #endif /* SHOW_LOGTRACE */
1181 dhd_lb_set_default_cpus(dhd_info_t
*dhd
)
1183 /* Default CPU allocation for the jobs */
1184 atomic_set(&dhd
->rx_napi_cpu
, 1);
1185 atomic_set(&dhd
->rx_compl_cpu
, 2);
1186 atomic_set(&dhd
->tx_compl_cpu
, 2);
1187 atomic_set(&dhd
->tx_cpu
, 2);
1188 atomic_set(&dhd
->net_tx_cpu
, 0);
1192 dhd_cpumasks_deinit(dhd_info_t
*dhd
)
1194 free_cpumask_var(dhd
->cpumask_curr_avail
);
1195 free_cpumask_var(dhd
->cpumask_primary
);
1196 free_cpumask_var(dhd
->cpumask_primary_new
);
1197 free_cpumask_var(dhd
->cpumask_secondary
);
1198 free_cpumask_var(dhd
->cpumask_secondary_new
);
1202 dhd_cpumasks_init(dhd_info_t
*dhd
)
1205 uint32 cpus
, num_cpus
= num_possible_cpus();
1208 DHD_ERROR(("%s CPU masks primary(big)=0x%x secondary(little)=0x%x\n", __FUNCTION__
,
1209 DHD_LB_PRIMARY_CPUS
, DHD_LB_SECONDARY_CPUS
));
1211 if (!alloc_cpumask_var(&dhd
->cpumask_curr_avail
, GFP_KERNEL
) ||
1212 !alloc_cpumask_var(&dhd
->cpumask_primary
, GFP_KERNEL
) ||
1213 !alloc_cpumask_var(&dhd
->cpumask_primary_new
, GFP_KERNEL
) ||
1214 !alloc_cpumask_var(&dhd
->cpumask_secondary
, GFP_KERNEL
) ||
1215 !alloc_cpumask_var(&dhd
->cpumask_secondary_new
, GFP_KERNEL
)) {
1216 DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__
));
1221 cpumask_copy(dhd
->cpumask_curr_avail
, cpu_online_mask
);
1222 cpumask_clear(dhd
->cpumask_primary
);
1223 cpumask_clear(dhd
->cpumask_secondary
);
1225 if (num_cpus
> 32) {
1226 DHD_ERROR(("%s max cpus must be 32, %d too big\n", __FUNCTION__
, num_cpus
));
1230 cpus
= DHD_LB_PRIMARY_CPUS
;
1231 for (id
= 0; id
< num_cpus
; id
++) {
1232 if (isset(&cpus
, id
))
1233 cpumask_set_cpu(id
, dhd
->cpumask_primary
);
1236 cpus
= DHD_LB_SECONDARY_CPUS
;
1237 for (id
= 0; id
< num_cpus
; id
++) {
1238 if (isset(&cpus
, id
))
1239 cpumask_set_cpu(id
, dhd
->cpumask_secondary
);
1244 dhd_cpumasks_deinit(dhd
);
/*
 * The CPU Candidacy Algorithm
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * The available CPUs for selection are divided into two groups
 *  Primary Set - A CPU mask that carries the First Choice CPUs
 *  Secondary Set - A CPU mask that carries the Second Choice CPUs.
 *
 * There are two types of Job that need to be assigned to
 * the CPUs, from one of the above mentioned CPU groups. The Jobs are
 * 1) Rx Packet Processing - napi_cpu
 * 2) Completion Processing (Tx, RX) - compl_cpu
 *
 * To begin with, both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
 * on-line/off-line the CPU candidacy algorithm is triggered. The candidacy
 * algo tries to pick up the first available non-boot CPU (CPU0) for napi_cpu.
 * If there are more processors free, it assigns one to compl_cpu.
 * It also tries to ensure that napi_cpu and compl_cpu are not on the same
 * CPU, as much as possible.
 *
 * By design, both Tx and Rx completion jobs are run on the same CPU core, as it
 * would allow Tx completion skb's to be released into a local free pool from
 * which the rx buffer posts could have been serviced. It is important to note
 * that a Tx packet may not have a large enough buffer for rx posting.
 */
1272 void dhd_select_cpu_candidacy(dhd_info_t
*dhd
)
1274 uint32 primary_available_cpus
; /* count of primary available cpus */
1275 uint32 secondary_available_cpus
; /* count of secondary available cpus */
1276 uint32 napi_cpu
= 0; /* cpu selected for napi rx processing */
1277 uint32 compl_cpu
= 0; /* cpu selected for completion jobs */
1278 uint32 tx_cpu
= 0; /* cpu selected for tx processing job */
1280 cpumask_clear(dhd
->cpumask_primary_new
);
1281 cpumask_clear(dhd
->cpumask_secondary_new
);
1284 * Now select from the primary mask. Even if a Job is
1285 * already running on a CPU in secondary group, we still move
1286 * to primary CPU. So no conditional checks.
1288 cpumask_and(dhd
->cpumask_primary_new
, dhd
->cpumask_primary
,
1289 dhd
->cpumask_curr_avail
);
1291 cpumask_and(dhd
->cpumask_secondary_new
, dhd
->cpumask_secondary
,
1292 dhd
->cpumask_curr_avail
);
1294 primary_available_cpus
= cpumask_weight(dhd
->cpumask_primary_new
);
1296 if (primary_available_cpus
> 0) {
1297 napi_cpu
= cpumask_first(dhd
->cpumask_primary_new
);
1299 /* If no further CPU is available,
1300 * cpumask_next returns >= nr_cpu_ids
1302 tx_cpu
= cpumask_next(napi_cpu
, dhd
->cpumask_primary_new
);
1303 if (tx_cpu
>= nr_cpu_ids
)
1306 /* In case there are no more CPUs, do completions & Tx in same CPU */
1307 compl_cpu
= cpumask_next(tx_cpu
, dhd
->cpumask_primary_new
);
1308 if (compl_cpu
>= nr_cpu_ids
)
1312 DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
1313 __FUNCTION__
, napi_cpu
, compl_cpu
, tx_cpu
));
1315 /* -- Now check for the CPUs from the secondary mask -- */
1316 secondary_available_cpus
= cpumask_weight(dhd
->cpumask_secondary_new
);
1318 DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
1319 __FUNCTION__
, secondary_available_cpus
, nr_cpu_ids
));
1321 if (secondary_available_cpus
> 0) {
1322 /* At this point if napi_cpu is unassigned it means no CPU
1323 * is online from Primary Group
1325 if (napi_cpu
== 0) {
1326 napi_cpu
= cpumask_first(dhd
->cpumask_secondary_new
);
1327 tx_cpu
= cpumask_next(napi_cpu
, dhd
->cpumask_secondary_new
);
1328 compl_cpu
= cpumask_next(tx_cpu
, dhd
->cpumask_secondary_new
);
1329 } else if (tx_cpu
== 0) {
1330 tx_cpu
= cpumask_first(dhd
->cpumask_secondary_new
);
1331 compl_cpu
= cpumask_next(tx_cpu
, dhd
->cpumask_secondary_new
);
1332 } else if (compl_cpu
== 0) {
1333 compl_cpu
= cpumask_first(dhd
->cpumask_secondary_new
);
1336 /* If no CPU was available for tx processing, choose CPU 0 */
1337 if (tx_cpu
>= nr_cpu_ids
)
1340 /* If no CPU was available for completion, choose CPU 0 */
1341 if (compl_cpu
>= nr_cpu_ids
)
1344 if ((primary_available_cpus
== 0) &&
1345 (secondary_available_cpus
== 0)) {
1346 /* No CPUs available from primary or secondary mask */
1352 DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
1353 __FUNCTION__
, napi_cpu
, compl_cpu
, tx_cpu
));
1355 ASSERT(napi_cpu
< nr_cpu_ids
);
1356 ASSERT(compl_cpu
< nr_cpu_ids
);
1357 ASSERT(tx_cpu
< nr_cpu_ids
);
1359 atomic_set(&dhd
->rx_napi_cpu
, napi_cpu
);
1360 atomic_set(&dhd
->tx_compl_cpu
, compl_cpu
);
1361 atomic_set(&dhd
->rx_compl_cpu
, compl_cpu
);
1362 atomic_set(&dhd
->tx_cpu
, tx_cpu
);
1368 * Function to handle CPU Hotplug notifications.
1369 * One of the task it does is to trigger the CPU Candidacy algorithm
1370 * for load balancing.
1373 dhd_cpu_callback(struct notifier_block
*nfb
, unsigned long action
, void *hcpu
)
1375 unsigned long int cpu
= (unsigned long int)hcpu
;
1377 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1378 #pragma GCC diagnostic push
1379 #pragma GCC diagnostic ignored "-Wcast-qual"
1381 dhd_info_t
*dhd
= container_of(nfb
, dhd_info_t
, cpu_notifier
);
1382 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1383 #pragma GCC diagnostic pop
1386 if (!dhd
|| !(dhd
->dhd_state
& DHD_ATTACH_STATE_LB_ATTACH_DONE
)) {
1387 DHD_INFO(("%s(): LB data is not initialized yet.\n",
1395 case CPU_ONLINE_FROZEN
:
1396 DHD_LB_STATS_INCR(dhd
->cpu_online_cnt
[cpu
]);
1397 cpumask_set_cpu(cpu
, dhd
->cpumask_curr_avail
);
1398 dhd_select_cpu_candidacy(dhd
);
1401 case CPU_DOWN_PREPARE
:
1402 case CPU_DOWN_PREPARE_FROZEN
:
1403 DHD_LB_STATS_INCR(dhd
->cpu_offline_cnt
[cpu
]);
1404 cpumask_clear_cpu(cpu
, dhd
->cpumask_curr_avail
);
1405 dhd_select_cpu_candidacy(dhd
);
1414 #if defined(DHD_LB_STATS)
1415 void dhd_lb_stats_init(dhd_pub_t
*dhdp
)
1418 int i
, j
, num_cpus
= num_possible_cpus();
1419 int alloc_size
= sizeof(uint32
) * num_cpus
;
1422 DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n",
1429 DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__
));
1433 DHD_LB_STATS_CLR(dhd
->dhd_dpc_cnt
);
1434 DHD_LB_STATS_CLR(dhd
->napi_sched_cnt
);
1436 dhd
->napi_percpu_run_cnt
= (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1437 if (!dhd
->napi_percpu_run_cnt
) {
1438 DHD_ERROR(("%s(): napi_percpu_run_cnt malloc failed \n",
1442 for (i
= 0; i
< num_cpus
; i
++)
1443 DHD_LB_STATS_CLR(dhd
->napi_percpu_run_cnt
[i
]);
1445 DHD_LB_STATS_CLR(dhd
->rxc_sched_cnt
);
1447 dhd
->rxc_percpu_run_cnt
= (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1448 if (!dhd
->rxc_percpu_run_cnt
) {
1449 DHD_ERROR(("%s(): rxc_percpu_run_cnt malloc failed \n",
1453 for (i
= 0; i
< num_cpus
; i
++)
1454 DHD_LB_STATS_CLR(dhd
->rxc_percpu_run_cnt
[i
]);
1456 DHD_LB_STATS_CLR(dhd
->txc_sched_cnt
);
1458 dhd
->txc_percpu_run_cnt
= (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1459 if (!dhd
->txc_percpu_run_cnt
) {
1460 DHD_ERROR(("%s(): txc_percpu_run_cnt malloc failed \n",
1464 for (i
= 0; i
< num_cpus
; i
++)
1465 DHD_LB_STATS_CLR(dhd
->txc_percpu_run_cnt
[i
]);
1467 dhd
->cpu_online_cnt
= (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1468 if (!dhd
->cpu_online_cnt
) {
1469 DHD_ERROR(("%s(): cpu_online_cnt malloc failed \n",
1473 for (i
= 0; i
< num_cpus
; i
++)
1474 DHD_LB_STATS_CLR(dhd
->cpu_online_cnt
[i
]);
1476 dhd
->cpu_offline_cnt
= (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1477 if (!dhd
->cpu_offline_cnt
) {
1478 DHD_ERROR(("%s(): cpu_offline_cnt malloc failed \n",
1482 for (i
= 0; i
< num_cpus
; i
++)
1483 DHD_LB_STATS_CLR(dhd
->cpu_offline_cnt
[i
]);
1485 dhd
->txp_percpu_run_cnt
= (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1486 if (!dhd
->txp_percpu_run_cnt
) {
1487 DHD_ERROR(("%s(): txp_percpu_run_cnt malloc failed \n",
1491 for (i
= 0; i
< num_cpus
; i
++)
1492 DHD_LB_STATS_CLR(dhd
->txp_percpu_run_cnt
[i
]);
1494 dhd
->tx_start_percpu_run_cnt
= (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1495 if (!dhd
->tx_start_percpu_run_cnt
) {
1496 DHD_ERROR(("%s(): tx_start_percpu_run_cnt malloc failed \n",
1500 for (i
= 0; i
< num_cpus
; i
++)
1501 DHD_LB_STATS_CLR(dhd
->tx_start_percpu_run_cnt
[i
]);
1503 for (j
= 0; j
< HIST_BIN_SIZE
; j
++) {
1504 dhd
->napi_rx_hist
[j
] = (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1505 if (!dhd
->napi_rx_hist
[j
]) {
1506 DHD_ERROR(("%s(): dhd->napi_rx_hist[%d] malloc failed \n",
1510 for (i
= 0; i
< num_cpus
; i
++) {
1511 DHD_LB_STATS_CLR(dhd
->napi_rx_hist
[j
][i
]);
1515 for (j
= 0; j
< HIST_BIN_SIZE
; j
++) {
1516 dhd
->txc_hist
[j
] = (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1517 if (!dhd
->txc_hist
[j
]) {
1518 DHD_ERROR(("%s(): dhd->txc_hist[%d] malloc failed \n",
1522 for (i
= 0; i
< num_cpus
; i
++) {
1523 DHD_LB_STATS_CLR(dhd
->txc_hist
[j
][i
]);
1526 #endif /* DHD_LB_TXC */
1528 for (j
= 0; j
< HIST_BIN_SIZE
; j
++) {
1529 dhd
->rxc_hist
[j
] = (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1530 if (!dhd
->rxc_hist
[j
]) {
1531 DHD_ERROR(("%s(): dhd->rxc_hist[%d] malloc failed \n",
1535 for (i
= 0; i
< num_cpus
; i
++) {
1536 DHD_LB_STATS_CLR(dhd
->rxc_hist
[j
][i
]);
1539 #endif /* DHD_LB_RXC */
1543 void dhd_lb_stats_deinit(dhd_pub_t
*dhdp
)
1546 int j
, num_cpus
= num_possible_cpus();
1547 int alloc_size
= sizeof(uint32
) * num_cpus
;
1550 DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n",
1557 DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__
));
1561 if (dhd
->napi_percpu_run_cnt
) {
1562 MFREE(dhdp
->osh
, dhd
->napi_percpu_run_cnt
, alloc_size
);
1563 dhd
->napi_percpu_run_cnt
= NULL
;
1565 if (dhd
->rxc_percpu_run_cnt
) {
1566 MFREE(dhdp
->osh
, dhd
->rxc_percpu_run_cnt
, alloc_size
);
1567 dhd
->rxc_percpu_run_cnt
= NULL
;
1569 if (dhd
->txc_percpu_run_cnt
) {
1570 MFREE(dhdp
->osh
, dhd
->txc_percpu_run_cnt
, alloc_size
);
1571 dhd
->txc_percpu_run_cnt
= NULL
;
1573 if (dhd
->cpu_online_cnt
) {
1574 MFREE(dhdp
->osh
, dhd
->cpu_online_cnt
, alloc_size
);
1575 dhd
->cpu_online_cnt
= NULL
;
1577 if (dhd
->cpu_offline_cnt
) {
1578 MFREE(dhdp
->osh
, dhd
->cpu_offline_cnt
, alloc_size
);
1579 dhd
->cpu_offline_cnt
= NULL
;
1582 if (dhd
->txp_percpu_run_cnt
) {
1583 MFREE(dhdp
->osh
, dhd
->txp_percpu_run_cnt
, alloc_size
);
1584 dhd
->txp_percpu_run_cnt
= NULL
;
1586 if (dhd
->tx_start_percpu_run_cnt
) {
1587 MFREE(dhdp
->osh
, dhd
->tx_start_percpu_run_cnt
, alloc_size
);
1588 dhd
->tx_start_percpu_run_cnt
= NULL
;
1591 for (j
= 0; j
< HIST_BIN_SIZE
; j
++) {
1592 if (dhd
->napi_rx_hist
[j
]) {
1593 MFREE(dhdp
->osh
, dhd
->napi_rx_hist
[j
], alloc_size
);
1594 dhd
->napi_rx_hist
[j
] = NULL
;
1597 if (dhd
->txc_hist
[j
]) {
1598 MFREE(dhdp
->osh
, dhd
->txc_hist
[j
], alloc_size
);
1599 dhd
->txc_hist
[j
] = NULL
;
1601 #endif /* DHD_LB_TXC */
1603 if (dhd
->rxc_hist
[j
]) {
1604 MFREE(dhdp
->osh
, dhd
->rxc_hist
[j
], alloc_size
);
1605 dhd
->rxc_hist
[j
] = NULL
;
1607 #endif /* DHD_LB_RXC */
1613 static void dhd_lb_stats_dump_histo(
1614 struct bcmstrbuf
*strbuf
, uint32
**hist
)
1617 uint32
*per_cpu_total
;
1619 uint32 num_cpus
= num_possible_cpus();
1621 per_cpu_total
= (uint32
*)kmalloc(sizeof(uint32
) * num_cpus
, GFP_ATOMIC
);
1622 if (!per_cpu_total
) {
1623 DHD_ERROR(("%s(): dhd->per_cpu_total malloc failed \n", __FUNCTION__
));
1626 bzero(per_cpu_total
, sizeof(uint32
) * num_cpus
);
1628 bcm_bprintf(strbuf
, "CPU: \t\t");
1629 for (i
= 0; i
< num_cpus
; i
++)
1630 bcm_bprintf(strbuf
, "%d\t", i
);
1631 bcm_bprintf(strbuf
, "\nBin\n");
1633 for (i
= 0; i
< HIST_BIN_SIZE
; i
++) {
1634 bcm_bprintf(strbuf
, "%d:\t\t", 1<<i
);
1635 for (j
= 0; j
< num_cpus
; j
++) {
1636 bcm_bprintf(strbuf
, "%d\t", hist
[i
][j
]);
1638 bcm_bprintf(strbuf
, "\n");
1640 bcm_bprintf(strbuf
, "Per CPU Total \t");
1642 for (i
= 0; i
< num_cpus
; i
++) {
1643 for (j
= 0; j
< HIST_BIN_SIZE
; j
++) {
1644 per_cpu_total
[i
] += (hist
[j
][i
] * (1<<j
));
1646 bcm_bprintf(strbuf
, "%d\t", per_cpu_total
[i
]);
1647 total
+= per_cpu_total
[i
];
1649 bcm_bprintf(strbuf
, "\nTotal\t\t%d \n", total
);
1651 kfree(per_cpu_total
);
1655 static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf
*strbuf
, uint32
*p
)
1657 int i
, num_cpus
= num_possible_cpus();
1659 bcm_bprintf(strbuf
, "CPU: \t");
1660 for (i
= 0; i
< num_cpus
; i
++)
1661 bcm_bprintf(strbuf
, "%d\t", i
);
1662 bcm_bprintf(strbuf
, "\n");
1664 bcm_bprintf(strbuf
, "Val: \t");
1665 for (i
= 0; i
< num_cpus
; i
++)
1666 bcm_bprintf(strbuf
, "%u\t", *(p
+i
));
1667 bcm_bprintf(strbuf
, "\n");
1671 void dhd_lb_stats_dump(dhd_pub_t
*dhdp
, struct bcmstrbuf
*strbuf
)
1675 if (dhdp
== NULL
|| strbuf
== NULL
) {
1676 DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
1677 __FUNCTION__
, dhdp
, strbuf
));
1683 DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__
));
1687 bcm_bprintf(strbuf
, "\ncpu_online_cnt:\n");
1688 dhd_lb_stats_dump_cpu_array(strbuf
, dhd
->cpu_online_cnt
);
1690 bcm_bprintf(strbuf
, "\ncpu_offline_cnt:\n");
1691 dhd_lb_stats_dump_cpu_array(strbuf
, dhd
->cpu_offline_cnt
);
1693 bcm_bprintf(strbuf
, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
1694 dhd
->dhd_dpc_cnt
, dhd
->napi_sched_cnt
, dhd
->rxc_sched_cnt
,
1695 dhd
->txc_sched_cnt
);
1698 bcm_bprintf(strbuf
, "\nnapi_percpu_run_cnt:\n");
1699 dhd_lb_stats_dump_cpu_array(strbuf
, dhd
->napi_percpu_run_cnt
);
1700 bcm_bprintf(strbuf
, "\nNAPI Packets Received Histogram:\n");
1701 dhd_lb_stats_dump_histo(strbuf
, dhd
->napi_rx_hist
);
1702 #endif /* DHD_LB_RXP */
1705 bcm_bprintf(strbuf
, "\nrxc_percpu_run_cnt:\n");
1706 dhd_lb_stats_dump_cpu_array(strbuf
, dhd
->rxc_percpu_run_cnt
);
1707 bcm_bprintf(strbuf
, "\nRX Completions (Buffer Post) Histogram:\n");
1708 dhd_lb_stats_dump_histo(strbuf
, dhd
->rxc_hist
);
1709 #endif /* DHD_LB_RXC */
1712 bcm_bprintf(strbuf
, "\ntxc_percpu_run_cnt:\n");
1713 dhd_lb_stats_dump_cpu_array(strbuf
, dhd
->txc_percpu_run_cnt
);
1714 bcm_bprintf(strbuf
, "\nTX Completions (Buffer Free) Histogram:\n");
1715 dhd_lb_stats_dump_histo(strbuf
, dhd
->txc_hist
);
1716 #endif /* DHD_LB_TXC */
1719 bcm_bprintf(strbuf
, "\ntxp_percpu_run_cnt:\n");
1720 dhd_lb_stats_dump_cpu_array(strbuf
, dhd
->txp_percpu_run_cnt
);
1722 bcm_bprintf(strbuf
, "\ntx_start_percpu_run_cnt:\n");
1723 dhd_lb_stats_dump_cpu_array(strbuf
, dhd
->tx_start_percpu_run_cnt
);
1724 #endif /* DHD_LB_TXP */
1726 bcm_bprintf(strbuf
, "\nCPU masks primary(big)=0x%x secondary(little)=0x%x\n",
1727 DHD_LB_PRIMARY_CPUS
, DHD_LB_SECONDARY_CPUS
);
1729 bcm_bprintf(strbuf
, "napi_cpu %x tx_cpu %x\n",
1730 atomic_read(&dhd
->rx_napi_cpu
), atomic_read(&dhd
->tx_cpu
));
1734 /* Given a number 'n' returns 'm' that is next larger power of 2 after n */
1735 static inline uint32
next_larger_power2(uint32 num
)
1747 static void dhd_lb_stats_update_histo(uint32
**bin
, uint32 count
, uint32 cpu
)
1751 bin_power
= next_larger_power2(count
);
1753 switch (bin_power
) {
1754 case 1: p
= bin
[0] + cpu
; break;
1755 case 2: p
= bin
[1] + cpu
; break;
1756 case 4: p
= bin
[2] + cpu
; break;
1757 case 8: p
= bin
[3] + cpu
; break;
1758 case 16: p
= bin
[4] + cpu
; break;
1759 case 32: p
= bin
[5] + cpu
; break;
1760 case 64: p
= bin
[6] + cpu
; break;
1761 case 128: p
= bin
[7] + cpu
; break;
1762 default : p
= bin
[8] + cpu
; break;
1769 extern void dhd_lb_stats_update_napi_histo(dhd_pub_t
*dhdp
, uint32 count
)
1772 dhd_info_t
*dhd
= dhdp
->info
;
1776 dhd_lb_stats_update_histo(dhd
->napi_rx_hist
, count
, cpu
);
1781 extern void dhd_lb_stats_update_txc_histo(dhd_pub_t
*dhdp
, uint32 count
)
1784 dhd_info_t
*dhd
= dhdp
->info
;
1788 dhd_lb_stats_update_histo(dhd
->txc_hist
, count
, cpu
);
1793 extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t
*dhdp
, uint32 count
)
1796 dhd_info_t
*dhd
= dhdp
->info
;
1800 dhd_lb_stats_update_histo(dhd
->rxc_hist
, count
, cpu
);
1805 extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t
*dhdp
)
1807 dhd_info_t
*dhd
= dhdp
->info
;
1808 DHD_LB_STATS_PERCPU_ARR_INCR(dhd
->txc_percpu_run_cnt
);
1811 extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t
*dhdp
)
1813 dhd_info_t
*dhd
= dhdp
->info
;
1814 DHD_LB_STATS_PERCPU_ARR_INCR(dhd
->rxc_percpu_run_cnt
);
1816 #endif /* DHD_LB_STATS */
1820 #if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF)
1821 int g_frameburst
= 1;
1822 #endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */
1824 static int dhd_get_pend_8021x_cnt(dhd_info_t
*dhd
);
1826 /* DHD Perimiter lock only used in router with bypass forwarding. */
1827 #define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
1828 #define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
1829 #define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
1831 #ifdef PCIE_FULL_DONGLE
1832 #if defined(BCM_GMAC3)
1833 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { /* noop */ } while (0)
1834 #define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
1835 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
1837 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
1838 #define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ BCM_REFERENCE(slist); &(ifp)->sta_list; })
1839 #define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ BCM_REFERENCE(slist); })
1840 #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
1842 #else /* ! BCM_GMAC3 */
1843 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
1844 #define DHD_IF_STA_LIST_LOCK(ifp, flags) \
1845 spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
1846 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
1847 spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
1849 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
1850 static struct list_head
* dhd_sta_list_snapshot(dhd_info_t
*dhd
, dhd_if_t
*ifp
,
1851 struct list_head
*snapshot_list
);
1852 static void dhd_sta_list_snapshot_free(dhd_info_t
*dhd
, struct list_head
*snapshot_list
);
1853 #define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
1854 #define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
1855 #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
1857 #endif /* ! BCM_GMAC3 */
1858 #endif /* PCIE_FULL_DONGLE */
1860 /* Control fw roaming */
1861 uint dhd_roam_disable
= 0;
1864 extern void dhd_dbgfs_init(dhd_pub_t
*dhdp
);
1865 extern void dhd_dbgfs_remove(void);
1869 /* Control radio state */
1870 uint dhd_radio_up
= 1;
1872 /* Network inteface name */
1873 char iface_name
[IFNAMSIZ
] = {'\0'};
1874 module_param_string(iface_name
, iface_name
, IFNAMSIZ
, 0);
1876 /* The following are specific to the SDIO dongle */
1878 /* IOCTL response timeout */
1879 int dhd_ioctl_timeout_msec
= IOCTL_RESP_TIMEOUT
;
1881 /* DS Exit response timeout */
1882 int ds_exit_timeout_msec
= DS_EXIT_TIMEOUT
;
1884 /* Idle timeout for backplane clock */
1885 int dhd_idletime
= DHD_IDLETIME_TICKS
;
1886 module_param(dhd_idletime
, int, 0);
1889 uint dhd_poll
= FALSE
;
1890 module_param(dhd_poll
, uint
, 0);
1892 /* Use interrupts */
1893 uint dhd_intr
= TRUE
;
1894 module_param(dhd_intr
, uint
, 0);
1896 /* SDIO Drive Strength (in milliamps) */
1897 uint dhd_sdiod_drive_strength
= 6;
1898 module_param(dhd_sdiod_drive_strength
, uint
, 0);
1902 extern uint dhd_txbound
;
1903 extern uint dhd_rxbound
;
1904 module_param(dhd_txbound
, uint
, 0);
1905 module_param(dhd_rxbound
, uint
, 0);
1907 /* Deferred transmits */
1908 extern uint dhd_deferred_tx
;
1909 module_param(dhd_deferred_tx
, uint
, 0);
1911 #endif /* BCMSDIO */
1915 /* Echo packet generator (pkts/s) */
1916 uint dhd_pktgen
= 0;
1917 module_param(dhd_pktgen
, uint
, 0);
1919 /* Echo packet len (0 => sawtooth, max 2040) */
1920 uint dhd_pktgen_len
= 0;
1921 module_param(dhd_pktgen_len
, uint
, 0);
1927 /* Allow delayed firmware download for debug purpose */
1928 int allow_delay_fwdl
= FALSE
;
1929 module_param(allow_delay_fwdl
, int, 0);
1930 #endif /* !BCMDBUS */
1932 extern char dhd_version
[];
1933 extern char fw_version
[];
1934 extern char clm_version
[];
1936 int dhd_net_bus_devreset(struct net_device
*dev
, uint8 flag
);
1937 static void dhd_net_if_lock_local(dhd_info_t
*dhd
);
1938 static void dhd_net_if_unlock_local(dhd_info_t
*dhd
);
1939 static void dhd_suspend_lock(dhd_pub_t
*dhdp
);
1940 static void dhd_suspend_unlock(dhd_pub_t
*dhdp
);
1943 void htsf_update(dhd_info_t
*dhd
, void *data
);
1944 tsf_t prev_tsf
, cur_tsf
;
1946 uint32
dhd_get_htsf(dhd_info_t
*dhd
, int ifidx
);
1947 static int dhd_ioctl_htsf_get(dhd_info_t
*dhd
, int ifidx
);
1948 static void dhd_dump_latency(void);
1949 static void dhd_htsf_addtxts(dhd_pub_t
*dhdp
, void *pktbuf
);
1950 static void dhd_htsf_addrxts(dhd_pub_t
*dhdp
, void *pktbuf
);
1951 static void dhd_dump_htsfhisto(histo_t
*his
, char *s
);
1952 #endif /* WLMEDIA_HTSF */
1954 /* Monitor interface */
1955 int dhd_monitor_init(void *dhd_pub
);
1956 int dhd_monitor_uninit(void);
1959 #if defined(WL_WIRELESS_EXT)
1960 struct iw_statistics
*dhd_get_wireless_stats(struct net_device
*dev
);
1961 #endif /* defined(WL_WIRELESS_EXT) */
1964 static void dhd_dpc(ulong data
);
1965 #endif /* !BCMDBUS */
1967 extern int dhd_wait_pend8021x(struct net_device
*dev
);
1968 void dhd_os_wd_timer_extend(void *bus
, bool extend
);
1972 #error TOE requires BDC
1974 static int dhd_toe_get(dhd_info_t
*dhd
, int idx
, uint32
*toe_ol
);
1975 static int dhd_toe_set(dhd_info_t
*dhd
, int idx
, uint32 toe_ol
);
1978 static int dhd_wl_host_event(dhd_info_t
*dhd
, int ifidx
, void *pktdata
, uint16 pktlen
,
1979 wl_event_msg_t
*event_ptr
, void **data_ptr
);
1981 #if defined(CONFIG_PM_SLEEP)
1982 static int dhd_pm_callback(struct notifier_block
*nfb
, unsigned long action
, void *ignored
)
1984 int ret
= NOTIFY_DONE
;
1985 bool suspend
= FALSE
;
1987 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1988 #pragma GCC diagnostic push
1989 #pragma GCC diagnostic ignored "-Wcast-qual"
1991 dhd_info_t
*dhdinfo
= (dhd_info_t
*)container_of(nfb
, struct dhd_info
, pm_notifier
);
1992 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1993 #pragma GCC diagnostic pop
1996 BCM_REFERENCE(dhdinfo
);
1997 BCM_REFERENCE(suspend
);
2000 case PM_HIBERNATION_PREPARE
:
2001 case PM_SUSPEND_PREPARE
:
2005 case PM_POST_HIBERNATION
:
2006 case PM_POST_SUSPEND
:
2011 #if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS)
2013 DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo
->pub
);
2014 dhd_wlfc_suspend(&dhdinfo
->pub
);
2015 DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo
->pub
);
2017 dhd_wlfc_resume(&dhdinfo
->pub
);
2019 #endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */
2021 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
2022 KERNEL_VERSION(2, 6, 39))
2023 dhd_mmc_suspend
= suspend
;
2030 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
2031 * created in kernel notifier link list (with 'next' pointing to itself)
2033 static bool dhd_pm_notifier_registered
= FALSE
;
2035 extern int register_pm_notifier(struct notifier_block
*nb
);
2036 extern int unregister_pm_notifier(struct notifier_block
*nb
);
2037 #endif /* CONFIG_PM_SLEEP */
2039 /* Request scheduling of the bus rx frame */
2040 static void dhd_sched_rxf(dhd_pub_t
*dhdp
, void *skb
);
2041 static void dhd_os_rxflock(dhd_pub_t
*pub
);
2042 static void dhd_os_rxfunlock(dhd_pub_t
*pub
);
2044 /** priv_link is the link between netdev and the dhdif and dhd_info structs. */
2045 typedef struct dhd_dev_priv
{
2046 dhd_info_t
* dhd
; /* cached pointer to dhd_info in netdevice priv */
2047 dhd_if_t
* ifp
; /* cached pointer to dhd_if in netdevice priv */
2048 int ifidx
; /* interface index */
/* Accessors for the per-netdev private area (dhd_dev_priv_t). */
#define DHD_DEV_PRIV_SIZE       (sizeof(dhd_dev_priv_t))
#define DHD_DEV_PRIV(dev)       ((dhd_dev_priv_t *)DEV_PRIV(dev))
#define DHD_DEV_INFO(dev)       (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
#define DHD_DEV_IFP(dev)        (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
#define DHD_DEV_IFIDX(dev)      (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
#define DHD_DEV_LKUP(dev)       (((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup)

#if defined(DHD_OF_SUPPORT)
extern int dhd_wlan_init(void);
#endif /* defined(DHD_OF_SUPPORT) */
2062 /** Clear the dhd net_device's private structure. */
2064 dhd_dev_priv_clear(struct net_device
* dev
)
2066 dhd_dev_priv_t
* dev_priv
;
2067 ASSERT(dev
!= (struct net_device
*)NULL
);
2068 dev_priv
= DHD_DEV_PRIV(dev
);
2069 dev_priv
->dhd
= (dhd_info_t
*)NULL
;
2070 dev_priv
->ifp
= (dhd_if_t
*)NULL
;
2071 dev_priv
->ifidx
= DHD_BAD_IF
;
2072 dev_priv
->lkup
= (void *)NULL
;
2075 /** Setup the dhd net_device's private structure. */
2077 dhd_dev_priv_save(struct net_device
* dev
, dhd_info_t
* dhd
, dhd_if_t
* ifp
,
2080 dhd_dev_priv_t
* dev_priv
;
2081 ASSERT(dev
!= (struct net_device
*)NULL
);
2082 dev_priv
= DHD_DEV_PRIV(dev
);
2083 dev_priv
->dhd
= dhd
;
2084 dev_priv
->ifp
= ifp
;
2085 dev_priv
->ifidx
= ifidx
;
2088 #ifdef PCIE_FULL_DONGLE
2090 /** Dummy objects are defined with state representing bad|down.
2091 * Performance gains from reducing branch conditionals, instruction parallelism,
2092 * dual issue, reducing load shadows, avail of larger pipelines.
2093 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
2094 * is accessed via the dhd_sta_t.
2097 /* Dummy dhd_info object */
2098 dhd_info_t dhd_info_null
= {
2099 #if defined(BCM_GMAC3)
2103 .info
= &dhd_info_null
,
2104 #ifdef DHDTCPACK_SUPPRESS
2105 .tcpack_sup_mode
= TCPACK_SUP_REPLACE
,
2106 #endif /* DHDTCPACK_SUPPRESS */
2107 #if defined(TRAFFIC_MGMT_DWM)
2108 .dhd_tm_dwm_tbl
= { .dhd_dwm_enabled
= TRUE
},
2111 .busstate
= DHD_BUS_DOWN
2114 #define DHD_INFO_NULL (&dhd_info_null)
2115 #define DHD_PUB_NULL (&dhd_info_null.pub)
2117 /* Dummy netdevice object */
2118 struct net_device dhd_net_dev_null
= {
2119 .reg_state
= NETREG_UNREGISTERED
2121 #define DHD_NET_DEV_NULL (&dhd_net_dev_null)
2123 /* Dummy dhd_if object */
2124 dhd_if_t dhd_if_null
= {
2125 #if defined(BCM_GMAC3)
2129 .wmf
= { .wmf_enable
= TRUE
},
2131 .info
= DHD_INFO_NULL
,
2132 .net
= DHD_NET_DEV_NULL
,
2135 #define DHD_IF_NULL (&dhd_if_null)
2137 #define DHD_STA_NULL ((dhd_sta_t *)NULL)
2139 /** Interface STA list management. */
2141 /** Fetch the dhd_if object, given the interface index in the dhd. */
2142 static inline dhd_if_t
*dhd_get_ifp(dhd_pub_t
*dhdp
, uint32 ifidx
);
2144 /** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
2145 static void dhd_sta_free(dhd_pub_t
*pub
, dhd_sta_t
*sta
);
2146 static dhd_sta_t
* dhd_sta_alloc(dhd_pub_t
* dhdp
);
2148 /* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
2149 static void dhd_if_del_sta_list(dhd_if_t
* ifp
);
2150 static void dhd_if_flush_sta(dhd_if_t
* ifp
);
2152 /* Construct/Destruct a sta pool. */
2153 static int dhd_sta_pool_init(dhd_pub_t
*dhdp
, int max_sta
);
2154 static void dhd_sta_pool_fini(dhd_pub_t
*dhdp
, int max_sta
);
2155 /* Clear the pool of dhd_sta_t objects for built-in type driver */
2156 static void dhd_sta_pool_clear(dhd_pub_t
*dhdp
, int max_sta
);
2159 /* Return interface pointer */
2160 static inline dhd_if_t
*dhd_get_ifp(dhd_pub_t
*dhdp
, uint32 ifidx
)
2162 ASSERT(ifidx
< DHD_MAX_IFS
);
2164 if (ifidx
>= DHD_MAX_IFS
)
2167 return dhdp
->info
->iflist
[ifidx
];
2170 /** Reset a dhd_sta object and free into the dhd pool. */
2172 dhd_sta_free(dhd_pub_t
* dhdp
, dhd_sta_t
* sta
)
2176 ASSERT((sta
!= DHD_STA_NULL
) && (sta
->idx
!= ID16_INVALID
));
2178 ASSERT((dhdp
->staid_allocator
!= NULL
) && (dhdp
->sta_pool
!= NULL
));
2181 * Flush and free all packets in all flowring's queues belonging to sta.
2182 * Packets in flow ring will be flushed later.
2184 for (prio
= 0; prio
< (int)NUMPRIO
; prio
++) {
2185 uint16 flowid
= sta
->flowid
[prio
];
2187 if (flowid
!= FLOWID_INVALID
) {
2188 unsigned long flags
;
2189 flow_queue_t
* queue
= dhd_flow_queue(dhdp
, flowid
);
2190 flow_ring_node_t
* flow_ring_node
;
2192 #ifdef DHDTCPACK_SUPPRESS
2193 /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
2194 * when there is a newly coming packet from network stack.
2196 dhd_tcpack_info_tbl_clean(dhdp
);
2197 #endif /* DHDTCPACK_SUPPRESS */
2199 flow_ring_node
= dhd_flow_ring_node(dhdp
, flowid
);
2200 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
2201 flow_ring_node
->status
= FLOW_RING_STATUS_STA_FREEING
;
2203 if (!DHD_FLOW_QUEUE_EMPTY(queue
)) {
2205 while ((pkt
= dhd_flow_queue_dequeue(dhdp
, queue
)) != NULL
) {
2206 PKTFREE(dhdp
->osh
, pkt
, TRUE
);
2210 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
2211 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue
));
2214 sta
->flowid
[prio
] = FLOWID_INVALID
;
2217 id16_map_free(dhdp
->staid_allocator
, sta
->idx
);
2218 DHD_CUMM_CTR_INIT(&sta
->cumm_ctr
);
2219 sta
->ifp
= DHD_IF_NULL
; /* dummy dhd_if object */
2220 sta
->ifidx
= DHD_BAD_IF
;
2221 bzero(sta
->ea
.octet
, ETHER_ADDR_LEN
);
2222 INIT_LIST_HEAD(&sta
->list
);
2223 sta
->idx
= ID16_INVALID
; /* implying free */
2226 /** Allocate a dhd_sta object from the dhd pool. */
2228 dhd_sta_alloc(dhd_pub_t
* dhdp
)
2232 dhd_sta_pool_t
* sta_pool
;
2234 ASSERT((dhdp
->staid_allocator
!= NULL
) && (dhdp
->sta_pool
!= NULL
));
2236 idx
= id16_map_alloc(dhdp
->staid_allocator
);
2237 if (idx
== ID16_INVALID
) {
2238 DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__
));
2239 return DHD_STA_NULL
;
2242 sta_pool
= (dhd_sta_pool_t
*)(dhdp
->sta_pool
);
2243 sta
= &sta_pool
[idx
];
2245 ASSERT((sta
->idx
== ID16_INVALID
) &&
2246 (sta
->ifp
== DHD_IF_NULL
) && (sta
->ifidx
== DHD_BAD_IF
));
2248 DHD_CUMM_CTR_INIT(&sta
->cumm_ctr
);
2250 sta
->idx
= idx
; /* implying allocated */
2255 /** Delete all STAs in an interface's STA list. */
2257 dhd_if_del_sta_list(dhd_if_t
*ifp
)
2259 dhd_sta_t
*sta
, *next
;
2260 unsigned long flags
;
2262 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
2263 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2264 #pragma GCC diagnostic push
2265 #pragma GCC diagnostic ignored "-Wcast-qual"
2267 list_for_each_entry_safe(sta
, next
, &ifp
->sta_list
, list
) {
2268 #if defined(BCM_GMAC3)
2270 /* Remove sta from WOFA forwarder. */
2271 fwder_deassoc(ifp
->fwdh
, (uint16
*)(sta
->ea
.octet
), (uintptr_t)sta
);
2273 #endif /* BCM_GMAC3 */
2274 list_del(&sta
->list
);
2275 dhd_sta_free(&ifp
->info
->pub
, sta
);
2277 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2278 #pragma GCC diagnostic pop
2280 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
2285 /** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
2287 dhd_if_flush_sta(dhd_if_t
* ifp
)
2289 #if defined(BCM_GMAC3)
2291 if (ifp
&& (ifp
->fwdh
!= FWDER_NULL
)) {
2292 dhd_sta_t
*sta
, *next
;
2293 unsigned long flags
;
2295 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
2297 list_for_each_entry_safe(sta
, next
, &ifp
->sta_list
, list
) {
2298 /* Remove any sta entry from WOFA forwarder. */
2299 fwder_flush(ifp
->fwdh
, (uintptr_t)sta
);
2302 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
2304 #endif /* BCM_GMAC3 */
2307 /** Construct a pool of dhd_sta_t objects to be used by interfaces. */
2309 dhd_sta_pool_init(dhd_pub_t
*dhdp
, int max_sta
)
2311 int idx
, prio
, sta_pool_memsz
;
2313 dhd_sta_pool_t
* sta_pool
;
2314 void * staid_allocator
;
2316 ASSERT(dhdp
!= (dhd_pub_t
*)NULL
);
2317 ASSERT((dhdp
->staid_allocator
== NULL
) && (dhdp
->sta_pool
== NULL
));
2319 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
2320 staid_allocator
= id16_map_init(dhdp
->osh
, max_sta
, 1);
2321 if (staid_allocator
== NULL
) {
2322 DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__
));
2326 /* Pre allocate a pool of dhd_sta objects (one extra). */
2327 sta_pool_memsz
= ((max_sta
+ 1) * sizeof(dhd_sta_t
)); /* skip idx 0 */
2328 sta_pool
= (dhd_sta_pool_t
*)MALLOC(dhdp
->osh
, sta_pool_memsz
);
2329 if (sta_pool
== NULL
) {
2330 DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__
));
2331 id16_map_fini(dhdp
->osh
, staid_allocator
);
2335 dhdp
->sta_pool
= sta_pool
;
2336 dhdp
->staid_allocator
= staid_allocator
;
2338 /* Initialize all sta(s) for the pre-allocated free pool. */
2339 bzero((uchar
*)sta_pool
, sta_pool_memsz
);
2340 for (idx
= max_sta
; idx
>= 1; idx
--) { /* skip sta_pool[0] */
2341 sta
= &sta_pool
[idx
];
2342 sta
->idx
= id16_map_alloc(staid_allocator
);
2343 ASSERT(sta
->idx
<= max_sta
);
2345 /* Now place them into the pre-allocated free pool. */
2346 for (idx
= 1; idx
<= max_sta
; idx
++) {
2347 sta
= &sta_pool
[idx
];
2348 for (prio
= 0; prio
< (int)NUMPRIO
; prio
++) {
2349 sta
->flowid
[prio
] = FLOWID_INVALID
; /* Flow rings do not exist */
2351 dhd_sta_free(dhdp
, sta
);
2357 /** Destruct the pool of dhd_sta_t objects.
2358 * Caller must ensure that no STA objects are currently associated with an if.
2361 dhd_sta_pool_fini(dhd_pub_t
*dhdp
, int max_sta
)
2363 dhd_sta_pool_t
* sta_pool
= (dhd_sta_pool_t
*)dhdp
->sta_pool
;
2367 int sta_pool_memsz
= ((max_sta
+ 1) * sizeof(dhd_sta_t
));
2368 for (idx
= 1; idx
<= max_sta
; idx
++) {
2369 ASSERT(sta_pool
[idx
].ifp
== DHD_IF_NULL
);
2370 ASSERT(sta_pool
[idx
].idx
== ID16_INVALID
);
2372 MFREE(dhdp
->osh
, dhdp
->sta_pool
, sta_pool_memsz
);
2373 dhdp
->sta_pool
= NULL
;
2376 id16_map_fini(dhdp
->osh
, dhdp
->staid_allocator
);
2377 dhdp
->staid_allocator
= NULL
;
2380 /* Clear the pool of dhd_sta_t objects for built-in type driver */
2382 dhd_sta_pool_clear(dhd_pub_t
*dhdp
, int max_sta
)
2384 int idx
, prio
, sta_pool_memsz
;
2386 dhd_sta_pool_t
* sta_pool
;
2387 void *staid_allocator
;
2390 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__
));
2394 sta_pool
= (dhd_sta_pool_t
*)dhdp
->sta_pool
;
2395 staid_allocator
= dhdp
->staid_allocator
;
2398 DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__
));
2402 if (!staid_allocator
) {
2403 DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__
));
2407 /* clear free pool */
2408 sta_pool_memsz
= ((max_sta
+ 1) * sizeof(dhd_sta_t
));
2409 bzero((uchar
*)sta_pool
, sta_pool_memsz
);
2411 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
2412 id16_map_clear(staid_allocator
, max_sta
, 1);
2414 /* Initialize all sta(s) for the pre-allocated free pool. */
2415 for (idx
= max_sta
; idx
>= 1; idx
--) { /* skip sta_pool[0] */
2416 sta
= &sta_pool
[idx
];
2417 sta
->idx
= id16_map_alloc(staid_allocator
);
2418 ASSERT(sta
->idx
<= max_sta
);
2420 /* Now place them into the pre-allocated free pool. */
2421 for (idx
= 1; idx
<= max_sta
; idx
++) {
2422 sta
= &sta_pool
[idx
];
2423 for (prio
= 0; prio
< (int)NUMPRIO
; prio
++) {
2424 sta
->flowid
[prio
] = FLOWID_INVALID
; /* Flow rings do not exist */
2426 dhd_sta_free(dhdp
, sta
);
2430 /** Find STA with MAC address ea in an interface's STA list. */
2432 dhd_find_sta(void *pub
, int ifidx
, void *ea
)
2436 unsigned long flags
;
2439 ifp
= dhd_get_ifp((dhd_pub_t
*)pub
, ifidx
);
2441 return DHD_STA_NULL
;
2443 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
2444 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2445 #pragma GCC diagnostic push
2446 #pragma GCC diagnostic ignored "-Wcast-qual"
2448 list_for_each_entry(sta
, &ifp
->sta_list
, list
) {
2449 if (!memcmp(sta
->ea
.octet
, ea
, ETHER_ADDR_LEN
)) {
2450 DHD_INFO(("%s: found STA " MACDBG
"\n",
2451 __FUNCTION__
, MAC2STRDBG((char *)ea
)));
2452 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
2456 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2457 #pragma GCC diagnostic pop
2459 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
2461 return DHD_STA_NULL
;
2464 /** Add STA into the interface's STA list. */
2466 dhd_add_sta(void *pub
, int ifidx
, void *ea
)
2470 unsigned long flags
;
2473 ifp
= dhd_get_ifp((dhd_pub_t
*)pub
, ifidx
);
2475 return DHD_STA_NULL
;
2477 sta
= dhd_sta_alloc((dhd_pub_t
*)pub
);
2478 if (sta
== DHD_STA_NULL
) {
2479 DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__
));
2480 return DHD_STA_NULL
;
2483 memcpy(sta
->ea
.octet
, ea
, ETHER_ADDR_LEN
);
2485 /* link the sta and the dhd interface */
2489 sta
->psta_prim
= NULL
;
2491 INIT_LIST_HEAD(&sta
->list
);
2493 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
2495 list_add_tail(&sta
->list
, &ifp
->sta_list
);
2497 #if defined(BCM_GMAC3)
2499 ASSERT(ISALIGNED(ea
, 2));
2500 /* Add sta to WOFA forwarder. */
2501 fwder_reassoc(ifp
->fwdh
, (uint16
*)ea
, (uintptr_t)sta
);
2503 #endif /* BCM_GMAC3 */
2505 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
2510 /** Delete all STAs from the interface's STA list. */
2512 dhd_del_all_sta(void *pub
, int ifidx
)
2514 dhd_sta_t
*sta
, *next
;
2516 unsigned long flags
;
2518 ifp
= dhd_get_ifp((dhd_pub_t
*)pub
, ifidx
);
2522 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
2523 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2524 #pragma GCC diagnostic push
2525 #pragma GCC diagnostic ignored "-Wcast-qual"
2527 list_for_each_entry_safe(sta
, next
, &ifp
->sta_list
, list
) {
2528 #if defined(BCM_GMAC3)
2529 if (ifp
->fwdh
) { /* Found a sta, remove from WOFA forwarder. */
2530 ASSERT(ISALIGNED(sta
->ea
.octet
, 2));
2531 fwder_deassoc(ifp
->fwdh
, (uint16
*)sta
->ea
.octet
, (uintptr_t)sta
);
2533 #endif /* BCM_GMAC3 */
2535 list_del(&sta
->list
);
2536 dhd_sta_free(&ifp
->info
->pub
, sta
);
2537 #ifdef DHD_L2_FILTER
2538 if (ifp
->parp_enable
) {
2539 /* clear Proxy ARP cache of specific Ethernet Address */
2540 bcm_l2_filter_arp_table_update(((dhd_pub_t
*)pub
)->osh
,
2541 ifp
->phnd_arp_table
, FALSE
,
2542 sta
->ea
.octet
, FALSE
, ((dhd_pub_t
*)pub
)->tickcnt
);
2544 #endif /* DHD_L2_FILTER */
2546 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2547 #pragma GCC diagnostic pop
2549 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
2554 /** Delete STA from the interface's STA list. */
2556 dhd_del_sta(void *pub
, int ifidx
, void *ea
)
2558 dhd_sta_t
*sta
, *next
;
2560 unsigned long flags
;
2561 char macstr
[ETHER_ADDR_STR_LEN
];
2564 ifp
= dhd_get_ifp((dhd_pub_t
*)pub
, ifidx
);
2568 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
2569 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2570 #pragma GCC diagnostic push
2571 #pragma GCC diagnostic ignored "-Wcast-qual"
2573 list_for_each_entry_safe(sta
, next
, &ifp
->sta_list
, list
) {
2574 if (!memcmp(sta
->ea
.octet
, ea
, ETHER_ADDR_LEN
)) {
2575 #if defined(BCM_GMAC3)
2576 if (ifp
->fwdh
) { /* Found a sta, remove from WOFA forwarder. */
2577 ASSERT(ISALIGNED(ea
, 2));
2578 fwder_deassoc(ifp
->fwdh
, (uint16
*)ea
, (uintptr_t)sta
);
2580 #endif /* BCM_GMAC3 */
2581 DHD_MAC_TO_STR(((char *)ea
), macstr
);
2582 DHD_ERROR(("%s: Deleting STA %s\n", __FUNCTION__
, macstr
));
2583 list_del(&sta
->list
);
2584 dhd_sta_free(&ifp
->info
->pub
, sta
);
2587 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2588 #pragma GCC diagnostic pop
2590 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
2591 #ifdef DHD_L2_FILTER
2592 if (ifp
->parp_enable
) {
2593 /* clear Proxy ARP cache of specific Ethernet Address */
2594 bcm_l2_filter_arp_table_update(((dhd_pub_t
*)pub
)->osh
, ifp
->phnd_arp_table
, FALSE
,
2595 ea
, FALSE
, ((dhd_pub_t
*)pub
)->tickcnt
);
2597 #endif /* DHD_L2_FILTER */
2601 /** Add STA if it doesn't exist. Not reentrant. */
2603 dhd_findadd_sta(void *pub
, int ifidx
, void *ea
)
2607 sta
= dhd_find_sta(pub
, ifidx
, ea
);
2611 sta
= dhd_add_sta(pub
, ifidx
, ea
);
2617 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
2618 #if !defined(BCM_GMAC3)
2619 static struct list_head
*
2620 dhd_sta_list_snapshot(dhd_info_t
*dhd
, dhd_if_t
*ifp
, struct list_head
*snapshot_list
)
2622 unsigned long flags
;
2623 dhd_sta_t
*sta
, *snapshot
;
2625 INIT_LIST_HEAD(snapshot_list
);
2627 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
2629 list_for_each_entry(sta
, &ifp
->sta_list
, list
) {
2630 /* allocate one and add to snapshot */
2631 snapshot
= (dhd_sta_t
*)MALLOC(dhd
->pub
.osh
, sizeof(dhd_sta_t
));
2632 if (snapshot
== NULL
) {
2633 DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__
));
2637 memcpy(snapshot
->ea
.octet
, sta
->ea
.octet
, ETHER_ADDR_LEN
);
2639 INIT_LIST_HEAD(&snapshot
->list
);
2640 list_add_tail(&snapshot
->list
, snapshot_list
);
2643 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
2645 return snapshot_list
;
2649 dhd_sta_list_snapshot_free(dhd_info_t
*dhd
, struct list_head
*snapshot_list
)
2651 dhd_sta_t
*sta
, *next
;
2653 list_for_each_entry_safe(sta
, next
, snapshot_list
, list
) {
2654 list_del(&sta
->list
);
2655 MFREE(dhd
->pub
.osh
, sta
, sizeof(dhd_sta_t
));
2658 #endif /* !BCM_GMAC3 */
2659 #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
2662 static inline void dhd_if_flush_sta(dhd_if_t
* ifp
) { }
2663 static inline void dhd_if_del_sta_list(dhd_if_t
*ifp
) {}
2664 static inline int dhd_sta_pool_init(dhd_pub_t
*dhdp
, int max_sta
) { return BCME_OK
; }
2665 static inline void dhd_sta_pool_fini(dhd_pub_t
*dhdp
, int max_sta
) {}
2666 static inline void dhd_sta_pool_clear(dhd_pub_t
*dhdp
, int max_sta
) {}
2667 dhd_sta_t
*dhd_findadd_sta(void *pub
, int ifidx
, void *ea
) { return NULL
; }
2668 dhd_sta_t
*dhd_find_sta(void *pub
, int ifidx
, void *ea
) { return NULL
; }
2669 void dhd_del_sta(void *pub
, int ifidx
, void *ea
) {}
2670 #endif /* PCIE_FULL_DONGLE */
#if defined(DHD_LB_TXC) || defined(DHD_LB_RXC) || defined(DHD_LB_TXP)
/**
 * dhd_tasklet_schedule - Function that runs in IPI context of the destination
 * CPU and schedules a tasklet.
 * @tasklet: opaque pointer to the tasklet
 */
static void
dhd_tasklet_schedule(void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);
}

/**
 * dhd_tasklet_schedule_on - Executes the passed tasklet on a given CPU
 * @tasklet: tasklet to be scheduled
 * @on_cpu: cpu core id
 *
 * If the requested cpu is online, then an IPI is sent to this cpu via the
 * smp_call_function_single with no wait and the tasklet_schedule function
 * will be invoked to schedule the specified tasklet on the requested CPU.
 */
static void
dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu)
{
	const int wait = 0; /* asynchronous IPI: do not block for completion */
	smp_call_function_single(on_cpu,
		dhd_tasklet_schedule, (void *)tasklet, wait);
}

/**
 * dhd_work_schedule_on - Executes the passed work on a given CPU
 * @work: work to be scheduled
 * @on_cpu: cpu core id
 *
 * The work item is queued to the system workqueue pinned to @on_cpu via
 * schedule_work_on.
 */
static void
dhd_work_schedule_on(struct work_struct *work, int on_cpu)
{
	schedule_work_on(on_cpu, work);
}
#endif /* DHD_LB_TXC || DHD_LB_RXC || DHD_LB_TXP */
#if defined(DHD_LB_TXC)
/**
 * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet
 * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and
 * freeing the packets placed in the tx_compl workq.
 *
 * get_cpu() is paired with put_cpu(): the current CPU id is only sampled,
 * preemption must not stay disabled across the schedule calls.
 */
void
dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	int curr_cpu, on_cpu;

	if (dhd->rx_napi_netdev == NULL) {
		DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
		return;
	}

	DHD_LB_STATS_INCR(dhd->txc_sched_cnt);

	/*
	 * If the destination CPU is NOT online or is same as current CPU
	 * no need to schedule the work
	 */
	curr_cpu = get_cpu();
	put_cpu();

	on_cpu = atomic_read(&dhd->tx_compl_cpu);

	if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
		dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
	} else {
		schedule_work(&dhd->tx_compl_dispatcher_work);
	}
}

/* Work-queue hop: re-check the target CPU and schedule the tx completion
 * tasklet either locally (target offline) or on the chosen CPU.
 */
static void dhd_tx_compl_dispatcher_fn(struct work_struct * work)
{
	struct dhd_info *dhd =
		container_of(work, struct dhd_info, tx_compl_dispatcher_work);
	int cpu;

	get_online_cpus();
	cpu = atomic_read(&dhd->tx_compl_cpu);
	if (!cpu_online(cpu))
		dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
	else
		dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
	put_online_cpus();
}
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
/**
 * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
 * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
 * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
 * placed in the rx_compl workq.
 *
 * @dhdp: pointer to dhd_pub object
 */
void
dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	int curr_cpu, on_cpu;

	if (dhd->rx_napi_netdev == NULL) {
		DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
		return;
	}

	DHD_LB_STATS_INCR(dhd->rxc_sched_cnt);

	/*
	 * If the destination CPU is NOT online or is same as current CPU
	 * no need to schedule the work
	 */
	curr_cpu = get_cpu();
	put_cpu(); /* pair get_cpu(): only sampling the CPU id */

	on_cpu = atomic_read(&dhd->rx_compl_cpu);

	if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
		dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
	} else {
		dhd_rx_compl_dispatcher_fn(dhdp);
	}
}

/* Re-check the target CPU and run the rx completion tasklet locally if the
 * target went offline, otherwise IPI-schedule it on the chosen CPU.
 */
static void dhd_rx_compl_dispatcher_fn(dhd_pub_t *dhdp)
{
	struct dhd_info *dhd = dhdp->info;
	int cpu;

	get_online_cpus();
	cpu = atomic_read(&dhd->rx_compl_cpu);
	if (!cpu_online(cpu))
		dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
	else
		dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu);
	put_online_cpus();
}
#endif /* DHD_LB_RXC */
#if defined(DHD_LB_TXP)
/* Work-queue hop that finally schedules the tx tasklet on this CPU. */
static void dhd_tx_dispatcher_work(struct work_struct * work)
{
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	struct dhd_info *dhd =
		container_of(work, struct dhd_info, tx_dispatcher_work);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
	dhd_tasklet_schedule(&dhd->tx_tasklet);
}

/* Pick the CPU for TX processing and dispatch the tx tasklet there.
 * If the network stack submitted on the same CPU chosen for TX, divert to
 * the completion CPU to keep submit and process separated.
 */
static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp)
{
	int cpu;
	int net_tx_cpu;
	dhd_info_t *dhd = dhdp->info;

	cpu = atomic_read(&dhd->tx_cpu);
	net_tx_cpu = atomic_read(&dhd->net_tx_cpu);

	/*
	 * Now if the NET_TX has pushed the packet in the same
	 * CPU that is chosen for Tx processing, separate it out
	 * i.e run the TX processing tasklet in compl_cpu
	 */
	if (net_tx_cpu == cpu)
		cpu = atomic_read(&dhd->tx_compl_cpu);

	if (!cpu_online(cpu)) {
		/*
		 * Ooohh... but the Chosen CPU is not online,
		 * Do the job in the current CPU itself.
		 */
		dhd_tasklet_schedule(&dhd->tx_tasklet);
	} else {
		/*
		 * Schedule tx_dispatcher_work on the cpu which
		 * in turn will schedule tx_tasklet.
		 */
		dhd_work_schedule_on(&dhd->tx_dispatcher_work, cpu);
	}
}

/**
 * dhd_lb_tx_dispatch - load balance by dispatching the tx_tasklet
 * on another cpu. The tx_tasklet will take care of actually putting
 * the skbs into appropriate flow ring and ringing H2D interrupt
 *
 * @dhdp: pointer to dhd_pub object
 */
void
dhd_lb_tx_dispatch(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	int curr_cpu;

	curr_cpu = get_cpu();
	put_cpu(); /* pair get_cpu(): only sampling the CPU id */

	/* Record the CPU in which the TX request from Network stack came */
	atomic_set(&dhd->net_tx_cpu, curr_cpu);

	/* Schedule the work to dispatch ... */
	dhd_tx_dispatcher_fn(dhdp);
}
#endif /* DHD_LB_TXP */
2897 #if defined(DHD_LB_RXP)
2899 * dhd_napi_poll - Load balance napi poll function to process received
2900 * packets and send up the network stack using netif_receive_skb()
2902 * @napi: napi object in which context this poll function is invoked
2903 * @budget: number of packets to be processed.
2905 * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
2906 * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock).
2907 * Dequeue each packet from head of rx_process_queue, fetch the ifid from the
2908 * packet tag and sendup.
2911 dhd_napi_poll(struct napi_struct
*napi
, int budget
)
2914 const int pkt_count
= 1;
2916 struct sk_buff
* skb
;
2917 unsigned long flags
;
2918 struct dhd_info
*dhd
;
2920 struct sk_buff_head rx_process_queue
;
2922 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2923 #pragma GCC diagnostic push
2924 #pragma GCC diagnostic ignored "-Wcast-qual"
2926 dhd
= container_of(napi
, struct dhd_info
, rx_napi_struct
);
2927 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2928 #pragma GCC diagnostic pop
2931 DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
2932 __FUNCTION__
, skb_queue_len(&dhd
->rx_napi_queue
), budget
));
2933 __skb_queue_head_init(&rx_process_queue
);
2935 /* extract the entire rx_napi_queue into local rx_process_queue */
2936 spin_lock_irqsave(&dhd
->rx_napi_queue
.lock
, flags
);
2937 skb_queue_splice_tail_init(&dhd
->rx_napi_queue
, &rx_process_queue
);
2938 spin_unlock_irqrestore(&dhd
->rx_napi_queue
.lock
, flags
);
2940 while ((skb
= __skb_dequeue(&rx_process_queue
)) != NULL
) {
2941 OSL_PREFETCH(skb
->data
);
2943 ifid
= DHD_PKTTAG_IFID((dhd_pkttag_fr_t
*)PKTTAG(skb
));
2945 DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
2946 __FUNCTION__
, skb
, ifid
));
2948 dhd_rx_frame(&dhd
->pub
, ifid
, skb
, pkt_count
, chan
);
2952 DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd
->pub
, processed
);
2954 DHD_INFO(("%s processed %d\n", __FUNCTION__
, processed
));
2955 napi_complete(napi
);
2961 * dhd_napi_schedule - Place the napi struct into the current cpus softnet napi
2962 * poll list. This function may be invoked via the smp_call_function_single
2963 * from a remote CPU.
2965 * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
2966 * after the napi_struct is added to the softnet data's poll_list
2968 * @info: pointer to a dhd_info struct
2971 dhd_napi_schedule(void *info
)
2973 dhd_info_t
*dhd
= (dhd_info_t
*)info
;
2975 DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
2976 __FUNCTION__
, &dhd
->rx_napi_struct
, atomic_read(&dhd
->rx_napi_cpu
)));
2978 /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
2979 if (napi_schedule_prep(&dhd
->rx_napi_struct
)) {
2980 __napi_schedule(&dhd
->rx_napi_struct
);
2981 DHD_LB_STATS_PERCPU_ARR_INCR(dhd
->napi_percpu_run_cnt
);
2985 * If the rx_napi_struct was already running, then we let it complete
2986 * processing all its packets. The rx_napi_struct may only run on one
2987 * core at a time, to avoid out-of-order handling.
2992 * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ
2993 * action after placing the dhd's rx_process napi object in the the remote CPU's
2994 * softnet data's poll_list.
2996 * @dhd: dhd_info which has the rx_process napi object
2997 * @on_cpu: desired remote CPU id
3000 dhd_napi_schedule_on(dhd_info_t
*dhd
, int on_cpu
)
3002 int wait
= 0; /* asynchronous IPI */
3003 DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
3004 __FUNCTION__
, dhd
, &dhd
->rx_napi_struct
, on_cpu
));
3006 if (smp_call_function_single(on_cpu
, dhd_napi_schedule
, dhd
, wait
)) {
3007 DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
3008 __FUNCTION__
, on_cpu
));
3011 DHD_LB_STATS_INCR(dhd
->napi_sched_cnt
);
3017 * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on
3018 * Why should we do this?
3019 * The candidacy algorithm is run from the call back function
3020 * registered to CPU hotplug notifier. This call back happens from Worker
3021 * context. The dhd_napi_schedule_on is also from worker context.
3022 * Note that both of this can run on two different CPUs at the same time.
3023 * So we can possibly have a window where a given CPUn is being brought
3024 * down from CPUm while we try to run a function on CPUn.
3025 * To prevent this its better have the whole code to execute an SMP
3026 * function under get_online_cpus.
3027 * This function call ensures that hotplug mechanism does not kick-in
3028 * until we are done dealing with online CPUs
3029 * If the hotplug worker is already running, no worries because the
3030 * candidacy algo would then reflect the same in dhd->rx_napi_cpu.
3032 * The below mentioned code structure is proposed in
3033 * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
3035 * Q: I need to ensure that a particular cpu is not removed when there is some
3036 * work specific to this cpu is in progress
3038 * According to the documentation calling get_online_cpus is NOT required, if
3039 * we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn can
3040 * run from Work Queue context we have to call these functions
3042 static void dhd_rx_napi_dispatcher_fn(struct work_struct
* work
)
3044 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
3045 #pragma GCC diagnostic push
3046 #pragma GCC diagnostic ignored "-Wcast-qual"
3048 struct dhd_info
*dhd
=
3049 container_of(work
, struct dhd_info
, rx_napi_dispatcher_work
);
3050 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
3051 #pragma GCC diagnostic pop
3056 cpu
= atomic_read(&dhd
->rx_napi_cpu
);
3058 if (!cpu_online(cpu
))
3059 dhd_napi_schedule(dhd
);
3061 dhd_napi_schedule_on(dhd
, cpu
);
3067 * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
3068 * to run on another CPU. The rx_napi_struct's poll function will retrieve all
3069 * the packets enqueued into the rx_napi_queue and sendup.
3070 * The producer's rx packet queue is appended to the rx_napi_queue before
3071 * dispatching the rx_napi_struct.
3074 dhd_lb_rx_napi_dispatch(dhd_pub_t
*dhdp
)
3076 unsigned long flags
;
3077 dhd_info_t
*dhd
= dhdp
->info
;
3081 if (dhd
->rx_napi_netdev
== NULL
) {
3082 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__
));
3086 DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__
,
3087 skb_queue_len(&dhd
->rx_napi_queue
), skb_queue_len(&dhd
->rx_pend_queue
)));
3089 /* append the producer's queue of packets to the napi's rx process queue */
3090 spin_lock_irqsave(&dhd
->rx_napi_queue
.lock
, flags
);
3091 skb_queue_splice_tail_init(&dhd
->rx_pend_queue
, &dhd
->rx_napi_queue
);
3092 spin_unlock_irqrestore(&dhd
->rx_napi_queue
.lock
, flags
);
3095 * If the destination CPU is NOT online or is same as current CPU
3096 * no need to schedule the work
3098 curr_cpu
= get_cpu();
3101 on_cpu
= atomic_read(&dhd
->rx_napi_cpu
);
3102 if ((on_cpu
== curr_cpu
) || (!cpu_online(on_cpu
))) {
3103 dhd_napi_schedule(dhd
);
3105 schedule_work(&dhd
->rx_napi_dispatcher_work
);
3110 * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
3113 dhd_lb_rx_pkt_enqueue(dhd_pub_t
*dhdp
, void *pkt
, int ifidx
)
3115 dhd_info_t
*dhd
= dhdp
->info
;
3117 DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__
,
3118 pkt
, ifidx
, skb_queue_len(&dhd
->rx_pend_queue
)));
3119 DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t
*)PKTTAG(pkt
), ifidx
);
3120 __skb_queue_tail(&dhd
->rx_pend_queue
, pkt
);
3122 #endif /* DHD_LB_RXP */
3127 /** Returns dhd iflist index corresponding the the bssidx provided by apps */
3128 int dhd_bssidx2idx(dhd_pub_t
*dhdp
, uint32 bssidx
)
3131 dhd_info_t
*dhd
= dhdp
->info
;
3134 ASSERT(bssidx
< DHD_MAX_IFS
);
3137 for (i
= 0; i
< DHD_MAX_IFS
; i
++) {
3138 ifp
= dhd
->iflist
[i
];
3139 if (ifp
&& (ifp
->bssidx
== bssidx
)) {
3140 DHD_TRACE(("Index manipulated for %s from %d to %d\n",
3141 ifp
->name
, bssidx
, i
));
3148 static inline int dhd_rxf_enqueue(dhd_pub_t
*dhdp
, void* skb
)
3154 DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
3158 dhd_os_rxflock(dhdp
);
3159 store_idx
= dhdp
->store_idx
;
3160 sent_idx
= dhdp
->sent_idx
;
3161 if (dhdp
->skbbuf
[store_idx
] != NULL
) {
3162 /* Make sure the previous packets are processed */
3163 dhd_os_rxfunlock(dhdp
);
3164 #ifdef RXF_DEQUEUE_ON_BUSY
3165 DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
3166 skb
, store_idx
, sent_idx
));
3168 #else /* RXF_DEQUEUE_ON_BUSY */
3169 DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
3170 skb
, store_idx
, sent_idx
));
3171 /* removed msleep here, should use wait_event_timeout if we
3172 * want to give rx frame thread a chance to run
3174 #if defined(WAIT_DEQUEUE)
3178 #endif /* RXF_DEQUEUE_ON_BUSY */
3180 DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
3181 skb
, store_idx
, (store_idx
+ 1) & (MAXSKBPEND
- 1)));
3182 dhdp
->skbbuf
[store_idx
] = skb
;
3183 dhdp
->store_idx
= (store_idx
+ 1) & (MAXSKBPEND
- 1);
3184 dhd_os_rxfunlock(dhdp
);
3189 static inline void* dhd_rxf_dequeue(dhd_pub_t
*dhdp
)
3195 dhd_os_rxflock(dhdp
);
3197 store_idx
= dhdp
->store_idx
;
3198 sent_idx
= dhdp
->sent_idx
;
3199 skb
= dhdp
->skbbuf
[sent_idx
];
3202 dhd_os_rxfunlock(dhdp
);
3203 DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
3204 store_idx
, sent_idx
));
3208 dhdp
->skbbuf
[sent_idx
] = NULL
;
3209 dhdp
->sent_idx
= (sent_idx
+ 1) & (MAXSKBPEND
- 1);
3211 DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
3214 dhd_os_rxfunlock(dhdp
);
3219 int dhd_process_cid_mac(dhd_pub_t
*dhdp
, bool prepost
)
3221 if (prepost
) { /* pre process */
3223 dhd_check_module_cid(dhdp
);
3224 dhd_check_module_mac(dhdp
);
3225 dhd_set_macaddr_from_file(dhdp
);
3226 } else { /* post process */
3227 dhd_write_macaddr(&dhdp
->mac
);
3228 dhd_clear_cis(dhdp
);
3234 // terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
3235 #if defined(PKT_FILTER_SUPPORT)
3236 #if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
3238 _turn_on_arp_filter(dhd_pub_t
*dhd
, int op_mode_param
)
3240 bool _apply
= FALSE
;
3241 /* In case of IBSS mode, apply arp pkt filter */
3242 if (op_mode_param
& DHD_FLAG_IBSS_MODE
) {
3246 /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
3247 if (op_mode_param
& (DHD_FLAG_P2P_GC_MODE
| DHD_FLAG_P2P_GO_MODE
)) {
3255 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
3258 dhd_set_packet_filter(dhd_pub_t
*dhd
)
3262 DHD_TRACE(("%s: enter\n", __FUNCTION__
));
3263 if (dhd_pkt_filter_enable
) {
3264 for (i
= 0; i
< dhd
->pktfilter_count
; i
++) {
3265 dhd_pktfilter_offload_set(dhd
, dhd
->pktfilter
[i
]);
3271 dhd_enable_packet_filter(int value
, dhd_pub_t
*dhd
)
3275 DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__
, value
));
3276 if ((dhd
->op_mode
& DHD_FLAG_HOSTAP_MODE
) && value
) {
3277 DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__
));
3280 /* 1 - Enable packet filter, only allow unicast packet to send up */
3281 /* 0 - Disable packet filter */
3282 if (dhd_pkt_filter_enable
&& (!value
||
3283 (dhd_support_sta_mode(dhd
) && !dhd
->dhcp_in_progress
)))
3285 for (i
= 0; i
< dhd
->pktfilter_count
; i
++) {
3286 // terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
3287 #if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
3288 if (value
&& (i
== DHD_ARP_FILTER_NUM
) &&
3289 !_turn_on_arp_filter(dhd
, dhd
->op_mode
)) {
3290 DHD_TRACE(("Do not turn on ARP white list pkt filter:"
3291 "val %d, cnt %d, op_mode 0x%x\n",
3292 value
, i
, dhd
->op_mode
));
3295 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
3296 dhd_pktfilter_offload_enable(dhd
, dhd
->pktfilter
[i
],
3297 value
, dhd_master_mode
);
3303 dhd_packet_filter_add_remove(dhd_pub_t
*dhdp
, int add_remove
, int num
)
3305 char *filterp
= NULL
;
3309 case DHD_BROADCAST_FILTER_NUM
:
3310 filterp
= "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
3313 case DHD_MULTICAST4_FILTER_NUM
:
3315 if (FW_SUPPORTED((dhdp
), pf6
)) {
3316 if (dhdp
->pktfilter
[num
] != NULL
) {
3317 dhd_pktfilter_offload_delete(dhdp
, filter_id
);
3318 dhdp
->pktfilter
[num
] = NULL
;
3321 filterp
= DISCARD_IPV4_MCAST
;
3326 filterp
= "102 0 0 0 0xFFFFFF 0x01005E";
3328 case DHD_MULTICAST6_FILTER_NUM
:
3330 if (FW_SUPPORTED((dhdp
), pf6
)) {
3331 if (dhdp
->pktfilter
[num
] != NULL
) {
3332 dhd_pktfilter_offload_delete(dhdp
, filter_id
);
3333 dhdp
->pktfilter
[num
] = NULL
;
3336 filterp
= DISCARD_IPV6_MCAST
;
3341 filterp
= "103 0 0 0 0xFFFF 0x3333";
3343 case DHD_MDNS_FILTER_NUM
:
3344 filterp
= "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
3347 case DHD_ARP_FILTER_NUM
:
3348 filterp
= "105 0 0 12 0xFFFF 0x0806";
3351 case DHD_BROADCAST_ARP_FILTER_NUM
:
3352 filterp
= "106 0 0 0 0xFFFFFFFFFFFF0000000000000806"
3353 " 0xFFFFFFFFFFFF0000000000000806";
3362 dhdp
->pktfilter
[num
] = filterp
;
3363 dhd_pktfilter_offload_set(dhdp
, dhdp
->pktfilter
[num
]);
3364 } else { /* Delete filter */
3365 if (dhdp
->pktfilter
[num
]) {
3366 dhd_pktfilter_offload_delete(dhdp
, filter_id
);
3367 dhdp
->pktfilter
[num
] = NULL
;
3373 #endif /* PKT_FILTER_SUPPORT */
3375 static int dhd_set_suspend(int value
, dhd_pub_t
*dhd
)
3377 int power_mode
= PM_MAX
;
3378 #ifdef SUPPORT_SENSORHUB
3379 shub_control_t shub_ctl
;
3380 #endif /* SUPPORT_SENSORHUB */
3381 /* wl_pkt_filter_enable_t enable_parm; */
3382 int bcn_li_dtim
= 0; /* Default bcn_li_dtim in resume mode is 0 */
3384 #ifdef DHD_USE_EARLYSUSPEND
3385 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
3386 int bcn_timeout
= 0;
3387 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
3388 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
3389 int roam_time_thresh
= 0; /* (ms) */
3390 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
3391 #ifndef ENABLE_FW_ROAM_SUSPEND
3392 uint roamvar
= dhd
->conf
->roam_off_suspend
;
3393 #endif /* ENABLE_FW_ROAM_SUSPEND */
3394 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
3396 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
3397 uint nd_ra_filter
= 0;
3398 #endif /* DHD_USE_EARLYSUSPEND */
3399 #ifdef PASS_ALL_MCAST_PKTS
3400 struct dhd_info
*dhdinfo
;
3403 #endif /* PASS_ALL_MCAST_PKTS */
3404 #ifdef ENABLE_IPMCAST_FILTER
3405 int ipmcast_l2filter
;
3406 #endif /* ENABLE_IPMCAST_FILTER */
3407 #ifdef DYNAMIC_SWOOB_DURATION
3408 #ifndef CUSTOM_INTR_WIDTH
3409 #define CUSTOM_INTR_WIDTH 100
3411 #endif /* CUSTOM_INTR_WIDTH */
3412 #endif /* DYNAMIC_SWOOB_DURATION */
3414 #if defined(BCMPCIE)
3416 int dtim_period
= 0;
3417 int bcn_interval
= 0;
3419 #ifndef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
3420 int bcn_timeout
= CUSTOM_BCN_TIMEOUT_SETTING
;
3422 bcn_timeout
= CUSTOM_BCN_TIMEOUT_SETTING
;
3423 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
3424 #endif /* OEM_ANDROID && BCMPCIE */
3429 #ifdef PASS_ALL_MCAST_PKTS
3430 dhdinfo
= dhd
->info
;
3431 #endif /* PASS_ALL_MCAST_PKTS */
3433 DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
3434 __FUNCTION__
, value
, dhd
->in_suspend
));
3436 dhd_suspend_lock(dhd
);
3438 #ifdef CUSTOM_SET_CPUCORE
3439 DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__
, value
));
3440 /* set specific cpucore */
3441 dhd_set_cpucore(dhd
, TRUE
);
3442 #endif /* CUSTOM_SET_CPUCORE */
3444 if (dhd
->conf
->pm
>= 0)
3445 power_mode
= dhd
->conf
->pm
;
3447 power_mode
= PM_FAST
;
3450 if (value
&& dhd
->in_suspend
) {
3451 #ifdef PKT_FILTER_SUPPORT
3452 dhd
->early_suspended
= 1;
3454 /* Kernel suspended */
3455 DHD_ERROR(("%s: force extra suspend setting\n", __FUNCTION__
));
3457 if (dhd
->conf
->pm_in_suspend
>= 0)
3458 power_mode
= dhd
->conf
->pm_in_suspend
;
3459 dhd_wl_ioctl_cmd(dhd
, WLC_SET_PM
, (char *)&power_mode
,
3460 sizeof(power_mode
), TRUE
, 0);
3462 #ifdef PKT_FILTER_SUPPORT
3463 /* Enable packet filter,
3464 * only allow unicast packet to send up
3466 dhd_enable_packet_filter(1, dhd
);
3468 dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd
));
3470 #endif /* PKT_FILTER_SUPPORT */
3472 #ifdef SUPPORT_SENSORHUB
3473 shub_ctl
.enable
= 1;
3474 shub_ctl
.cmd
= 0x000;
3475 shub_ctl
.op_mode
= 1;
3476 shub_ctl
.interval
= 0;
3477 if (dhd
->info
->shub_enable
== 1) {
3478 ret
= dhd_iovar(dhd
, 0, "shub_msreq",
3479 (char *)&shub_ctl
, sizeof(shub_ctl
), NULL
, 0, TRUE
);
3481 DHD_ERROR(("%s SensorHub MS start: failed %d\n",
3482 __FUNCTION__
, ret
));
3485 #endif /* SUPPORT_SENSORHUB */
3488 #ifdef PASS_ALL_MCAST_PKTS
3490 for (i
= 0; i
< DHD_MAX_IFS
; i
++) {
3491 if (dhdinfo
->iflist
[i
] && dhdinfo
->iflist
[i
]->net
)
3492 dhd_iovar(dhd
, i
, "allmulti", (char *)&allmulti
,
3493 sizeof(allmulti
), NULL
, 0, TRUE
);
3496 #endif /* PASS_ALL_MCAST_PKTS */
3498 /* If DTIM skip is set up as default, force it to wake
3499 * each third DTIM for better power savings. Note that
3500 * one side effect is a chance to miss BC/MC packet.
3503 /* Do not set bcn_li_ditm on WFD mode */
3504 if (dhd
->tdls_mode
) {
3508 #if defined(BCMPCIE)
3509 bcn_li_dtim
= dhd_get_suspend_bcn_li_dtim(dhd
, &dtim_period
,
3511 dhd_iovar(dhd
, 0, "bcn_li_dtim", (char *)&bcn_li_dtim
,
3512 sizeof(bcn_li_dtim
), NULL
, 0, TRUE
);
3514 if ((bcn_li_dtim
* dtim_period
* bcn_interval
) >=
3515 MIN_DTIM_FOR_ROAM_THRES_EXTEND
) {
3517 * Increase max roaming threshold from 2 secs to 8 secs
3518 * the real roam threshold is MIN(max_roam_threshold,
3522 dhd_iovar(dhd
, 0, "lpas", (char *)&lpas
, sizeof(lpas
), NULL
,
3527 * if bcn_to_dly is 1, the real roam threshold is
3528 * MIN(max_roam_threshold, bcn_timeout -1);
3529 * notify link down event after roaming procedure complete
3530 * if we hit bcn_timeout while we are in roaming progress.
3532 dhd_iovar(dhd
, 0, "bcn_to_dly", (char *)&bcn_to_dly
,
3533 sizeof(bcn_to_dly
), NULL
, 0, TRUE
);
3534 /* Increase beacon timeout to 6 secs or use bigger one */
3535 bcn_timeout
= max(bcn_timeout
, BCN_TIMEOUT_IN_SUSPEND
);
3536 dhd_iovar(dhd
, 0, "bcn_timeout", (char *)&bcn_timeout
,
3537 sizeof(bcn_timeout
), NULL
, 0, TRUE
);
3540 bcn_li_dtim
= dhd_get_suspend_bcn_li_dtim(dhd
);
3541 if (dhd_iovar(dhd
, 0, "bcn_li_dtim", (char *)&bcn_li_dtim
,
3542 sizeof(bcn_li_dtim
), NULL
, 0, TRUE
) < 0)
3543 DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__
));
3544 #endif /* OEM_ANDROID && BCMPCIE */
3546 #ifdef DHD_USE_EARLYSUSPEND
3547 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
3548 bcn_timeout
= CUSTOM_BCN_TIMEOUT_IN_SUSPEND
;
3549 dhd_iovar(dhd
, 0, "bcn_timeout", (char *)&bcn_timeout
,
3550 sizeof(bcn_timeout
), NULL
, 0, TRUE
);
3551 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
3552 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
3553 roam_time_thresh
= CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
;
3554 dhd_iovar(dhd
, 0, "roam_time_thresh", (char *)&roam_time_thresh
,
3555 sizeof(roam_time_thresh
), NULL
, 0, TRUE
);
3556 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
3557 #ifndef ENABLE_FW_ROAM_SUSPEND
3558 /* Disable firmware roaming during suspend */
3559 dhd_iovar(dhd
, 0, "roam_off", (char *)&roamvar
, sizeof(roamvar
),
3561 #endif /* ENABLE_FW_ROAM_SUSPEND */
3562 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
3564 dhd_iovar(dhd
, 0, "bcn_li_bcn", (char *)&bcn_li_bcn
,
3565 sizeof(bcn_li_bcn
), NULL
, 0, TRUE
);
3566 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
3567 #ifdef NDO_CONFIG_SUPPORT
3568 if (dhd
->ndo_enable
) {
3569 if (!dhd
->ndo_host_ip_overflow
) {
3570 /* enable ND offload on suspend */
3571 ret
= dhd_ndo_enable(dhd
, 1);
3573 DHD_ERROR(("%s: failed to enable NDO\n",
3577 DHD_INFO(("%s: NDO disabled on suspend due to"
3578 "HW capacity\n", __FUNCTION__
));
3581 #endif /* NDO_CONFIG_SUPPORT */
3583 if (FW_SUPPORTED(dhd
, ndoe
))
3585 if (FW_SUPPORTED(dhd
, ndoe
) && !FW_SUPPORTED(dhd
, apf
))
3588 /* enable IPv6 RA filter in firmware during suspend */
3590 ret
= dhd_iovar(dhd
, 0, "nd_ra_filter_enable",
3591 (char *)&nd_ra_filter
, sizeof(nd_ra_filter
),
3594 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
3597 dhd_os_suppress_logging(dhd
, TRUE
);
3598 #ifdef ENABLE_IPMCAST_FILTER
3599 ipmcast_l2filter
= 1;
3600 ret
= dhd_iovar(dhd
, 0, "ipmcast_l2filter",
3601 (char *)&ipmcast_l2filter
, sizeof(ipmcast_l2filter
),
3603 #endif /* ENABLE_IPMCAST_FILTER */
3604 #ifdef DYNAMIC_SWOOB_DURATION
3605 intr_width
= CUSTOM_INTR_WIDTH
;
3606 ret
= dhd_iovar(dhd
, 0, "bus:intr_width", (char *)&intr_width
,
3607 sizeof(intr_width
), NULL
, 0, TRUE
);
3609 DHD_ERROR(("failed to set intr_width (%d)\n", ret
));
3611 #endif /* DYNAMIC_SWOOB_DURATION */
3612 #endif /* DHD_USE_EARLYSUSPEND */
3613 dhd_conf_set_ap_in_suspend(dhd
, value
);
3615 dhd_conf_set_ap_in_suspend(dhd
, value
);
3616 #ifdef PKT_FILTER_SUPPORT
3617 dhd
->early_suspended
= 0;
3619 /* Kernel resumed */
3620 DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__
));
3622 #ifdef SUPPORT_SENSORHUB
3623 shub_ctl
.enable
= 1;
3624 shub_ctl
.cmd
= 0x000;
3625 shub_ctl
.op_mode
= 0;
3626 shub_ctl
.interval
= 0;
3627 if (dhd
->info
->shub_enable
== 1) {
3628 ret
= dhd_iovar(dhd
, 0, "shub_msreq",
3629 (char *)&shub_ctl
, sizeof(shub_ctl
),
3632 DHD_ERROR(("%s SensorHub MS stop: failed %d\n",
3633 __FUNCTION__
, ret
));
3636 #endif /* SUPPORT_SENSORHUB */
3638 #ifdef DYNAMIC_SWOOB_DURATION
3640 ret
= dhd_iovar(dhd
, 0, "bus:intr_width", (char *)&intr_width
,
3641 sizeof(intr_width
), NULL
, 0, TRUE
);
3643 DHD_ERROR(("failed to set intr_width (%d)\n", ret
));
3645 #endif /* DYNAMIC_SWOOB_DURATION */
3646 #ifndef SUPPORT_PM2_ONLY
3647 power_mode
= PM_FAST
;
3648 dhd_wl_ioctl_cmd(dhd
, WLC_SET_PM
, (char *)&power_mode
,
3649 sizeof(power_mode
), TRUE
, 0);
3650 #endif /* SUPPORT_PM2_ONLY */
3651 #ifdef PKT_FILTER_SUPPORT
3652 /* disable pkt filter */
3653 dhd_enable_packet_filter(0, dhd
);
3655 dhd_dev_apf_disable_filter(dhd_linux_get_primary_netdev(dhd
));
3657 #endif /* PKT_FILTER_SUPPORT */
3658 #ifdef PASS_ALL_MCAST_PKTS
3660 for (i
= 0; i
< DHD_MAX_IFS
; i
++) {
3661 if (dhdinfo
->iflist
[i
] && dhdinfo
->iflist
[i
]->net
)
3662 dhd_iovar(dhd
, i
, "allmulti", (char *)&allmulti
,
3663 sizeof(allmulti
), NULL
, 0, TRUE
);
3665 #endif /* PASS_ALL_MCAST_PKTS */
3666 #if defined(BCMPCIE)
3667 /* restore pre-suspend setting */
3668 ret
= dhd_iovar(dhd
, 0, "bcn_li_dtim", (char *)&bcn_li_dtim
,
3669 sizeof(bcn_li_dtim
), NULL
, 0, TRUE
);
3671 DHD_ERROR(("%s:bcn_li_ditm fail:%d\n", __FUNCTION__
, ret
));
3674 dhd_iovar(dhd
, 0, "lpas", (char *)&lpas
, sizeof(lpas
), NULL
, 0,
3677 dhd_iovar(dhd
, 0, "bcn_to_dly", (char *)&bcn_to_dly
,
3678 sizeof(bcn_to_dly
), NULL
, 0, TRUE
);
3680 dhd_iovar(dhd
, 0, "bcn_timeout", (char *)&bcn_timeout
,
3681 sizeof(bcn_timeout
), NULL
, 0, TRUE
);
3683 /* restore pre-suspend setting for dtim_skip */
3684 ret
= dhd_iovar(dhd
, 0, "bcn_li_dtim", (char *)&bcn_li_dtim
,
3685 sizeof(bcn_li_dtim
), NULL
, 0, TRUE
);
3687 DHD_ERROR(("%s:bcn_li_ditm fail:%d\n", __FUNCTION__
, ret
));
3689 #endif /* OEM_ANDROID && BCMPCIE */
3690 #ifdef DHD_USE_EARLYSUSPEND
3691 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
3692 bcn_timeout
= CUSTOM_BCN_TIMEOUT
;
3693 dhd_iovar(dhd
, 0, "bcn_timeout", (char *)&bcn_timeout
,
3694 sizeof(bcn_timeout
), NULL
, 0, TRUE
);
3695 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
3696 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
3697 roam_time_thresh
= 2000;
3698 dhd_iovar(dhd
, 0, "roam_time_thresh", (char *)&roam_time_thresh
,
3699 sizeof(roam_time_thresh
), NULL
, 0, TRUE
);
3701 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
3702 #ifndef ENABLE_FW_ROAM_SUSPEND
3703 roamvar
= dhd_roam_disable
;
3704 dhd_iovar(dhd
, 0, "roam_off", (char *)&roamvar
, sizeof(roamvar
),
3706 #endif /* ENABLE_FW_ROAM_SUSPEND */
3707 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
3709 dhd_iovar(dhd
, 0, "bcn_li_bcn", (char *)&bcn_li_bcn
,
3710 sizeof(bcn_li_bcn
), NULL
, 0, TRUE
);
3711 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
3712 #ifdef NDO_CONFIG_SUPPORT
3713 if (dhd
->ndo_enable
) {
3714 /* Disable ND offload on resume */
3715 ret
= dhd_ndo_enable(dhd
, 0);
3717 DHD_ERROR(("%s: failed to disable NDO\n",
3721 #endif /* NDO_CONFIG_SUPPORT */
3723 if (FW_SUPPORTED(dhd
, ndoe
))
3725 if (FW_SUPPORTED(dhd
, ndoe
) && !FW_SUPPORTED(dhd
, apf
))
3728 /* disable IPv6 RA filter in firmware during suspend */
3730 ret
= dhd_iovar(dhd
, 0, "nd_ra_filter_enable",
3731 (char *)&nd_ra_filter
, sizeof(nd_ra_filter
),
3734 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
3738 dhd_os_suppress_logging(dhd
, FALSE
);
3739 #ifdef ENABLE_IPMCAST_FILTER
3740 ipmcast_l2filter
= 0;
3741 ret
= dhd_iovar(dhd
, 0, "ipmcast_l2filter",
3742 (char *)&ipmcast_l2filter
, sizeof(ipmcast_l2filter
),
3744 #endif /* ENABLE_IPMCAST_FILTER */
3745 #endif /* DHD_USE_EARLYSUSPEND */
3747 /* terence 2017029: Reject in early suspend */
3748 if (!dhd
->conf
->xmit_in_suspend
) {
3749 dhd_txflowcontrol(dhd
, ALL_INTERFACES
, OFF
);
3753 dhd_suspend_unlock(dhd
);
3758 static int dhd_suspend_resume_helper(struct dhd_info
*dhd
, int val
, int force
)
3760 dhd_pub_t
*dhdp
= &dhd
->pub
;
3763 DHD_OS_WAKE_LOCK(dhdp
);
3764 DHD_PERIM_LOCK(dhdp
);
3766 /* Set flag when early suspend was called */
3767 dhdp
->in_suspend
= val
;
3768 if ((force
|| !dhdp
->suspend_disable_flag
) &&
3769 (dhd_support_sta_mode(dhdp
) || dhd_conf_get_ap_mode_in_suspend(dhdp
)))
3771 ret
= dhd_set_suspend(val
, dhdp
);
3774 DHD_PERIM_UNLOCK(dhdp
);
3775 DHD_OS_WAKE_UNLOCK(dhdp
);
3779 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
3780 static void dhd_early_suspend(struct early_suspend
*h
)
3782 struct dhd_info
*dhd
= container_of(h
, struct dhd_info
, early_suspend
);
3783 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__
));
3786 dhd_suspend_resume_helper(dhd
, 1, 0);
3789 static void dhd_late_resume(struct early_suspend
*h
)
3791 struct dhd_info
*dhd
= container_of(h
, struct dhd_info
, early_suspend
);
3792 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__
));
3795 dhd_suspend_resume_helper(dhd
, 0, 0);
3797 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
3800 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
3801 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
3803 * dhd_timeout_start(&tmo, usec);
3804 * while (!dhd_timeout_expired(&tmo))
3805 * if (poll_something())
3807 * if (dhd_timeout_expired(&tmo))
3812 dhd_timeout_start(dhd_timeout_t
*tmo
, uint usec
)
3817 tmo
->tick
= jiffies_to_usecs(1);
3821 dhd_timeout_expired(dhd_timeout_t
*tmo
)
3823 /* Does nothing the first call */
3824 if (tmo
->increment
== 0) {
3829 if (tmo
->elapsed
>= tmo
->limit
)
3832 /* Add the delay that's about to take place */
3833 tmo
->elapsed
+= tmo
->increment
;
3835 if ((!CAN_SLEEP()) || tmo
->increment
< tmo
->tick
) {
3836 OSL_DELAY(tmo
->increment
);
3837 tmo
->increment
*= 2;
3838 if (tmo
->increment
> tmo
->tick
)
3839 tmo
->increment
= tmo
->tick
;
3841 wait_queue_head_t delay_wait
;
3842 DECLARE_WAITQUEUE(wait
, current
);
3843 init_waitqueue_head(&delay_wait
);
3844 add_wait_queue(&delay_wait
, &wait
);
3845 set_current_state(TASK_INTERRUPTIBLE
);
3846 (void)schedule_timeout(1);
3847 remove_wait_queue(&delay_wait
, &wait
);
3848 set_current_state(TASK_RUNNING
);
3855 dhd_net2idx(dhd_info_t
*dhd
, struct net_device
*net
)
3860 DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__
));
3864 while (i
< DHD_MAX_IFS
) {
3865 if (dhd
->iflist
[i
] && dhd
->iflist
[i
]->net
&& (dhd
->iflist
[i
]->net
== net
))
3873 struct net_device
* dhd_idx2net(void *pub
, int ifidx
)
3875 struct dhd_pub
*dhd_pub
= (struct dhd_pub
*)pub
;
3876 struct dhd_info
*dhd_info
;
3878 if (!dhd_pub
|| ifidx
< 0 || ifidx
>= DHD_MAX_IFS
)
3880 dhd_info
= dhd_pub
->info
;
3881 if (dhd_info
&& dhd_info
->iflist
[ifidx
])
3882 return dhd_info
->iflist
[ifidx
]->net
;
3887 dhd_ifname2idx(dhd_info_t
*dhd
, char *name
)
3889 int i
= DHD_MAX_IFS
;
3893 if (name
== NULL
|| *name
== '\0')
3897 if (dhd
->iflist
[i
] && !strncmp(dhd
->iflist
[i
]->dngl_name
, name
, IFNAMSIZ
))
3900 DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__
, i
, name
));
3902 return i
; /* default - the primary interface */
3906 dhd_ifname(dhd_pub_t
*dhdp
, int ifidx
)
3908 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
3912 if (ifidx
< 0 || ifidx
>= DHD_MAX_IFS
) {
3913 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__
, ifidx
));
3917 if (dhd
->iflist
[ifidx
] == NULL
) {
3918 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__
, ifidx
));
3922 if (dhd
->iflist
[ifidx
]->net
)
3923 return dhd
->iflist
[ifidx
]->net
->name
;
3929 dhd_bssidx2bssid(dhd_pub_t
*dhdp
, int idx
)
3932 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
;
3935 for (i
= 0; i
< DHD_MAX_IFS
; i
++)
3936 if (dhd
->iflist
[i
] && dhd
->iflist
[i
]->bssidx
== idx
)
3937 return dhd
->iflist
[i
]->mac_addr
;
3944 _dhd_set_multicast_list(dhd_info_t
*dhd
, int ifidx
)
3946 struct net_device
*dev
;
3947 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3948 struct netdev_hw_addr
*ha
;
3950 struct dev_mc_list
*mclist
;
3952 uint32 allmulti
, cnt
;
3959 if (!dhd
->iflist
[ifidx
]) {
3960 DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__
, ifidx
));
3963 dev
= dhd
->iflist
[ifidx
]->net
;
3966 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3967 netif_addr_lock_bh(dev
);
3968 #endif /* LINUX >= 2.6.27 */
3969 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3970 cnt
= netdev_mc_count(dev
);
3972 cnt
= dev
->mc_count
;
3973 #endif /* LINUX >= 2.6.35 */
3974 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3975 netif_addr_unlock_bh(dev
);
3976 #endif /* LINUX >= 2.6.27 */
3978 /* Determine initial value of allmulti flag */
3979 allmulti
= (dev
->flags
& IFF_ALLMULTI
) ? TRUE
: FALSE
;
3981 #ifdef PASS_ALL_MCAST_PKTS
3982 #ifdef PKT_FILTER_SUPPORT
3983 if (!dhd
->pub
.early_suspended
)
3984 #endif /* PKT_FILTER_SUPPORT */
3986 #endif /* PASS_ALL_MCAST_PKTS */
3988 /* Send down the multicast list first. */
3991 buflen
= sizeof("mcast_list") + sizeof(cnt
) + (cnt
* ETHER_ADDR_LEN
);
3992 if (!(bufp
= buf
= MALLOC(dhd
->pub
.osh
, buflen
))) {
3993 DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
3994 dhd_ifname(&dhd
->pub
, ifidx
), cnt
));
3998 strncpy(bufp
, "mcast_list", buflen
- 1);
3999 bufp
[buflen
- 1] = '\0';
4000 bufp
+= strlen("mcast_list") + 1;
4003 memcpy(bufp
, &cnt
, sizeof(cnt
));
4004 bufp
+= sizeof(cnt
);
4006 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
4007 netif_addr_lock_bh(dev
);
4008 #endif /* LINUX >= 2.6.27 */
4009 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
4010 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
4011 #pragma GCC diagnostic push
4012 #pragma GCC diagnostic ignored "-Wcast-qual"
4014 netdev_for_each_mc_addr(ha
, dev
) {
4017 memcpy(bufp
, ha
->addr
, ETHER_ADDR_LEN
);
4018 bufp
+= ETHER_ADDR_LEN
;
4021 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
4022 #pragma GCC diagnostic pop
4024 #else /* LINUX < 2.6.35 */
4025 for (mclist
= dev
->mc_list
; (mclist
&& (cnt
> 0));
4026 cnt
--, mclist
= mclist
->next
) {
4027 memcpy(bufp
, (void *)mclist
->dmi_addr
, ETHER_ADDR_LEN
);
4028 bufp
+= ETHER_ADDR_LEN
;
4030 #endif /* LINUX >= 2.6.35 */
4031 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
4032 netif_addr_unlock_bh(dev
);
4033 #endif /* LINUX >= 2.6.27 */
4035 memset(&ioc
, 0, sizeof(ioc
));
4036 ioc
.cmd
= WLC_SET_VAR
;
4041 ret
= dhd_wl_ioctl(&dhd
->pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
4043 DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
4044 dhd_ifname(&dhd
->pub
, ifidx
), cnt
));
4045 allmulti
= cnt
? TRUE
: allmulti
;
4048 MFREE(dhd
->pub
.osh
, buf
, buflen
);
4050 /* Now send the allmulti setting. This is based on the setting in the
4051 * net_device flags, but might be modified above to be turned on if we
4052 * were trying to set some addresses and dongle rejected it...
4055 allmulti
= htol32(allmulti
);
4056 ret
= dhd_iovar(&dhd
->pub
, ifidx
, "allmulti", (char *)&allmulti
,
4057 sizeof(allmulti
), NULL
, 0, TRUE
);
4059 DHD_ERROR(("%s: set allmulti %d failed\n",
4060 dhd_ifname(&dhd
->pub
, ifidx
), ltoh32(allmulti
)));
4063 /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
4065 allmulti
= (dev
->flags
& IFF_PROMISC
) ? TRUE
: FALSE
;
4067 allmulti
= htol32(allmulti
);
4069 memset(&ioc
, 0, sizeof(ioc
));
4070 ioc
.cmd
= WLC_SET_PROMISC
;
4071 ioc
.buf
= &allmulti
;
4072 ioc
.len
= sizeof(allmulti
);
4075 ret
= dhd_wl_ioctl(&dhd
->pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
4077 DHD_ERROR(("%s: set promisc %d failed\n",
4078 dhd_ifname(&dhd
->pub
, ifidx
), ltoh32(allmulti
)));
4083 _dhd_set_mac_address(dhd_info_t
*dhd
, int ifidx
, uint8
*addr
)
4087 ret
= dhd_iovar(&dhd
->pub
, ifidx
, "cur_etheraddr", (char *)addr
,
4088 ETHER_ADDR_LEN
, NULL
, 0, TRUE
);
4090 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd
->pub
, ifidx
)));
4092 memcpy(dhd
->iflist
[ifidx
]->net
->dev_addr
, addr
, ETHER_ADDR_LEN
);
4094 memcpy(dhd
->pub
.mac
.octet
, addr
, ETHER_ADDR_LEN
);
4101 extern struct net_device
*ap_net_dev
;
4102 extern tsk_ctl_t ap_eth_ctl
; /* ap netdev helper thread ctl */
4106 void dhd_update_psta_interface_for_sta(dhd_pub_t
* dhdp
, char* ifname
, void* ea
,
4109 struct wl_psta_primary_intf_event
*psta_prim_event
=
4110 (struct wl_psta_primary_intf_event
*)event_data
;
4111 dhd_sta_t
*psta_interface
= NULL
;
4112 dhd_sta_t
*sta
= NULL
;
4115 ASSERT(psta_prim_event
);
4118 ifindex
= (uint8
)dhd_ifname2idx(dhdp
->info
, ifname
);
4119 sta
= dhd_find_sta(dhdp
, ifindex
, ea
);
4121 psta_interface
= dhd_find_sta(dhdp
, ifindex
,
4122 (void *)(psta_prim_event
->prim_ea
.octet
));
4123 if (psta_interface
!= NULL
) {
4124 sta
->psta_prim
= psta_interface
;
4129 /* Get wmf_psta_disable configuration */
4130 int dhd_get_wmf_psta_disable(dhd_pub_t
*dhdp
, uint32 idx
)
4132 dhd_info_t
*dhd
= dhdp
->info
;
4134 ASSERT(idx
< DHD_MAX_IFS
);
4135 ifp
= dhd
->iflist
[idx
];
4136 return ifp
->wmf_psta_disable
;
4139 /* Set wmf_psta_disable configuration */
4140 int dhd_set_wmf_psta_disable(dhd_pub_t
*dhdp
, uint32 idx
, int val
)
4142 dhd_info_t
*dhd
= dhdp
->info
;
4144 ASSERT(idx
< DHD_MAX_IFS
);
4145 ifp
= dhd
->iflist
[idx
];
4146 ifp
->wmf_psta_disable
= val
;
4149 #endif /* DHD_WMF */
4152 /* Get psta/psr configuration */
4153 int dhd_get_psta_mode(dhd_pub_t
*dhdp
)
4155 dhd_info_t
*dhd
= dhdp
->info
;
4156 return (int)dhd
->psta_mode
;
4158 /* Set psta/psr configuration */
4159 int dhd_set_psta_mode(dhd_pub_t
*dhdp
, uint32 val
)
4161 dhd_info_t
*dhd
= dhdp
->info
;
4162 dhd
->psta_mode
= val
;
4165 #endif /* DHD_PSTA */
4167 #if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
4169 dhd_update_rx_pkt_chainable_state(dhd_pub_t
* dhdp
, uint32 idx
)
4171 dhd_info_t
*dhd
= dhdp
->info
;
4174 ASSERT(idx
< DHD_MAX_IFS
);
4176 ifp
= dhd
->iflist
[idx
];
4179 #ifdef DHD_L2_FILTER
4180 (ifp
->block_ping
) ||
4185 #ifdef DHD_MCAST_REGEN
4186 (ifp
->mcast_regen_bss_enable
) ||
4189 ifp
->rx_pkt_chainable
= FALSE
;
4192 #endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */
4195 /* Get wet configuration */
4196 int dhd_get_wet_mode(dhd_pub_t
*dhdp
)
4198 dhd_info_t
*dhd
= dhdp
->info
;
4199 return (int)dhd
->wet_mode
;
4202 /* Set wet configuration */
4203 int dhd_set_wet_mode(dhd_pub_t
*dhdp
, uint32 val
)
4205 dhd_info_t
*dhd
= dhdp
->info
;
4206 dhd
->wet_mode
= val
;
4207 dhd_update_rx_pkt_chainable_state(dhdp
, 0);
4210 #endif /* DHD_WET */
4212 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
4213 int32
dhd_role_to_nl80211_iftype(int32 role
)
4216 case WLC_E_IF_ROLE_STA
:
4217 return NL80211_IFTYPE_STATION
;
4218 case WLC_E_IF_ROLE_AP
:
4219 return NL80211_IFTYPE_AP
;
4220 case WLC_E_IF_ROLE_WDS
:
4221 return NL80211_IFTYPE_WDS
;
4222 case WLC_E_IF_ROLE_P2P_GO
:
4223 return NL80211_IFTYPE_P2P_GO
;
4224 case WLC_E_IF_ROLE_P2P_CLIENT
:
4225 return NL80211_IFTYPE_P2P_CLIENT
;
4226 case WLC_E_IF_ROLE_IBSS
:
4227 case WLC_E_IF_ROLE_NAN
:
4228 return NL80211_IFTYPE_ADHOC
;
4230 return NL80211_IFTYPE_UNSPECIFIED
;
4233 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
4236 dhd_ifadd_event_handler(void *handle
, void *event_info
, u8 event
)
4238 dhd_info_t
*dhd
= handle
;
4239 dhd_if_event_t
*if_event
= event_info
;
4240 struct net_device
*ndev
;
4243 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
4244 struct wl_if_event_info info
;
4245 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
4247 if (event
!= DHD_WQ_WORK_IF_ADD
) {
4248 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
4253 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
4258 DHD_ERROR(("%s: event data is null \n", __FUNCTION__
));
4262 dhd_net_if_lock_local(dhd
);
4263 DHD_OS_WAKE_LOCK(&dhd
->pub
);
4264 DHD_PERIM_LOCK(&dhd
->pub
);
4266 ifidx
= if_event
->event
.ifidx
;
4267 bssidx
= if_event
->event
.bssidx
;
4268 DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__
, ifidx
));
4271 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
4272 if (if_event
->event
.ifidx
> 0) {
4273 bzero(&info
, sizeof(info
));
4274 info
.ifidx
= if_event
->event
.ifidx
;
4275 info
.bssidx
= if_event
->event
.bssidx
;
4276 info
.role
= if_event
->event
.role
;
4277 strncpy(info
.name
, if_event
->name
, IFNAMSIZ
);
4278 if (wl_cfg80211_post_ifcreate(dhd
->pub
.info
->iflist
[0]->net
,
4279 &info
, if_event
->mac
, NULL
, true) != NULL
) {
4280 /* Do the post interface create ops */
4281 DHD_ERROR(("Post ifcreate ops done. Returning \n"));
4285 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
4287 /* This path is for non-android case */
4288 /* The interface name in host and in event msg are same */
4289 /* if name in event msg is used to create dongle if list on host */
4290 ndev
= dhd_allocate_if(&dhd
->pub
, ifidx
, if_event
->name
,
4291 if_event
->mac
, bssidx
, TRUE
, if_event
->name
);
4293 DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__
));
4297 DHD_PERIM_UNLOCK(&dhd
->pub
);
4298 ret
= dhd_register_if(&dhd
->pub
, ifidx
, TRUE
);
4299 DHD_PERIM_LOCK(&dhd
->pub
);
4300 if (ret
!= BCME_OK
) {
4301 DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__
));
4302 dhd_remove_if(&dhd
->pub
, ifidx
, TRUE
);
4305 #ifndef PCIE_FULL_DONGLE
4306 /* Turn on AP isolation in the firmware for interfaces operating in AP mode */
4307 if (FW_SUPPORTED((&dhd
->pub
), ap
) && (if_event
->event
.role
!= WLC_E_IF_ROLE_STA
)) {
4309 ret
= dhd_iovar(&dhd
->pub
, ifidx
, "ap_isolate", (char *)&var_int
, sizeof(var_int
),
4311 if (ret
!= BCME_OK
) {
4312 DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__
));
4313 dhd_remove_if(&dhd
->pub
, ifidx
, TRUE
);
4316 #endif /* PCIE_FULL_DONGLE */
4319 MFREE(dhd
->pub
.osh
, if_event
, sizeof(dhd_if_event_t
));
4321 DHD_PERIM_UNLOCK(&dhd
->pub
);
4322 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4323 dhd_net_if_unlock_local(dhd
);
4327 dhd_ifdel_event_handler(void *handle
, void *event_info
, u8 event
)
4329 dhd_info_t
*dhd
= handle
;
4331 dhd_if_event_t
*if_event
= event_info
;
4334 if (event
!= DHD_WQ_WORK_IF_DEL
) {
4335 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
4340 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
4345 DHD_ERROR(("%s: event data is null \n", __FUNCTION__
));
4349 dhd_net_if_lock_local(dhd
);
4350 DHD_OS_WAKE_LOCK(&dhd
->pub
);
4351 DHD_PERIM_LOCK(&dhd
->pub
);
4353 ifidx
= if_event
->event
.ifidx
;
4354 DHD_TRACE(("Removing interface with idx %d\n", ifidx
));
4356 DHD_PERIM_UNLOCK(&dhd
->pub
);
4357 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
4358 if (if_event
->event
.ifidx
> 0) {
4359 /* Do the post interface del ops */
4360 if (wl_cfg80211_post_ifdel(dhd
->pub
.info
->iflist
[ifidx
]->net
, true) == 0) {
4361 DHD_TRACE(("Post ifdel ops done. Returning \n"));
4362 DHD_PERIM_LOCK(&dhd
->pub
);
4366 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
4368 dhd_remove_if(&dhd
->pub
, ifidx
, TRUE
);
4369 DHD_PERIM_LOCK(&dhd
->pub
);
4371 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
4373 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
4374 MFREE(dhd
->pub
.osh
, if_event
, sizeof(dhd_if_event_t
));
4376 DHD_PERIM_UNLOCK(&dhd
->pub
);
4377 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4378 dhd_net_if_unlock_local(dhd
);
4381 #ifdef DHD_UPDATE_INTF_MAC
4383 dhd_ifupdate_event_handler(void *handle
, void *event_info
, u8 event
)
4385 dhd_info_t
*dhd
= handle
;
4387 dhd_if_event_t
*if_event
= event_info
;
4389 if (event
!= DHD_WQ_WORK_IF_UPDATE
) {
4390 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
4395 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
4400 DHD_ERROR(("%s: event data is null \n", __FUNCTION__
));
4404 dhd_net_if_lock_local(dhd
);
4405 DHD_OS_WAKE_LOCK(&dhd
->pub
);
4407 ifidx
= if_event
->event
.ifidx
;
4408 DHD_TRACE(("%s: Update interface with idx %d\n", __FUNCTION__
, ifidx
));
4410 dhd_op_if_update(&dhd
->pub
, ifidx
);
4412 MFREE(dhd
->pub
.osh
, if_event
, sizeof(dhd_if_event_t
));
4414 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4415 dhd_net_if_unlock_local(dhd
);
4418 int dhd_op_if_update(dhd_pub_t
*dhdpub
, int ifidx
)
4420 dhd_info_t
* dhdinfo
= NULL
;
4421 dhd_if_t
* ifp
= NULL
;
4425 if ((NULL
==dhdpub
)||(NULL
==dhdpub
->info
)) {
4426 DHD_ERROR(("%s: *** DHD handler is NULL!\n", __FUNCTION__
));
4429 dhdinfo
= (dhd_info_t
*)dhdpub
->info
;
4430 ifp
= dhdinfo
->iflist
[ifidx
];
4432 DHD_ERROR(("%s: *** ifp handler is NULL!\n", __FUNCTION__
));
4437 DHD_TRACE(("%s: idx %d\n", __FUNCTION__
, ifidx
));
4439 strcpy(buf
, "cur_etheraddr");
4440 ret
= dhd_wl_ioctl_cmd(&dhdinfo
->pub
, WLC_GET_VAR
, buf
, sizeof(buf
), FALSE
, ifp
->idx
);
4442 DHD_ERROR(("Failed to upudate the MAC address for itf=%s, ret=%d\n", ifp
->name
, ret
));
4444 dhdinfo
->iflist
[ifp
->idx
]->mac_addr
[5] += 1;
4445 // force locally administrate address
4446 ETHER_SET_LOCALADDR(&dhdinfo
->iflist
[ifp
->idx
]->mac_addr
);
4448 DHD_EVENT(("Got mac for itf %s, idx %d, MAC=%02X:%02X:%02X:%02X:%02X:%02X\n",
4449 ifp
->name
, ifp
->idx
,
4450 (unsigned char)buf
[0], (unsigned char)buf
[1], (unsigned char)buf
[2],
4451 (unsigned char)buf
[3], (unsigned char)buf
[4], (unsigned char)buf
[5]));
4452 memcpy(dhdinfo
->iflist
[ifp
->idx
]->mac_addr
, buf
, ETHER_ADDR_LEN
);
4453 if (dhdinfo
->iflist
[ifp
->idx
]->net
) {
4454 memcpy(dhdinfo
->iflist
[ifp
->idx
]->net
->dev_addr
, buf
, ETHER_ADDR_LEN
);
4460 #endif /* DHD_UPDATE_INTF_MAC */
4463 dhd_set_mac_addr_handler(void *handle
, void *event_info
, u8 event
)
4465 dhd_info_t
*dhd
= handle
;
4466 dhd_if_t
*ifp
= event_info
;
4468 if (event
!= DHD_WQ_WORK_SET_MAC
) {
4469 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
4473 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
4477 dhd_net_if_lock_local(dhd
);
4478 DHD_OS_WAKE_LOCK(&dhd
->pub
);
4479 DHD_PERIM_LOCK(&dhd
->pub
);
4483 unsigned long flags
;
4485 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
4486 in_ap
= (ap_net_dev
!= NULL
);
4487 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
4490 DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
4497 // terence 20160907: fix for not able to set mac when wlan0 is down
4498 if (ifp
== NULL
|| !ifp
->set_macaddress
) {
4501 if (ifp
== NULL
|| !dhd
->pub
.up
) {
4502 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__
));
4506 DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__
));
4507 ifp
->set_macaddress
= FALSE
;
4508 if (_dhd_set_mac_address(dhd
, ifp
->idx
, ifp
->mac_addr
) == 0)
4509 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__
));
4511 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__
));
4514 DHD_PERIM_UNLOCK(&dhd
->pub
);
4515 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4516 dhd_net_if_unlock_local(dhd
);
4520 dhd_set_mcast_list_handler(void *handle
, void *event_info
, u8 event
)
4522 dhd_info_t
*dhd
= handle
;
4523 int ifidx
= (int)((long int)event_info
);
4524 dhd_if_t
*ifp
= NULL
;
4526 if (event
!= DHD_WQ_WORK_SET_MCAST_LIST
) {
4527 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
4532 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
4536 dhd_net_if_lock_local(dhd
);
4537 DHD_OS_WAKE_LOCK(&dhd
->pub
);
4538 DHD_PERIM_LOCK(&dhd
->pub
);
4540 ifp
= dhd
->iflist
[ifidx
];
4542 if (ifp
== NULL
|| !dhd
->pub
.up
) {
4543 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__
));
4550 unsigned long flags
;
4551 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
4552 in_ap
= (ap_net_dev
!= NULL
);
4553 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
4556 DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
4558 ifp
->set_multicast
= FALSE
;
4564 if (ifp
== NULL
|| !dhd
->pub
.up
) {
4565 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__
));
4572 _dhd_set_multicast_list(dhd
, ifidx
);
4573 DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__
, ifidx
));
4576 DHD_PERIM_UNLOCK(&dhd
->pub
);
4577 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4578 dhd_net_if_unlock_local(dhd
);
4582 dhd_set_mac_address(struct net_device
*dev
, void *addr
)
4586 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
4587 struct sockaddr
*sa
= (struct sockaddr
*)addr
;
4591 ifidx
= dhd_net2idx(dhd
, dev
);
4592 if (ifidx
== DHD_BAD_IF
)
4595 dhdif
= dhd
->iflist
[ifidx
];
4597 dhd_net_if_lock_local(dhd
);
4598 memcpy(dhdif
->mac_addr
, sa
->sa_data
, ETHER_ADDR_LEN
);
4599 dhdif
->set_macaddress
= TRUE
;
4600 dhd_net_if_unlock_local(dhd
);
4601 dhd_deferred_schedule_work(dhd
->dhd_deferred_wq
, (void *)dhdif
, DHD_WQ_WORK_SET_MAC
,
4602 dhd_set_mac_addr_handler
, DHD_WQ_WORK_PRIORITY_LOW
);
4607 dhd_set_multicast_list(struct net_device
*dev
)
4609 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
4612 ifidx
= dhd_net2idx(dhd
, dev
);
4613 if (ifidx
== DHD_BAD_IF
)
4616 dhd
->iflist
[ifidx
]->set_multicast
= TRUE
;
4617 dhd_deferred_schedule_work(dhd
->dhd_deferred_wq
, (void *)((long int)ifidx
),
4618 DHD_WQ_WORK_SET_MCAST_LIST
, dhd_set_mcast_list_handler
, DHD_WQ_WORK_PRIORITY_LOW
);
4620 // terence 20160907: fix for not able to set mac when wlan0 is down
4621 dhd_deferred_schedule_work(dhd
->dhd_deferred_wq
, (void *)dhd
->iflist
[ifidx
],
4622 DHD_WQ_WORK_SET_MAC
, dhd_set_mac_addr_handler
, DHD_WQ_WORK_PRIORITY_LOW
);
#ifdef DHD_UCODE_DOWNLOAD
/* Get ucode path */
/*
 * Return the microcode file path stored in the driver instance.
 * The string is owned by dhd_info_t; callers must not free it.
 */
char *
dhd_get_ucode_path(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	return dhd->uc_path;
}
#endif /* DHD_UCODE_DOWNLOAD */
4635 #ifdef PROP_TXSTATUS
4637 dhd_os_wlfc_block(dhd_pub_t
*pub
)
4639 dhd_info_t
*di
= (dhd_info_t
*)(pub
->info
);
4641 /* terence 20161229: don't do spin lock if proptx not enabled */
4645 spin_lock_irqsave(&di
->wlfc_spinlock
, di
->wlfc_lock_flags
);
4647 spin_lock_bh(&di
->wlfc_spinlock
);
4648 #endif /* BCMDBUS */
4653 dhd_os_wlfc_unblock(dhd_pub_t
*pub
)
4655 dhd_info_t
*di
= (dhd_info_t
*)(pub
->info
);
4658 /* terence 20161229: don't do spin lock if proptx not enabled */
4662 spin_unlock_irqrestore(&di
->wlfc_spinlock
, di
->wlfc_lock_flags
);
4664 spin_unlock_bh(&di
->wlfc_spinlock
);
4665 #endif /* BCMDBUS */
4669 #endif /* PROP_TXSTATUS */
4671 #if defined(DHD_RX_DUMP) || defined(DHD_TX_DUMP)
4677 static const PKTTYPE_INFO packet_type_info
[] =
4679 { ETHER_TYPE_IP
, "IP" },
4680 { ETHER_TYPE_ARP
, "ARP" },
4681 { ETHER_TYPE_BRCM
, "BRCM" },
4682 { ETHER_TYPE_802_1X
, "802.1X" },
4683 { ETHER_TYPE_WAI
, "WAPI" },
4687 static const char *_get_packet_type_str(uint16 type
)
4690 int n
= sizeof(packet_type_info
)/sizeof(packet_type_info
[1]) - 1;
4692 for (i
= 0; i
< n
; i
++) {
4693 if (packet_type_info
[i
].type
== type
)
4694 return packet_type_info
[i
].str
;
4697 return packet_type_info
[n
].str
;
4701 dhd_trx_dump(struct net_device
*ndev
, uint8
*dump_data
, uint datalen
, bool tx
)
4706 protocol
= (dump_data
[12] << 8) | dump_data
[13];
4707 ifname
= ndev
? ndev
->name
: "N/A";
4709 if (protocol
!= ETHER_TYPE_BRCM
) {
4710 DHD_ERROR(("%s DUMP[%s] - %s\n", tx
?"Tx":"Rx", ifname
,
4711 _get_packet_type_str(protocol
)));
4712 #if defined(DHD_TX_FULL_DUMP) || defined(DHD_RX_FULL_DUMP)
4713 prhex("Data", dump_data
, datalen
);
4714 #endif /* DHD_TX_FULL_DUMP || DHD_RX_FULL_DUMP */
4717 #endif /* DHD_TX_DUMP || DHD_RX_DUMP */
4719 /* This routine do not support Packet chain feature, Currently tested for
4722 int dhd_sendup(dhd_pub_t
*dhdp
, int ifidx
, void *p
)
4724 struct sk_buff
*skb
;
4725 void *skbhead
= NULL
;
4726 void *skbprev
= NULL
;
4728 ASSERT(!PKTISCHAINED(p
));
4729 skb
= PKTTONATIVE(dhdp
->osh
, p
);
4731 ifp
= dhdp
->info
->iflist
[ifidx
];
4732 skb
->dev
= ifp
->net
;
4733 #if defined(BCM_GMAC3)
4734 /* Forwarder capable interfaces use WOFA based forwarding */
4736 struct ether_header
*eh
= (struct ether_header
*)PKTDATA(dhdp
->osh
, p
);
4737 uint16
* da
= (uint16
*)(eh
->ether_dhost
);
4738 uintptr_t wofa_data
;
4739 ASSERT(ISALIGNED(da
, 2));
4741 wofa_data
= fwder_lookup(ifp
->fwdh
->mate
, da
, ifp
->idx
);
4742 if (wofa_data
== WOFA_DATA_INVALID
) { /* Unknown MAC address */
4743 if (fwder_transmit(ifp
->fwdh
, skb
, 1, skb
->dev
) == FWDER_SUCCESS
) {
4747 PKTFRMNATIVE(dhdp
->osh
, p
);
4748 PKTFREE(dhdp
->osh
, p
, FALSE
);
4751 #endif /* BCM_GMAC3 */
4753 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
4755 if (in_interrupt()) {
4756 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
4757 __FUNCTION__
, __LINE__
);
4760 if (dhdp
->info
->rxthread_enabled
) {
4764 PKTSETNEXT(dhdp
->osh
, skbprev
, skb
);
4768 /* If the receive is not processed inside an ISR,
4769 * the softirqd must be woken explicitly to service
4770 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
4771 * by netif_rx_ni(), but in earlier kernels, we need
4772 * to do it manually.
4774 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
4775 __FUNCTION__
, __LINE__
);
4776 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
4781 local_irq_save(flags
);
4783 local_irq_restore(flags
);
4784 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
4788 if (dhdp
->info
->rxthread_enabled
&& skbhead
)
4789 dhd_sched_rxf(dhdp
, skbhead
);
4795 __dhd_sendpkt(dhd_pub_t
*dhdp
, int ifidx
, void *pktbuf
)
4798 dhd_info_t
*dhd
= (dhd_info_t
*)(dhdp
->info
);
4799 struct ether_header
*eh
= NULL
;
4800 #if defined(DHD_L2_FILTER)
4801 dhd_if_t
*ifp
= dhd_get_ifp(dhdp
, ifidx
);
4804 /* Reject if down */
4805 if (!dhdp
->up
|| (dhdp
->busstate
== DHD_BUS_DOWN
)) {
4806 /* free the packet here since the caller won't */
4807 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4811 #ifdef PCIE_FULL_DONGLE
4812 if (dhdp
->busstate
== DHD_BUS_SUSPEND
) {
4813 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__
));
4814 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4815 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4818 return NETDEV_TX_BUSY
;
4819 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) */
4821 #endif /* PCIE_FULL_DONGLE */
4823 #ifdef DHD_L2_FILTER
4824 /* if dhcp_unicast is enabled, we need to convert the */
4825 /* broadcast DHCP ACK/REPLY packets to Unicast. */
4826 if (ifp
->dhcp_unicast
) {
4828 uint8
* ehptr
= NULL
;
4830 ret
= bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp
->osh
, pktbuf
, ifidx
, &mac_addr
);
4831 if (ret
== BCME_OK
) {
4832 /* if given mac address having valid entry in sta list
4833 * copy the given mac address, and return with BCME_OK
4835 if (dhd_find_sta(dhdp
, ifidx
, mac_addr
)) {
4836 ehptr
= PKTDATA(dhdp
->osh
, pktbuf
);
4837 bcopy(mac_addr
, ehptr
+ ETHER_DEST_OFFSET
, ETHER_ADDR_LEN
);
4842 if (ifp
->grat_arp
&& DHD_IF_ROLE_AP(dhdp
, ifidx
)) {
4843 if (bcm_l2_filter_gratuitous_arp(dhdp
->osh
, pktbuf
) == BCME_OK
) {
4844 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4849 if (ifp
->parp_enable
&& DHD_IF_ROLE_AP(dhdp
, ifidx
)) {
4850 ret
= dhd_l2_filter_pkt_handle(dhdp
, ifidx
, pktbuf
, TRUE
);
4852 /* Drop the packets if l2 filter has processed it already
4853 * otherwise continue with the normal path
4855 if (ret
== BCME_OK
) {
4856 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4860 #endif /* DHD_L2_FILTER */
4861 /* Update multicast statistic */
4862 if (PKTLEN(dhdp
->osh
, pktbuf
) >= ETHER_HDR_LEN
) {
4863 uint8
*pktdata
= (uint8
*)PKTDATA(dhdp
->osh
, pktbuf
);
4864 eh
= (struct ether_header
*)pktdata
;
4866 if (ETHER_ISMULTI(eh
->ether_dhost
))
4867 dhdp
->tx_multicast
++;
4868 if (ntoh16(eh
->ether_type
) == ETHER_TYPE_802_1X
) {
4869 #ifdef DHD_LOSSLESS_ROAMING
4870 uint8 prio
= (uint8
)PKTPRIO(pktbuf
);
4872 /* back up 802.1x's priority */
4873 dhdp
->prio_8021x
= prio
;
4874 #endif /* DHD_LOSSLESS_ROAMING */
4875 DBG_EVENT_LOG(dhdp
, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED
);
4876 atomic_inc(&dhd
->pend_8021x_cnt
);
4877 #if defined(DHD_8021X_DUMP)
4878 dhd_dump_eapol_4way_message(dhd_ifname(dhdp
, ifidx
), pktdata
, TRUE
);
4879 #endif /* DHD_8021X_DUMP */
4880 dhd_conf_set_eapol_status(dhdp
, dhd_ifname(dhdp
, ifidx
), pktdata
);
4883 if (ntoh16(eh
->ether_type
) == ETHER_TYPE_IP
) {
4884 #ifdef DHD_DHCP_DUMP
4885 dhd_dhcp_dump(dhd_ifname(dhdp
, ifidx
), pktdata
, TRUE
);
4886 #endif /* DHD_DHCP_DUMP */
4887 #ifdef DHD_ICMP_DUMP
4888 dhd_icmp_dump(dhd_ifname(dhdp
, ifidx
), pktdata
, TRUE
);
4889 #endif /* DHD_ICMP_DUMP */
4892 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4897 /* Look into the packet and update the packet priority */
4898 #ifndef PKTPRIO_OVERRIDE
4899 if (PKTPRIO(pktbuf
) == 0)
4900 #endif /* !PKTPRIO_OVERRIDE */
4902 #if defined(QOS_MAP_SET)
4903 pktsetprio_qms(pktbuf
, wl_get_up_table(dhdp
, ifidx
), FALSE
);
4905 pktsetprio(pktbuf
, FALSE
);
4906 #endif /* QOS_MAP_SET */
4911 #if defined(TRAFFIC_MGMT_DWM)
4912 traffic_mgmt_pkt_set_prio(dhdp
, pktbuf
);
4915 DHD_PKT_SET_DATAOFF(pktbuf
, 0);
4916 #endif /* BCM_GMAC3 */
4919 #ifdef PCIE_FULL_DONGLE
4921 * Lkup the per interface hash table, for a matching flowring. If one is not
4922 * available, allocate a unique flowid and add a flowring entry.
4923 * The found or newly created flowid is placed into the pktbuf's tag.
4925 ret
= dhd_flowid_update(dhdp
, ifidx
, dhdp
->flow_prio_map
[(PKTPRIO(pktbuf
))], pktbuf
);
4926 if (ret
!= BCME_OK
) {
4927 PKTCFREE(dhd
->pub
.osh
, pktbuf
, TRUE
);
4932 #if defined(DHD_TX_DUMP)
4933 dhd_trx_dump(dhd_idx2net(dhdp
, ifidx
), PKTDATA(dhdp
->osh
, pktbuf
),
4934 PKTLEN(dhdp
->osh
, pktbuf
), TRUE
);
4936 /* terence 20150901: Micky add to ajust the 802.1X priority */
4937 /* Set the 802.1X packet with the highest priority 7 */
4938 if (dhdp
->conf
->pktprio8021x
>= 0)
4939 pktset8021xprio(pktbuf
, dhdp
->conf
->pktprio8021x
);
4941 #ifdef PROP_TXSTATUS
4942 if (dhd_wlfc_is_supported(dhdp
)) {
4943 /* store the interface ID */
4944 DHD_PKTTAG_SETIF(PKTTAG(pktbuf
), ifidx
);
4946 /* store destination MAC in the tag as well */
4947 DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf
), eh
->ether_dhost
);
4949 /* decide which FIFO this packet belongs to */
4950 if (ETHER_ISMULTI(eh
->ether_dhost
))
4951 /* one additional queue index (highest AC + 1) is used for bc/mc queue */
4952 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf
), AC_COUNT
);
4954 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf
), WME_PRIO2AC(PKTPRIO(pktbuf
)));
4956 #endif /* PROP_TXSTATUS */
4958 /* If the protocol uses a data header, apply it */
4959 dhd_prot_hdrpush(dhdp
, ifidx
, pktbuf
);
4962 /* Use bus module to send data frame */
4964 dhd_htsf_addtxts(dhdp
, pktbuf
);
4966 #ifdef PROP_TXSTATUS
4968 if (dhd_wlfc_commit_packets(dhdp
, (f_commitpkt_t
)dhd_bus_txdata
,
4969 dhdp
->bus
, pktbuf
, TRUE
) == WLFC_UNSUPPORTED
) {
4970 /* non-proptxstatus way */
4972 ret
= dhd_bus_txdata(dhdp
->bus
, pktbuf
, (uint8
)ifidx
);
4974 ret
= dhd_bus_txdata(dhdp
->bus
, pktbuf
);
4975 #endif /* BCMPCIE */
4980 ret
= dhd_bus_txdata(dhdp
->bus
, pktbuf
, (uint8
)ifidx
);
4982 ret
= dhd_bus_txdata(dhdp
->bus
, pktbuf
);
4983 #endif /* BCMPCIE */
4984 #endif /* PROP_TXSTATUS */
4987 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4988 #endif /* BCMDBUS */
4994 dhd_sendpkt(dhd_pub_t
*dhdp
, int ifidx
, void *pktbuf
)
4997 unsigned long flags
;
4999 DHD_GENERAL_LOCK(dhdp
, flags
);
5000 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp
)) {
5001 DHD_ERROR(("%s: returning as busstate=%d\n",
5002 __FUNCTION__
, dhdp
->busstate
));
5003 DHD_GENERAL_UNLOCK(dhdp
, flags
);
5004 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
5007 DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp
);
5008 DHD_GENERAL_UNLOCK(dhdp
, flags
);
5010 #ifdef DHD_PCIE_RUNTIMEPM
5011 if (dhdpcie_runtime_bus_wake(dhdp
, FALSE
, __builtin_return_address(0))) {
5012 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__
));
5013 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
5017 #endif /* DHD_PCIE_RUNTIMEPM */
5019 DHD_GENERAL_LOCK(dhdp
, flags
);
5020 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp
)) {
5021 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
5022 __FUNCTION__
, dhdp
->busstate
, dhdp
->dhd_bus_busy_state
));
5023 DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp
);
5024 dhd_os_busbusy_wake(dhdp
);
5025 DHD_GENERAL_UNLOCK(dhdp
, flags
);
5026 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
5029 DHD_GENERAL_UNLOCK(dhdp
, flags
);
5031 ret
= __dhd_sendpkt(dhdp
, ifidx
, pktbuf
);
5033 #ifdef DHD_PCIE_RUNTIMEPM
5036 DHD_GENERAL_LOCK(dhdp
, flags
);
5037 DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp
);
5038 dhd_os_busbusy_wake(dhdp
);
5039 DHD_GENERAL_UNLOCK(dhdp
, flags
);
#if defined(DHD_LB_TXP)
/*
 * Load-balanced transmit: either sends inline (feature disabled at runtime)
 * or enqueues the skb for the Tx tasklet running on another CPU.
 *
 * @dhd    driver instance
 * @net    originating net device (stored in the packet tag for the tasklet)
 * @ifidx  DHD interface index (stored in the packet tag as well)
 * @skb    packet; ownership passes to __dhd_sendpkt() or to tx_pend_queue
 * @return __dhd_sendpkt() result in the inline case, else NETDEV_TX_OK
 */
int BCMFASTPATH
dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net,
	int ifidx, void *skb)
{
	DHD_LB_STATS_PERCPU_ARR_INCR(dhd->tx_start_percpu_run_cnt);

	/* If the feature is disabled run-time do TX from here */
	if (atomic_read(&dhd->lb_txp_active) == 0) {
		DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
		return __dhd_sendpkt(&dhd->pub, ifidx, skb);
	}

	/* Store the address of net device and interface index in the Packet tag */
	DHD_LB_TX_PKTTAG_SET_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), net);
	DHD_LB_TX_PKTTAG_SET_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), ifidx);

	/* Enqueue the skb into tx_pend_queue */
	skb_queue_tail(&dhd->tx_pend_queue, skb);

	DHD_TRACE(("%s(): Added skb %p for netdev %p \r\n", __FUNCTION__, skb, net));

	/* Dispatch the Tx job to be processed by the tx_tasklet */
	dhd_lb_tx_dispatch(&dhd->pub);

	return NETDEV_TX_OK;
}
#endif /* DHD_LB_TXP */
5074 dhd_start_xmit(struct sk_buff
*skb
, struct net_device
*net
)
5079 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
5080 dhd_if_t
*ifp
= NULL
;
5082 unsigned long flags
;
5084 uint8 htsfdlystat_sz
= dhd
->pub
.htsfdlystat_sz
;
5086 uint8 htsfdlystat_sz
= 0;
5089 struct ether_header
*eh
;
5091 #endif /* DHD_WMF */
5093 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
5095 if (dhd_query_bus_erros(&dhd
->pub
)) {
5099 /* terence 2017029: Reject in early suspend */
5100 if (!dhd
->pub
.conf
->xmit_in_suspend
&& dhd
->pub
.early_suspended
) {
5101 dhd_txflowcontrol(&dhd
->pub
, ALL_INTERFACES
, ON
);
5102 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
5105 return NETDEV_TX_BUSY
;
5109 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
5110 DHD_BUS_BUSY_SET_IN_TX(&dhd
->pub
);
5111 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
5113 #ifdef DHD_PCIE_RUNTIMEPM
5114 if (dhdpcie_runtime_bus_wake(&dhd
->pub
, FALSE
, dhd_start_xmit
)) {
5115 /* In order to avoid pkt loss. Return NETDEV_TX_BUSY until run-time resumed. */
5116 /* stop the network queue temporarily until resume done */
5117 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
5118 if (!dhdpcie_is_resume_done(&dhd
->pub
)) {
5119 dhd_bus_stop_queue(dhd
->pub
.bus
);
5121 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd
->pub
);
5122 dhd_os_busbusy_wake(&dhd
->pub
);
5123 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
5124 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
5127 return NETDEV_TX_BUSY
;
5130 #endif /* DHD_PCIE_RUNTIMEPM */
5132 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
5134 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd
->pub
)) {
5135 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
5136 __FUNCTION__
, dhd
->pub
.busstate
, dhd
->pub
.dhd_bus_busy_state
));
5137 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd
->pub
);
5138 #ifdef PCIE_FULL_DONGLE
5139 /* Stop tx queues if suspend is in progress */
5140 if (DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(&dhd
->pub
)) {
5141 dhd_bus_stop_queue(dhd
->pub
.bus
);
5143 #endif /* PCIE_FULL_DONGLE */
5144 dhd_os_busbusy_wake(&dhd
->pub
);
5145 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
5146 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
5149 return NETDEV_TX_BUSY
;
5153 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd
->pub
)) {
5154 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
5155 __FUNCTION__
, dhd
->pub
.busstate
, dhd
->pub
.dhd_bus_busy_state
));
5159 DHD_OS_WAKE_LOCK(&dhd
->pub
);
5160 DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd
), lock_taken
);
5163 #if defined(DHD_HANG_SEND_UP_TEST)
5164 if (dhd
->pub
.req_hang_type
== HANG_REASON_BUS_DOWN
) {
5165 dhd
->pub
.busstate
= DHD_BUS_DOWN
;
5167 #endif /* DHD_HANG_SEND_UP_TEST */
5169 /* Reject if down */
5170 if (dhd
->pub
.hang_was_sent
|| DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd
->pub
)) {
5171 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
5172 __FUNCTION__
, dhd
->pub
.up
, dhd
->pub
.busstate
));
5173 netif_stop_queue(net
);
5174 /* Send Event when bus down detected during data session */
5175 if (dhd
->pub
.up
&& !dhd
->pub
.hang_was_sent
&& !DHD_BUS_CHECK_REMOVE(&dhd
->pub
)) {
5176 DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__
));
5177 dhd
->pub
.hang_reason
= HANG_REASON_BUS_DOWN
;
5178 net_os_send_hang_message(net
);
5180 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd
->pub
);
5181 dhd_os_busbusy_wake(&dhd
->pub
);
5182 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
5183 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd
), lock_taken
);
5184 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
5185 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
5188 return NETDEV_TX_BUSY
;
5192 ifp
= DHD_DEV_IFP(net
);
5193 ifidx
= DHD_DEV_IFIDX(net
);
5194 if (ifidx
== DHD_BAD_IF
) {
5195 DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__
, ifidx
));
5196 netif_stop_queue(net
);
5197 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd
->pub
);
5198 dhd_os_busbusy_wake(&dhd
->pub
);
5199 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
5200 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd
), lock_taken
);
5201 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
5202 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
5205 return NETDEV_TX_BUSY
;
5209 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
5211 ASSERT(ifidx
== dhd_net2idx(dhd
, net
));
5212 ASSERT((ifp
!= NULL
) && ((ifidx
< DHD_MAX_IFS
) && (ifp
== dhd
->iflist
[ifidx
])));
5214 bcm_object_trace_opr(skb
, BCM_OBJDBG_ADD_PKT
, __FUNCTION__
, __LINE__
);
5216 /* re-align socket buffer if "skb->data" is odd address */
5217 if (((unsigned long)(skb
->data
)) & 0x1) {
5218 unsigned char *data
= skb
->data
;
5219 uint32 length
= skb
->len
;
5220 PKTPUSH(dhd
->pub
.osh
, skb
, 1);
5221 memmove(skb
->data
, data
, length
);
5222 PKTSETLEN(dhd
->pub
.osh
, skb
, length
);
5225 datalen
= PKTLEN(dhd
->pub
.osh
, skb
);
5227 /* Make sure there's enough room for any header */
5228 if (skb_headroom(skb
) < dhd
->pub
.hdrlen
+ htsfdlystat_sz
) {
5229 struct sk_buff
*skb2
;
5231 DHD_INFO(("%s: insufficient headroom\n",
5232 dhd_ifname(&dhd
->pub
, ifidx
)));
5233 dhd
->pub
.tx_realloc
++;
5235 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
, __FUNCTION__
, __LINE__
);
5236 skb2
= skb_realloc_headroom(skb
, dhd
->pub
.hdrlen
+ htsfdlystat_sz
);
5239 if ((skb
= skb2
) == NULL
) {
5240 DHD_ERROR(("%s: skb_realloc_headroom failed\n",
5241 dhd_ifname(&dhd
->pub
, ifidx
)));
5245 bcm_object_trace_opr(skb
, BCM_OBJDBG_ADD_PKT
, __FUNCTION__
, __LINE__
);
5248 /* Convert to packet */
5249 if (!(pktbuf
= PKTFRMNATIVE(dhd
->pub
.osh
, skb
))) {
5250 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
5251 dhd_ifname(&dhd
->pub
, ifidx
)));
5252 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
, __FUNCTION__
, __LINE__
);
5253 dev_kfree_skb_any(skb
);
5258 #if defined(WLMEDIA_HTSF)
5259 if (htsfdlystat_sz
&& PKTLEN(dhd
->pub
.osh
, pktbuf
) >= ETHER_ADDR_LEN
) {
5260 uint8
*pktdata
= (uint8
*)PKTDATA(dhd
->pub
.osh
, pktbuf
);
5261 struct ether_header
*eh
= (struct ether_header
*)pktdata
;
5263 if (!ETHER_ISMULTI(eh
->ether_dhost
) &&
5264 (ntoh16(eh
->ether_type
) == ETHER_TYPE_IP
)) {
5265 eh
->ether_type
= hton16(ETHER_TYPE_BRCM_PKTDLYSTATS
);
5270 /* wet related packet proto manipulation should be done in DHD
5271 since dongle doesn't have complete payload
5273 if (WET_ENABLED(&dhd
->pub
) &&
5274 (dhd_wet_send_proc(dhd
->pub
.wet_info
, pktbuf
, &pktbuf
) < 0)) {
5275 DHD_INFO(("%s:%s: wet send proc failed\n",
5276 __FUNCTION__
, dhd_ifname(&dhd
->pub
, ifidx
)));
5277 PKTFREE(dhd
->pub
.osh
, pktbuf
, FALSE
);
5281 #endif /* DHD_WET */
5284 eh
= (struct ether_header
*)PKTDATA(dhd
->pub
.osh
, pktbuf
);
5285 iph
= (uint8
*)eh
+ ETHER_HDR_LEN
;
5287 /* WMF processing for multicast packets
5288 * Only IPv4 packets are handled
5290 if (ifp
->wmf
.wmf_enable
&& (ntoh16(eh
->ether_type
) == ETHER_TYPE_IP
) &&
5291 (IP_VER(iph
) == IP_VER_4
) && (ETHER_ISMULTI(eh
->ether_dhost
) ||
5292 ((IPV4_PROT(iph
) == IP_PROT_IGMP
) && dhd
->pub
.wmf_ucast_igmp
))) {
5293 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
5295 bool ucast_convert
= FALSE
;
5296 #ifdef DHD_UCAST_UPNP
5299 dest_ip
= ntoh32(*((uint32
*)(iph
+ IPV4_DEST_IP_OFFSET
)));
5300 ucast_convert
= dhd
->pub
.wmf_ucast_upnp
&& MCAST_ADDR_UPNP_SSDP(dest_ip
);
5301 #endif /* DHD_UCAST_UPNP */
5302 #ifdef DHD_IGMP_UCQUERY
5303 ucast_convert
|= dhd
->pub
.wmf_ucast_igmp_query
&&
5304 (IPV4_PROT(iph
) == IP_PROT_IGMP
) &&
5305 (*(iph
+ IPV4_HLEN(iph
)) == IGMPV2_HOST_MEMBERSHIP_QUERY
);
5306 #endif /* DHD_IGMP_UCQUERY */
5307 if (ucast_convert
) {
5309 unsigned long flags
;
5310 struct list_head snapshot_list
;
5311 struct list_head
*wmf_ucforward_list
;
5315 /* For non BCM_GMAC3 platform we need a snapshot sta_list to
5316 * resolve double DHD_IF_STA_LIST_LOCK call deadlock issue.
5318 wmf_ucforward_list
= DHD_IF_WMF_UCFORWARD_LOCK(dhd
, ifp
, &snapshot_list
);
5320 /* Convert upnp/igmp query to unicast for each assoc STA */
5321 list_for_each_entry(sta
, wmf_ucforward_list
, list
) {
5322 /* Skip sending to proxy interfaces of proxySTA */
5323 if (sta
->psta_prim
!= NULL
&& !ifp
->wmf_psta_disable
) {
5326 if ((sdu_clone
= PKTDUP(dhd
->pub
.osh
, pktbuf
)) == NULL
) {
5330 dhd_wmf_forward(ifp
->wmf
.wmfh
, sdu_clone
, 0, sta
, 1);
5332 DHD_IF_WMF_UCFORWARD_UNLOCK(dhd
, wmf_ucforward_list
);
5334 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
5335 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd
->pub
);
5336 dhd_os_busbusy_wake(&dhd
->pub
);
5337 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
5338 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd
), lock_taken
);
5339 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
5341 if (ret
== NETDEV_TX_OK
)
5342 PKTFREE(dhd
->pub
.osh
, pktbuf
, TRUE
);
5346 #endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
5348 /* There will be no STA info if the packet is coming from LAN host
5351 ret
= dhd_wmf_packets_handle(&dhd
->pub
, pktbuf
, NULL
, ifidx
, 0);
5355 /* Either taken by WMF or we should drop it.
5359 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
5360 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd
->pub
);
5361 dhd_os_busbusy_wake(&dhd
->pub
);
5362 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
5363 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd
), lock_taken
);
5364 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
5365 return NETDEV_TX_OK
;
5367 /* Continue the transmit path */
5372 #endif /* DHD_WMF */
5374 /* PSR related packet proto manipulation should be done in DHD
5375 * since dongle doesn't have complete payload
5377 if (PSR_ENABLED(&dhd
->pub
) && (dhd_psta_proc(&dhd
->pub
,
5378 ifidx
, &pktbuf
, TRUE
) < 0)) {
5379 DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__
,
5380 dhd_ifname(&dhd
->pub
, ifidx
)));
5382 #endif /* DHD_PSTA */
5384 #ifdef DHDTCPACK_SUPPRESS
5385 if (dhd
->pub
.tcpack_sup_mode
== TCPACK_SUP_HOLD
) {
5386 /* If this packet has been hold or got freed, just return */
5387 if (dhd_tcpack_hold(&dhd
->pub
, pktbuf
, ifidx
)) {
5392 /* If this packet has replaced another packet and got freed, just return */
5393 if (dhd_tcpack_suppress(&dhd
->pub
, pktbuf
)) {
5398 #endif /* DHDTCPACK_SUPPRESS */
5401 * If Load Balance is enabled queue the packet
5402 * else send directly from here.
5404 #if defined(DHD_LB_TXP)
5405 ret
= dhd_lb_sendpkt(dhd
, net
, ifidx
, pktbuf
);
5407 ret
= __dhd_sendpkt(&dhd
->pub
, ifidx
, pktbuf
);
5412 ifp
->stats
.tx_dropped
++;
5413 dhd
->pub
.tx_dropped
++;
5415 #ifdef PROP_TXSTATUS
5416 /* tx_packets counter can counted only when wlfc is disabled */
5417 if (!dhd_wlfc_is_supported(&dhd
->pub
))
5420 dhd
->pub
.tx_packets
++;
5421 ifp
->stats
.tx_packets
++;
5422 ifp
->stats
.tx_bytes
+= datalen
;
5427 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
5428 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd
->pub
);
5429 dhd_os_busbusy_wake(&dhd
->pub
);
5430 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
5431 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd
), lock_taken
);
5432 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
5433 /* Return ok: we always eat the packet */
5434 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
5437 return NETDEV_TX_OK
;
5443 dhd_txflowcontrol(dhd_pub_t
*dhdp
, int ifidx
, bool state
)
5445 struct net_device
*net
;
5446 dhd_info_t
*dhd
= dhdp
->info
;
5449 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
5453 #ifdef DHD_LOSSLESS_ROAMING
5454 /* block flowcontrol during roaming */
5455 if ((dhdp
->dequeue_prec_map
== 1 << PRIO_8021D_NC
) && state
== ON
) {
5460 if (ifidx
== ALL_INTERFACES
) {
5461 /* Flow control on all active interfaces */
5462 dhdp
->txoff
= state
;
5463 for (i
= 0; i
< DHD_MAX_IFS
; i
++) {
5464 if (dhd
->iflist
[i
]) {
5465 net
= dhd
->iflist
[i
]->net
;
5467 netif_stop_queue(net
);
5469 netif_wake_queue(net
);
5473 if (dhd
->iflist
[ifidx
]) {
5474 net
= dhd
->iflist
[ifidx
]->net
;
5476 netif_stop_queue(net
);
5478 netif_wake_queue(net
);
#ifdef DHD_WMF
/* Report whether the dedicated rx thread is enabled for this adapter. */
bool
dhd_is_rxthread_enabled(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;

	return dhd->rxthread_enabled;
}
#endif /* DHD_WMF */
#ifdef DHD_MCAST_REGEN
/*
 * Description: This function is called to do the reverse translation
 * of a frame that carries a multicast IPv4 destination address but a
 * unicast MAC destination: the MAC destination is regenerated from the
 * multicast IP.
 *
 * Input: eh - pointer to the ethernet header
 * Return: BCME_OK when the address was rewritten, BCME_ERROR otherwise.
 *
 * NOTE(review): return values reconstructed from extraction-garbled SOURCE.
 */
static int32
dhd_mcast_reverse_translation(struct ether_header *eh)
{
	uint8 *iph;
	uint32 dest_ip;

	iph = (uint8 *)eh + ETHER_HDR_LEN;
	dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));

	/* Only IP packets are handled */
	if (eh->ether_type != hton16(ETHER_TYPE_IP))
		return BCME_ERROR;

	/* Non-IPv4 multicast packets are not handled */
	if (IP_VER(iph) != IP_VER_4)
		return BCME_ERROR;

	/*
	 * The packet has a multicast IP and unicast MAC. That means
	 * we have to do the reverse translation
	 */
	if (IPV4_ISMULTI(dest_ip) && !ETHER_ISMULTI(&eh->ether_dhost)) {
		ETHER_FILL_MCAST_ADDR_FROM_IP(eh->ether_dhost, dest_ip);
		return BCME_OK;
	}

	return BCME_ERROR;
}
#endif /* MCAST_REGEN */
5530 #ifdef SHOW_LOGTRACE
5532 dhd_event_logtrace_pkt_process(dhd_pub_t
*dhdp
, struct sk_buff
* skb
)
5534 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
5537 bcm_event_msg_u_t evu
;
5539 void *pktdata
= NULL
;
5540 bcm_event_t
*pvt_data
;
5543 DHD_TRACE(("%s:Enter\n", __FUNCTION__
));
5545 /* In dhd_rx_frame, header is stripped using skb_pull
5546 * of size ETH_HLEN, so adjust pktlen accordingly
5548 pktlen
= skb
->len
+ ETH_HLEN
;
5550 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
5551 pktdata
= (void *)skb_mac_header(skb
);
5553 pktdata
= (void *)skb
->mac
.raw
;
5554 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
5556 ret
= wl_host_event_get_data(pktdata
, pktlen
, &evu
);
5558 if (ret
!= BCME_OK
) {
5559 DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
5560 __FUNCTION__
, ret
));
5564 datalen
= ntoh32(evu
.event
.datalen
);
5566 pvt_data
= (bcm_event_t
*)pktdata
;
5567 data
= &pvt_data
[1];
5569 dhd_dbg_trace_evnt_handler(dhdp
, data
, &dhd
->event_data
, datalen
);
5576 dhd_event_logtrace_process(struct work_struct
* work
)
5578 /* Ignore compiler warnings due to -Werror=cast-qual */
5579 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
5580 #pragma GCC diagnostic push
5581 #pragma GCC diagnostic ignored "-Wcast-qual"
5583 struct dhd_info
*dhd
=
5584 container_of(work
, struct dhd_info
, event_log_dispatcher_work
);
5585 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
5586 #pragma GCC diagnostic pop
5590 struct sk_buff
*skb
;
5593 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
5600 DHD_ERROR(("%s: dhd pub is null \n", __FUNCTION__
));
5604 DHD_TRACE(("%s:Enter\n", __FUNCTION__
));
5606 /* Run while(1) loop till all skbs are dequeued */
5607 while ((skb
= skb_dequeue(&dhd
->evt_trace_queue
)) != NULL
) {
5608 #ifdef PCIE_FULL_DONGLE
5610 ifid
= DHD_PKTTAG_IFID((dhd_pkttag_fr_t
*)PKTTAG(skb
));
5611 if (ifid
== DHD_EVENT_IF
) {
5612 dhd_event_logtrace_infobuf_pkt_process(dhdp
, skb
, &dhd
->event_data
);
5613 /* For sending skb to network layer, convert it to Native PKT
5614 * after that assign skb->dev with Primary interface n/w device
5615 * as for infobuf events, we are sending special DHD_EVENT_IF
5617 #ifdef DHD_USE_STATIC_CTRLBUF
5618 PKTFREE_STATIC(dhdp
->osh
, skb
, FALSE
);
5620 PKTFREE(dhdp
->osh
, skb
, FALSE
);
5621 #endif /* DHD_USE_STATIC_CTRLBUF */
5625 dhd_event_logtrace_pkt_process(dhdp
, skb
);
5628 dhd_event_logtrace_pkt_process(dhdp
, skb
);
5629 #endif /* PCIE_FULL_DONGLE */
5631 /* Free skb buffer here if DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
5632 * macro is defined the Info Ring event and WLC_E_TRACE event is freed in DHD
5633 * else it is always sent up to network layers.
5635 #ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
5636 #ifdef DHD_USE_STATIC_CTRLBUF
5637 PKTFREE_STATIC(dhdp
->osh
, skb
, FALSE
);
5639 PKTFREE(dhdp
->osh
, skb
, FALSE
);
5640 #endif /* DHD_USE_STATIC_CTRLBUF */
5641 #else /* !DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
5642 /* Do not call netif_recieve_skb as this workqueue scheduler is not from NAPI
5643 * Also as we are not in INTR context, do not call netif_rx, instead call
5644 * netif_rx_ni (for kerenl >= 2.6) which does netif_rx, disables irq, raise
5645 * NET_IF_RX softirq and enables interrupts back
5647 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
5653 local_irq_save(flags
);
5655 local_irq_restore(flags
);
5657 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
5658 #endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
5663 dhd_event_logtrace_enqueue(dhd_pub_t
*dhdp
, int ifidx
, void *pktbuf
)
5665 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
5667 #ifdef PCIE_FULL_DONGLE
5668 /* Add ifidx in the PKTTAG */
5669 DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t
*)PKTTAG(pktbuf
), ifidx
);
5670 #endif /* PCIE_FULL_DONGLE */
5671 skb_queue_tail(&dhd
->evt_trace_queue
, pktbuf
);
5673 schedule_work(&dhd
->event_log_dispatcher_work
);
5677 dhd_event_logtrace_flush_queue(dhd_pub_t
*dhdp
)
5679 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
5680 struct sk_buff
*skb
;
5682 while ((skb
= skb_dequeue(&dhd
->evt_trace_queue
)) != NULL
) {
5683 #ifdef DHD_USE_STATIC_CTRLBUF
5684 PKTFREE_STATIC(dhdp
->osh
, skb
, FALSE
);
5686 PKTFREE(dhdp
->osh
, skb
, FALSE
);
5687 #endif /* DHD_USE_STATIC_CTRLBUF */
5690 #endif /* SHOW_LOGTRACE */
5692 /** Called when a frame is received by the dongle on interface 'ifidx' */
5694 dhd_rx_frame(dhd_pub_t
*dhdp
, int ifidx
, void *pktbuf
, int numpkt
, uint8 chan
)
5696 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
5697 struct sk_buff
*skb
;
5700 void *data
, *pnext
= NULL
;
5703 wl_event_msg_t event
;
5706 void *skbhead
= NULL
;
5707 void *skbprev
= NULL
;
5709 unsigned char *dump_data
;
5710 #ifdef DHD_MCAST_REGEN
5711 uint8 interface_role
;
5712 if_flow_lkup_t
*if_flow_lkup
;
5713 unsigned long flags
;
5715 #ifdef DHD_WAKE_STATUS
5717 wake_counts_t
*wcp
= NULL
;
5718 #endif /* DHD_WAKE_STATUS */
5720 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
5722 for (i
= 0; pktbuf
&& i
< numpkt
; i
++, pktbuf
= pnext
) {
5723 struct ether_header
*eh
;
5725 pnext
= PKTNEXT(dhdp
->osh
, pktbuf
);
5726 PKTSETNEXT(dhdp
->osh
, pktbuf
, NULL
);
5728 /* info ring "debug" data, which is not a 802.3 frame, is sent/hacked with a
5729 * special ifidx of DHD_EVENT_IF. This is just internal to dhd to get the data from
5730 * dhd_msgbuf.c:dhd_prot_infobuf_cmplt_process() to here (dhd_rx_frame).
5732 if (ifidx
== DHD_EVENT_IF
) {
5733 /* Event msg printing is called from dhd_rx_frame which is in Tasklet
5734 * context in case of PCIe FD, in case of other bus this will be from
5735 * DPC context. If we get bunch of events from Dongle then printing all
5736 * of them from Tasklet/DPC context that too in data path is costly.
5737 * Also in the new Dongle SW(4359, 4355 onwards) console prints too come as
5738 * events with type WLC_E_TRACE.
5739 * We'll print this console logs from the WorkQueue context by enqueing SKB
5740 * here and Dequeuing will be done in WorkQueue and will be freed only if
5741 * DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT is defined
5743 #ifdef SHOW_LOGTRACE
5744 dhd_event_logtrace_enqueue(dhdp
, ifidx
, pktbuf
);
5745 #else /* !SHOW_LOGTRACE */
5746 /* If SHOW_LOGTRACE not defined and ifidx is DHD_EVENT_IF,
5747 * free the PKT here itself
5749 #ifdef DHD_USE_STATIC_CTRLBUF
5750 PKTFREE_STATIC(dhdp
->osh
, pktbuf
, FALSE
);
5752 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
5753 #endif /* DHD_USE_STATIC_CTRLBUF */
5754 #endif /* SHOW_LOGTRACE */
5757 #ifdef DHD_WAKE_STATUS
5761 pkt_wake
= dhd_bus_get_bus_wake(dhdp
);
5762 wcp
= dhd_bus_get_wakecount(dhdp
);
5763 #endif /* BCMDBUS */
5765 /* If wakeinfo count buffer is null do not update wake count values */
5768 #endif /* DHD_WAKE_STATUS */
5770 ifp
= dhd
->iflist
[ifidx
];
5772 DHD_ERROR(("%s: ifp is NULL. drop packet\n",
5774 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
5778 eh
= (struct ether_header
*)PKTDATA(dhdp
->osh
, pktbuf
);
5780 /* Dropping only data packets before registering net device to avoid kernel panic */
5781 #ifndef PROP_TXSTATUS_VSDB
5782 if ((!ifp
->net
|| ifp
->net
->reg_state
!= NETREG_REGISTERED
) &&
5783 (ntoh16(eh
->ether_type
) != ETHER_TYPE_BRCM
))
5785 if ((!ifp
->net
|| ifp
->net
->reg_state
!= NETREG_REGISTERED
|| !dhd
->pub
.up
) &&
5786 (ntoh16(eh
->ether_type
) != ETHER_TYPE_BRCM
))
5787 #endif /* PROP_TXSTATUS_VSDB */
5789 DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
5791 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
5795 #ifdef PROP_TXSTATUS
5796 if (dhd_wlfc_is_header_only_pkt(dhdp
, pktbuf
)) {
5797 /* WLFC may send header only packet when
5798 there is an urgent message but no packet to
5801 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
5805 #ifdef DHD_L2_FILTER
5806 /* If block_ping is enabled drop the ping packet */
5807 if (ifp
->block_ping
) {
5808 if (bcm_l2_filter_block_ping(dhdp
->osh
, pktbuf
) == BCME_OK
) {
5809 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
5813 if (ifp
->grat_arp
&& DHD_IF_ROLE_STA(dhdp
, ifidx
)) {
5814 if (bcm_l2_filter_gratuitous_arp(dhdp
->osh
, pktbuf
) == BCME_OK
) {
5815 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
5819 if (ifp
->parp_enable
&& DHD_IF_ROLE_AP(dhdp
, ifidx
)) {
5820 int ret
= dhd_l2_filter_pkt_handle(dhdp
, ifidx
, pktbuf
, FALSE
);
5822 /* Drop the packets if l2 filter has processed it already
5823 * otherwise continue with the normal path
5825 if (ret
== BCME_OK
) {
5826 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
5830 #endif /* DHD_L2_FILTER */
5832 #ifdef DHD_MCAST_REGEN
5833 DHD_FLOWID_LOCK(dhdp
->flowid_lock
, flags
);
5834 if_flow_lkup
= (if_flow_lkup_t
*)dhdp
->if_flow_lkup
;
5835 ASSERT(if_flow_lkup
);
5837 interface_role
= if_flow_lkup
[ifidx
].role
;
5838 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
5840 if (ifp
->mcast_regen_bss_enable
&& (interface_role
!= WLC_E_IF_ROLE_WDS
) &&
5841 !DHD_IF_ROLE_AP(dhdp
, ifidx
) &&
5842 ETHER_ISUCAST(eh
->ether_dhost
)) {
5843 if (dhd_mcast_reverse_translation(eh
) == BCME_OK
) {
5845 /* Change bsscfg to primary bsscfg for unicast-multicast packets */
5846 if ((dhd_get_psta_mode(dhdp
) == DHD_MODE_PSTA
) ||
5847 (dhd_get_psta_mode(dhdp
) == DHD_MODE_PSR
)) {
5849 /* Let the primary in PSTA interface handle this
5850 * frame after unicast to Multicast conversion
5852 ifp
= dhd_get_ifp(dhdp
, 0);
5859 #endif /* MCAST_REGEN */
5862 /* WMF processing for multicast packets */
5863 if (ifp
->wmf
.wmf_enable
&& (ETHER_ISMULTI(eh
->ether_dhost
))) {
5867 sta
= dhd_find_sta(dhdp
, ifidx
, (void *)eh
->ether_shost
);
5868 ret
= dhd_wmf_packets_handle(dhdp
, pktbuf
, sta
, ifidx
, 1);
5871 /* The packet is taken by WMF. Continue to next iteration */
5874 /* Packet DROP decision by WMF. Toss it */
5875 DHD_ERROR(("%s: WMF decides to drop packet\n",
5877 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
5880 /* Continue the transmit path */
5884 #endif /* DHD_WMF */
5886 #ifdef DHDTCPACK_SUPPRESS
5887 dhd_tcpdata_info_get(dhdp
, pktbuf
);
5889 skb
= PKTTONATIVE(dhdp
->osh
, pktbuf
);
5892 skb
->dev
= ifp
->net
;
5894 /* wet related packet proto manipulation should be done in DHD
5895 * since dongle doesn't have complete payload
5897 if (WET_ENABLED(&dhd
->pub
) && (dhd_wet_recv_proc(dhd
->pub
.wet_info
,
5899 DHD_INFO(("%s:%s: wet recv proc failed\n",
5900 __FUNCTION__
, dhd_ifname(dhdp
, ifidx
)));
5902 #endif /* DHD_WET */
5905 if (PSR_ENABLED(dhdp
) && (dhd_psta_proc(dhdp
, ifidx
, &pktbuf
, FALSE
) < 0)) {
5906 DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__
,
5907 dhd_ifname(dhdp
, ifidx
)));
5909 #endif /* DHD_PSTA */
5911 #ifdef PCIE_FULL_DONGLE
5912 if ((DHD_IF_ROLE_AP(dhdp
, ifidx
) || DHD_IF_ROLE_P2PGO(dhdp
, ifidx
)) &&
5913 (!ifp
->ap_isolate
)) {
5914 eh
= (struct ether_header
*)PKTDATA(dhdp
->osh
, pktbuf
);
5915 if (ETHER_ISUCAST(eh
->ether_dhost
)) {
5916 if (dhd_find_sta(dhdp
, ifidx
, (void *)eh
->ether_dhost
)) {
5917 dhd_sendpkt(dhdp
, ifidx
, pktbuf
);
5921 void *npktbuf
= PKTDUP(dhdp
->osh
, pktbuf
);
5923 dhd_sendpkt(dhdp
, ifidx
, npktbuf
);
5926 #endif /* PCIE_FULL_DONGLE */
5928 /* Get the protocol, maintain skb around eth_type_trans()
5929 * The main reason for this hack is for the limitation of
5930 * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
5931 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
5932 * coping of the packet coming from the network stack to add
5933 * BDC, Hardware header etc, during network interface registration
5934 * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
5935 * for BDC, Hardware header etc. and not just the ETH_HLEN
5940 dump_data
= skb
->data
;
5942 protocol
= (skb
->data
[12] << 8) | skb
->data
[13];
5943 if (protocol
== ETHER_TYPE_802_1X
) {
5944 DBG_EVENT_LOG(dhdp
, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED
);
5945 #ifdef DHD_8021X_DUMP
5946 dhd_dump_eapol_4way_message(dhd_ifname(dhdp
, ifidx
), dump_data
, FALSE
);
5947 #endif /* DHD_8021X_DUMP */
5948 dhd_conf_set_eapol_status(dhdp
, dhd_ifname(dhdp
, ifidx
), dump_data
);
5951 if (protocol
!= ETHER_TYPE_BRCM
&& protocol
== ETHER_TYPE_IP
) {
5952 #ifdef DHD_DHCP_DUMP
5953 dhd_dhcp_dump(dhd_ifname(dhdp
, ifidx
), dump_data
, FALSE
);
5954 #endif /* DHD_DHCP_DUMP */
5955 #ifdef DHD_ICMP_DUMP
5956 dhd_icmp_dump(dhd_ifname(dhdp
, ifidx
), dump_data
, FALSE
);
5957 #endif /* DHD_ICMP_DUMP */
5960 dhd_trx_dump(dhd_idx2net(dhdp
, ifidx
), dump_data
, skb
->len
, FALSE
);
5961 #endif /* DHD_RX_DUMP */
5962 #if defined(DHD_WAKE_STATUS) && defined(DHD_WAKEPKT_DUMP)
5964 prhex("[wakepkt_dump]", (char*)dump_data
, MIN(len
, 32));
5966 #endif /* DHD_WAKE_STATUS && DHD_WAKEPKT_DUMP */
5968 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
5970 if (skb
->pkt_type
== PACKET_MULTICAST
) {
5971 dhd
->pub
.rx_multicast
++;
5972 ifp
->stats
.multicast
++;
5979 dhd_htsf_addrxts(dhdp
, pktbuf
);
5982 DHD_DBG_PKT_MON_RX(dhdp
, skb
);
5983 #endif /* DBG_PKT_MON */
5984 #ifdef DHD_PKT_LOGGING
5985 DHD_PKTLOG_RX(dhdp
, skb
);
5986 #endif /* DHD_PKT_LOGGING */
5987 /* Strip header, count, deliver upward */
5988 skb_pull(skb
, ETH_HLEN
);
5990 /* Process special event packets and then discard them */
5991 memset(&event
, 0, sizeof(event
));
5993 if (ntoh16(skb
->protocol
) == ETHER_TYPE_BRCM
) {
5994 bcm_event_msg_u_t evu
;
5998 ret_event
= wl_host_event_get_data(
5999 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
6000 skb_mac_header(skb
),
6003 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
6006 if (ret_event
!= BCME_OK
) {
6007 DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
6008 __FUNCTION__
, ret_event
));
6009 #ifdef DHD_USE_STATIC_CTRLBUF
6010 PKTFREE_STATIC(dhdp
->osh
, pktbuf
, FALSE
);
6012 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
6017 memcpy(&event
, &evu
.event
, sizeof(wl_event_msg_t
));
6018 event_type
= ntoh32_ua((void *)&event
.event_type
);
6019 #ifdef SHOW_LOGTRACE
6020 /* Event msg printing is called from dhd_rx_frame which is in Tasklet
6021 * context in case of PCIe FD, in case of other bus this will be from
6022 * DPC context. If we get bunch of events from Dongle then printing all
6023 * of them from Tasklet/DPC context that too in data path is costly.
6024 * Also in the new Dongle SW(4359, 4355 onwards) console prints too come as
6025 * events with type WLC_E_TRACE.
6026 * We'll print this console logs from the WorkQueue context by enqueing SKB
6027 * here and Dequeuing will be done in WorkQueue and will be freed only if
6028 * DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT is defined
6030 if (event_type
== WLC_E_TRACE
) {
6031 DHD_TRACE(("%s: WLC_E_TRACE\n", __FUNCTION__
));
6032 dhd_event_logtrace_enqueue(dhdp
, ifidx
, pktbuf
);
6035 #endif /* SHOW_LOGTRACE */
6037 ret_event
= dhd_wl_host_event(dhd
, ifidx
,
6038 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
6039 skb_mac_header(skb
),
6042 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
6043 len
, &event
, &data
);
6045 wl_event_to_host_order(&event
);
6047 tout_ctrl
= DHD_PACKET_TIMEOUT_MS
;
6049 #if defined(PNO_SUPPORT)
6050 if (event_type
== WLC_E_PFN_NET_FOUND
) {
6051 /* enforce custom wake lock to garantee that Kernel not suspended */
6052 tout_ctrl
= CUSTOM_PNO_EVENT_LOCK_xTIME
* DHD_PACKET_TIMEOUT_MS
;
6054 #endif /* PNO_SUPPORT */
6056 DHD_TRACE(("%s: Got BRCM event packet in a chained packet.\n",
6060 #ifdef DHD_WAKE_STATUS
6061 if (unlikely(pkt_wake
)) {
6062 #ifdef DHD_WAKE_EVENT_STATUS
6063 if (event
.event_type
< WLC_E_LAST
) {
6064 wcp
->rc_event
[event
.event_type
]++;
6068 #endif /* DHD_WAKE_EVENT_STATUS */
6070 #endif /* DHD_WAKE_STATUS */
6072 /* For delete virtual interface event, wl_host_event returns positive
6073 * i/f index, do not proceed. just free the pkt.
6075 if ((event_type
== WLC_E_IF
) && (ret_event
> 0)) {
6076 DHD_ERROR(("%s: interface is deleted. Free event packet\n",
6078 #ifdef DHD_USE_STATIC_CTRLBUF
6079 PKTFREE_STATIC(dhdp
->osh
, pktbuf
, FALSE
);
6081 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
6086 #ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
6087 #ifdef DHD_USE_STATIC_CTRLBUF
6088 PKTFREE_STATIC(dhdp
->osh
, pktbuf
, FALSE
);
6090 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
6091 #endif /* DHD_USE_STATIC_CTRLBUF */
6095 * For the event packets, there is a possibility
6096 * of ifidx getting modifed.Thus update the ifp
6099 ASSERT(ifidx
< DHD_MAX_IFS
&& dhd
->iflist
[ifidx
]);
6100 ifp
= dhd
->iflist
[ifidx
];
6101 #ifndef PROP_TXSTATUS_VSDB
6102 if (!(ifp
&& ifp
->net
&& (ifp
->net
->reg_state
== NETREG_REGISTERED
)))
6104 if (!(ifp
&& ifp
->net
&& (ifp
->net
->reg_state
== NETREG_REGISTERED
) &&
6106 #endif /* PROP_TXSTATUS_VSDB */
6108 DHD_ERROR(("%s: net device is NOT registered. drop event packet\n",
6110 #ifdef DHD_USE_STATIC_CTRLBUF
6111 PKTFREE_STATIC(dhdp
->osh
, pktbuf
, FALSE
);
6113 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
6117 #endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
6119 tout_rx
= DHD_PACKET_TIMEOUT_MS
;
6121 #ifdef PROP_TXSTATUS
6122 dhd_wlfc_save_rxpath_ac_time(dhdp
, (uint8
)PKTPRIO(skb
));
6123 #endif /* PROP_TXSTATUS */
6125 #ifdef DHD_WAKE_STATUS
6126 if (unlikely(pkt_wake
)) {
6128 #ifdef DHD_WAKE_RX_STATUS
6129 #define ETHER_ICMP6_HEADER 20
6130 #define ETHER_IPV6_SADDR (ETHER_ICMP6_HEADER + 2)
6131 #define ETHER_IPV6_DAADR (ETHER_IPV6_SADDR + IPV6_ADDR_LEN)
6132 #define ETHER_ICMPV6_TYPE (ETHER_IPV6_DAADR + IPV6_ADDR_LEN)
6134 if (ntoh16(skb
->protocol
) == ETHER_TYPE_ARP
) /* ARP */
6136 if (dump_data
[0] == 0xFF) { /* Broadcast */
6138 } else if (dump_data
[0] & 0x01) { /* Multicast */
6140 if (ntoh16(skb
->protocol
) == ETHER_TYPE_IPV6
) {
6141 wcp
->rx_multi_ipv6
++;
6142 if ((skb
->len
> ETHER_ICMP6_HEADER
) &&
6143 (dump_data
[ETHER_ICMP6_HEADER
] == IPPROTO_ICMPV6
)) {
6145 if (skb
->len
> ETHER_ICMPV6_TYPE
) {
6146 switch (dump_data
[ETHER_ICMPV6_TYPE
]) {
6147 case NDISC_ROUTER_ADVERTISEMENT
:
6148 wcp
->rx_icmpv6_ra
++;
6150 case NDISC_NEIGHBOUR_ADVERTISEMENT
:
6151 wcp
->rx_icmpv6_na
++;
6153 case NDISC_NEIGHBOUR_SOLICITATION
:
6154 wcp
->rx_icmpv6_ns
++;
6159 } else if (dump_data
[2] == 0x5E) {
6160 wcp
->rx_multi_ipv4
++;
6162 wcp
->rx_multi_other
++;
6164 } else { /* Unicast */
6167 #undef ETHER_ICMP6_HEADER
6168 #undef ETHER_IPV6_SADDR
6169 #undef ETHER_IPV6_DAADR
6170 #undef ETHER_ICMPV6_TYPE
6171 #endif /* DHD_WAKE_RX_STATUS */
6174 #endif /* DHD_WAKE_STATUS */
6177 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
6179 ifp
->net
->last_rx
= jiffies
;
6180 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) */
6182 if (ntoh16(skb
->protocol
) != ETHER_TYPE_BRCM
) {
6183 dhdp
->dstats
.rx_bytes
+= skb
->len
;
6184 dhdp
->rx_packets
++; /* Local count */
6185 ifp
->stats
.rx_bytes
+= skb
->len
;
6186 ifp
->stats
.rx_packets
++;
6189 if (in_interrupt()) {
6190 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
6191 __FUNCTION__
, __LINE__
);
6192 DHD_PERIM_UNLOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
6193 #if defined(DHD_LB_RXP)
6194 netif_receive_skb(skb
);
6195 #else /* !defined(DHD_LB_RXP) */
6197 #endif /* !defined(DHD_LB_RXP) */
6198 DHD_PERIM_LOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
6200 if (dhd
->rxthread_enabled
) {
6204 PKTSETNEXT(dhdp
->osh
, skbprev
, skb
);
6208 /* If the receive is not processed inside an ISR,
6209 * the softirqd must be woken explicitly to service
6210 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
6211 * by netif_rx_ni(), but in earlier kernels, we need
6212 * to do it manually.
6214 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
6215 __FUNCTION__
, __LINE__
);
6217 #if defined(DHD_LB_RXP)
6218 DHD_PERIM_UNLOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
6219 netif_receive_skb(skb
);
6220 DHD_PERIM_LOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
6221 #else /* !defined(DHD_LB_RXP) */
6222 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
6223 DHD_PERIM_UNLOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
6225 DHD_PERIM_LOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
6228 DHD_PERIM_UNLOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
6230 DHD_PERIM_LOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
6231 local_irq_save(flags
);
6233 local_irq_restore(flags
);
6234 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
6235 #endif /* !defined(DHD_LB_RXP) */
6240 if (dhd
->rxthread_enabled
&& skbhead
)
6241 dhd_sched_rxf(dhdp
, skbhead
);
6243 DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp
, tout_rx
);
6244 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp
, tout_ctrl
);
/*
 * Bus-layer hook invoked when a dongle event has been received.
 * The Linux port performs all event handling inline in dhd_rx_frame(),
 * so this hook is intentionally a no-op here.
 */
void
dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
{
	/* Linux version has nothing to do */
	return;
}
6255 dhd_txcomplete(dhd_pub_t
*dhdp
, void *txp
, bool success
)
6257 dhd_info_t
*dhd
= (dhd_info_t
*)(dhdp
->info
);
6258 struct ether_header
*eh
;
6261 dhd_prot_hdrpull(dhdp
, NULL
, txp
, NULL
, NULL
);
6264 eh
= (struct ether_header
*)PKTDATA(dhdp
->osh
, txp
);
6265 type
= ntoh16(eh
->ether_type
);
6267 if ((type
== ETHER_TYPE_802_1X
) && (dhd_get_pend_8021x_cnt(dhd
) > 0)) {
6268 atomic_dec(&dhd
->pend_8021x_cnt
);
6271 #ifdef PROP_TXSTATUS
6272 if (dhdp
->wlfc_state
&& (dhdp
->proptxstatus_mode
!= WLFC_FCMODE_NONE
)) {
6273 dhd_if_t
*ifp
= dhd
->iflist
[DHD_PKTTAG_IF(PKTTAG(txp
))];
6274 uint datalen
= PKTLEN(dhd
->pub
.osh
, txp
);
6277 dhd
->pub
.tx_packets
++;
6278 ifp
->stats
.tx_packets
++;
6279 ifp
->stats
.tx_bytes
+= datalen
;
6281 ifp
->stats
.tx_dropped
++;
6288 static struct net_device_stats
*
6289 dhd_get_stats(struct net_device
*net
)
6291 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
6295 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
6298 DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__
));
6302 ifidx
= dhd_net2idx(dhd
, net
);
6303 if (ifidx
== DHD_BAD_IF
) {
6304 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__
));
6308 ifp
= dhd
->iflist
[ifidx
];
6312 DHD_ERROR(("%s: ifp is NULL\n", __FUNCTION__
));
6317 /* Use the protocol to get dongle stats */
6318 dhd_prot_dstats(&dhd
->pub
);
6323 memset(&net
->stats
, 0, sizeof(net
->stats
));
6329 dhd_watchdog_thread(void *data
)
6331 tsk_ctl_t
*tsk
= (tsk_ctl_t
*)data
;
6332 dhd_info_t
*dhd
= (dhd_info_t
*)tsk
->parent
;
6333 /* This thread doesn't need any user-level access,
6334 * so get rid of all our resources
6336 if (dhd_watchdog_prio
> 0) {
6337 struct sched_param param
;
6338 param
.sched_priority
= (dhd_watchdog_prio
< MAX_RT_PRIO
)?
6339 dhd_watchdog_prio
:(MAX_RT_PRIO
-1);
6340 setScheduler(current
, SCHED_FIFO
, ¶m
);
6344 if (down_interruptible (&tsk
->sema
) == 0) {
6345 unsigned long flags
;
6346 unsigned long jiffies_at_start
= jiffies
;
6347 unsigned long time_lapse
;
6348 DHD_OS_WD_WAKE_LOCK(&dhd
->pub
);
6350 SMP_RD_BARRIER_DEPENDS();
6351 if (tsk
->terminated
) {
6355 if (dhd
->pub
.dongle_reset
== FALSE
) {
6356 DHD_TIMER(("%s:\n", __FUNCTION__
));
6357 dhd_bus_watchdog(&dhd
->pub
);
6360 /* Call the timesync module watchdog */
6361 dhd_timesync_watchdog(&dhd
->pub
);
6362 #endif /* DHD_TIMESYNC */
6364 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
6365 /* Count the tick for reference */
6367 #ifdef DHD_L2_FILTER
6368 dhd_l2_filter_watchdog(&dhd
->pub
);
6369 #endif /* DHD_L2_FILTER */
6370 time_lapse
= jiffies
- jiffies_at_start
;
6372 /* Reschedule the watchdog */
6373 if (dhd
->wd_timer_valid
) {
6374 mod_timer(&dhd
->timer
,
6376 msecs_to_jiffies(dhd_watchdog_ms
) -
6377 min(msecs_to_jiffies(dhd_watchdog_ms
), time_lapse
));
6379 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
6381 DHD_OS_WD_WAKE_UNLOCK(&dhd
->pub
);
6387 complete_and_exit(&tsk
->completed
, 0);
6390 static void dhd_watchdog(
6391 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
6392 struct timer_list
*t
6398 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
6399 dhd_info_t
*dhd
= from_timer(dhd
, t
, timer
);
6401 dhd_info_t
*dhd
= (dhd_info_t
*)data
;
6403 unsigned long flags
;
6405 if (dhd
->pub
.dongle_reset
) {
6409 if (dhd
->thr_wdt_ctl
.thr_pid
>= 0) {
6410 up(&dhd
->thr_wdt_ctl
.sema
);
6414 DHD_OS_WD_WAKE_LOCK(&dhd
->pub
);
6415 /* Call the bus module watchdog */
6416 dhd_bus_watchdog(&dhd
->pub
);
6419 /* Call the timesync module watchdog */
6420 dhd_timesync_watchdog(&dhd
->pub
);
6421 #endif /* DHD_TIMESYNC */
6423 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
6424 /* Count the tick for reference */
6427 #ifdef DHD_L2_FILTER
6428 dhd_l2_filter_watchdog(&dhd
->pub
);
6429 #endif /* DHD_L2_FILTER */
6430 /* Reschedule the watchdog */
6431 if (dhd
->wd_timer_valid
)
6432 mod_timer(&dhd
->timer
, jiffies
+ msecs_to_jiffies(dhd_watchdog_ms
));
6433 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
6434 DHD_OS_WD_WAKE_UNLOCK(&dhd
->pub
);
6437 #ifdef DHD_PCIE_RUNTIMEPM
6439 dhd_rpm_state_thread(void *data
)
6441 tsk_ctl_t
*tsk
= (tsk_ctl_t
*)data
;
6442 dhd_info_t
*dhd
= (dhd_info_t
*)tsk
->parent
;
6445 if (down_interruptible (&tsk
->sema
) == 0) {
6446 unsigned long flags
;
6447 unsigned long jiffies_at_start
= jiffies
;
6448 unsigned long time_lapse
;
6450 SMP_RD_BARRIER_DEPENDS();
6451 if (tsk
->terminated
) {
6455 if (dhd
->pub
.dongle_reset
== FALSE
) {
6456 DHD_TIMER(("%s:\n", __FUNCTION__
));
6458 dhd_runtimepm_state(&dhd
->pub
);
6461 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
6462 time_lapse
= jiffies
- jiffies_at_start
;
6464 /* Reschedule the watchdog */
6465 if (dhd
->rpm_timer_valid
) {
6466 mod_timer(&dhd
->rpm_timer
,
6468 msecs_to_jiffies(dhd_runtimepm_ms
) -
6469 min(msecs_to_jiffies(dhd_runtimepm_ms
),
6472 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
6479 complete_and_exit(&tsk
->completed
, 0);
6482 static void dhd_runtimepm(
6483 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
6484 struct timer_list
*t
6490 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
6491 dhd_info_t
*dhd
= from_timer(dhd
, t
, rpm_timer
);
6493 dhd_info_t
*dhd
= (dhd_info_t
*)data
;
6496 if (dhd
->pub
.dongle_reset
) {
6500 if (dhd
->thr_rpm_ctl
.thr_pid
>= 0) {
6501 up(&dhd
->thr_rpm_ctl
.sema
);
6506 void dhd_runtime_pm_disable(dhd_pub_t
*dhdp
)
6508 dhd_os_runtimepm_timer(dhdp
, 0);
6509 dhdpcie_runtime_bus_wake(dhdp
, TRUE
, __builtin_return_address(0));
6510 DHD_ERROR(("DHD Runtime PM Disabled \n"));
6513 void dhd_runtime_pm_enable(dhd_pub_t
*dhdp
)
6515 if (dhd_get_idletime(dhdp
)) {
6516 dhd_os_runtimepm_timer(dhdp
, dhd_runtimepm_ms
);
6517 DHD_ERROR(("DHD Runtime PM Enabled \n"));
6521 #endif /* DHD_PCIE_RUNTIMEPM */
6524 #ifdef ENABLE_ADAPTIVE_SCHED
6526 dhd_sched_policy(int prio
)
6528 struct sched_param param
;
6529 if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH
) {
6530 param
.sched_priority
= 0;
6531 setScheduler(current
, SCHED_NORMAL
, ¶m
);
6533 if (get_scheduler_policy(current
) != SCHED_FIFO
) {
6534 param
.sched_priority
= (prio
< MAX_RT_PRIO
)? prio
: (MAX_RT_PRIO
-1);
6535 setScheduler(current
, SCHED_FIFO
, ¶m
);
6539 #endif /* ENABLE_ADAPTIVE_SCHED */
6540 #ifdef DEBUG_CPU_FREQ
6541 static int dhd_cpufreq_notifier(struct notifier_block
*nb
, unsigned long val
, void *data
)
6543 dhd_info_t
*dhd
= container_of(nb
, struct dhd_info
, freq_trans
);
6544 struct cpufreq_freqs
*freq
= data
;
6548 if (val
== CPUFREQ_POSTCHANGE
) {
6549 DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
6550 freq
->new, freq
->cpu
));
6551 *per_cpu_ptr(dhd
->new_freq
, freq
->cpu
) = freq
->new;
6557 #endif /* DEBUG_CPU_FREQ */
6560 dhd_dpc_thread(void *data
)
6562 tsk_ctl_t
*tsk
= (tsk_ctl_t
*)data
;
6563 dhd_info_t
*dhd
= (dhd_info_t
*)tsk
->parent
;
6565 /* This thread doesn't need any user-level access,
6566 * so get rid of all our resources
6568 if (dhd_dpc_prio
> 0)
6570 struct sched_param param
;
6571 param
.sched_priority
= (dhd_dpc_prio
< MAX_RT_PRIO
)?dhd_dpc_prio
:(MAX_RT_PRIO
-1);
6572 setScheduler(current
, SCHED_FIFO
, ¶m
);
6575 #ifdef CUSTOM_DPC_CPUCORE
6576 set_cpus_allowed_ptr(current
, cpumask_of(CUSTOM_DPC_CPUCORE
));
6578 #ifdef CUSTOM_SET_CPUCORE
6579 dhd
->pub
.current_dpc
= current
;
6580 #endif /* CUSTOM_SET_CPUCORE */
6581 /* Run until signal received */
6583 if (dhd
->pub
.conf
->dpc_cpucore
>= 0) {
6584 printf("%s: set dpc_cpucore %d\n", __FUNCTION__
, dhd
->pub
.conf
->dpc_cpucore
);
6585 set_cpus_allowed_ptr(current
, cpumask_of(dhd
->pub
.conf
->dpc_cpucore
));
6586 dhd
->pub
.conf
->dpc_cpucore
= -1;
6588 if (!binary_sema_down(tsk
)) {
6589 #ifdef ENABLE_ADAPTIVE_SCHED
6590 dhd_sched_policy(dhd_dpc_prio
);
6591 #endif /* ENABLE_ADAPTIVE_SCHED */
6592 SMP_RD_BARRIER_DEPENDS();
6593 if (tsk
->terminated
) {
6597 /* Call bus dpc unless it indicated down (then clean stop) */
6598 if (dhd
->pub
.busstate
!= DHD_BUS_DOWN
) {
6599 #ifdef DEBUG_DPC_THREAD_WATCHDOG
6600 int resched_cnt
= 0;
6601 #endif /* DEBUG_DPC_THREAD_WATCHDOG */
6602 dhd_os_wd_timer_extend(&dhd
->pub
, TRUE
);
6603 while (dhd_bus_dpc(dhd
->pub
.bus
)) {
6604 /* process all data */
6605 #ifdef DEBUG_DPC_THREAD_WATCHDOG
6607 if (resched_cnt
> MAX_RESCHED_CNT
) {
6608 DHD_INFO(("%s Calling msleep to"
6609 "let other processes run. \n",
6611 dhd
->pub
.dhd_bug_on
= true;
6615 #endif /* DEBUG_DPC_THREAD_WATCHDOG */
6617 dhd_os_wd_timer_extend(&dhd
->pub
, FALSE
);
6618 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
6621 dhd_bus_stop(dhd
->pub
.bus
, TRUE
);
6622 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
6628 complete_and_exit(&tsk
->completed
, 0);
6632 dhd_rxf_thread(void *data
)
6634 tsk_ctl_t
*tsk
= (tsk_ctl_t
*)data
;
6635 dhd_info_t
*dhd
= (dhd_info_t
*)tsk
->parent
;
6636 #if defined(WAIT_DEQUEUE)
6637 #define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
6638 ulong watchdogTime
= OSL_SYSUPTIME(); /* msec */
6640 dhd_pub_t
*pub
= &dhd
->pub
;
6642 /* This thread doesn't need any user-level access,
6643 * so get rid of all our resources
6645 if (dhd_rxf_prio
> 0)
6647 struct sched_param param
;
6648 param
.sched_priority
= (dhd_rxf_prio
< MAX_RT_PRIO
)?dhd_rxf_prio
:(MAX_RT_PRIO
-1);
6649 setScheduler(current
, SCHED_FIFO
, ¶m
);
6652 #ifdef CUSTOM_SET_CPUCORE
6653 dhd
->pub
.current_rxf
= current
;
6654 #endif /* CUSTOM_SET_CPUCORE */
6655 /* Run until signal received */
6657 if (dhd
->pub
.conf
->rxf_cpucore
>= 0) {
6658 printf("%s: set rxf_cpucore %d\n", __FUNCTION__
, dhd
->pub
.conf
->rxf_cpucore
);
6659 set_cpus_allowed_ptr(current
, cpumask_of(dhd
->pub
.conf
->rxf_cpucore
));
6660 dhd
->pub
.conf
->rxf_cpucore
= -1;
6662 if (down_interruptible(&tsk
->sema
) == 0) {
6664 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
6667 #ifdef ENABLE_ADAPTIVE_SCHED
6668 dhd_sched_policy(dhd_rxf_prio
);
6669 #endif /* ENABLE_ADAPTIVE_SCHED */
6671 SMP_RD_BARRIER_DEPENDS();
6673 if (tsk
->terminated
) {
6676 skb
= dhd_rxf_dequeue(pub
);
6682 void *skbnext
= PKTNEXT(pub
->osh
, skb
);
6683 PKTSETNEXT(pub
->osh
, skb
, NULL
);
6684 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
6685 __FUNCTION__
, __LINE__
);
6686 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
6690 local_irq_save(flags
);
6692 local_irq_restore(flags
);
6697 #if defined(WAIT_DEQUEUE)
6698 if (OSL_SYSUPTIME() - watchdogTime
> RXF_WATCHDOG_TIME
) {
6700 watchdogTime
= OSL_SYSUPTIME();
6704 DHD_OS_WAKE_UNLOCK(pub
);
6709 complete_and_exit(&tsk
->completed
, 0);
6713 void dhd_dpc_enable(dhd_pub_t
*dhdp
)
6715 #if defined(DHD_LB_RXP) || defined(DHD_LB_TXP)
6718 if (!dhdp
|| !dhdp
->info
)
6721 #endif /* DHD_LB_RXP || DHD_LB_TXP */
6724 __skb_queue_head_init(&dhd
->rx_pend_queue
);
6725 #endif /* DHD_LB_RXP */
6728 skb_queue_head_init(&dhd
->tx_pend_queue
);
6729 #endif /* DHD_LB_TXP */
6731 #endif /* BCMPCIE */
6735 dhd_dpc_kill(dhd_pub_t
*dhdp
)
6749 if (dhd
->thr_dpc_ctl
.thr_pid
< 0) {
6750 tasklet_kill(&dhd
->tasklet
);
6751 DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__
));
6756 cancel_work_sync(&dhd
->rx_napi_dispatcher_work
);
6757 __skb_queue_purge(&dhd
->rx_pend_queue
);
6758 #endif /* DHD_LB_RXP */
6760 cancel_work_sync(&dhd
->tx_dispatcher_work
);
6761 skb_queue_purge(&dhd
->tx_pend_queue
);
6762 #endif /* DHD_LB_TXP */
6764 /* Kill the Load Balancing Tasklets */
6765 #if defined(DHD_LB_TXC)
6766 tasklet_kill(&dhd
->tx_compl_tasklet
);
6767 #endif /* DHD_LB_TXC */
6768 #if defined(DHD_LB_RXC)
6769 tasklet_kill(&dhd
->rx_compl_tasklet
);
6770 #endif /* DHD_LB_RXC */
6771 #if defined(DHD_LB_TXP)
6772 tasklet_kill(&dhd
->tx_tasklet
);
6773 #endif /* DHD_LB_TXP */
6778 dhd_dpc_tasklet_kill(dhd_pub_t
*dhdp
)
6792 if (dhd
->thr_dpc_ctl
.thr_pid
< 0) {
6793 tasklet_kill(&dhd
->tasklet
);
6796 #endif /* BCMPCIE */
6803 dhd
= (dhd_info_t
*)data
;
6805 /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
6806 * down below , wake lock is set,
6807 * the tasklet is initialized in dhd_attach()
6809 /* Call bus dpc unless it indicated down (then clean stop) */
6810 if (dhd
->pub
.busstate
!= DHD_BUS_DOWN
) {
6811 #if defined(DHD_LB_STATS) && defined(PCIE_FULL_DONGLE)
6812 DHD_LB_STATS_INCR(dhd
->dhd_dpc_cnt
);
6813 #endif /* DHD_LB_STATS && PCIE_FULL_DONGLE */
6814 if (dhd_bus_dpc(dhd
->pub
.bus
)) {
6815 tasklet_schedule(&dhd
->tasklet
);
6818 dhd_bus_stop(dhd
->pub
.bus
, TRUE
);
6823 dhd_sched_dpc(dhd_pub_t
*dhdp
)
6825 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
6827 if (dhd
->thr_dpc_ctl
.thr_pid
>= 0) {
6828 DHD_OS_WAKE_LOCK(dhdp
);
6829 /* If the semaphore does not get up,
6830 * wake unlock should be done here
6832 if (!binary_sema_up(&dhd
->thr_dpc_ctl
)) {
6833 DHD_OS_WAKE_UNLOCK(dhdp
);
6837 tasklet_schedule(&dhd
->tasklet
);
6840 #endif /* BCMDBUS */
6843 dhd_sched_rxf(dhd_pub_t
*dhdp
, void *skb
)
6845 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
6846 #ifdef RXF_DEQUEUE_ON_BUSY
6849 #endif /* RXF_DEQUEUE_ON_BUSY */
6851 DHD_OS_WAKE_LOCK(dhdp
);
6853 DHD_TRACE(("dhd_sched_rxf: Enter\n"));
6854 #ifdef RXF_DEQUEUE_ON_BUSY
6856 ret
= dhd_rxf_enqueue(dhdp
, skb
);
6857 if (ret
== BCME_OK
|| ret
== BCME_ERROR
)
6860 OSL_SLEEP(50); /* waiting for dequeueing */
6861 } while (retry
-- > 0);
6863 if (retry
<= 0 && ret
== BCME_BUSY
) {
6867 void *skbnext
= PKTNEXT(dhdp
->osh
, skbp
);
6868 PKTSETNEXT(dhdp
->osh
, skbp
, NULL
);
6869 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
6870 __FUNCTION__
, __LINE__
);
6874 DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
6876 if (dhd
->thr_rxf_ctl
.thr_pid
>= 0) {
6877 up(&dhd
->thr_rxf_ctl
.sema
);
6880 #else /* RXF_DEQUEUE_ON_BUSY */
6882 if (dhd_rxf_enqueue(dhdp
, skb
) == BCME_OK
)
6885 if (dhd
->thr_rxf_ctl
.thr_pid
>= 0) {
6886 up(&dhd
->thr_rxf_ctl
.sema
);
6889 #endif /* RXF_DEQUEUE_ON_BUSY */
6892 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
6893 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
6896 /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
6898 dhd_toe_get(dhd_info_t
*dhd
, int ifidx
, uint32
*toe_ol
)
6903 ret
= dhd_iovar(&dhd
->pub
, ifidx
, "toe_ol", NULL
, 0, (char *)&buf
, sizeof(buf
), FALSE
);
6907 DHD_ERROR(("%s: toe not supported by device\n", dhd_ifname(&dhd
->pub
,
6912 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd
->pub
, ifidx
), ret
));
6916 memcpy(toe_ol
, buf
, sizeof(uint32
));
6920 /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
6922 dhd_toe_set(dhd_info_t
*dhd
, int ifidx
, uint32 toe_ol
)
6926 /* Set toe_ol as requested */
6927 ret
= dhd_iovar(&dhd
->pub
, ifidx
, "toe_ol", (char *)&toe_ol
, sizeof(toe_ol
), NULL
, 0, TRUE
);
6929 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
6930 dhd_ifname(&dhd
->pub
, ifidx
), ret
));
6934 /* Enable toe globally only if any components are enabled. */
6935 toe
= (toe_ol
!= 0);
6936 ret
= dhd_iovar(&dhd
->pub
, ifidx
, "toe", (char *)&toe
, sizeof(toe
), NULL
, 0, TRUE
);
6938 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd
->pub
, ifidx
), ret
));
6946 #if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
6947 void dhd_set_scb_probe(dhd_pub_t
*dhd
)
6949 wl_scb_probe_t scb_probe
;
6952 if (dhd
->op_mode
& DHD_FLAG_HOSTAP_MODE
) {
6956 ret
= dhd_iovar(dhd
, 0, "scb_probe", NULL
, 0,
6957 (char *)&scb_probe
, sizeof(scb_probe
), FALSE
);
6959 DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__
));
6962 scb_probe
.scb_max_probe
= NUM_SCB_MAX_PROBE
;
6964 ret
= dhd_iovar(dhd
, 0, "scb_probe", (char *)&scb_probe
, sizeof(scb_probe
),
6967 DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__
));
6971 #endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
6973 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
6975 dhd_ethtool_get_drvinfo(struct net_device
*net
, struct ethtool_drvinfo
*info
)
6977 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
6979 snprintf(info
->driver
, sizeof(info
->driver
), "wl");
6980 snprintf(info
->version
, sizeof(info
->version
), "%lu", dhd
->pub
.drv_version
);
6983 struct ethtool_ops dhd_ethtool_ops
= {
6984 .get_drvinfo
= dhd_ethtool_get_drvinfo
6986 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
6989 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
6991 dhd_ethtool(dhd_info_t
*dhd
, void *uaddr
)
6993 struct ethtool_drvinfo info
;
6994 char drvname
[sizeof(info
.driver
)];
6997 struct ethtool_value edata
;
6998 uint32 toe_cmpnt
, csum_dir
;
7002 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
7004 /* all ethtool calls start with a cmd word */
7005 if (copy_from_user(&cmd
, uaddr
, sizeof (uint32
)))
7009 case ETHTOOL_GDRVINFO
:
7010 /* Copy out any request driver name */
7011 if (copy_from_user(&info
, uaddr
, sizeof(info
)))
7013 strncpy(drvname
, info
.driver
, sizeof(info
.driver
));
7014 drvname
[sizeof(info
.driver
)-1] = '\0';
7016 /* clear struct for return */
7017 memset(&info
, 0, sizeof(info
));
7020 /* if dhd requested, identify ourselves */
7021 if (strcmp(drvname
, "?dhd") == 0) {
7022 snprintf(info
.driver
, sizeof(info
.driver
), "dhd");
7023 strncpy(info
.version
, EPI_VERSION_STR
, sizeof(info
.version
) - 1);
7024 info
.version
[sizeof(info
.version
) - 1] = '\0';
7027 /* otherwise, require dongle to be up */
7028 else if (!dhd
->pub
.up
) {
7029 DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__
));
7033 /* finally, report dongle driver type */
7034 else if (dhd
->pub
.iswl
)
7035 snprintf(info
.driver
, sizeof(info
.driver
), "wl");
7037 snprintf(info
.driver
, sizeof(info
.driver
), "xx");
7039 snprintf(info
.version
, sizeof(info
.version
), "%lu", dhd
->pub
.drv_version
);
7040 if (copy_to_user(uaddr
, &info
, sizeof(info
)))
7042 DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__
,
7043 (int)sizeof(drvname
), drvname
, info
.driver
));
7047 /* Get toe offload components from dongle */
7048 case ETHTOOL_GRXCSUM
:
7049 case ETHTOOL_GTXCSUM
:
7050 if ((ret
= dhd_toe_get(dhd
, 0, &toe_cmpnt
)) < 0)
7053 csum_dir
= (cmd
== ETHTOOL_GTXCSUM
) ? TOE_TX_CSUM_OL
: TOE_RX_CSUM_OL
;
7056 edata
.data
= (toe_cmpnt
& csum_dir
) ? 1 : 0;
7058 if (copy_to_user(uaddr
, &edata
, sizeof(edata
)))
7062 /* Set toe offload components in dongle */
7063 case ETHTOOL_SRXCSUM
:
7064 case ETHTOOL_STXCSUM
:
7065 if (copy_from_user(&edata
, uaddr
, sizeof(edata
)))
7068 /* Read the current settings, update and write back */
7069 if ((ret
= dhd_toe_get(dhd
, 0, &toe_cmpnt
)) < 0)
7072 csum_dir
= (cmd
== ETHTOOL_STXCSUM
) ? TOE_TX_CSUM_OL
: TOE_RX_CSUM_OL
;
7074 if (edata
.data
!= 0)
7075 toe_cmpnt
|= csum_dir
;
7077 toe_cmpnt
&= ~csum_dir
;
7079 if ((ret
= dhd_toe_set(dhd
, 0, toe_cmpnt
)) < 0)
7082 /* If setting TX checksum mode, tell Linux the new mode */
7083 if (cmd
== ETHTOOL_STXCSUM
) {
7085 dhd
->iflist
[0]->net
->features
|= NETIF_F_IP_CSUM
;
7087 dhd
->iflist
[0]->net
->features
&= ~NETIF_F_IP_CSUM
;
7099 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
7101 static bool dhd_check_hang(struct net_device
*net
, dhd_pub_t
*dhdp
, int error
)
7104 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__
));
7111 #if !defined(BCMPCIE) && !defined(BCMDBUS)
7112 if (dhdp
->info
->thr_dpc_ctl
.thr_pid
< 0) {
7113 DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__
));
7116 #endif /* !BCMPCIE && !BCMDBUS */
7118 if ((error
== -ETIMEDOUT
) || (error
== -EREMOTEIO
) ||
7119 ((dhdp
->busstate
== DHD_BUS_DOWN
) && (!dhdp
->dongle_reset
))) {
7121 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d d3acke=%d e=%d s=%d\n",
7122 __FUNCTION__
, dhdp
->rxcnt_timeout
, dhdp
->txcnt_timeout
,
7123 dhdp
->d3ackcnt_timeout
, error
, dhdp
->busstate
));
7125 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__
,
7126 dhdp
->rxcnt_timeout
, dhdp
->txcnt_timeout
, error
, dhdp
->busstate
));
7127 #endif /* BCMPCIE */
7128 if (dhdp
->hang_reason
== 0) {
7129 if (dhdp
->dongle_trap_occured
) {
7130 dhdp
->hang_reason
= HANG_REASON_DONGLE_TRAP
;
7132 } else if (dhdp
->d3ackcnt_timeout
) {
7133 dhdp
->hang_reason
= HANG_REASON_D3_ACK_TIMEOUT
;
7134 #endif /* BCMPCIE */
7136 dhdp
->hang_reason
= HANG_REASON_IOCTL_RESP_TIMEOUT
;
7139 net_os_send_hang_message(net
);
7147 dhd_monitor_enabled(dhd_pub_t
*dhd
, int ifidx
)
7149 return (dhd
->info
->monitor_type
!= 0);
7153 dhd_rx_mon_pkt(dhd_pub_t
*dhdp
, host_rxbuf_cmpl_t
* msg
, void *pkt
, int ifidx
)
7155 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
7156 #ifdef HOST_RADIOTAP_CONV
7157 uint16 len
= 0, offset
= 0;
7158 monitor_pkt_info_t pkt_info
;
7159 memcpy(&pkt_info
.marker
, &msg
->marker
, sizeof(msg
->marker
));
7160 memcpy(&pkt_info
.ts
, &msg
->ts
, sizeof(monitor_pkt_ts_t
));
7162 if (!dhd
->monitor_skb
) {
7163 if ((dhd
->monitor_skb
= dev_alloc_skb(MAX_MON_PKT_SIZE
)) == NULL
)
7167 len
= bcmwifi_monitor(dhd
->monitor_info
, &pkt_info
, PKTDATA(dhdp
->osh
, pkt
),
7168 PKTLEN(dhdp
->osh
, pkt
), PKTDATA(dhdp
->osh
, dhd
->monitor_skb
), &offset
);
7170 if (dhd
->monitor_type
&& dhd
->monitor_dev
)
7171 dhd
->monitor_skb
->dev
= dhd
->monitor_dev
;
7173 PKTFREE(dhdp
->osh
, pkt
, FALSE
);
7174 dev_kfree_skb(dhd
->monitor_skb
);
7178 PKTFREE(dhdp
->osh
, pkt
, FALSE
);
7184 skb_put(dhd
->monitor_skb
, len
);
7185 skb_pull(dhd
->monitor_skb
, offset
);
7187 dhd
->monitor_skb
->protocol
= eth_type_trans(dhd
->monitor_skb
, dhd
->monitor_skb
->dev
);
7189 uint8 amsdu_flag
= (msg
->flags
& BCMPCIE_PKT_FLAGS_MONITOR_MASK
) >>
7190 BCMPCIE_PKT_FLAGS_MONITOR_SHIFT
;
7191 switch (amsdu_flag
) {
7192 case BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU
:
7194 if (!dhd
->monitor_skb
) {
7195 if ((dhd
->monitor_skb
= PKTTONATIVE(dhdp
->osh
, pkt
)) == NULL
)
7199 if (dhd
->monitor_type
&& dhd
->monitor_dev
)
7200 dhd
->monitor_skb
->dev
= dhd
->monitor_dev
;
7202 PKTFREE(dhdp
->osh
, pkt
, FALSE
);
7203 dhd
->monitor_skb
= NULL
;
7207 dhd
->monitor_skb
->protocol
=
7208 eth_type_trans(dhd
->monitor_skb
, dhd
->monitor_skb
->dev
);
7209 dhd
->monitor_len
= 0;
7211 case BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT
:
7212 if (!dhd
->monitor_skb
) {
7213 if ((dhd
->monitor_skb
= dev_alloc_skb(MAX_MON_PKT_SIZE
)) == NULL
)
7215 dhd
->monitor_len
= 0;
7217 if (dhd
->monitor_type
&& dhd
->monitor_dev
)
7218 dhd
->monitor_skb
->dev
= dhd
->monitor_dev
;
7220 PKTFREE(dhdp
->osh
, pkt
, FALSE
);
7221 dev_kfree_skb(dhd
->monitor_skb
);
7224 memcpy(PKTDATA(dhdp
->osh
, dhd
->monitor_skb
),
7225 PKTDATA(dhdp
->osh
, pkt
), PKTLEN(dhdp
->osh
, pkt
));
7227 dhd
->monitor_len
= PKTLEN(dhdp
->osh
, pkt
);
7228 PKTFREE(dhdp
->osh
, pkt
, FALSE
);
7230 case BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT
:
7231 memcpy(PKTDATA(dhdp
->osh
, dhd
->monitor_skb
) + dhd
->monitor_len
,
7232 PKTDATA(dhdp
->osh
, pkt
), PKTLEN(dhdp
->osh
, pkt
));
7233 dhd
->monitor_len
+= PKTLEN(dhdp
->osh
, pkt
);
7235 PKTFREE(dhdp
->osh
, pkt
, FALSE
);
7237 case BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT
:
7238 memcpy(PKTDATA(dhdp
->osh
, dhd
->monitor_skb
) + dhd
->monitor_len
,
7239 PKTDATA(dhdp
->osh
, pkt
), PKTLEN(dhdp
->osh
, pkt
));
7240 dhd
->monitor_len
+= PKTLEN(dhdp
->osh
, pkt
);
7242 PKTFREE(dhdp
->osh
, pkt
, FALSE
);
7243 skb_put(dhd
->monitor_skb
, dhd
->monitor_len
);
7244 dhd
->monitor_skb
->protocol
=
7245 eth_type_trans(dhd
->monitor_skb
, dhd
->monitor_skb
->dev
);
7246 dhd
->monitor_len
= 0;
7250 #endif /* HOST_RADIOTAP_CONV */
7251 if (in_interrupt()) {
7252 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
7253 __FUNCTION__
, __LINE__
);
7254 DHD_PERIM_UNLOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
7255 netif_rx(dhd
->monitor_skb
);
7256 DHD_PERIM_LOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
7258 /* If the receive is not processed inside an ISR,
7259 * the softirqd must be woken explicitly to service
7260 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
7261 * by netif_rx_ni(), but in earlier kernels, we need
7262 * to do it manually.
7264 bcm_object_trace_opr(dhd
->monitor_skb
, BCM_OBJDBG_REMOVE
,
7265 __FUNCTION__
, __LINE__
);
7267 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
7268 DHD_PERIM_UNLOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
7269 netif_rx_ni(dhd
->monitor_skb
);
7270 DHD_PERIM_LOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
7273 DHD_PERIM_UNLOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
7274 netif_rx(dhd
->monitor_skb
);
7275 DHD_PERIM_LOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
7276 local_irq_save(flags
);
7278 local_irq_restore(flags
);
7279 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
7282 dhd
->monitor_skb
= NULL
;
7285 typedef struct dhd_mon_dev_priv
{
7286 struct net_device_stats stats
;
7287 } dhd_mon_dev_priv_t
;
7289 #define DHD_MON_DEV_PRIV_SIZE (sizeof(dhd_mon_dev_priv_t))
7290 #define DHD_MON_DEV_PRIV(dev) ((dhd_mon_dev_priv_t *)DEV_PRIV(dev))
7291 #define DHD_MON_DEV_STATS(dev) (((dhd_mon_dev_priv_t *)DEV_PRIV(dev))->stats)
7294 dhd_monitor_start(struct sk_buff
*skb
, struct net_device
*dev
)
7296 PKTFREE(NULL
, skb
, FALSE
);
/* Monitor netdev ioctl hook: no private ioctls are supported. */
static int
dhd_monitor_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return 0;
}
7306 static struct net_device_stats
*
7307 dhd_monitor_get_stats(struct net_device
*dev
)
7309 return &DHD_MON_DEV_STATS(dev
);
7312 static const struct net_device_ops netdev_monitor_ops
=
7314 .ndo_start_xmit
= dhd_monitor_start
,
7315 .ndo_get_stats
= dhd_monitor_get_stats
,
7316 .ndo_do_ioctl
= dhd_monitor_ioctl
7320 dhd_add_monitor_if(void *handle
, void *event_info
, u8 event
)
7322 dhd_info_t
*dhd
= handle
;
7323 struct net_device
*dev
;
7326 if (event
!= DHD_WQ_WORK_IF_ADD
) {
7327 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
7332 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
7336 dev
= alloc_etherdev(DHD_MON_DEV_PRIV_SIZE
);
7338 DHD_ERROR(("%s: alloc wlif failed\n", __FUNCTION__
));
7342 devname
= "radiotap";
7344 snprintf(dev
->name
, sizeof(dev
->name
), "%s%u", devname
, dhd
->unit
);
7346 #ifndef ARPHRD_IEEE80211_PRISM /* From Linux 2.4.18 */
7347 #define ARPHRD_IEEE80211_PRISM 802
7350 #ifndef ARPHRD_IEEE80211_RADIOTAP
7351 #define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */
7352 #endif /* ARPHRD_IEEE80211_RADIOTAP */
7354 dev
->type
= ARPHRD_IEEE80211_RADIOTAP
;
7356 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
7357 dev
->hard_start_xmit
= dhd_monitor_start
;
7358 dev
->do_ioctl
= dhd_monitor_ioctl
;
7359 dev
->get_stats
= dhd_monitor_get_stats
;
7361 dev
->netdev_ops
= &netdev_monitor_ops
;
7364 if (register_netdev(dev
)) {
7365 DHD_ERROR(("%s, register_netdev failed for %s\n",
7366 __FUNCTION__
, dev
->name
));
7370 bcmwifi_monitor_create(&dhd
->monitor_info
);
7371 dhd
->monitor_dev
= dev
;
7375 dhd_del_monitor_if(void *handle
, void *event_info
, u8 event
)
7377 dhd_info_t
*dhd
= handle
;
7379 if (event
!= DHD_WQ_WORK_IF_DEL
) {
7380 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
7385 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
7389 if (dhd
->monitor_dev
) {
7390 unregister_netdev(dhd
->monitor_dev
);
7392 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
7393 MFREE(dhd
->osh
, dhd
->monitor_dev
->priv
, DHD_MON_DEV_PRIV_SIZE
);
7394 MFREE(dhd
->osh
, dhd
->monitor_dev
, sizeof(struct net_device
));
7396 free_netdev(dhd
->monitor_dev
);
7399 dhd
->monitor_dev
= NULL
;
7402 if (dhd
->monitor_info
) {
7403 bcmwifi_monitor_delete(dhd
->monitor_info
);
7404 dhd
->monitor_info
= NULL
;
7409 dhd_set_monitor(dhd_pub_t
*dhd
, int ifidx
, int val
)
7411 dhd_info_t
*info
= dhd
->info
;
7413 DHD_TRACE(("%s: val %d\n", __FUNCTION__
, val
));
7414 if ((val
&& info
->monitor_dev
) || (!val
&& !info
->monitor_dev
)) {
7415 DHD_ERROR(("%s: Mismatched params, return\n", __FUNCTION__
));
7419 /* Delete monitor */
7421 info
->monitor_type
= val
;
7422 dhd_deferred_schedule_work(info
->dhd_deferred_wq
, NULL
, DHD_WQ_WORK_IF_DEL
,
7423 dhd_del_monitor_if
, DHD_WQ_WORK_PRIORITY_LOW
);
7428 info
->monitor_type
= val
;
7429 dhd_deferred_schedule_work(info
->dhd_deferred_wq
, NULL
, DHD_WQ_WORK_IF_ADD
,
7430 dhd_add_monitor_if
, DHD_WQ_WORK_PRIORITY_LOW
);
7432 #endif /* WL_MONITOR */
7434 int dhd_ioctl_process(dhd_pub_t
*pub
, int ifidx
, dhd_ioctl_t
*ioc
, void *data_buf
)
7436 int bcmerror
= BCME_OK
;
7438 struct net_device
*net
;
7440 #ifdef REPORT_FATAL_TIMEOUTS
7441 if (ioc
->cmd
== WLC_SET_WPA_AUTH
) {
7444 wpa_auth
= *((int *)ioc
->buf
);
7445 DHD_INFO(("wpa_auth:%d\n", wpa_auth
));
7446 if (wpa_auth
!= WPA_AUTH_DISABLED
) {
7447 /* If AP is with security then enable WLC_E_PSK_SUP event checking */
7448 dhd_set_join_error(pub
, WLC_WPA_MASK
);
7450 /* If AP is with open then disable WLC_E_PSK_SUP event checking */
7451 dhd_clear_join_error(pub
, WLC_WPA_MASK
);
7455 if (ioc
->cmd
== WLC_SET_AUTH
) {
7457 auth
= *((int *)ioc
->buf
);
7458 DHD_INFO(("Auth:%d\n", auth
));
7460 if (auth
!= WL_AUTH_OPEN_SYSTEM
) {
7461 /* If AP is with security then enable WLC_E_PSK_SUP event checking */
7462 dhd_set_join_error(pub
, WLC_WPA_MASK
);
7464 /* If AP is with open then disable WLC_E_PSK_SUP event checking */
7465 dhd_clear_join_error(pub
, WLC_WPA_MASK
);
7468 #endif /* REPORT_FATAL_TIMEOUTS */
7469 net
= dhd_idx2net(pub
, ifidx
);
7471 bcmerror
= BCME_BADARG
;
7475 /* check for local dhd ioctl and handle it */
7476 if (ioc
->driver
== DHD_IOCTL_MAGIC
) {
7477 /* This is a DHD IOVAR, truncate buflen to DHD_IOCTL_MAXLEN */
7479 buflen
= MIN(ioc
->len
, DHD_IOCTL_MAXLEN
);
7480 bcmerror
= dhd_ioctl((void *)pub
, ioc
, data_buf
, buflen
);
7482 pub
->bcmerror
= bcmerror
;
7486 /* This is a WL IOVAR, truncate buflen to WLC_IOCTL_MAXLEN */
7488 buflen
= MIN(ioc
->len
, WLC_IOCTL_MAXLEN
);
7491 /* send to dongle (must be up, and wl). */
7492 if (pub
->busstate
== DHD_BUS_DOWN
|| pub
->busstate
== DHD_BUS_LOAD
) {
7493 if ((!pub
->dongle_trap_occured
) && allow_delay_fwdl
) {
7495 if (atomic_read(&exit_in_progress
)) {
7496 DHD_ERROR(("%s module exit in progress\n", __func__
));
7497 bcmerror
= BCME_DONGLE_DOWN
;
7500 ret
= dhd_bus_start(pub
);
7502 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__
, ret
));
7503 bcmerror
= BCME_DONGLE_DOWN
;
7507 bcmerror
= BCME_DONGLE_DOWN
;
7513 bcmerror
= BCME_DONGLE_DOWN
;
7516 #endif /* !BCMDBUS */
7519 * Flush the TX queue if required for proper message serialization:
7520 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
7521 * prevent M4 encryption and
7522 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
7523 * prevent disassoc frame being sent before WPS-DONE frame.
7525 if (ioc
->cmd
== WLC_SET_KEY
||
7526 (ioc
->cmd
== WLC_SET_VAR
&& data_buf
!= NULL
&&
7527 strncmp("wsec_key", data_buf
, 9) == 0) ||
7528 (ioc
->cmd
== WLC_SET_VAR
&& data_buf
!= NULL
&&
7529 strncmp("bsscfg:wsec_key", data_buf
, 15) == 0) ||
7530 ioc
->cmd
== WLC_DISASSOC
)
7531 dhd_wait_pend8021x(net
);
7535 /* short cut wl ioctl calls here */
7536 if (strcmp("htsf", data_buf
) == 0) {
7537 dhd_ioctl_htsf_get(dhd
, 0);
7541 if (strcmp("htsflate", data_buf
) == 0) {
7543 memset(ts
, 0, sizeof(tstamp_t
)*TSMAX
);
7544 memset(&maxdelayts
, 0, sizeof(tstamp_t
));
7548 memset(&vi_d1
.bin
, 0, sizeof(uint32
)*NUMBIN
);
7549 memset(&vi_d2
.bin
, 0, sizeof(uint32
)*NUMBIN
);
7550 memset(&vi_d3
.bin
, 0, sizeof(uint32
)*NUMBIN
);
7551 memset(&vi_d4
.bin
, 0, sizeof(uint32
)*NUMBIN
);
7557 if (strcmp("htsfclear", data_buf
) == 0) {
7558 memset(&vi_d1
.bin
, 0, sizeof(uint32
)*NUMBIN
);
7559 memset(&vi_d2
.bin
, 0, sizeof(uint32
)*NUMBIN
);
7560 memset(&vi_d3
.bin
, 0, sizeof(uint32
)*NUMBIN
);
7561 memset(&vi_d4
.bin
, 0, sizeof(uint32
)*NUMBIN
);
7565 if (strcmp("htsfhis", data_buf
) == 0) {
7566 dhd_dump_htsfhisto(&vi_d1
, "H to D");
7567 dhd_dump_htsfhisto(&vi_d2
, "D to D");
7568 dhd_dump_htsfhisto(&vi_d3
, "D to H");
7569 dhd_dump_htsfhisto(&vi_d4
, "H to H");
7572 if (strcmp("tsport", data_buf
) == 0) {
7574 memcpy(&tsport
, data_buf
+ 7, 4);
7576 DHD_ERROR(("current timestamp port: %d \n", tsport
));
7581 #endif /* WLMEDIA_HTSF */
7583 if ((ioc
->cmd
== WLC_SET_VAR
|| ioc
->cmd
== WLC_GET_VAR
) &&
7584 data_buf
!= NULL
&& strncmp("rpc_", data_buf
, 4) == 0) {
7586 bcmerror
= dhd_fdaggr_ioctl(pub
, ifidx
, (wl_ioctl_t
*)ioc
, data_buf
, buflen
);
7588 bcmerror
= BCME_UNSUPPORTED
;
7592 bcmerror
= dhd_wl_ioctl(pub
, ifidx
, (wl_ioctl_t
*)ioc
, data_buf
, buflen
);
7595 /* Intercept monitor ioctl here, add/del monitor if */
7596 if (bcmerror
== BCME_OK
&& ioc
->cmd
== WLC_SET_MONITOR
) {
7597 dhd_set_monitor(pub
, ifidx
, *(int32
*)data_buf
);
7601 #ifdef REPORT_FATAL_TIMEOUTS
7602 if (ioc
->cmd
== WLC_SCAN
&& bcmerror
== 0) {
7603 dhd_start_scan_timer(pub
);
7605 if (ioc
->cmd
== WLC_SET_SSID
&& bcmerror
== 0) {
7606 dhd_start_join_timer(pub
);
7608 #endif /* REPORT_FATAL_TIMEOUTS */
7611 dhd_check_hang(net
, pub
, bcmerror
);
7617 dhd_ioctl_entry(struct net_device
*net
, struct ifreq
*ifr
, int cmd
)
7619 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
7624 void *local_buf
= NULL
;
7625 void __user
*ioc_buf_user
= NULL
;
7628 if (atomic_read(&exit_in_progress
)) {
7629 DHD_ERROR(("%s module exit in progress\n", __func__
));
7630 bcmerror
= BCME_DONGLE_DOWN
;
7631 return OSL_ERROR(bcmerror
);
7634 DHD_OS_WAKE_LOCK(&dhd
->pub
);
7635 DHD_PERIM_LOCK(&dhd
->pub
);
7637 /* Interface up check for built-in type */
7638 if (!dhd_download_fw_on_driverload
&& dhd
->pub
.up
== FALSE
) {
7639 DHD_ERROR(("%s: Interface is down \n", __FUNCTION__
));
7640 DHD_PERIM_UNLOCK(&dhd
->pub
);
7641 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7642 return OSL_ERROR(BCME_NOTUP
);
7645 ifidx
= dhd_net2idx(dhd
, net
);
7646 DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__
, ifidx
, cmd
));
7648 if (ifidx
== DHD_BAD_IF
) {
7649 DHD_ERROR(("%s: BAD IF\n", __FUNCTION__
));
7650 DHD_PERIM_UNLOCK(&dhd
->pub
);
7651 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7655 #if defined(WL_WIRELESS_EXT)
7656 /* linux wireless extensions */
7657 if ((cmd
>= SIOCIWFIRST
) && (cmd
<= SIOCIWLAST
)) {
7658 /* may recurse, do NOT lock */
7659 ret
= wl_iw_ioctl(net
, ifr
, cmd
);
7660 DHD_PERIM_UNLOCK(&dhd
->pub
);
7661 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7664 #endif /* defined(WL_WIRELESS_EXT) */
7666 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
7667 if (cmd
== SIOCETHTOOL
) {
7668 ret
= dhd_ethtool(dhd
, (void*)ifr
->ifr_data
);
7669 DHD_PERIM_UNLOCK(&dhd
->pub
);
7670 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7673 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
7675 if (cmd
== SIOCDEVPRIVATE
+1) {
7676 ret
= wl_android_priv_cmd(net
, ifr
, cmd
);
7677 dhd_check_hang(net
, &dhd
->pub
, ret
);
7678 DHD_PERIM_UNLOCK(&dhd
->pub
);
7679 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7683 if (cmd
!= SIOCDEVPRIVATE
) {
7684 DHD_PERIM_UNLOCK(&dhd
->pub
);
7685 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7689 memset(&ioc
, 0, sizeof(ioc
));
7691 #ifdef CONFIG_COMPAT
7692 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
7693 if (in_compat_syscall())
7695 if (is_compat_task())
7698 compat_wl_ioctl_t compat_ioc
;
7699 if (copy_from_user(&compat_ioc
, ifr
->ifr_data
, sizeof(compat_wl_ioctl_t
))) {
7700 bcmerror
= BCME_BADADDR
;
7703 ioc
.cmd
= compat_ioc
.cmd
;
7704 if (ioc
.cmd
& WLC_SPEC_FLAG
) {
7705 memset(&ioc
, 0, sizeof(ioc
));
7706 /* Copy the ioc control structure part of ioctl request */
7707 if (copy_from_user(&ioc
, ifr
->ifr_data
, sizeof(wl_ioctl_t
))) {
7708 bcmerror
= BCME_BADADDR
;
7711 ioc
.cmd
&= ~WLC_SPEC_FLAG
; /* Clear the FLAG */
7713 /* To differentiate between wl and dhd read 4 more byes */
7714 if ((copy_from_user(&ioc
.driver
, (char *)ifr
->ifr_data
+ sizeof(wl_ioctl_t
),
7715 sizeof(uint
)) != 0)) {
7716 bcmerror
= BCME_BADADDR
;
7720 } else { /* ioc.cmd & WLC_SPEC_FLAG */
7721 ioc
.buf
= compat_ptr(compat_ioc
.buf
);
7722 ioc
.len
= compat_ioc
.len
;
7723 ioc
.set
= compat_ioc
.set
;
7724 ioc
.used
= compat_ioc
.used
;
7725 ioc
.needed
= compat_ioc
.needed
;
7726 /* To differentiate between wl and dhd read 4 more byes */
7727 if ((copy_from_user(&ioc
.driver
, (char *)ifr
->ifr_data
+ sizeof(compat_wl_ioctl_t
),
7728 sizeof(uint
)) != 0)) {
7729 bcmerror
= BCME_BADADDR
;
7732 } /* ioc.cmd & WLC_SPEC_FLAG */
7734 #endif /* CONFIG_COMPAT */
7736 /* Copy the ioc control structure part of ioctl request */
7737 if (copy_from_user(&ioc
, ifr
->ifr_data
, sizeof(wl_ioctl_t
))) {
7738 bcmerror
= BCME_BADADDR
;
7741 #ifdef CONFIG_COMPAT
7742 ioc
.cmd
&= ~WLC_SPEC_FLAG
; /* make sure it was clear when it isn't a compat task*/
7745 /* To differentiate between wl and dhd read 4 more byes */
7746 if ((copy_from_user(&ioc
.driver
, (char *)ifr
->ifr_data
+ sizeof(wl_ioctl_t
),
7747 sizeof(uint
)) != 0)) {
7748 bcmerror
= BCME_BADADDR
;
7753 if (!capable(CAP_NET_ADMIN)) {
7754 bcmerror = BCME_EPERM;
7758 /* Take backup of ioc.buf and restore later */
7759 ioc_buf_user
= ioc
.buf
;
7762 buflen
= MIN(ioc
.len
, DHD_IOCTL_MAXLEN
);
7763 if (!(local_buf
= MALLOC(dhd
->pub
.osh
, buflen
+1))) {
7764 bcmerror
= BCME_NOMEM
;
7768 DHD_PERIM_UNLOCK(&dhd
->pub
);
7769 if (copy_from_user(local_buf
, ioc
.buf
, buflen
)) {
7770 DHD_PERIM_LOCK(&dhd
->pub
);
7771 bcmerror
= BCME_BADADDR
;
7774 DHD_PERIM_LOCK(&dhd
->pub
);
7776 *((char *)local_buf
+ buflen
) = '\0';
7778 /* For some platforms accessing userspace memory
7779 * of ioc.buf is causing kernel panic, so to avoid that
7780 * make ioc.buf pointing to kernel space memory local_buf
7782 ioc
.buf
= local_buf
;
7785 /* Skip all the non DHD iovars (wl iovars) after f/w hang */
7786 if (ioc
.driver
!= DHD_IOCTL_MAGIC
&& dhd
->pub
.hang_was_sent
) {
7787 DHD_TRACE(("%s: HANG was sent up earlier\n", __FUNCTION__
));
7788 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd
->pub
, DHD_EVENT_TIMEOUT_MS
);
7789 bcmerror
= BCME_DONGLE_DOWN
;
7793 bcmerror
= dhd_ioctl_process(&dhd
->pub
, ifidx
, &ioc
, local_buf
);
7795 /* Restore back userspace pointer to ioc.buf */
7796 ioc
.buf
= ioc_buf_user
;
7798 if (!bcmerror
&& buflen
&& local_buf
&& ioc
.buf
) {
7799 DHD_PERIM_UNLOCK(&dhd
->pub
);
7800 if (copy_to_user(ioc
.buf
, local_buf
, buflen
))
7802 DHD_PERIM_LOCK(&dhd
->pub
);
7807 MFREE(dhd
->pub
.osh
, local_buf
, buflen
+1);
7809 DHD_PERIM_UNLOCK(&dhd
->pub
);
7810 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7812 return OSL_ERROR(bcmerror
);
7816 #ifdef FIX_CPU_MIN_CLOCK
7817 static int dhd_init_cpufreq_fix(dhd_info_t
*dhd
)
7820 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7821 mutex_init(&dhd
->cpufreq_fix
);
7823 dhd
->cpufreq_fix_status
= FALSE
;
7828 static void dhd_fix_cpu_freq(dhd_info_t
*dhd
)
7830 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7831 mutex_lock(&dhd
->cpufreq_fix
);
7833 if (dhd
&& !dhd
->cpufreq_fix_status
) {
7834 pm_qos_add_request(&dhd
->dhd_cpu_qos
, PM_QOS_CPU_FREQ_MIN
, 300000);
7835 #ifdef FIX_BUS_MIN_CLOCK
7836 pm_qos_add_request(&dhd
->dhd_bus_qos
, PM_QOS_BUS_THROUGHPUT
, 400000);
7837 #endif /* FIX_BUS_MIN_CLOCK */
7838 DHD_ERROR(("pm_qos_add_requests called\n"));
7840 dhd
->cpufreq_fix_status
= TRUE
;
7842 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7843 mutex_unlock(&dhd
->cpufreq_fix
);
7847 static void dhd_rollback_cpu_freq(dhd_info_t
*dhd
)
7849 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7850 mutex_lock(&dhd
->cpufreq_fix
);
7852 if (dhd
&& dhd
->cpufreq_fix_status
!= TRUE
) {
7853 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7854 mutex_unlock(&dhd
->cpufreq_fix
);
7859 pm_qos_remove_request(&dhd
->dhd_cpu_qos
);
7860 #ifdef FIX_BUS_MIN_CLOCK
7861 pm_qos_remove_request(&dhd
->dhd_bus_qos
);
7862 #endif /* FIX_BUS_MIN_CLOCK */
7863 DHD_ERROR(("pm_qos_add_requests called\n"));
7865 dhd
->cpufreq_fix_status
= FALSE
;
7866 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7867 mutex_unlock(&dhd
->cpufreq_fix
);
7870 #endif /* FIX_CPU_MIN_CLOCK */
7872 #if defined(BT_OVER_SDIO)
7875 dhdsdio_bus_usr_cnt_inc(dhd_pub_t
*dhdp
)
7877 dhdp
->info
->bus_user_count
++;
7881 dhdsdio_bus_usr_cnt_dec(dhd_pub_t
*dhdp
)
7883 dhdp
->info
->bus_user_count
--;
7887 * Success: Returns 0
7888 * Failure: Returns -1 or errono code
7891 dhd_bus_get(wlan_bt_handle_t handle
, bus_owner_t owner
)
7893 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handle
;
7894 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
7897 mutex_lock(&dhd
->bus_user_lock
);
7898 ++dhd
->bus_user_count
;
7899 if (dhd
->bus_user_count
< 0) {
7900 DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__
));
7905 if (dhd
->bus_user_count
== 1) {
7907 dhd
->pub
.hang_was_sent
= 0;
7909 /* First user, turn on WL_REG, start the bus */
7910 DHD_ERROR(("%s(): First user Turn On WL_REG & start the bus", __FUNCTION__
));
7912 if (!wifi_platform_set_power(dhd
->adapter
, TRUE
, WIFI_TURNON_DELAY
)) {
7914 ret
= dhd_bus_resume(dhdp
, 0);
7916 DHD_ERROR(("%s(): Failed to enable F1, err=%d\n",
7917 __FUNCTION__
, ret
));
7922 dhd_update_fw_nv_path(dhd
);
7923 /* update firmware and nvram path to sdio bus */
7924 dhd_bus_update_fw_nv_path(dhd
->pub
.bus
,
7925 dhd
->fw_path
, dhd
->nv_path
);
7926 /* download the firmware, Enable F2 */
7927 /* TODO: Should be done only in case of FW switch */
7928 ret
= dhd_bus_devreset(dhdp
, FALSE
);
7929 dhd_bus_resume(dhdp
, 1);
7931 if (dhd_sync_with_dongle(&dhd
->pub
) < 0) {
7932 DHD_ERROR(("%s(): Sync with dongle failed!!\n", __FUNCTION__
));
7936 DHD_ERROR(("%s(): Failed to download, err=%d\n", __FUNCTION__
, ret
));
7939 DHD_ERROR(("%s(): BUS is already acquired, just increase the count %d \r\n",
7940 __FUNCTION__
, dhd
->bus_user_count
));
7943 mutex_unlock(&dhd
->bus_user_lock
);
7946 EXPORT_SYMBOL(dhd_bus_get
);
7949 * Success: Returns 0
7950 * Failure: Returns -1 or errono code
7953 dhd_bus_put(wlan_bt_handle_t handle
, bus_owner_t owner
)
7955 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handle
;
7956 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
7958 BCM_REFERENCE(owner
);
7960 mutex_lock(&dhd
->bus_user_lock
);
7961 --dhd
->bus_user_count
;
7962 if (dhd
->bus_user_count
< 0) {
7963 DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__
));
7964 dhd
->bus_user_count
= 0;
7969 if (dhd
->bus_user_count
== 0) {
7970 /* Last user, stop the bus and turn Off WL_REG */
7971 DHD_ERROR(("%s(): There are no owners left Trunf Off WL_REG & stop the bus \r\n",
7973 #ifdef PROP_TXSTATUS
7974 if (dhd
->pub
.wlfc_enabled
) {
7975 dhd_wlfc_deinit(&dhd
->pub
);
7977 #endif /* PROP_TXSTATUS */
7979 if (dhd
->pub
.pno_state
) {
7980 dhd_pno_deinit(&dhd
->pub
);
7982 #endif /* PNO_SUPPORT */
7984 if (dhd
->pub
.rtt_state
) {
7985 dhd_rtt_deinit(&dhd
->pub
);
7987 #endif /* RTT_SUPPORT */
7988 ret
= dhd_bus_devreset(dhdp
, TRUE
);
7990 dhd_bus_suspend(dhdp
);
7991 wifi_platform_set_power(dhd
->adapter
, FALSE
, WIFI_TURNOFF_DELAY
);
7994 DHD_ERROR(("%s(): Other owners using bus, decrease the count %d \r\n",
7995 __FUNCTION__
, dhd
->bus_user_count
));
7998 mutex_unlock(&dhd
->bus_user_lock
);
8001 EXPORT_SYMBOL(dhd_bus_put
);
8004 dhd_net_bus_get(struct net_device
*dev
)
8006 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8007 return dhd_bus_get(&dhd
->pub
, WLAN_MODULE
);
8011 dhd_net_bus_put(struct net_device
*dev
)
8013 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8014 return dhd_bus_put(&dhd
->pub
, WLAN_MODULE
);
8018 * Function to enable the Bus Clock
8019 * Returns BCME_OK on success and BCME_xxx on failure
8021 * This function is not callable from non-sleepable context
8023 int dhd_bus_clk_enable(wlan_bt_handle_t handle
, bus_owner_t owner
)
8025 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handle
;
8029 dhd_os_sdlock(dhdp
);
8031 * The second argument is TRUE, that means, we expect
8032 * the function to "wait" until the clocks are really
8035 ret
= __dhdsdio_clk_enable(dhdp
->bus
, owner
, TRUE
);
8036 dhd_os_sdunlock(dhdp
);
8040 EXPORT_SYMBOL(dhd_bus_clk_enable
);
8043 * Function to disable the Bus Clock
8044 * Returns BCME_OK on success and BCME_xxx on failure
8046 * This function is not callable from non-sleepable context
8048 int dhd_bus_clk_disable(wlan_bt_handle_t handle
, bus_owner_t owner
)
8050 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handle
;
8054 dhd_os_sdlock(dhdp
);
8056 * The second argument is TRUE, that means, we expect
8057 * the function to "wait" until the clocks are really
8060 ret
= __dhdsdio_clk_disable(dhdp
->bus
, owner
, TRUE
);
8061 dhd_os_sdunlock(dhdp
);
8065 EXPORT_SYMBOL(dhd_bus_clk_disable
);
8068 * Function to reset bt_use_count counter to zero.
8070 * This function is not callable from non-sleepable context
8072 void dhd_bus_reset_bt_use_count(wlan_bt_handle_t handle
)
8074 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handle
;
8076 /* take the lock and reset bt use count */
8077 dhd_os_sdlock(dhdp
);
8078 dhdsdio_reset_bt_use_count(dhdp
->bus
);
8079 dhd_os_sdunlock(dhdp
);
8081 EXPORT_SYMBOL(dhd_bus_reset_bt_use_count
);
8083 #endif /* BT_OVER_SDIO */
8085 #define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
8086 int dhd_deepsleep(dhd_info_t
*dhd
, int flag
)
8097 case 1 : /* Deepsleep on */
8098 DHD_ERROR(("dhd_deepsleep: ON\n"));
8099 /* give some time to sysioc_work before deepsleep */
8101 #ifdef PKT_FILTER_SUPPORT
8102 /* disable pkt filter */
8103 dhd_enable_packet_filter(0, dhdp
);
8104 #endif /* PKT_FILTER_SUPPORT */
8107 memset(iovbuf
, 0, sizeof(iovbuf
));
8108 bcm_mkiovar("mpc", (char *)&powervar
, 4, iovbuf
, sizeof(iovbuf
));
8109 dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
8111 /* Enable Deepsleep */
8113 memset(iovbuf
, 0, sizeof(iovbuf
));
8114 bcm_mkiovar("deepsleep", (char *)&powervar
, 4, iovbuf
, sizeof(iovbuf
));
8115 dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
8118 case 0: /* Deepsleep Off */
8119 DHD_ERROR(("dhd_deepsleep: OFF\n"));
8121 /* Disable Deepsleep */
8122 for (cnt
= 0; cnt
< MAX_TRY_CNT
; cnt
++) {
8124 memset(iovbuf
, 0, sizeof(iovbuf
));
8125 bcm_mkiovar("deepsleep", (char *)&powervar
, 4,
8126 iovbuf
, sizeof(iovbuf
));
8127 dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, iovbuf
,
8128 sizeof(iovbuf
), TRUE
, 0);
8130 memset(iovbuf
, 0, sizeof(iovbuf
));
8131 bcm_mkiovar("deepsleep", (char *)&powervar
, 4,
8132 iovbuf
, sizeof(iovbuf
));
8133 if ((ret
= dhd_wl_ioctl_cmd(dhdp
, WLC_GET_VAR
, iovbuf
,
8134 sizeof(iovbuf
), FALSE
, 0)) < 0) {
8135 DHD_ERROR(("the error of dhd deepsleep status"
8136 " ret value :%d\n", ret
));
8138 if (!(*(int *)iovbuf
)) {
8139 DHD_ERROR(("deepsleep mode is 0,"
8140 " count: %d\n", cnt
));
8148 memset(iovbuf
, 0, sizeof(iovbuf
));
8149 bcm_mkiovar("mpc", (char *)&powervar
, 4, iovbuf
, sizeof(iovbuf
));
8150 dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
8158 dhd_stop(struct net_device
*net
)
8162 unsigned long flags
= 0;
8163 #endif /* WL_CFG80211 */
8164 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
8165 DHD_OS_WAKE_LOCK(&dhd
->pub
);
8166 DHD_PERIM_LOCK(&dhd
->pub
);
8167 printf("%s: Enter %p\n", __FUNCTION__
, net
);
8168 dhd
->pub
.rxcnt_timeout
= 0;
8169 dhd
->pub
.txcnt_timeout
= 0;
8172 dhd
->pub
.d3ackcnt_timeout
= 0;
8173 #endif /* BCMPCIE */
8175 if (dhd
->pub
.up
== 0) {
8178 #if defined(DHD_HANG_SEND_UP_TEST)
8179 if (dhd
->pub
.req_hang_type
) {
8180 DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
8181 __FUNCTION__
, dhd
->pub
.req_hang_type
));
8182 dhd
->pub
.req_hang_type
= 0;
8184 #endif /* DHD_HANG_SEND_UP_TEST */
8186 dhd_if_flush_sta(DHD_DEV_IFP(net
));
8188 /* Disable Runtime PM before interface down */
8189 DHD_DISABLE_RUNTIME_PM(&dhd
->pub
);
8191 #ifdef FIX_CPU_MIN_CLOCK
8192 if (dhd_get_fw_mode(dhd
) == DHD_FLAG_HOSTAP_MODE
)
8193 dhd_rollback_cpu_freq(dhd
);
8194 #endif /* FIX_CPU_MIN_CLOCK */
8196 ifidx
= dhd_net2idx(dhd
, net
);
8197 BCM_REFERENCE(ifidx
);
8199 /* Set state and stop OS transmissions */
8200 netif_stop_queue(net
);
8202 spin_lock_irqsave(&dhd
->pub
.up_lock
, flags
);
8204 spin_unlock_irqrestore(&dhd
->pub
.up_lock
, flags
);
8207 #endif /* WL_CFG80211 */
8212 wl_cfg80211_down(net
);
8214 ifp
= dhd
->iflist
[0];
8215 ASSERT(ifp
&& ifp
->net
);
8217 * For CFG80211: Clean up all the left over virtual interfaces
8218 * when the primary Interface is brought down. [ifconfig wlan0 down]
8220 if (!dhd_download_fw_on_driverload
) {
8221 if ((dhd
->dhd_state
& DHD_ATTACH_STATE_ADD_IF
) &&
8222 (dhd
->dhd_state
& DHD_ATTACH_STATE_CFG80211
)) {
8224 #ifdef WL_CFG80211_P2P_DEV_IF
8225 wl_cfg80211_del_p2p_wdev(net
);
8226 #endif /* WL_CFG80211_P2P_DEV_IF */
8228 dhd_net_if_lock_local(dhd
);
8229 for (i
= 1; i
< DHD_MAX_IFS
; i
++)
8230 dhd_remove_if(&dhd
->pub
, i
, FALSE
);
8232 if (ifp
&& ifp
->net
) {
8233 dhd_if_del_sta_list(ifp
);
8235 #ifdef ARP_OFFLOAD_SUPPORT
8236 if (dhd_inetaddr_notifier_registered
) {
8237 dhd_inetaddr_notifier_registered
= FALSE
;
8238 unregister_inetaddr_notifier(&dhd_inetaddr_notifier
);
8240 #endif /* ARP_OFFLOAD_SUPPORT */
8241 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
8242 if (dhd_inet6addr_notifier_registered
) {
8243 dhd_inet6addr_notifier_registered
= FALSE
;
8244 unregister_inet6addr_notifier(&dhd_inet6addr_notifier
);
8246 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
8247 dhd_net_if_unlock_local(dhd
);
8250 // terence 20161024: remove this to prevent dev_close() get stuck in dhd_hang_process
8251 cancel_work_sync(dhd
->dhd_deferred_wq
);
8254 #ifdef SHOW_LOGTRACE
8255 /* Wait till event_log_dispatcher_work finishes */
8256 cancel_work_sync(&dhd
->event_log_dispatcher_work
);
8257 #endif /* SHOW_LOGTRACE */
8259 #if defined(DHD_LB_RXP)
8260 __skb_queue_purge(&dhd
->rx_pend_queue
);
8261 #endif /* DHD_LB_RXP */
8263 #if defined(DHD_LB_TXP)
8264 skb_queue_purge(&dhd
->tx_pend_queue
);
8265 #endif /* DHD_LB_TXP */
8268 argos_register_notifier_deinit();
8269 #ifdef DHDTCPACK_SUPPRESS
8270 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_OFF
);
8271 #endif /* DHDTCPACK_SUPPRESS */
8272 #if defined(DHD_LB_RXP)
8273 if (ifp
->net
== dhd
->rx_napi_netdev
) {
8274 DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
8275 __FUNCTION__
, &dhd
->rx_napi_struct
, net
, net
->name
));
8276 skb_queue_purge(&dhd
->rx_napi_queue
);
8277 napi_disable(&dhd
->rx_napi_struct
);
8278 netif_napi_del(&dhd
->rx_napi_struct
);
8279 dhd
->rx_napi_netdev
= NULL
;
8281 #endif /* DHD_LB_RXP */
8283 #endif /* WL_CFG80211 */
8285 DHD_SSSR_DUMP_DEINIT(&dhd
->pub
);
8287 #ifdef PROP_TXSTATUS
8288 dhd_wlfc_cleanup(&dhd
->pub
, NULL
, 0);
8290 #ifdef SHOW_LOGTRACE
8291 if (!dhd_download_fw_on_driverload
) {
8292 /* Release the skbs from queue for WLC_E_TRACE event */
8293 dhd_event_logtrace_flush_queue(&dhd
->pub
);
8294 if (dhd
->dhd_state
& DHD_ATTACH_LOGTRACE_INIT
) {
8295 if (dhd
->event_data
.fmts
) {
8296 MFREE(dhd
->pub
.osh
, dhd
->event_data
.fmts
,
8297 dhd
->event_data
.fmts_size
);
8298 dhd
->event_data
.fmts
= NULL
;
8300 if (dhd
->event_data
.raw_fmts
) {
8301 MFREE(dhd
->pub
.osh
, dhd
->event_data
.raw_fmts
,
8302 dhd
->event_data
.raw_fmts_size
);
8303 dhd
->event_data
.raw_fmts
= NULL
;
8305 if (dhd
->event_data
.raw_sstr
) {
8306 MFREE(dhd
->pub
.osh
, dhd
->event_data
.raw_sstr
,
8307 dhd
->event_data
.raw_sstr_size
);
8308 dhd
->event_data
.raw_sstr
= NULL
;
8310 if (dhd
->event_data
.rom_raw_sstr
) {
8311 MFREE(dhd
->pub
.osh
, dhd
->event_data
.rom_raw_sstr
,
8312 dhd
->event_data
.rom_raw_sstr_size
);
8313 dhd
->event_data
.rom_raw_sstr
= NULL
;
8315 dhd
->dhd_state
&= ~DHD_ATTACH_LOGTRACE_INIT
;
8318 #endif /* SHOW_LOGTRACE */
8320 dhd_dev_apf_delete_filter(net
);
8323 /* Stop the protocol module */
8324 dhd_prot_stop(&dhd
->pub
);
8326 OLD_MOD_DEC_USE_COUNT
;
8328 if (ifidx
== 0 && !dhd_download_fw_on_driverload
) {
8329 #if defined(BT_OVER_SDIO)
8330 dhd_bus_put(&dhd
->pub
, WLAN_MODULE
);
8331 wl_android_set_wifi_on_flag(FALSE
);
8333 wl_android_wifi_off(net
, TRUE
);
8334 #ifdef WL_EXT_IAPSTA
8335 wl_ext_iapsta_dettach_netdev();
8338 if (dhd
->pub
.conf
->deepsleep
)
8339 dhd_deepsleep(dhd
, 1);
8340 #endif /* BT_OVER_SDIO */
8342 dhd
->pub
.hang_was_sent
= 0;
8344 /* Clear country spec for for built-in type driver */
8345 if (!dhd_download_fw_on_driverload
) {
8346 dhd
->pub
.dhd_cspec
.country_abbrev
[0] = 0x00;
8347 dhd
->pub
.dhd_cspec
.rev
= 0;
8348 dhd
->pub
.dhd_cspec
.ccode
[0] = 0x00;
8355 DHD_PERIM_UNLOCK(&dhd
->pub
);
8356 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
8358 /* Destroy wakelock */
8359 if (!dhd_download_fw_on_driverload
&&
8360 (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
8361 DHD_OS_WAKE_LOCK_DESTROY(dhd
);
8362 dhd
->dhd_state
&= ~DHD_ATTACH_STATE_WAKELOCKS_INIT
;
8364 printf("%s: Exit\n", __FUNCTION__
);
8369 #if defined(WL_CFG80211) && defined(USE_INITIAL_SHORT_DWELL_TIME)
8370 extern bool g_first_broadcast_scan
;
/*
 * dhd_interworking_enable() - enable the "interworking" iovar on the dongle
 * via dhd_iovar() on interface 0, logging on failure.
 *
 * NOTE(review): this listing is an elided line-wrapped extract; original
 * lines between the numbered statements (braces, 'int ret;' declaration,
 * the return path) are missing from view.
 */
8374 static int dhd_interworking_enable(dhd_pub_t
*dhd
)
/* Non-zero value passed to firmware enables the feature. */
8376 uint32 enable
= true;
/* Set-iovar ("interworking") on ifidx 0; TRUE = set operation. */
8379 ret
= dhd_iovar(dhd
, 0, "interworking", (char *)&enable
, sizeof(enable
), NULL
, 0, TRUE
);
/* Failure log (typo "enableing" preserved from the original source). */
8381 		DHD_ERROR(("%s: enableing interworking failed, ret=%d\n", __FUNCTION__
, ret
));
/*
 * dhd_open() - net_device_ops .ndo_open handler for DHD interfaces.
 *
 * Visible responsibilities (from the statements below): initialize
 * wakelocks and log-trace string tables on first open, guard against
 * re-open after a HANG event, reset hang/trap/timeout state, power on
 * WiFi (wl_android_wifi_on / dhd_bus_start / dbus_up depending on build),
 * copy the dongle MAC into the netdev, set TOE checksum-offload features,
 * set up DHD load-balancing NAPI/tx queues, bring up cfg80211, register
 * inet/inet6 address notifiers, and finally netif_start_queue().
 *
 * NOTE(review): this listing is an elided line-wrapped extract; many
 * original lines (braces, 'int ret/ifidx/toe_ol' declarations, error-path
 * gotos, several #if/#else directives) are missing between the numbered
 * statements. Do not treat the visible text as compilable as-is.
 */
8389 dhd_open(struct net_device
*net
)
8391 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
/* BCM_FD_AGGR-only locals (enclosing #ifdef is elided from view). */
8396 char iovbuf
[WLC_IOCTL_SMLEN
];
8397 dbus_config_t config
;
8398 uint32 agglimit
= 0;
8399 uint32 rpc_agg
= BCM_RPC_TP_DNGL_AGG_DPC
; /* host aggr not enabled yet */
8400 #endif /* BCM_FD_AGGR */
8403 #if defined(OOB_INTR_ONLY)
8404 uint32 bus_type
= -1;
8405 uint32 bus_num
= -1;
8406 uint32 slot_num
= -1;
8407 wifi_adapter_info_t
*adapter
= NULL
;
8409 #if defined(WL_EXT_IAPSTA) && defined(ISAM_PREINIT)
8410 int bytes_written
= 0;
8411 struct dhd_conf
*conf
;
/* Reject open before module init completes (non-built-in FW case). */
8414 if (!dhd_download_fw_on_driverload
) {
8415 if (!dhd_driver_init_done
) {
8416 DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__
));
8421 printf("%s: Enter %p\n", __FUNCTION__
, net
);
/* First open without built-in FW: create wakelocks + logtrace tables. */
8424 if (!dhd_download_fw_on_driverload
) {
8425 if (!(dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
8426 DHD_OS_WAKE_LOCK_INIT(dhd
);
8427 dhd
->dhd_state
|= DHD_ATTACH_STATE_WAKELOCKS_INIT
;
8429 #ifdef SHOW_LOGTRACE
8430 skb_queue_head_init(&dhd
->evt_trace_queue
);
8432 if (!(dhd
->dhd_state
& DHD_ATTACH_LOGTRACE_INIT
)) {
8433 ret
= dhd_init_logstrs_array(dhd
->pub
.osh
, &dhd
->event_data
);
8434 if (ret
== BCME_OK
) {
8435 dhd_init_static_strs_array(dhd
->pub
.osh
, &dhd
->event_data
,
8436 st_str_file_path
, map_file_path
);
8437 dhd_init_static_strs_array(dhd
->pub
.osh
, &dhd
->event_data
,
8438 rom_st_str_file_path
, rom_map_file_path
);
8439 dhd
->dhd_state
|= DHD_ATTACH_LOGTRACE_INIT
;
8442 #endif /* SHOW_LOGTRACE */
8445 #if defined(PREVENT_REOPEN_DURING_HANG)
8446 /* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */
8447 if (dhd
->pub
.hang_was_sent
== 1) {
8448 DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__
));
8449 /* Force to bring down WLAN interface in case dhd_stop() is not called
8450 * from the upper layer when HANG event is triggered.
8452 if (!dhd_download_fw_on_driverload
&& dhd
->pub
.up
== 1) {
8453 DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__
));
8459 #endif /* PREVENT_REOPEN_DURING_HANG */
/* Reset per-open failure bookkeeping under wakelock + perimeter lock. */
8462 DHD_OS_WAKE_LOCK(&dhd
->pub
);
8463 DHD_PERIM_LOCK(&dhd
->pub
);
8464 dhd
->pub
.dongle_trap_occured
= 0;
8465 dhd
->pub
.hang_was_sent
= 0;
8466 dhd
->pub
.hang_reason
= 0;
8467 dhd
->pub
.iovar_timeout_occured
= 0;
8468 #ifdef PCIE_FULL_DONGLE
8469 dhd
->pub
.d3ack_timeout_occured
= 0;
8470 #endif /* PCIE_FULL_DONGLE */
8472 #ifdef DHD_LOSSLESS_ROAMING
8473 dhd
->pub
.dequeue_prec_map
= ALLPRIO
;
8477 * Force start if ifconfig_up gets called before START command
8478 * We keep WEXT's wl_control_wl_start to provide backward compatibility
8479 * This should be removed in the future
8481 ret
= wl_control_wl_start(net
);
8483 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__
, ret
));
/* Map net_device back to interface index and validate it still exists. */
8489 ifidx
= dhd_net2idx(dhd
, net
);
8490 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__
, ifidx
));
8493 DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__
));
8498 if (!dhd
->iflist
[ifidx
]) {
8499 DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__
));
/* Power-on path for the primary interface (ifidx check elided). */
8505 atomic_set(&dhd
->pend_8021x_cnt
, 0);
8506 if (!dhd_download_fw_on_driverload
) {
8507 DHD_ERROR(("\n%s\n", dhd_version
));
8508 #ifdef WL_EXT_IAPSTA
8509 wl_ext_iapsta_attach_netdev(net
, ifidx
, dhd
->iflist
[ifidx
]->bssidx
);
8511 #if defined(USE_INITIAL_SHORT_DWELL_TIME)
8512 g_first_broadcast_scan
= TRUE
;
8514 #if defined(BT_OVER_SDIO)
8515 ret
= dhd_bus_get(&dhd
->pub
, WLAN_MODULE
);
8516 wl_android_set_wifi_on_flag(TRUE
);
8518 ret
= wl_android_wifi_on(net
);
8519 #endif /* BT_OVER_SDIO */
8521 DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
8522 __FUNCTION__
, ret
));
8526 #if defined(WL_EXT_IAPSTA) && defined(ISAM_PREINIT)
8527 conf
= dhd_get_conf(net
);
8529 wl_android_ext_priv_cmd(net
, conf
->isam_init
, 0, &bytes_written
);
8530 wl_android_ext_priv_cmd(net
, conf
->isam_config
, 0, &bytes_written
);
8531 wl_android_ext_priv_cmd(net
, conf
->isam_enable
, 0, &bytes_written
);
8535 #ifdef FIX_CPU_MIN_CLOCK
8536 if (dhd_get_fw_mode(dhd
) == DHD_FLAG_HOSTAP_MODE
) {
8537 dhd_init_cpufreq_fix(dhd
);
8538 dhd_fix_cpu_freq(dhd
);
8540 #endif /* FIX_CPU_MIN_CLOCK */
/* Optionally pin the DPC interrupt to a configured CPU core. */
8541 #if defined(OOB_INTR_ONLY)
8542 if (dhd
->pub
.conf
->dpc_cpucore
>= 0) {
8543 dhd_bus_get_ids(dhd
->pub
.bus
, &bus_type
, &bus_num
, &slot_num
);
8544 adapter
= dhd_wifi_platform_get_adapter(bus_type
, bus_num
, slot_num
);
8546 printf("%s: set irq affinity hit %d\n", __FUNCTION__
, dhd
->pub
.conf
->dpc_cpucore
);
8547 irq_set_affinity_hint(adapter
->irq_num
, cpumask_of(dhd
->pub
.conf
->dpc_cpucore
));
/* Bring the bus to DATA state (DBUS vs. direct-bus branch elided). */
8552 if (dhd
->pub
.busstate
!= DHD_BUS_DATA
) {
8554 dhd_set_path(&dhd
->pub
);
8556 wait_event_interruptible_timeout(dhd
->adapter
->status_event
,
8557 wifi_get_adapter_status(dhd
->adapter
, WIFI_STATUS_FW_READY
),
8558 msecs_to_jiffies(DHD_FW_READY_TIMEOUT
));
8560 if ((ret
= dbus_up(dhd
->pub
.bus
)) != 0) {
8561 DHD_ERROR(("%s: failed to dbus_up with code %d\n", __FUNCTION__
, ret
));
8564 dhd
->pub
.busstate
= DHD_BUS_DATA
;
8566 if ((ret
= dhd_sync_with_dongle(&dhd
->pub
)) < 0) {
8567 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__
, ret
));
8571 /* try to bring up bus */
8572 DHD_PERIM_UNLOCK(&dhd
->pub
);
8573 ret
= dhd_bus_start(&dhd
->pub
);
8574 DHD_PERIM_LOCK(&dhd
->pub
);
8576 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__
, ret
));
8580 #endif /* !BCMDBUS */
8583 #ifdef WL_EXT_IAPSTA
8584 wl_ext_iapsta_attach_name(net
, ifidx
);
8586 if (dhd_download_fw_on_driverload
) {
8587 if (dhd
->pub
.conf
->deepsleep
)
8588 dhd_deepsleep(dhd
, 0);
/* BCM_FD_AGGR: query dongle agg limit, then program TX/RX aggregation. */
8592 config
.config_id
= DBUS_CONFIG_ID_AGGR_LIMIT
;
8595 memset(iovbuf
, 0, sizeof(iovbuf
));
8596 bcm_mkiovar("rpc_dngl_agglimit", (char *)&agglimit
, 4,
8597 iovbuf
, sizeof(iovbuf
));
8599 if (!dhd_wl_ioctl_cmd(&dhd
->pub
, WLC_GET_VAR
, iovbuf
, sizeof(iovbuf
), FALSE
, 0)) {
8600 agglimit
= *(uint32
*)iovbuf
;
8601 config
.aggr_param
.maxrxsf
= agglimit
>> BCM_RPC_TP_AGG_SF_SHIFT
;
8602 config
.aggr_param
.maxrxsize
= agglimit
& BCM_RPC_TP_AGG_BYTES_MASK
;
8603 DHD_ERROR(("rpc_dngl_agglimit %x : sf_limit %d bytes_limit %d\n",
8604 agglimit
, config
.aggr_param
.maxrxsf
, config
.aggr_param
.maxrxsize
));
8605 if (bcm_rpc_tp_set_config(dhd
->pub
.info
->rpc_th
, &config
)) {
8606 DHD_ERROR(("set tx/rx queue size and buffersize failed\n"));
8609 DHD_ERROR(("get rpc_dngl_agglimit failed\n"));
8610 rpc_agg
&= ~BCM_RPC_TP_DNGL_AGG_DPC
;
8613 /* Set aggregation for TX */
8614 bcm_rpc_tp_agg_set(dhd
->pub
.info
->rpc_th
, BCM_RPC_TP_HOST_AGG_MASK
,
8615 rpc_agg
& BCM_RPC_TP_HOST_AGG_MASK
);
8617 /* Set aggregation for RX */
8618 memset(iovbuf
, 0, sizeof(iovbuf
));
8619 bcm_mkiovar("rpc_agg", (char *)&rpc_agg
, sizeof(rpc_agg
), iovbuf
, sizeof(iovbuf
));
8620 if (!dhd_wl_ioctl_cmd(&dhd
->pub
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) {
8621 dhd
->pub
.info
->fdaggr
= 0;
8622 if (rpc_agg
& BCM_RPC_TP_HOST_AGG_MASK
)
8623 dhd
->pub
.info
->fdaggr
|= BCM_FDAGGR_H2D_ENABLED
;
8624 if (rpc_agg
& BCM_RPC_TP_DNGL_AGG_MASK
)
8625 dhd
->pub
.info
->fdaggr
|= BCM_FDAGGR_D2H_ENABLED
;
8627 DHD_ERROR(("%s(): Setting RX aggregation failed %d\n", __FUNCTION__
, ret
));
8629 #endif /* BCM_FD_AGGR */
/* BT_OVER_SDIO: notify BT stack if a WLAN hang requires its recovery. */
8632 if (dhd
->pub
.is_bt_recovery_required
) {
8633 DHD_ERROR(("%s: Send Hang Notification 2 to BT\n", __FUNCTION__
));
8634 bcmsdh_btsdio_process_dhd_hang_notification(TRUE
);
8636 dhd
->pub
.is_bt_recovery_required
= FALSE
;
8639 /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
8640 memcpy(net
->dev_addr
, dhd
->pub
.mac
.octet
, ETHER_ADDR_LEN
);
8643 /* Get current TOE mode from dongle */
8644 if (dhd_toe_get(dhd
, ifidx
, &toe_ol
) >= 0 && (toe_ol
& TOE_TX_CSUM_OL
) != 0) {
8645 dhd
->iflist
[ifidx
]->net
->features
|= NETIF_F_IP_CSUM
;
8647 dhd
->iflist
[ifidx
]->net
->features
&= ~NETIF_F_IP_CSUM
;
/* RX load balancing: lazily create one NAPI context bound to this netdev. */
8651 #if defined(DHD_LB_RXP)
8652 __skb_queue_head_init(&dhd
->rx_pend_queue
);
8653 if (dhd
->rx_napi_netdev
== NULL
) {
8654 dhd
->rx_napi_netdev
= dhd
->iflist
[ifidx
]->net
;
8655 memset(&dhd
->rx_napi_struct
, 0, sizeof(struct napi_struct
));
8656 netif_napi_add(dhd
->rx_napi_netdev
, &dhd
->rx_napi_struct
,
8657 dhd_napi_poll
, dhd_napi_weight
);
8658 DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n",
8659 __FUNCTION__
, &dhd
->rx_napi_struct
, net
, net
->name
));
8660 napi_enable(&dhd
->rx_napi_struct
);
8661 DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__
));
8662 skb_queue_head_init(&dhd
->rx_napi_queue
);
8663 } /* rx_napi_netdev == NULL */
8664 #endif /* DHD_LB_RXP */
8666 #if defined(DHD_LB_TXP)
8667 /* Use the variant that uses locks */
8668 skb_queue_head_init(&dhd
->tx_pend_queue
);
8669 #endif /* DHD_LB_TXP */
8671 #if defined(WL_CFG80211)
8672 if (unlikely(wl_cfg80211_up(net
))) {
8673 DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__
));
/* Register address notifiers (mirrors the unregister path in dhd_stop). */
8677 if (!dhd_download_fw_on_driverload
) {
8678 #ifdef ARP_OFFLOAD_SUPPORT
8679 dhd
->pend_ipaddr
= 0;
8680 if (!dhd_inetaddr_notifier_registered
) {
8681 dhd_inetaddr_notifier_registered
= TRUE
;
8682 register_inetaddr_notifier(&dhd_inetaddr_notifier
);
8684 #endif /* ARP_OFFLOAD_SUPPORT */
8685 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
8686 if (!dhd_inet6addr_notifier_registered
) {
8687 dhd_inet6addr_notifier_registered
= TRUE
;
8688 register_inet6addr_notifier(&dhd_inet6addr_notifier
);
8690 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
8693 argos_register_notifier_init(net
);
8694 #if defined(NUM_SCB_MAX_PROBE)
8695 dhd_set_scb_probe(&dhd
->pub
);
8696 #endif /* NUM_SCB_MAX_PROBE */
8697 #endif /* WL_CFG80211 */
8700 /* Allow transmit calls */
8701 netif_start_queue(net
);
8704 OLD_MOD_INC_USE_COUNT
;
8707 dhd_dbgfs_init(&dhd
->pub
);
/* Common exit: drop perimeter lock and wakelock (label elided). */
8715 DHD_PERIM_UNLOCK(&dhd
->pub
);
8716 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
8719 printf("%s: Exit ret=%d\n", __FUNCTION__
, ret
);
/*
 * dhd_do_driver_init() - idempotent helper that brings the driver up by
 * calling dhd_open() unless the bus is already in DATA state.
 *
 * NOTE(review): elided extract; guard braces, return statements and the
 * '!net || !DHD_DEV_INFO(net)' style check implied by the first error log
 * are missing from view.
 */
8723 int dhd_do_driver_init(struct net_device
*net
)
8725 dhd_info_t
*dhd
= NULL
;
/* Bail out when the primary interface/netdev is not set up yet. */
8728 DHD_ERROR(("Primary Interface not initialized \n"));
/* Avoid re-entry while another context holds the init mutex. */
8732 DHD_MUTEX_IS_LOCK_RETURN();
8734 /* && defined(OEM_ANDROID) && defined(BCMSDIO) */
8735 dhd
= DHD_DEV_INFO(net
);
8737 /* If driver is already initialized, do nothing
8739 if (dhd
->pub
.busstate
== DHD_BUS_DATA
) {
8740 DHD_TRACE(("Driver already Inititalized. Nothing to do"));
8744 if (dhd_open(net
) < 0) {
8745 DHD_ERROR(("Driver Init Failed \n"));
/*
 * dhd_event_ifadd() - handle a firmware interface-add event.
 *
 * First gives cfg80211 a chance to claim the event; otherwise copies the
 * event into a heap dhd_if_event_t and defers the actual interface
 * creation to the workqueue (dhd_ifadd_event_handler), because doing it
 * synchronously would block the DPC (see original comment below).
 *
 * NOTE(review): elided extract; return statements, braces and the
 * #ifdef WL_CFG80211 guard around the notify call are missing from view.
 */
8753 dhd_event_ifadd(dhd_info_t
*dhdinfo
, wl_event_data_if_t
*ifevent
, char *name
, uint8
*mac
)
8757 if (wl_cfg80211_notify_ifadd(dhd_linux_get_primary_netdev(&dhdinfo
->pub
),
8758 ifevent
->ifidx
, name
, mac
, ifevent
->bssidx
) == BCME_OK
)
8762 /* handle IF event caused by wl commands, SoftAP, WEXT and
8763 * anything else. This has to be done asynchronously otherwise
8764 * DPC will be blocked (and iovars will timeout as DPC has no chance
8765 * to read the response back)
8767 if (ifevent
->ifidx
> 0) {
8768 dhd_if_event_t
*if_event
= MALLOC(dhdinfo
->pub
.osh
, sizeof(dhd_if_event_t
));
8769 if (if_event
== NULL
) {
8770 DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes",
8771 MALLOCED(dhdinfo
->pub
.osh
)));
/* Snapshot event, MAC and (NUL-terminated) name for the deferred worker;
 * ownership of if_event passes to the workqueue item.
 */
8775 memcpy(&if_event
->event
, ifevent
, sizeof(if_event
->event
));
8776 memcpy(if_event
->mac
, mac
, ETHER_ADDR_LEN
);
8777 strncpy(if_event
->name
, name
, IFNAMSIZ
);
8778 if_event
->name
[IFNAMSIZ
- 1] = '\0';
8779 dhd_deferred_schedule_work(dhdinfo
->dhd_deferred_wq
, (void *)if_event
,
8780 DHD_WQ_WORK_IF_ADD
, dhd_ifadd_event_handler
, DHD_WQ_WORK_PRIORITY_LOW
);
/*
 * dhd_event_ifdel() - handle a firmware interface-delete event.
 *
 * Mirrors dhd_event_ifadd(): cfg80211 may consume the event; otherwise
 * the event is copied and queued to dhd_ifdel_event_handler on the
 * deferred workqueue so the DPC is never blocked.
 *
 * NOTE(review): elided extract; returns, braces and the opening
 * #ifdef WL_CFG80211 (its closing #endif at 8795 is visible) are missing.
 */
8787 dhd_event_ifdel(dhd_info_t
*dhdinfo
, wl_event_data_if_t
*ifevent
, char *name
, uint8
*mac
)
8789 dhd_if_event_t
*if_event
;
8792 if (wl_cfg80211_notify_ifdel(dhd_linux_get_primary_netdev(&dhdinfo
->pub
),
8793 ifevent
->ifidx
, name
, mac
, ifevent
->bssidx
) == BCME_OK
)
8795 #endif /* WL_CFG80211 */
8797 /* handle IF event caused by wl commands, SoftAP, WEXT and
/* Ownership of if_event passes to the deferred workqueue item. */
8800 if_event
= MALLOC(dhdinfo
->pub
.osh
, sizeof(dhd_if_event_t
));
8801 if (if_event
== NULL
) {
8802 DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
8803 MALLOCED(dhdinfo
->pub
.osh
)));
8806 memcpy(&if_event
->event
, ifevent
, sizeof(if_event
->event
));
8807 memcpy(if_event
->mac
, mac
, ETHER_ADDR_LEN
);
8808 strncpy(if_event
->name
, name
, IFNAMSIZ
);
8809 if_event
->name
[IFNAMSIZ
- 1] = '\0';
8810 dhd_deferred_schedule_work(dhdinfo
->dhd_deferred_wq
, (void *)if_event
, DHD_WQ_WORK_IF_DEL
,
8811 dhd_ifdel_event_handler
, DHD_WQ_WORK_PRIORITY_LOW
);
/*
 * dhd_event_ifchange() - handle a firmware interface-change event.
 *
 * Notifies cfg80211 unconditionally; when DHD_UPDATE_INTF_MAC is built
 * in, additionally constructs a WLC_E_IF_CHANGE work item (resolving the
 * ifidx from the interface name) and defers it to
 * dhd_ifupdate_event_handler.
 *
 * NOTE(review): elided extract; returns, braces and the opening
 * #ifdef WL_CFG80211 (closing #endif at 8826 visible) are missing.
 */
8817 dhd_event_ifchange(dhd_info_t
*dhdinfo
, wl_event_data_if_t
*ifevent
, char *name
, uint8
*mac
)
8819 #ifdef DHD_UPDATE_INTF_MAC
8820 dhd_if_event_t
*if_event
;
8821 #endif /* DHD_UPDATE_INTF_MAC */
8824 wl_cfg80211_notify_ifchange(dhd_linux_get_primary_netdev(&dhdinfo
->pub
),
8825 ifevent
->ifidx
, name
, mac
, ifevent
->bssidx
);
8826 #endif /* WL_CFG80211 */
8828 #ifdef DHD_UPDATE_INTF_MAC
8829 /* handle IF event caused by wl commands, SoftAP, WEXT, MBSS and
8832 if_event
= MALLOC(dhdinfo
->pub
.osh
, sizeof(dhd_if_event_t
));
8833 if (if_event
== NULL
) {
/* Error text says "ifdel" — copied verbatim from the original source. */
8834 DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
8835 MALLOCED(dhdinfo
->pub
.osh
)));
8838 memcpy(&if_event
->event
, ifevent
, sizeof(if_event
->event
));
8839 // construct a change event
8840 if_event
->event
.ifidx
= dhd_ifname2idx(dhdinfo
, name
);
8841 if_event
->event
.opcode
= WLC_E_IF_CHANGE
;
8842 memcpy(if_event
->mac
, mac
, ETHER_ADDR_LEN
);
8843 strncpy(if_event
->name
, name
, IFNAMSIZ
);
8844 if_event
->name
[IFNAMSIZ
- 1] = '\0';
8845 dhd_deferred_schedule_work(dhdinfo
->dhd_deferred_wq
, (void *)if_event
, DHD_WQ_WORK_IF_UPDATE
,
8846 dhd_ifupdate_event_handler
, DHD_WQ_WORK_PRIORITY_LOW
);
8847 #endif /* DHD_UPDATE_INTF_MAC */
8852 /* unregister and free the existing net_device interface (if any) in iflist and
8853 * allocate a new one. the slot is reused. this function does NOT register the
8854 * new interface to linux kernel. dhd_register_if does the job
/*
 * NOTE(review): elided line-wrapped extract. Return statements, error
 * labels (the 8982+ lines are the failure cleanup path), and several
 * #if/#else directives that select among the three destructor
 * assignments below are missing between the numbered lines.
 */
8857 dhd_allocate_if(dhd_pub_t
*dhdpub
, int ifidx
, const char *name
,
8858 uint8
*mac
, uint8 bssidx
, bool need_rtnl_lock
, const char *dngl_name
)
8860 dhd_info_t
*dhdinfo
= (dhd_info_t
*)dhdpub
->info
;
8863 ASSERT(dhdinfo
&& (ifidx
< DHD_MAX_IFS
));
8864 ifp
= dhdinfo
->iflist
[ifidx
];
/* Slot reuse: tear down any netdev already occupying this ifidx. */
8867 if (ifp
->net
!= NULL
) {
8868 DHD_ERROR(("%s: free existing IF %s ifidx:%d \n",
8869 __FUNCTION__
, ifp
->net
->name
, ifidx
));
8872 /* For primary ifidx (0), there shouldn't be
8873 * any netdev present already.
8875 DHD_ERROR(("Primary ifidx populated already\n"));
8880 dhd_dev_priv_clear(ifp
->net
); /* clear net_device private */
8882 /* in unregister_netdev case, the interface gets freed by net->destructor
8883 * (which is set to free_netdev)
8885 if (ifp
->net
->reg_state
== NETREG_UNINITIALIZED
) {
8886 free_netdev(ifp
->net
);
8888 netif_stop_queue(ifp
->net
);
/* need_rtnl_lock presumably selects between these two calls —
 * the conditional is elided; unregister_netdevice() requires the
 * caller to already hold rtnl_lock.
 */
8890 unregister_netdev(ifp
->net
);
8892 unregister_netdevice(ifp
->net
);
/* Allocate and zero the new dhd_if_t for this slot. */
8897 ifp
= MALLOC(dhdinfo
->pub
.osh
, sizeof(dhd_if_t
));
8899 DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__
, sizeof(dhd_if_t
)));
8904 memset(ifp
, 0, sizeof(dhd_if_t
));
8905 ifp
->info
= dhdinfo
;
8907 ifp
->bssidx
= bssidx
;
8908 #ifdef DHD_MCAST_REGEN
8909 ifp
->mcast_regen_bss_enable
= FALSE
;
8911 /* set to TRUE rx_pkt_chainable at alloc time */
8912 ifp
->rx_pkt_chainable
= TRUE
;
8915 memcpy(&ifp
->mac_addr
, mac
, ETHER_ADDR_LEN
);
8917 /* Allocate etherdev, including space for private structure */
8918 ifp
->net
= alloc_etherdev(DHD_DEV_PRIV_SIZE
);
8919 if (ifp
->net
== NULL
) {
/* sizeof(dhdinfo) here is sizeof a pointer — log-only, harmless,
 * preserved verbatim from the original source. */
8920 DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__
, sizeof(dhdinfo
)));
8924 /* Setup the dhd interface's netdevice private structure. */
8925 dhd_dev_priv_save(ifp
->net
, dhdinfo
, ifp
, ifidx
);
8927 if (name
&& name
[0]) {
8928 strncpy(ifp
->net
->name
, name
, IFNAMSIZ
);
8929 ifp
->net
->name
[IFNAMSIZ
- 1] = '\0';
/* Three destructor-assignment variants follow; the surrounding
 * #ifdef branches (presumably WL_CFG80211 / monitor-mode related)
 * are elided, so only one is active in any given build.
 */
8934 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
8935 ifp
->net
->destructor
= free_netdev
;
8937 ifp
->net
->needs_free_netdev
= true;
8938 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
8940 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
8941 ifp
->net
->destructor
= dhd_netdev_free
;
8943 ifp
->net
->needs_free_netdev
= true;
8944 ifp
->net
->priv_destructor
= dhd_netdev_free
;
8945 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
8948 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
8949 ifp
->net
->destructor
= free_netdev
;
8951 ifp
->net
->needs_free_netdev
= true;
8953 #endif /* WL_CFG80211 */
8954 strncpy(ifp
->name
, ifp
->net
->name
, IFNAMSIZ
);
8955 ifp
->name
[IFNAMSIZ
- 1] = '\0';
8956 dhdinfo
->iflist
[ifidx
] = ifp
;
8958 /* initialize the dongle provided if name */
8960 strncpy(ifp
->dngl_name
, dngl_name
, IFNAMSIZ
);
8962 strncpy(ifp
->dngl_name
, name
, IFNAMSIZ
);
8964 #ifdef PCIE_FULL_DONGLE
8965 /* Initialize STA info list */
8966 INIT_LIST_HEAD(&ifp
->sta_list
);
8967 DHD_IF_STA_LIST_LOCK_INIT(ifp
);
8968 #endif /* PCIE_FULL_DONGLE */
8970 #ifdef DHD_L2_FILTER
8971 ifp
->phnd_arp_table
= init_l2_filter_arp_table(dhdpub
->osh
);
8972 ifp
->parp_allnode
= TRUE
;
8973 #endif /* DHD_L2_FILTER */
8976 DHD_CUMM_CTR_INIT(&ifp
->cumm_ctr
);
/* --- failure cleanup path (label elided): undo NAPI binding, free
 * the netdev and the dhd_if_t, and clear the slot. --- */
8982 if (ifp
->net
!= NULL
) {
8983 #if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
8984 if (ifp
->net
== dhdinfo
->rx_napi_netdev
) {
8985 napi_disable(&dhdinfo
->rx_napi_struct
);
8986 netif_napi_del(&dhdinfo
->rx_napi_struct
);
8987 skb_queue_purge(&dhdinfo
->rx_napi_queue
);
8988 dhdinfo
->rx_napi_netdev
= NULL
;
8990 #endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
8991 dhd_dev_priv_clear(ifp
->net
);
8992 free_netdev(ifp
->net
);
8995 MFREE(dhdinfo
->pub
.osh
, ifp
, sizeof(*ifp
));
8998 dhdinfo
->iflist
[ifidx
] = NULL
;
9002 /* unregister and free the the net_device interface associated with the indexed
9003 * slot, also free the slot memory and set the slot pointer to NULL
/*
 * NOTE(review): elided line-wrapped extract; braces, returns and some
 * conditionals (e.g. the need_rtnl_lock branch selecting
 * unregister_netdev vs unregister_netdevice) are missing from view.
 */
9006 dhd_remove_if(dhd_pub_t
*dhdpub
, int ifidx
, bool need_rtnl_lock
)
9008 dhd_info_t
*dhdinfo
= (dhd_info_t
*)dhdpub
->info
;
9010 #ifdef PCIE_FULL_DONGLE
9011 if_flow_lkup_t
*if_flow_lkup
= (if_flow_lkup_t
*)dhdpub
->if_flow_lkup
;
9012 #endif /* PCIE_FULL_DONGLE */
9014 ifp
= dhdinfo
->iflist
[ifidx
];
9017 if (ifp
->net
!= NULL
) {
9018 DHD_ERROR(("deleting interface '%s' idx %d\n", ifp
->net
->name
, ifp
->idx
));
/* Clear the slot first so no other path can reach this ifp. */
9020 dhdinfo
->iflist
[ifidx
] = NULL
;
9021 /* in unregister_netdev case, the interface gets freed by net->destructor
9022 * (which is set to free_netdev)
9024 if (ifp
->net
->reg_state
== NETREG_UNINITIALIZED
) {
9025 free_netdev(ifp
->net
);
9027 netif_tx_disable(ifp
->net
);
9031 #if defined(SET_RPS_CPUS)
9032 custom_rps_map_clear(ifp
->net
->_rx
);
9033 #endif /* SET_RPS_CPUS */
/* unregister_netdevice() requires rtnl_lock already held by caller. */
9035 unregister_netdev(ifp
->net
);
9037 unregister_netdevice(ifp
->net
);
9042 dhd_wmf_cleanup(dhdpub
, ifidx
);
9043 #endif /* DHD_WMF */
9044 #ifdef DHD_L2_FILTER
9045 bcm_l2_filter_arp_table_update(dhdpub
->osh
, ifp
->phnd_arp_table
, TRUE
,
9046 NULL
, FALSE
, dhdpub
->tickcnt
);
9047 deinit_l2_filter_arp_table(dhdpub
->osh
, ifp
->phnd_arp_table
);
9048 ifp
->phnd_arp_table
= NULL
;
9049 #endif /* DHD_L2_FILTER */
/* Drop associated-STA bookkeeping and (PCIe) WDS flowrings. */
9052 dhd_if_del_sta_list(ifp
);
9053 #ifdef PCIE_FULL_DONGLE
9054 /* Delete flowrings of WDS interface */
9055 if (if_flow_lkup
[ifidx
].role
== WLC_E_IF_ROLE_WDS
) {
9056 dhd_flow_rings_delete(dhdpub
, ifidx
);
9058 #endif /* PCIE_FULL_DONGLE */
9059 DHD_CUMM_CTR_INIT(&ifp
->cumm_ctr
);
9061 MFREE(dhdinfo
->pub
.osh
, ifp
, sizeof(*ifp
));
/*
 * net_device_ops for the primary DHD interface (kernels >= 2.6.31).
 * Not const in this driver — preserved as-is. The #else pairing with the
 * .ndo_set_multicast_list entry (pre-3.2 kernels) is elided from view.
 */
9069 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
9070 static struct net_device_ops dhd_ops_pri
= {
9071 .ndo_open
= dhd_open
,
9072 .ndo_stop
= dhd_stop
,
9073 .ndo_get_stats
= dhd_get_stats
,
9074 .ndo_do_ioctl
= dhd_ioctl_entry
,
9075 .ndo_start_xmit
= dhd_start_xmit
,
9076 .ndo_set_mac_address
= dhd_set_mac_address
,
9077 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
9078 .ndo_set_rx_mode
= dhd_set_multicast_list
,
9080 .ndo_set_multicast_list
= dhd_set_multicast_list
,
/*
 * net_device_ops for virtual (secondary) DHD interfaces: same handlers as
 * the primary table but with no .ndo_open/.ndo_stop — the primary
 * interface controls bring-up/down.
 */
9084 static struct net_device_ops dhd_ops_virt
= {
9085 .ndo_get_stats
= dhd_get_stats
,
9086 .ndo_do_ioctl
= dhd_ioctl_entry
,
9087 .ndo_start_xmit
= dhd_start_xmit
,
9088 .ndo_set_mac_address
= dhd_set_mac_address
,
9089 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
9090 .ndo_set_rx_mode
= dhd_set_multicast_list
,
9092 .ndo_set_multicast_list
= dhd_set_multicast_list
,
9095 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
9098 extern void debugger_init(void *bus_handle
);
9102 #ifdef SHOW_LOGTRACE
/*
 * dhd_os_read_file() - read 'size' bytes from an opaque file handle into
 * 'buf', advancing the file position. Thin wrapper over vfs_read()
 * (vfs_read was removed in Linux 4.14; this driver targets older kernels).
 *
 * NOTE(review): elided extract; a NULL-check on 'filep' between lines
 * 9106 and 9111 is likely but not visible.
 */
9104 dhd_os_read_file(void *file
, char *buf
, uint32 size
)
9106 struct file
*filep
= (struct file
*)file
;
9111 return vfs_read(filep
, buf
, size
, &filep
->f_pos
);
/*
 * dhd_os_seek_file() - adjust the file position of an opaque file handle
 * by a signed delta (relative seek); directly mutates f_pos rather than
 * going through the VFS llseek path.
 *
 * NOTE(review): elided extract; a NULL-check and the return statement
 * are not visible.
 */
9115 dhd_os_seek_file(void *file
, int64 offset
)
9117 struct file
*filep
= (struct file
*)file
;
9121 /* offset can be -ve */
9122 filep
->f_pos
= filep
->f_pos
+ offset
;
/*
 * dhd_init_logstrs_array() - load the firmware event-log format-string
 * file (logstrs_path) into a heap buffer and hand it to
 * dhd_parse_logstrs_file() to populate 'temp'.
 *
 * On success the parsed buffer ownership stays in 'temp' (freed later in
 * dhd_stop); on failure raw_fmts is MFREE'd here. Uses kernel-space file
 * I/O (filp_open/vfs_stat/vfs_read) — pre-4.14 kernel APIs.
 *
 * NOTE(review): elided extract; error-path gotos/returns and the
 * 'struct kstat stat; int error;' declarations are missing from view.
 */
9128 dhd_init_logstrs_array(osl_t
*osh
, dhd_event_log_t
*temp
)
9130 struct file
*filep
= NULL
;
9133 char *raw_fmts
= NULL
;
9134 int logstrs_size
= 0;
9140 filep
= filp_open(logstrs_path
, O_RDONLY
, 0);
9142 if (IS_ERR(filep
)) {
9143 DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__
, logstrs_path
));
/* Stat to size the read buffer exactly. */
9146 error
= vfs_stat(logstrs_path
, &stat
);
9148 DHD_ERROR(("%s: Failed to stat file %s \n", __FUNCTION__
, logstrs_path
));
9151 logstrs_size
= (int) stat
.size
;
9153 if (logstrs_size
== 0) {
9154 DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__
));
9158 raw_fmts
= MALLOC(osh
, logstrs_size
);
9159 if (raw_fmts
== NULL
) {
9160 DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__
));
/* Short read is treated as failure. */
9163 if (vfs_read(filep
, raw_fmts
, logstrs_size
, &filep
->f_pos
) != logstrs_size
) {
9164 DHD_ERROR(("%s: Failed to read file %s\n", __FUNCTION__
, logstrs_path
));
9168 if (dhd_parse_logstrs_file(osh
, raw_fmts
, logstrs_size
, temp
)
9170 filp_close(filep
, NULL
);
/* Failure cleanup: release the buffer and close the file. */
9177 MFREE(osh
, raw_fmts
, logstrs_size
);
9183 filp_close(filep
, NULL
);
/*
 * dhd_read_map() - open a firmware .map file and extract the RAM start
 * and rodata start/end addresses via dhd_parse_map_file().
 *
 * NOTE(review): elided extract; the 'uint32 *rodata_end' parameter
 * continuation, braces, returns and the final 'return err;' are missing
 * from view.
 */
9191 dhd_read_map(osl_t
*osh
, char *fname
, uint32
*ramstart
, uint32
*rodata_start
,
9194 struct file
*filep
= NULL
;
9196 int err
= BCME_ERROR
;
9198 if (fname
== NULL
) {
9199 DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__
));
9206 filep
= filp_open(fname
, O_RDONLY
, 0);
9207 if (IS_ERR(filep
)) {
9208 DHD_ERROR(("%s: Failed to open %s \n", __FUNCTION__
, fname
));
9212 if ((err
= dhd_parse_map_file(osh
, filep
, ramstart
,
9213 rodata_start
, rodata_end
)) < 0)
9218 filp_close(filep
, NULL
);
/*
 * dhd_init_static_strs_array() - load the static (rodata) log strings for
 * either the RAM or ROM firmware image into 'temp'.
 *
 * Reads the map file to learn ramstart/rodata bounds, then reads only the
 * [rodata_start, rodata_end) window out of the (large) string file at
 * offset rodata_start - ramstart. Which set of 'temp' fields gets filled
 * (raw_sstr* vs rom_raw_sstr*) is chosen by matching str_file against
 * ram_file_str / rom_file_str. The buffer stays owned by 'temp' on
 * success (freed in dhd_stop); on failure it is MFREE'd and the
 * corresponding pointer NULLed.
 *
 * NOTE(review): elided extract; returns, gotos, braces and the 'int
 * error;' declaration are missing between the numbered lines.
 */
9226 dhd_init_static_strs_array(osl_t
*osh
, dhd_event_log_t
*temp
, char *str_file
, char *map_file
)
9228 struct file
*filep
= NULL
;
9230 char *raw_fmts
= NULL
;
9231 uint32 logstrs_size
= 0;
9234 uint32 ramstart
= 0;
9235 uint32 rodata_start
= 0;
9236 uint32 rodata_end
= 0;
9237 uint32 logfilebase
= 0;
9239 error
= dhd_read_map(osh
, map_file
, &ramstart
, &rodata_start
, &rodata_end
);
9240 if (error
!= BCME_OK
) {
9241 DHD_ERROR(("readmap Error!! \n"));
9242 /* don't do event log parsing in actual case */
9243 if (strstr(str_file
, ram_file_str
) != NULL
) {
9244 temp
->raw_sstr
= NULL
;
9245 } else if (strstr(str_file
, rom_file_str
) != NULL
) {
9246 temp
->rom_raw_sstr
= NULL
;
9250 DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
9251 ramstart
, rodata_start
, rodata_end
));
9256 filep
= filp_open(str_file
, O_RDONLY
, 0);
9257 if (IS_ERR(filep
)) {
9258 DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__
, str_file
));
9262 /* Full file size is huge. Just read required part */
9263 logstrs_size
= rodata_end
- rodata_start
;
9265 if (logstrs_size
== 0) {
9266 DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__
));
9270 raw_fmts
= MALLOC(osh
, logstrs_size
);
9271 if (raw_fmts
== NULL
) {
9272 DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__
));
/* File offset of the rodata window within the image. */
9276 logfilebase
= rodata_start
- ramstart
;
9278 error
= generic_file_llseek(filep
, logfilebase
, SEEK_SET
);
9280 DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__
, str_file
, error
));
9284 error
= vfs_read(filep
, raw_fmts
, logstrs_size
, (&filep
->f_pos
));
9285 if (error
!= logstrs_size
) {
9286 DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__
, str_file
, error
));
/* Success: publish buffer + bounds into the RAM or ROM field set. */
9290 if (strstr(str_file
, ram_file_str
) != NULL
) {
9291 temp
->raw_sstr
= raw_fmts
;
9292 temp
->raw_sstr_size
= logstrs_size
;
9293 temp
->ramstart
= ramstart
;
9294 temp
->rodata_start
= rodata_start
;
9295 temp
->rodata_end
= rodata_end
;
9296 } else if (strstr(str_file
, rom_file_str
) != NULL
) {
9297 temp
->rom_raw_sstr
= raw_fmts
;
9298 temp
->rom_raw_sstr_size
= logstrs_size
;
9299 temp
->rom_ramstart
= ramstart
;
9300 temp
->rom_rodata_start
= rodata_start
;
9301 temp
->rom_rodata_end
= rodata_end
;
9304 filp_close(filep
, NULL
);
/* --- failure path: free buffer, close file, clear the pointer that
 * would otherwise dangle. --- */
9311 MFREE(osh
, raw_fmts
, logstrs_size
);
9317 filp_close(filep
, NULL
);
9321 if (strstr(str_file
, ram_file_str
) != NULL
) {
9322 temp
->raw_sstr
= NULL
;
9323 } else if (strstr(str_file
, rom_file_str
) != NULL
) {
9324 temp
->rom_raw_sstr
= NULL
;
9330 #endif /* SHOW_LOGTRACE */
/*
 * dhd_get_rxsz() - compute the RX buffer size for dbus_attach from the
 * primary netdev, after setting its hard_header_len to Ethernet header
 * plus the bus protocol header length.
 *
 * NOTE(review): elided extract; the 'uint rxsz;' declaration, the
 * 'dhd = pub->info;' assignment and the 'return rxsz;' are missing
 * from view.
 */
9334 dhd_get_rxsz(dhd_pub_t
*pub
)
9336 struct net_device
*net
= NULL
;
9337 dhd_info_t
*dhd
= NULL
;
9340 /* Assign rxsz for dbus_attach */
9342 net
= dhd
->iflist
[0]->net
;
9343 net
->hard_header_len
= ETH_HLEN
+ pub
->hdrlen
;
9344 rxsz
= DBUS_RX_BUFFER_SIZE_DHD(net
);
/*
 * dhd_set_path() - resolve firmware/nvram/clm/config file paths via
 * dhd_update_fw_nv_path() and push them down to the bus layer so the
 * next bus start downloads the right images.
 *
 * NOTE(review): elided extract; the 'dhd = pub->info;' assignment,
 * braces and the return are missing from view.
 */
9350 dhd_set_path(dhd_pub_t
*pub
)
9352 dhd_info_t
*dhd
= NULL
;
9356 /* try to download image and nvram to the dongle */
9357 if (dhd_update_fw_nv_path(dhd
) && dhd
->pub
.bus
) {
9358 DHD_INFO(("%s: fw %s, nv %s, conf %s\n",
9359 __FUNCTION__
, dhd
->fw_path
, dhd
->nv_path
, dhd
->conf_path
));
9360 dhd_bus_update_fw_nv_path(dhd
->pub
.bus
,
9361 dhd
->fw_path
, dhd
->nv_path
, dhd
->clm_path
, dhd
->conf_path
);
9367 dhd_attach(osl_t
*osh
, struct dhd_bus
*bus
, uint bus_hdrlen
9373 dhd_info_t
*dhd
= NULL
;
9374 struct net_device
*net
= NULL
;
9375 char if_name
[IFNAMSIZ
] = {'\0'};
9376 #ifdef SHOW_LOGTRACE
9378 #endif /* SHOW_LOGTRACE */
9379 #if defined(BCMSDIO) || defined(BCMPCIE)
9380 uint32 bus_type
= -1;
9381 uint32 bus_num
= -1;
9382 uint32 slot_num
= -1;
9383 wifi_adapter_info_t
*adapter
= NULL
;
9384 #elif defined(BCMDBUS)
9385 wifi_adapter_info_t
*adapter
= data
;
9387 #ifdef GET_CUSTOM_MAC_ENABLE
9389 #endif /* GET_CUSTOM_MAC_ENABLE */
9391 dhd_attach_states_t dhd_state
= DHD_ATTACH_STATE_INIT
;
9392 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
9395 DHD_ERROR(("%s\n", driver_target
));
9396 #endif /* STBLINUX */
9397 /* will implement get_ids for DBUS later */
9398 #if defined(BCMSDIO)
9399 dhd_bus_get_ids(bus
, &bus_type
, &bus_num
, &slot_num
);
9401 #if defined(BCMSDIO) || defined(BCMPCIE)
9402 adapter
= dhd_wifi_platform_get_adapter(bus_type
, bus_num
, slot_num
);
9405 /* Allocate primary dhd_info */
9406 dhd
= wifi_platform_prealloc(adapter
, DHD_PREALLOC_DHD_INFO
, sizeof(dhd_info_t
));
9408 dhd
= MALLOC(osh
, sizeof(dhd_info_t
));
9410 DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__
));
9414 memset(dhd
, 0, sizeof(dhd_info_t
));
9415 dhd_state
|= DHD_ATTACH_STATE_DHD_ALLOC
;
9417 dhd
->unit
= dhd_found
+ instance_base
; /* do not increment dhd_found, yet */
9420 #ifdef DUMP_IOCTL_IOV_LIST
9421 dll_init(&(dhd
->pub
.dump_iovlist_head
));
9422 #endif /* DUMP_IOCTL_IOV_LIST */
9423 dhd
->adapter
= adapter
;
9424 dhd
->pub
.adapter
= (void *)adapter
;
9426 dll_init(&(dhd
->pub
.mw_list_head
));
9427 #endif /* DHD_DEBUG */
9429 dhd
->pub
.is_bt_recovery_required
= FALSE
;
9430 mutex_init(&dhd
->bus_user_lock
);
9431 #endif /* BT_OVER_SDIO */
9433 #ifdef GET_CUSTOM_MAC_ENABLE
9434 wifi_platform_get_mac_addr(dhd
->adapter
, hw_ether
);
9435 bcopy(hw_ether
, dhd
->pub
.mac
.octet
, sizeof(struct ether_addr
));
9436 #endif /* GET_CUSTOM_MAC_ENABLE */
9437 #ifdef CUSTOM_FORCE_NODFS_FLAG
9438 dhd
->pub
.dhd_cflags
|= WLAN_PLAT_NODFS_FLAG
;
9439 dhd
->pub
.force_country_change
= TRUE
;
9440 #endif /* CUSTOM_FORCE_NODFS_FLAG */
9441 #ifdef CUSTOM_COUNTRY_CODE
9442 get_customized_country_code(dhd
->adapter
,
9443 dhd
->pub
.dhd_cspec
.country_abbrev
, &dhd
->pub
.dhd_cspec
,
9444 dhd
->pub
.dhd_cflags
);
9445 #endif /* CUSTOM_COUNTRY_CODE */
9447 dhd
->thr_dpc_ctl
.thr_pid
= DHD_PID_KT_TL_INVALID
;
9448 dhd
->thr_wdt_ctl
.thr_pid
= DHD_PID_KT_INVALID
;
9450 dhd
->pub
.wet_info
= dhd_get_wet_info(&dhd
->pub
);
9451 #endif /* DHD_WET */
9452 /* Initialize thread based operation and lock */
9453 sema_init(&dhd
->sdsem
, 1);
9454 #endif /* !BCMDBUS */
9456 /* Link to info module */
9457 dhd
->pub
.info
= dhd
;
9460 /* Link to bus module */
9462 dhd
->pub
.hdrlen
= bus_hdrlen
;
9464 /* dhd_conf must be attached after linking dhd to dhd->pub.info,
9465 * because dhd_detech will check .info is NULL or not.
9467 if (dhd_conf_attach(&dhd
->pub
) != 0) {
9468 DHD_ERROR(("dhd_conf_attach failed\n"));
9472 dhd_conf_reset(&dhd
->pub
);
9473 dhd_conf_set_chiprev(&dhd
->pub
, dhd_bus_chip(bus
), dhd_bus_chiprev(bus
));
9474 dhd_conf_preinit(&dhd
->pub
);
9475 #endif /* !BCMDBUS */
9477 /* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
9478 * This is indeed a hack but we have to make it work properly before we have a better
9481 dhd_update_fw_nv_path(dhd
);
9483 /* Set network interface name if it was provided as module parameter */
9484 if (iface_name
[0]) {
9487 strncpy(if_name
, iface_name
, IFNAMSIZ
);
9488 if_name
[IFNAMSIZ
- 1] = 0;
9489 len
= strlen(if_name
);
9490 ch
= if_name
[len
- 1];
9491 if ((ch
> '9' || ch
< '0') && (len
< IFNAMSIZ
- 2))
9492 strncat(if_name
, "%d", 2);
9495 /* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */
9496 net
= dhd_allocate_if(&dhd
->pub
, 0, if_name
, NULL
, 0, TRUE
, NULL
);
9502 dhd_state
|= DHD_ATTACH_STATE_ADD_IF
;
9503 #ifdef DHD_L2_FILTER
9504 /* initialize the l2_filter_cnt */
9505 dhd
->pub
.l2_filter_cnt
= 0;
9507 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
9510 net
->netdev_ops
= NULL
;
9513 mutex_init(&dhd
->dhd_iovar_mutex
);
9514 sema_init(&dhd
->proto_sem
, 1);
9516 if (!(dhd_ulp_init(osh
, &dhd
->pub
)))
9518 #endif /* DHD_ULP */
9520 #if defined(DHD_HANG_SEND_UP_TEST)
9521 dhd
->pub
.req_hang_type
= 0;
9522 #endif /* DHD_HANG_SEND_UP_TEST */
9524 #ifdef PROP_TXSTATUS
9525 spin_lock_init(&dhd
->wlfc_spinlock
);
9527 dhd
->pub
.skip_fc
= dhd_wlfc_skip_fc
;
9528 dhd
->pub
.plat_init
= dhd_wlfc_plat_init
;
9529 dhd
->pub
.plat_deinit
= dhd_wlfc_plat_deinit
;
9531 #ifdef DHD_WLFC_THREAD
9532 init_waitqueue_head(&dhd
->pub
.wlfc_wqhead
);
9533 dhd
->pub
.wlfc_thread
= kthread_create(dhd_wlfc_transfer_packets
, &dhd
->pub
, "wlfc-thread");
9534 if (IS_ERR(dhd
->pub
.wlfc_thread
)) {
9535 DHD_ERROR(("create wlfc thread failed\n"));
9538 wake_up_process(dhd
->pub
.wlfc_thread
);
9540 #endif /* DHD_WLFC_THREAD */
9541 #endif /* PROP_TXSTATUS */
9543 /* Initialize other structure content */
9544 init_waitqueue_head(&dhd
->ioctl_resp_wait
);
9545 init_waitqueue_head(&dhd
->d3ack_wait
);
9547 init_waitqueue_head(&dhd
->ds_exit_wait
);
9548 #endif /* PCIE_INB_DW */
9549 init_waitqueue_head(&dhd
->ctrl_wait
);
9550 init_waitqueue_head(&dhd
->dhd_bus_busy_state_wait
);
9551 dhd
->pub
.dhd_bus_busy_state
= 0;
9553 /* Initialize the spinlocks */
9554 spin_lock_init(&dhd
->sdlock
);
9555 spin_lock_init(&dhd
->txqlock
);
9556 spin_lock_init(&dhd
->rxqlock
);
9557 spin_lock_init(&dhd
->dhd_lock
);
9558 spin_lock_init(&dhd
->rxf_lock
);
9560 spin_lock_init(&dhd
->pub
.tdls_lock
);
9562 #if defined(RXFRAME_THREAD)
9563 dhd
->rxthread_enabled
= TRUE
;
9564 #endif /* defined(RXFRAME_THREAD) */
9566 #ifdef DHDTCPACK_SUPPRESS
9567 spin_lock_init(&dhd
->tcpack_lock
);
9568 #endif /* DHDTCPACK_SUPPRESS */
9570 /* Initialize Wakelock stuff */
9571 spin_lock_init(&dhd
->wakelock_spinlock
);
9572 spin_lock_init(&dhd
->wakelock_evt_spinlock
);
9573 DHD_OS_WAKE_LOCK_INIT(dhd
);
9574 dhd
->wakelock_counter
= 0;
9575 #ifdef CONFIG_HAS_WAKELOCK
9576 // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry
9577 wake_lock_init(&dhd
->wl_wifi
, WAKE_LOCK_SUSPEND
, "wlan_wake");
9578 wake_lock_init(&dhd
->wl_wdwake
, WAKE_LOCK_SUSPEND
, "wlan_wd_wake");
9579 #endif /* CONFIG_HAS_WAKELOCK */
9581 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
9582 mutex_init(&dhd
->dhd_net_if_mutex
);
9583 mutex_init(&dhd
->dhd_suspend_mutex
);
9584 #if defined(PKT_FILTER_SUPPORT) && defined(APF)
9585 mutex_init(&dhd
->dhd_apf_mutex
);
9586 #endif /* PKT_FILTER_SUPPORT && APF */
9588 dhd_state
|= DHD_ATTACH_STATE_WAKELOCKS_INIT
;
9590 /* Attach and link in the protocol */
9591 if (dhd_prot_attach(&dhd
->pub
) != 0) {
9592 DHD_ERROR(("dhd_prot_attach failed\n"));
9595 dhd_state
|= DHD_ATTACH_STATE_PROT_ATTACH
;
9598 /* attach the timesync module */
9599 if (dhd_timesync_attach(&dhd
->pub
) != 0) {
9600 DHD_ERROR(("dhd_timesync_attach failed\n"));
9603 dhd_state
|= DHD_ATTACH_TIMESYNC_ATTACH_DONE
;
9604 #endif /* DHD_TIMESYNC */
9607 spin_lock_init(&dhd
->pub
.up_lock
);
9608 /* Attach and link in the cfg80211 */
9609 if (unlikely(wl_cfg80211_attach(net
, &dhd
->pub
))) {
9610 DHD_ERROR(("wl_cfg80211_attach failed\n"));
9614 dhd_monitor_init(&dhd
->pub
);
9615 dhd_state
|= DHD_ATTACH_STATE_CFG80211
;
9618 dhd_log_dump_init(&dhd
->pub
);
9619 #endif /* DHD_LOG_DUMP */
9620 #if defined(WL_WIRELESS_EXT)
9621 /* Attach and link in the iw */
9622 if (!(dhd_state
& DHD_ATTACH_STATE_CFG80211
)) {
9623 if (wl_iw_attach(net
, (void *)&dhd
->pub
) != 0) {
9624 DHD_ERROR(("wl_iw_attach failed\n"));
9627 dhd_state
|= DHD_ATTACH_STATE_WL_ATTACH
;
9630 wl_escan_attach(net
, &dhd
->pub
);
9631 #endif /* WL_ESCAN */
9632 #endif /* defined(WL_WIRELESS_EXT) */
9634 #ifdef SHOW_LOGTRACE
9635 ret
= dhd_init_logstrs_array(osh
, &dhd
->event_data
);
9636 if (ret
== BCME_OK
) {
9637 dhd_init_static_strs_array(osh
, &dhd
->event_data
, st_str_file_path
, map_file_path
);
9638 dhd_init_static_strs_array(osh
, &dhd
->event_data
, rom_st_str_file_path
,
9640 dhd_state
|= DHD_ATTACH_LOGTRACE_INIT
;
9642 #endif /* SHOW_LOGTRACE */
9645 /* attach debug if support */
9646 if (dhd_os_dbg_attach(&dhd
->pub
)) {
9647 DHD_ERROR(("%s debug module attach failed\n", __FUNCTION__
));
9652 dhd
->pub
.dbg
->pkt_mon_lock
= dhd_os_spin_lock_init(dhd
->pub
.osh
);
9653 #ifdef DBG_PKT_MON_INIT_DEFAULT
9654 dhd_os_dbg_attach_pkt_monitor(&dhd
->pub
);
9655 #endif /* DBG_PKT_MON_INIT_DEFAULT */
9656 #endif /* DBG_PKT_MON */
9657 #endif /* DEBUGABILITY */
9658 #ifdef DHD_PKT_LOGGING
9659 dhd_os_attach_pktlog(&dhd
->pub
);
9660 #endif /* DHD_PKT_LOGGING */
9662 if (dhd_sta_pool_init(&dhd
->pub
, DHD_MAX_STA
) != BCME_OK
) {
9663 DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__
, DHD_MAX_STA
));
9670 /* Set up the watchdog timer */
9671 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
9672 timer_setup(&dhd
->timer
, dhd_watchdog
, 0);
9674 init_timer(&dhd
->timer
);
9675 dhd
->timer
.data
= (ulong
)dhd
;
9676 dhd
->timer
.function
= dhd_watchdog
;
9678 dhd
->default_wd_interval
= dhd_watchdog_ms
;
9680 if (dhd_watchdog_prio
>= 0) {
9681 /* Initialize watchdog thread */
9682 PROC_START(dhd_watchdog_thread
, dhd
, &dhd
->thr_wdt_ctl
, 0, "dhd_watchdog_thread");
9683 if (dhd
->thr_wdt_ctl
.thr_pid
< 0) {
9688 dhd
->thr_wdt_ctl
.thr_pid
= -1;
9691 #ifdef DHD_PCIE_RUNTIMEPM
9692 /* Setup up the runtime PM Idlecount timer */
9693 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
9694 timer_setup(&dhd
->rpm_timer
, dhd_runtimepm
, 0);
9696 init_timer(&dhd
->rpm_timer
);
9697 dhd
->rpm_timer
.data
= (ulong
)dhd
;
9698 dhd
->rpm_timer
.function
= dhd_runtimepm
;
9700 dhd
->rpm_timer_valid
= FALSE
;
9702 dhd
->thr_rpm_ctl
.thr_pid
= DHD_PID_KT_INVALID
;
9703 PROC_START(dhd_rpm_state_thread
, dhd
, &dhd
->thr_rpm_ctl
, 0, "dhd_rpm_state_thread");
9704 if (dhd
->thr_rpm_ctl
.thr_pid
< 0) {
9707 #endif /* DHD_PCIE_RUNTIMEPM */
9710 debugger_init((void *) bus
);
9713 /* Set up the bottom half handler */
9714 if (dhd_dpc_prio
>= 0) {
9715 /* Initialize DPC thread */
9716 PROC_START(dhd_dpc_thread
, dhd
, &dhd
->thr_dpc_ctl
, 0, "dhd_dpc");
9717 if (dhd
->thr_dpc_ctl
.thr_pid
< 0) {
9721 /* use tasklet for dpc */
9722 tasklet_init(&dhd
->tasklet
, dhd_dpc
, (ulong
)dhd
);
9723 dhd
->thr_dpc_ctl
.thr_pid
= -1;
9726 if (dhd
->rxthread_enabled
) {
9727 bzero(&dhd
->pub
.skbbuf
[0], sizeof(void *) * MAXSKBPEND
);
9728 /* Initialize RXF thread */
9729 PROC_START(dhd_rxf_thread
, dhd
, &dhd
->thr_rxf_ctl
, 0, "dhd_rxf");
9730 if (dhd
->thr_rxf_ctl
.thr_pid
< 0) {
9734 #endif /* !BCMDBUS */
9735 #ifdef SHOW_LOGTRACE
9736 skb_queue_head_init(&dhd
->evt_trace_queue
);
9737 #endif /* SHOW_LOGTRACE */
9739 dhd_state
|= DHD_ATTACH_STATE_THREADS_CREATED
;
9741 #if defined(CONFIG_PM_SLEEP)
9742 if (!dhd_pm_notifier_registered
) {
9743 dhd_pm_notifier_registered
= TRUE
;
9744 dhd
->pm_notifier
.notifier_call
= dhd_pm_callback
;
9745 dhd
->pm_notifier
.priority
= 10;
9746 register_pm_notifier(&dhd
->pm_notifier
);
9749 #endif /* CONFIG_PM_SLEEP */
9751 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
9752 dhd
->early_suspend
.level
= EARLY_SUSPEND_LEVEL_BLANK_SCREEN
+ 20;
9753 dhd
->early_suspend
.suspend
= dhd_early_suspend
;
9754 dhd
->early_suspend
.resume
= dhd_late_resume
;
9755 register_early_suspend(&dhd
->early_suspend
);
9756 dhd_state
|= DHD_ATTACH_STATE_EARLYSUSPEND_DONE
;
9757 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
9759 #ifdef ARP_OFFLOAD_SUPPORT
9760 dhd
->pend_ipaddr
= 0;
9761 if (!dhd_inetaddr_notifier_registered
) {
9762 dhd_inetaddr_notifier_registered
= TRUE
;
9763 register_inetaddr_notifier(&dhd_inetaddr_notifier
);
9765 #endif /* ARP_OFFLOAD_SUPPORT */
9767 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
9768 if (!dhd_inet6addr_notifier_registered
) {
9769 dhd_inet6addr_notifier_registered
= TRUE
;
9770 register_inet6addr_notifier(&dhd_inet6addr_notifier
);
9772 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
9773 dhd
->dhd_deferred_wq
= dhd_deferred_work_init((void *)dhd
);
9774 #ifdef DEBUG_CPU_FREQ
9775 dhd
->new_freq
= alloc_percpu(int);
9776 dhd
->freq_trans
.notifier_call
= dhd_cpufreq_notifier
;
9777 cpufreq_register_notifier(&dhd
->freq_trans
, CPUFREQ_TRANSITION_NOTIFIER
);
9779 #ifdef DHDTCPACK_SUPPRESS
9780 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_DEFAULT
);
9781 #endif /* DHDTCPACK_SUPPRESS */
9783 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
9784 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
9787 #ifdef DHD_DEBUG_PAGEALLOC
9788 register_page_corrupt_cb(dhd_page_corrupt_cb
, &dhd
->pub
);
9789 #endif /* DHD_DEBUG_PAGEALLOC */
9793 dhd_lb_set_default_cpus(dhd
);
9795 /* Initialize the CPU Masks */
9796 if (dhd_cpumasks_init(dhd
) == 0) {
9797 /* Now we have the current CPU maps, run through candidacy */
9798 dhd_select_cpu_candidacy(dhd
);
9800 * If we are able to initialize CPU masks, lets register to the
9801 * CPU Hotplug framework to change the CPU for each job dynamically
9802 * using candidacy algorithm.
9804 dhd
->cpu_notifier
.notifier_call
= dhd_cpu_callback
;
9805 register_hotcpu_notifier(&dhd
->cpu_notifier
); /* Register a callback */
9808 * We are unable to initialize CPU masks, so candidacy algorithm
9809 * won't run, but still Load Balancing will be honoured based
9810 * on the CPUs allocated for a given job statically during init
9812 dhd
->cpu_notifier
.notifier_call
= NULL
;
9813 DHD_ERROR(("%s():dhd_cpumasks_init failed CPUs for JOB would be static\n",
9818 #ifdef DHD_LB_TXP_DEFAULT_ENAB
9819 /* Trun ON the feature by default */
9820 atomic_set(&dhd
->lb_txp_active
, 1);
9822 /* Trun OFF the feature by default */
9823 atomic_set(&dhd
->lb_txp_active
, 0);
9824 #endif /* DHD_LB_TXP_DEFAULT_ENAB */
9825 #endif /* DHD_LB_TXP */
9827 DHD_LB_STATS_INIT(&dhd
->pub
);
9829 /* Initialize the Load Balancing Tasklets and Napi object */
9830 #if defined(DHD_LB_TXC)
9831 tasklet_init(&dhd
->tx_compl_tasklet
,
9832 dhd_lb_tx_compl_handler
, (ulong
)(&dhd
->pub
));
9833 INIT_WORK(&dhd
->tx_compl_dispatcher_work
, dhd_tx_compl_dispatcher_fn
);
9834 DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__
));
9835 #endif /* DHD_LB_TXC */
9837 #if defined(DHD_LB_RXC)
9838 tasklet_init(&dhd
->rx_compl_tasklet
,
9839 dhd_lb_rx_compl_handler
, (ulong
)(&dhd
->pub
));
9840 DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__
));
9841 #endif /* DHD_LB_RXC */
9843 #if defined(DHD_LB_RXP)
9844 __skb_queue_head_init(&dhd
->rx_pend_queue
);
9845 skb_queue_head_init(&dhd
->rx_napi_queue
);
9846 /* Initialize the work that dispatches NAPI job to a given core */
9847 INIT_WORK(&dhd
->rx_napi_dispatcher_work
, dhd_rx_napi_dispatcher_fn
);
9848 DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__
));
9849 #endif /* DHD_LB_RXP */
9851 #if defined(DHD_LB_TXP)
9852 INIT_WORK(&dhd
->tx_dispatcher_work
, dhd_tx_dispatcher_work
);
9853 skb_queue_head_init(&dhd
->tx_pend_queue
);
9854 /* Initialize the work that dispatches TX job to a given core */
9855 tasklet_init(&dhd
->tx_tasklet
,
9856 dhd_lb_tx_handler
, (ulong
)(dhd
));
9857 DHD_INFO(("%s load balance init tx_pend_queue\n", __FUNCTION__
));
9858 #endif /* DHD_LB_TXP */
9860 dhd_state
|= DHD_ATTACH_STATE_LB_ATTACH_DONE
;
9863 #ifdef SHOW_LOGTRACE
9864 INIT_WORK(&dhd
->event_log_dispatcher_work
, dhd_event_logtrace_process
);
9865 #endif /* SHOW_LOGTRACE */
9867 DHD_SSSR_MEMPOOL_INIT(&dhd
->pub
);
9869 #ifdef REPORT_FATAL_TIMEOUTS
9870 init_dhd_timeouts(&dhd
->pub
);
9871 #endif /* REPORT_FATAL_TIMEOUTS */
9873 dhd
->pub
.extended_trap_data
= MALLOCZ(osh
, BCMPCIE_EXT_TRAP_DATA_MAXLEN
);
9874 if (dhd
->pub
.extended_trap_data
== NULL
) {
9875 DHD_ERROR(("%s: Failed to alloc extended_trap_data\n", __FUNCTION__
));
9877 #endif /* BCMPCIE */
9879 (void)dhd_sysfs_init(dhd
);
9881 dhd_state
|= DHD_ATTACH_STATE_DONE
;
9882 dhd
->dhd_state
= dhd_state
;
9889 if (dhd_state
>= DHD_ATTACH_STATE_DHD_ALLOC
) {
9890 DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
9891 __FUNCTION__
, dhd_state
, &dhd
->pub
));
9892 dhd
->dhd_state
= dhd_state
;
9893 dhd_detach(&dhd
->pub
);
9894 dhd_free(&dhd
->pub
);
9900 int dhd_get_fw_mode(dhd_info_t
*dhdinfo
)
9902 if (strstr(dhdinfo
->fw_path
, "_apsta") != NULL
)
9903 return DHD_FLAG_HOSTAP_MODE
;
9904 if (strstr(dhdinfo
->fw_path
, "_p2p") != NULL
)
9905 return DHD_FLAG_P2P_MODE
;
9906 if (strstr(dhdinfo
->fw_path
, "_ibss") != NULL
)
9907 return DHD_FLAG_IBSS_MODE
;
9908 if (strstr(dhdinfo
->fw_path
, "_mfg") != NULL
)
9909 return DHD_FLAG_MFG_MODE
;
9911 return DHD_FLAG_STA_MODE
;
9914 int dhd_bus_get_fw_mode(dhd_pub_t
*dhdp
)
9916 return dhd_get_fw_mode(dhdp
->info
);
/*
 * dhd_update_fw_nv_path() - refresh dhdinfo's firmware/nvram/clm/config
 * (and, with DHD_UCODE_DOWNLOAD, ucode) file paths.
 *
 * Path sources in increasing priority: Kconfig defaults, the platform
 * adapter info, and the firmware_path/nvram_path/clm_path/config_path
 * module parameters.  Copied paths get a trailing '\n' stripped, and the
 * module-parameter buffers are cleared afterwards (when
 * dhd_download_fw_on_driverload is set) so stale values are not reused.
 *
 * NOTE(review): this extracted view is missing several original lines
 * (error returns, closing braces, the module-parameter assignments under
 * the '[0] != '\0'' checks), so the text below is not complete as shown —
 * verify against the pristine source before editing.
 */
9919 bool dhd_update_fw_nv_path(dhd_info_t
*dhdinfo
)
9925 const char *fw
= NULL
;
9926 const char *nv
= NULL
;
9927 const char *clm
= NULL
;
9928 const char *conf
= NULL
;
9929 #ifdef DHD_UCODE_DOWNLOAD
9931 const char *uc
= NULL
;
9932 #endif /* DHD_UCODE_DOWNLOAD */
9933 wifi_adapter_info_t
*adapter
= dhdinfo
->adapter
;
9934 int fw_path_len
= sizeof(dhdinfo
->fw_path
);
9935 int nv_path_len
= sizeof(dhdinfo
->nv_path
);
9938 /* Update firmware and nvram path. The path may be from adapter info or module parameter
9939 * The path from adapter info is used for initialization only (as it won't change).
9941 * The firmware_path/nvram_path module parameter may be changed by the system at run
9942 * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
9943 * command may change dhdinfo->fw_path. As such we need to clear the path info in
9944 * module parameter after it is copied. We won't update the path until the module parameter
9945 * is changed again (first character is not '\0')
9948 /* set default firmware and nvram path for built-in type driver */
9949 // if (!dhd_download_fw_on_driverload) {
9950 #ifdef CONFIG_BCMDHD_FW_PATH
9951 fw
= CONFIG_BCMDHD_FW_PATH
;
9952 #endif /* CONFIG_BCMDHD_FW_PATH */
9953 #ifdef CONFIG_BCMDHD_NVRAM_PATH
9954 nv
= CONFIG_BCMDHD_NVRAM_PATH
;
9955 #endif /* CONFIG_BCMDHD_NVRAM_PATH */
/* Adapter-provided paths are only taken when the dhdinfo copy is still empty. */
9958 /* check if we need to initialize the path */
9959 if (dhdinfo
->fw_path
[0] == '\0') {
9960 if (adapter
&& adapter
->fw_path
&& adapter
->fw_path
[0] != '\0')
9961 fw
= adapter
->fw_path
;
9964 if (dhdinfo
->nv_path
[0] == '\0') {
9965 if (adapter
&& adapter
->nv_path
&& adapter
->nv_path
[0] != '\0')
9966 nv
= adapter
->nv_path
;
9968 if (dhdinfo
->clm_path
[0] == '\0') {
9969 if (adapter
&& adapter
->clm_path
&& adapter
->clm_path
[0] != '\0')
9970 clm
= adapter
->clm_path
;
9972 if (dhdinfo
->conf_path
[0] == '\0') {
9973 if (adapter
&& adapter
->conf_path
&& adapter
->conf_path
[0] != '\0')
9974 conf
= adapter
->conf_path
;
/* Module parameters win over everything; the assignments that follow each
 * check here were dropped by the extraction (presumably fw = firmware_path;
 * etc. — TODO confirm against pristine source). */
9977 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
9979 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
9981 if (firmware_path
[0] != '\0')
9983 if (nvram_path
[0] != '\0')
9985 if (clm_path
[0] != '\0')
9987 if (config_path
[0] != '\0')
9989 #ifdef DHD_UCODE_DOWNLOAD
9990 if (ucode_path
[0] != '\0')
9992 #endif /* DHD_UCODE_DOWNLOAD */
/* Copy each resolved path into dhdinfo, rejecting over-long values and
 * stripping a trailing newline (paths read from files often carry one). */
9994 if (fw
&& fw
[0] != '\0') {
9995 fw_len
= strlen(fw
);
9996 if (fw_len
>= fw_path_len
) {
9997 DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
10000 strncpy(dhdinfo
->fw_path
, fw
, fw_path_len
);
10001 if (dhdinfo
->fw_path
[fw_len
-1] == '\n')
10002 dhdinfo
->fw_path
[fw_len
-1] = '\0';
10004 if (nv
&& nv
[0] != '\0') {
10005 nv_len
= strlen(nv
);
10006 if (nv_len
>= nv_path_len
) {
10007 DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
10010 memset(dhdinfo
->nv_path
, 0, nv_path_len
);
10011 strncpy(dhdinfo
->nv_path
, nv
, nv_path_len
);
10012 #ifdef DHD_USE_SINGLE_NVRAM_FILE
/* Rewrite ".../nvram_<tag>.txt" to the single shared ".../nvram.txt",
 * after checking the buffer can hold the rewritten name. */
10013 /* Remove "_net" or "_mfg" tag from current nvram path */
10015 char *nvram_tag
= "nvram_";
10016 char *ext_tag
= ".txt";
10017 char *sp_nvram
= strnstr(dhdinfo
->nv_path
, nvram_tag
, nv_path_len
);
10018 bool valid_buf
= sp_nvram
&& ((uint32
)(sp_nvram
+ strlen(nvram_tag
) +
10019 strlen(ext_tag
) - dhdinfo
->nv_path
) <= nv_path_len
);
10021 char *sp
= sp_nvram
+ strlen(nvram_tag
) - 1;
10022 uint32 padding_size
= (uint32
)(dhdinfo
->nv_path
+
10024 memset(sp
, 0, padding_size
);
10025 strncat(dhdinfo
->nv_path
, ext_tag
, strlen(ext_tag
));
10026 nv_len
= strlen(dhdinfo
->nv_path
);
10027 DHD_INFO(("%s: new nvram path = %s\n",
10028 __FUNCTION__
, dhdinfo
->nv_path
));
10029 } else if (sp_nvram
) {
10030 DHD_ERROR(("%s: buffer space for nvram path is not enough\n",
10034 DHD_ERROR(("%s: Couldn't find the nvram tag. current"
10035 " nvram path = %s\n", __FUNCTION__
, dhdinfo
->nv_path
));
10038 #endif /* DHD_USE_SINGLE_NVRAM_FILE */
10039 if (dhdinfo
->nv_path
[nv_len
-1] == '\n')
10040 dhdinfo
->nv_path
[nv_len
-1] = '\0';
10042 if (clm
&& clm
[0] != '\0') {
10043 clm_len
= strlen(clm
);
10044 if (clm_len
>= sizeof(dhdinfo
->clm_path
)) {
10045 DHD_ERROR(("clm path len exceeds max len of dhdinfo->clm_path\n"));
10048 strncpy(dhdinfo
->clm_path
, clm
, sizeof(dhdinfo
->clm_path
));
10049 if (dhdinfo
->clm_path
[clm_len
-1] == '\n')
10050 dhdinfo
->clm_path
[clm_len
-1] = '\0';
10052 if (conf
&& conf
[0] != '\0') {
10053 conf_len
= strlen(conf
);
10054 if (conf_len
>= sizeof(dhdinfo
->conf_path
)) {
10055 DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
10058 strncpy(dhdinfo
->conf_path
, conf
, sizeof(dhdinfo
->conf_path
));
10059 if (dhdinfo
->conf_path
[conf_len
-1] == '\n')
10060 dhdinfo
->conf_path
[conf_len
-1] = '\0';
10062 #ifdef DHD_UCODE_DOWNLOAD
10063 if (uc
&& uc
[0] != '\0') {
10064 uc_len
= strlen(uc
);
10065 if (uc_len
>= sizeof(dhdinfo
->uc_path
)) {
10066 DHD_ERROR(("uc path len exceeds max len of dhdinfo->uc_path\n"));
10069 strncpy(dhdinfo
->uc_path
, uc
, sizeof(dhdinfo
->uc_path
));
10070 if (dhdinfo
->uc_path
[uc_len
-1] == '\n')
10071 dhdinfo
->uc_path
[uc_len
-1] = '\0';
10073 #endif /* DHD_UCODE_DOWNLOAD */
/* Consume the module parameters so a later call does not re-apply them. */
10076 /* clear the path in module parameter */
10077 if (dhd_download_fw_on_driverload
) {
10078 firmware_path
[0] = '\0';
10079 nvram_path
[0] = '\0';
10080 clm_path
[0] = '\0';
10081 config_path
[0] = '\0';
10084 #ifdef DHD_UCODE_DOWNLOAD
10085 ucode_path
[0] = '\0';
10086 DHD_ERROR(("ucode path: %s\n", dhdinfo
->uc_path
));
10087 #endif /* DHD_UCODE_DOWNLOAD */
/* Without an embedded image, fw and nvram paths are mandatory; the failure
 * returns following these checks were dropped by the extraction. */
10089 #ifndef BCMEMBEDIMAGE
10090 /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
10091 if (dhdinfo
->fw_path
[0] == '\0') {
10092 DHD_ERROR(("firmware path not found\n"));
10095 if (dhdinfo
->nv_path
[0] == '\0') {
10096 DHD_ERROR(("nvram path not found\n"));
10099 #endif /* BCMEMBEDIMAGE */
10104 #if defined(BT_OVER_SDIO)
/*
 * dhd_update_btfw_path() - refresh dhdinfo->btfw_path from (in increasing
 * priority) the CONFIG_BCMDHD_BTFW_PATH default, the platform adapter info,
 * and the btfw_path module parameter.  Strips a trailing '\n' from the
 * copied value and clears the module parameter afterwards.
 *
 * NOTE(review): the extracted view drops some lines (the module-parameter
 * assignment under the '[0] != '\0'' check, failure returns, and the final
 * return) — verify against the pristine source before editing.
 */
10105 extern bool dhd_update_btfw_path(dhd_info_t
*dhdinfo
, char* btfw_path
)
10108 const char *fw
= NULL
;
10109 wifi_adapter_info_t
*adapter
= dhdinfo
->adapter
;
10112 /* Update bt firmware path. The path may be from adapter info or module parameter
10113 * The path from adapter info is used for initialization only (as it won't change).
10115 * The btfw_path module parameter may be changed by the system at run
10116 * time. When it changes we need to copy it to dhdinfo->btfw_path. Also Android private
10117 * command may change dhdinfo->btfw_path. As such we need to clear the path info in
10118 * module parameter after it is copied. We won't update the path until the module parameter
10119 * is changed again (first character is not '\0')
10122 /* set default firmware and nvram path for built-in type driver */
10123 if (!dhd_download_fw_on_driverload
) {
10124 #ifdef CONFIG_BCMDHD_BTFW_PATH
10125 fw
= CONFIG_BCMDHD_BTFW_PATH
;
10126 #endif /* CONFIG_BCMDHD_FW_PATH */
/* Adapter path only applies while the dhdinfo copy is still empty. */
10129 /* check if we need to initialize the path */
10130 if (dhdinfo
->btfw_path
[0] == '\0') {
10131 if (adapter
&& adapter
->btfw_path
&& adapter
->btfw_path
[0] != '\0')
10132 fw
= adapter
->btfw_path
;
10135 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
10137 if (btfw_path
[0] != '\0')
/* Bounded copy with newline strip, mirroring dhd_update_fw_nv_path(). */
10140 if (fw
&& fw
[0] != '\0') {
10141 fw_len
= strlen(fw
);
10142 if (fw_len
>= sizeof(dhdinfo
->btfw_path
)) {
10143 DHD_ERROR(("fw path len exceeds max len of dhdinfo->btfw_path\n"));
10146 strncpy(dhdinfo
->btfw_path
, fw
, sizeof(dhdinfo
->btfw_path
));
10147 if (dhdinfo
->btfw_path
[fw_len
-1] == '\n')
10148 dhdinfo
->btfw_path
[fw_len
-1] = '\0';
10151 /* clear the path in module parameter */
10152 btfw_path
[0] = '\0';
10154 if (dhdinfo
->btfw_path
[0] == '\0') {
10155 DHD_ERROR(("bt firmware path not found\n"));
10161 #endif /* defined (BT_OVER_SDIO) */
10164 #ifdef CUSTOMER_HW4_DEBUG
/*
 * dhd_validate_chipid() - sanity-check that the chip id probed from the bus
 * matches the single chip id this driver build was configured for (via the
 * BCMxxxx_CHIP Kconfig selection).  A few known chip-id aliases are accepted
 * in dedicated special cases.  Returns TRUE on a match.
 *
 * NOTE(review): early-return lines for the special cases and the unknown-chip
 * branch were dropped by the extraction — verify against pristine source.
 */
10165 bool dhd_validate_chipid(dhd_pub_t
*dhdp
)
10167 uint chipid
= dhd_bus_chip_id(dhdp
);
10168 uint config_chipid
;
/* Build-time chip selection: exactly one BCMxxxx_CHIP macro picks the
 * expected chip id. */
10170 #ifdef BCM4361_CHIP
10171 config_chipid
= BCM4361_CHIP_ID
;
10172 #elif defined(BCM4359_CHIP)
10173 config_chipid
= BCM4359_CHIP_ID
;
10174 #elif defined(BCM4358_CHIP)
10175 config_chipid
= BCM4358_CHIP_ID
;
10176 #elif defined(BCM4354_CHIP)
10177 config_chipid
= BCM4354_CHIP_ID
;
10178 #elif defined(BCM4339_CHIP)
10179 config_chipid
= BCM4339_CHIP_ID
;
10180 #elif defined(BCM43349_CHIP)
10181 config_chipid
= BCM43349_CHIP_ID
;
10182 #elif defined(BCM4335_CHIP)
10183 config_chipid
= BCM4335_CHIP_ID
;
10184 #elif defined(BCM43241_CHIP)
10185 config_chipid
= BCM4324_CHIP_ID
;
10186 #elif defined(BCM4330_CHIP)
10187 config_chipid
= BCM4330_CHIP_ID
;
10188 #elif defined(BCM43430_CHIP)
10189 config_chipid
= BCM43430_CHIP_ID
;
10190 #elif defined(BCM43018_CHIP)
10191 config_chipid
= BCM43018_CHIP_ID
;
10192 #elif defined(BCM43455_CHIP)
10193 config_chipid
= BCM4345_CHIP_ID
;
10194 #elif defined(BCM4334W_CHIP)
10195 config_chipid
= BCM43342_CHIP_ID
;
10196 #elif defined(BCM43454_CHIP)
10197 config_chipid
= BCM43454_CHIP_ID
;
10198 #elif defined(BCM43012_CHIP_)
10199 config_chipid
= BCM43012_CHIP_ID
;
10201 DHD_ERROR(("%s: Unknown chip id, if you use new chipset,"
10202 " please add CONFIG_BCMXXXX into the Kernel and"
10203 " BCMXXXX_CHIP definition into the DHD driver\n",
10208 #endif /* BCM4354_CHIP */
/* Alias handling: 43454/4345 family share binaries; 4355 reports as 4359,
 * 4347 as 4361.  The accepting returns inside these branches were dropped
 * by the extraction. */
10210 #ifdef SUPPORT_MULTIPLE_CHIP_4345X
10211 if (config_chipid
== BCM43454_CHIP_ID
|| config_chipid
== BCM4345_CHIP_ID
) {
10214 #endif /* SUPPORT_MULTIPLE_CHIP_4345X */
10215 #if defined(BCM4359_CHIP)
10216 if (chipid
== BCM4355_CHIP_ID
&& config_chipid
== BCM4359_CHIP_ID
) {
10219 #endif /* BCM4359_CHIP */
10220 #if defined(BCM4361_CHIP)
10221 if (chipid
== BCM4347_CHIP_ID
&& config_chipid
== BCM4361_CHIP_ID
) {
10224 #endif /* BCM4361_CHIP */
10226 return config_chipid
== chipid
;
10228 #endif /* CUSTOMER_HW4_DEBUG */
10230 #if defined(BT_OVER_SDIO)
10231 wlan_bt_handle_t
dhd_bt_get_pub_hndl(void)
10233 DHD_ERROR(("%s: g_dhd_pub %p\n", __FUNCTION__
, g_dhd_pub
));
10234 /* assuming that dhd_pub_t type pointer is available from a global variable */
10235 return (wlan_bt_handle_t
) g_dhd_pub
;
10236 } EXPORT_SYMBOL(dhd_bt_get_pub_hndl
);
/*
 * dhd_download_btfw() - download the BT firmware image to the dongle over
 * the shared SDIO bus.  Only proceeds once the WLAN bus is in DHD_BUS_DATA
 * state and a valid BT firmware path has been resolved via
 * dhd_update_btfw_path().  Exported for use by the BT driver.
 *
 * NOTE(review): the 'ret' declaration, the error-branch opening, and the
 * final return were dropped by the extraction — verify against pristine
 * source before editing.
 */
10238 int dhd_download_btfw(wlan_bt_handle_t handle
, char* btfw_path
)
10241 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handle
;
10242 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
10245 /* Download BT firmware image to the dongle */
10246 if (dhd
->pub
.busstate
== DHD_BUS_DATA
&& dhd_update_btfw_path(dhd
, btfw_path
)) {
10247 DHD_INFO(("%s: download btfw from: %s\n", __FUNCTION__
, dhd
->btfw_path
));
10248 ret
= dhd_bus_download_btfw(dhd
->pub
.bus
, dhd
->pub
.osh
, dhd
->btfw_path
);
10250 DHD_ERROR(("%s: failed to download btfw from: %s\n",
10251 __FUNCTION__
, dhd
->btfw_path
));
10256 } EXPORT_SYMBOL(dhd_download_btfw
);
10257 #endif /* defined (BT_OVER_SDIO) */
/*
 * dhd_bus_start() - bring the dongle bus fully up.
 *
 * Sequence (order matters): resolve fw/nv paths and download firmware,
 * start the watchdog timer, initialize the bus (dhd_bus_init), register
 * the OOB host-wake interrupt where configured, initialize flow rings
 * (PCIe full dongle), run protocol init, then sync with the dongle.
 * Each failure path stops the watchdog and drops the perimeter lock
 * before returning.
 *
 * NOTE(review): many error-path lines (returns, brace closures, #ifdef
 * BCMSDIO openers) were dropped by the extraction — the text below is not
 * complete as shown; verify against the pristine source before editing.
 */
10261 dhd_bus_start(dhd_pub_t
*dhdp
)
10264 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
10265 unsigned long flags
;
10267 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10268 int fw_download_start
= 0, fw_download_end
= 0, f2_sync_start
= 0, f2_sync_end
= 0;
10269 #endif /* DHD_DEBUG && BCMSDIO */
10272 DHD_TRACE(("Enter %s:\n", __FUNCTION__
));
10274 DHD_PERIM_LOCK(dhdp
);
10275 #ifdef HOFFLOAD_MODULES
10276 dhd_linux_get_modfw_address(dhdp
);
/* Firmware download happens only from the DOWN state with resolvable paths. */
10278 /* try to download image and nvram to the dongle */
10279 if (dhd
->pub
.busstate
== DHD_BUS_DOWN
&& dhd_update_fw_nv_path(dhd
)) {
10280 /* Indicate FW Download has not yet done */
10281 dhd
->pub
.fw_download_done
= FALSE
;
10282 DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
10283 __FUNCTION__
, dhd
->fw_path
, dhd
->nv_path
, dhd
->conf_path
));
10284 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10285 fw_download_start
= OSL_SYSUPTIME();
10286 #endif /* DHD_DEBUG && BCMSDIO */
10287 ret
= dhd_bus_download_firmware(dhd
->pub
.bus
, dhd
->pub
.osh
,
10288 dhd
->fw_path
, dhd
->nv_path
, dhd
->clm_path
, dhd
->conf_path
);
10289 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10290 fw_download_end
= OSL_SYSUPTIME();
10291 #endif /* DHD_DEBUG && BCMSDIO */
10293 DHD_ERROR(("%s: failed to download firmware %s\n",
10294 __FUNCTION__
, dhd
->fw_path
));
10295 DHD_PERIM_UNLOCK(dhdp
);
10298 /* Indicate FW Download has succeeded */
10299 dhd
->pub
.fw_download_done
= TRUE
;
10301 if (dhd
->pub
.busstate
!= DHD_BUS_LOAD
) {
10302 DHD_PERIM_UNLOCK(dhdp
);
10307 dhd_os_sdlock(dhdp
);
10308 #endif /* BCMSDIO */
10310 /* Start the watchdog timer */
10311 dhd
->pub
.tickcnt
= 0;
10312 dhd_os_wd_timer(&dhd
->pub
, dhd_watchdog_ms
);
10314 /* Bring up the bus */
10315 if ((ret
= dhd_bus_init(&dhd
->pub
, FALSE
)) != 0) {
10317 DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__
, ret
));
10319 dhd_os_sdunlock(dhdp
);
10320 #endif /* BCMSDIO */
10321 DHD_PERIM_UNLOCK(dhdp
);
10325 DHD_ENABLE_RUNTIME_PM(&dhd
->pub
);
10328 dhd_ulp_set_ulp_state(dhdp
, DHD_ULP_DISABLED
);
10329 #endif /* DHD_ULP */
/* OOB host-wake: registration failure tears the watchdog down again. */
10330 #if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
10331 /* Host registration for OOB interrupt */
10332 if (dhd_bus_oob_intr_register(dhdp
)) {
10333 /* deactivate timer and wait for the handler to finish */
10334 #if !defined(BCMPCIE_OOB_HOST_WAKE)
10335 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
10336 dhd
->wd_timer_valid
= FALSE
;
10337 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
10338 del_timer_sync(&dhd
->timer
);
10340 #endif /* !BCMPCIE_OOB_HOST_WAKE */
10341 DHD_DISABLE_RUNTIME_PM(&dhd
->pub
);
10342 DHD_PERIM_UNLOCK(dhdp
);
10343 DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__
));
10347 #if defined(BCMPCIE_OOB_HOST_WAKE)
10348 dhd_bus_oob_intr_set(dhdp
, TRUE
);
10350 /* Enable oob at firmware */
10351 dhd_enable_oob_intr(dhd
->pub
.bus
, TRUE
);
10352 #endif /* BCMPCIE_OOB_HOST_WAKE */
10353 #elif defined(FORCE_WOWLAN)
10354 /* Enable oob at firmware */
10355 dhd_enable_oob_intr(dhd
->pub
.bus
, TRUE
);
10357 #ifdef PCIE_FULL_DONGLE
10359 /* max_h2d_rings includes H2D common rings */
10360 uint32 max_h2d_rings
= dhd_bus_max_h2d_queues(dhd
->pub
.bus
);
10362 DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__
,
10364 if ((ret
= dhd_flow_rings_init(&dhd
->pub
, max_h2d_rings
)) != BCME_OK
) {
10366 dhd_os_sdunlock(dhdp
);
10367 #endif /* BCMSDIO */
10368 DHD_PERIM_UNLOCK(dhdp
);
10372 #endif /* PCIE_FULL_DONGLE */
10374 /* Do protocol initialization necessary for IOCTL/IOVAR */
10375 ret
= dhd_prot_init(&dhd
->pub
);
10376 if (unlikely(ret
) != BCME_OK
) {
10377 DHD_PERIM_UNLOCK(dhdp
);
10378 DHD_OS_WD_WAKE_UNLOCK(&dhd
->pub
);
10382 /* If bus is not ready, can't come up */
10383 if (dhd
->pub
.busstate
!= DHD_BUS_DATA
) {
10384 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
10385 dhd
->wd_timer_valid
= FALSE
;
10386 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
10387 del_timer_sync(&dhd
->timer
);
10388 DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__
));
10389 DHD_DISABLE_RUNTIME_PM(&dhd
->pub
);
10391 dhd_os_sdunlock(dhdp
);
10392 #endif /* BCMSDIO */
10393 DHD_PERIM_UNLOCK(dhdp
);
10398 dhd_os_sdunlock(dhdp
);
10399 #endif /* BCMSDIO */
10401 /* Bus is ready, query any dongle information */
10402 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10403 f2_sync_start
= OSL_SYSUPTIME();
10404 #endif /* DHD_DEBUG && BCMSDIO */
10405 if ((ret
= dhd_sync_with_dongle(&dhd
->pub
)) < 0) {
10406 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
10407 dhd
->wd_timer_valid
= FALSE
;
10408 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
10409 del_timer_sync(&dhd
->timer
);
10410 DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__
));
10411 DHD_OS_WD_WAKE_UNLOCK(&dhd
->pub
);
10412 DHD_PERIM_UNLOCK(dhdp
);
10415 #if defined(CONFIG_SOC_EXYNOS8895)
10416 DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__
));
10417 exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI
);
10418 #endif /* CONFIG_SOC_EXYNOS8895 */
10420 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10421 f2_sync_end
= OSL_SYSUPTIME();
10422 DHD_ERROR(("Time taken for FW download and F2 ready is: %d msec\n",
10423 (fw_download_end
- fw_download_start
) + (f2_sync_end
- f2_sync_start
)));
10424 #endif /* DHD_DEBUG && BCMSDIO */
/* Replay any IP address that arrived before the bus was up into the ARP
 * offload table, then clear the pending record. */
10426 #ifdef ARP_OFFLOAD_SUPPORT
10427 if (dhd
->pend_ipaddr
) {
10428 #ifdef AOE_IP_ALIAS_SUPPORT
10429 aoe_update_host_ipv4_table(&dhd
->pub
, dhd
->pend_ipaddr
, TRUE
, 0);
10430 #endif /* AOE_IP_ALIAS_SUPPORT */
10431 dhd
->pend_ipaddr
= 0;
10433 #endif /* ARP_OFFLOAD_SUPPORT */
10435 #if defined(TRAFFIC_MGMT_DWM)
10436 bzero(&dhd
->pub
.dhd_tm_dwm_tbl
, sizeof(dhd_trf_mgmt_dwm_tbl_t
));
10438 DHD_PERIM_UNLOCK(dhdp
);
10441 #endif /* !BCMDBUS */
/*
 * _dhd_tdls_enable() - push the TDLS enable/disable state plus related
 * tuning iovars (tdls_auto_op, tdls_idle_time, tdls_rssi_high/low) to
 * firmware via dhd_iovar().
 *
 * Early-outs when firmware does not advertise TDLS support or when the
 * requested state matches dhd->tdls_enable.  The 'mac' argument is unused
 * here (BCM_REFERENCE only).
 *
 * NOTE(review): the 'ret' declaration, several 'if (ret < 0)' openers,
 * returns, and brace closures were dropped by the extraction — verify
 * against pristine source before editing.
 */
10444 int _dhd_tdls_enable(dhd_pub_t
*dhd
, bool tdls_on
, bool auto_on
, struct ether_addr
*mac
)
10446 uint32 tdls
= tdls_on
;
10448 uint32 tdls_auto_op
= 0;
10449 uint32 tdls_idle_time
= CUSTOM_TDLS_IDLE_MODE_SETTING
;
10450 int32 tdls_rssi_high
= CUSTOM_TDLS_RSSI_THRESHOLD_HIGH
;
10451 int32 tdls_rssi_low
= CUSTOM_TDLS_RSSI_THRESHOLD_LOW
;
10452 BCM_REFERENCE(mac
);
10453 if (!FW_SUPPORTED(dhd
, tdls
))
10456 if (dhd
->tdls_enable
== tdls_on
)
10458 ret
= dhd_iovar(dhd
, 0, "tdls_enable", (char *)&tdls
, sizeof(tdls
), NULL
, 0, TRUE
);
10460 DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__
, tdls
, ret
));
/* Cache the new state only after the firmware accepted it. */
10463 dhd
->tdls_enable
= tdls_on
;
10466 tdls_auto_op
= auto_on
;
10467 ret
= dhd_iovar(dhd
, 0, "tdls_auto_op", (char *)&tdls_auto_op
, sizeof(tdls_auto_op
), NULL
,
10470 DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__
, ret
));
/* The idle/RSSI thresholds only matter when automatic TDLS is on. */
10474 if (tdls_auto_op
) {
10475 ret
= dhd_iovar(dhd
, 0, "tdls_idle_time", (char *)&tdls_idle_time
,
10476 sizeof(tdls_idle_time
), NULL
, 0, TRUE
);
10478 DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__
, ret
));
10481 ret
= dhd_iovar(dhd
, 0, "tdls_rssi_high", (char *)&tdls_rssi_high
,
10482 sizeof(tdls_rssi_high
), NULL
, 0, TRUE
);
10484 DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__
, ret
));
10487 ret
= dhd_iovar(dhd
, 0, "tdls_rssi_low", (char *)&tdls_rssi_low
,
10488 sizeof(tdls_rssi_low
), NULL
, 0, TRUE
);
10490 DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__
, ret
));
/*
 * dhd_tdls_enable() - net_device-level entry point: resolve the driver
 * info from the netdev and delegate to _dhd_tdls_enable().
 *
 * NOTE(review): the null-check branch around the delegation and the return
 * statement were dropped by the extraction — verify against pristine source.
 */
10499 int dhd_tdls_enable(struct net_device
*dev
, bool tdls_on
, bool auto_on
, struct ether_addr
*mac
)
10501 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
10504 ret
= _dhd_tdls_enable(&dhd
->pub
, tdls_on
, auto_on
, mac
);
/*
 * dhd_tdls_set_mode() - switch TDLS between Wi-Fi-Display (WFD) and
 * normal operation.
 *
 * Sequence visible here: disable TDLS first (tdls_auto_op off), set the
 * "tdls_wfd_mode" iovar to @wfd_mode (an unsupported iovar on older
 * firmware — BCME_UNSUPPORTED — is tolerated), re-enable TDLS, and cache
 * the mode in dhd->tdls_mode.
 */
10511 dhd_tdls_set_mode(dhd_pub_t
*dhd
, bool wfd_mode
)
10514 bool auto_on
= false;
10515 uint32 mode
= wfd_mode
;
10517 #ifdef ENABLE_TDLS_AUTO_MODE
10525 #endif /* ENABLE_TDLS_AUTO_MODE */
/* tear TDLS down before changing the WFD mode */
10526 ret
= _dhd_tdls_enable(dhd
, false, auto_on
, NULL
);
10528 DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret
));
10532 ret
= dhd_iovar(dhd
, 0, "tdls_wfd_mode", (char *)&mode
, sizeof(mode
), NULL
, 0, TRUE
);
/* BCME_UNSUPPORTED is acceptable: firmware without WFD-mode support */
10533 if ((ret
< 0) && (ret
!= BCME_UNSUPPORTED
)) {
/* NOTE(review): "faile_wfd_mode" in this log string is a typo of
 * "failed"; candidate for a string fix in a full edit. */
10534 DHD_ERROR(("%s: tdls_wfd_mode faile_wfd_mode %d\n", __FUNCTION__
, ret
));
/* bring TDLS back up in the new mode */
10538 ret
= _dhd_tdls_enable(dhd
, true, auto_on
, NULL
);
10540 DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret
));
10544 dhd
->tdls_mode
= mode
;
10547 #ifdef PCIE_FULL_DONGLE
/*
 * dhd_tdls_update_peer_info() - keep the host-side TDLS peer list in
 * sync with WLC_E_TDLS_PEER_* firmware events (PCIe full-dongle only).
 *
 * Connect (WLC_E_TDLS_PEER_CONNECTED): reject duplicates, allocate a
 * tdls_peer_node_t, copy the peer MAC from the event, and link it at
 * the head of dhd_pub->peer_tbl under tdls_lock.
 * Disconnect (WLC_E_TDLS_PEER_DISCONNECTED): delete the peer's flow
 * rings, unlink and MFREE its node, decrement tdls_peer_count.
 *
 * NOTE(review): the connect/disconnect branch assignments to `connect`
 * and the return paths are not visible in this excerpt.
 */
10548 int dhd_tdls_update_peer_info(dhd_pub_t
*dhdp
, wl_event_msg_t
*event
)
10550 dhd_pub_t
*dhd_pub
= dhdp
;
10551 tdls_peer_node_t
*cur
= dhd_pub
->peer_tbl
.node
;
10552 tdls_peer_node_t
*new = NULL
, *prev
= NULL
;
/* map the event's interface name back to a DHD interface index */
10553 int ifindex
= dhd_ifname2idx(dhd_pub
->info
, event
->ifname
);
/* peer MAC address straight out of the event payload */
10554 uint8
*da
= (uint8
*)&event
->addr
.octet
[0];
10555 bool connect
= FALSE
;
10556 uint32 reason
= ntoh32(event
->reason
);
10557 unsigned long flags
;
10559 if (reason
== WLC_E_TDLS_PEER_CONNECTED
)
10561 else if (reason
== WLC_E_TDLS_PEER_DISCONNECTED
)
10565 DHD_ERROR(("%s: TDLS Event reason is unknown\n", __FUNCTION__
));
10568 if (ifindex
== DHD_BAD_IF
)
/* connect path: scan the list and reject an already-known peer */
10572 while (cur
!= NULL
) {
10573 if (!memcmp(da
, cur
->addr
, ETHER_ADDR_LEN
)) {
10574 DHD_ERROR(("%s: TDLS Peer exist already %d\n",
10575 __FUNCTION__
, __LINE__
));
10581 new = MALLOC(dhd_pub
->osh
, sizeof(tdls_peer_node_t
));
10583 DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__
));
10586 memcpy(new->addr
, da
, ETHER_ADDR_LEN
);
/* list mutation is protected by the TDLS spinlock */
10587 DHD_TDLS_LOCK(&dhdp
->tdls_lock
, flags
);
/* insert at head of the peer table */
10588 new->next
= dhd_pub
->peer_tbl
.node
;
10589 dhd_pub
->peer_tbl
.node
= new;
10590 dhd_pub
->peer_tbl
.tdls_peer_count
++;
10591 DHD_TDLS_UNLOCK(&dhdp
->tdls_lock
, flags
);
/* disconnect path: find the matching peer and remove it */
10594 while (cur
!= NULL
) {
10595 if (!memcmp(da
, cur
->addr
, ETHER_ADDR_LEN
)) {
/* tear down the flow rings associated with this peer first */
10596 dhd_flow_rings_delete_for_peer(dhd_pub
, (uint8
)ifindex
, da
);
10597 DHD_TDLS_LOCK(&dhdp
->tdls_lock
, flags
);
/* unlink: mid-list node ... */
10599 prev
->next
= cur
->next
;
/* ... or head node */
10601 dhd_pub
->peer_tbl
.node
= cur
->next
;
10602 MFREE(dhd_pub
->osh
, cur
, sizeof(tdls_peer_node_t
));
10603 dhd_pub
->peer_tbl
.tdls_peer_count
--;
10604 DHD_TDLS_UNLOCK(&dhdp
->tdls_lock
, flags
);
10610 DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__
));
10614 #endif /* PCIE_FULL_DONGLE */
/*
 * dhd_is_concurrent_mode() - report whether the driver is operating in
 * a concurrent (multi-role) mode, judged from dhd->op_mode flags:
 * multi-channel concurrency, or all bits of the single-channel
 * concurrency mask set.  (The TRUE/FALSE returns for each branch are
 * not visible in this excerpt.)
 */
10617 bool dhd_is_concurrent_mode(dhd_pub_t
*dhd
)
10622 if (dhd
->op_mode
& DHD_FLAG_CONCURR_MULTI_CHAN_MODE
)
/* single-chan mask must match exactly, hence the == comparison */
10624 else if ((dhd
->op_mode
& DHD_FLAG_CONCURR_SINGLE_CHAN_MODE
) ==
10625 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE
)
10630 #if !defined(AP) && defined(WLP2P)
10631 /* From Android JerryBean release, the concurrent mode is enabled by default and the firmware
10632 * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
10633 * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
10634 * would still be named as fw_bcmdhd_apsta.
/*
 * dhd_get_concurrent_capabilites() - probe firmware capability strings
 * and the "p2p" iovar to build a DHD_FLAG_* concurrency bitmask:
 * single-chan by default, plus multi-chan (vsdb), RSDB, and multi-P2P
 * when the firmware advertises them.  Returns 0-equivalent early when
 * op_mode is already pinned to HOSTAP or MFG mode, or when P2P is not
 * really implemented in the firmware.
 */
10637 dhd_get_concurrent_capabilites(dhd_pub_t
*dhd
)
10640 char buf
[WLC_IOCTL_SMLEN
];
10641 bool mchan_supported
= FALSE
;
10642 /* if dhd->op_mode is already set for HOSTAP and Manufacturing
10643 * test mode, that means we only will use the mode as it is
10645 if (dhd
->op_mode
& (DHD_FLAG_HOSTAP_MODE
| DHD_FLAG_MFG_MODE
))
/* VSDB capability implies multi-channel concurrency support */
10647 if (FW_SUPPORTED(dhd
, vsdb
)) {
10648 mchan_supported
= TRUE
;
10650 if (!FW_SUPPORTED(dhd
, p2p
)) {
10651 DHD_TRACE(("Chip does not support p2p\n"));
10654 /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
10655 memset(buf
, 0, sizeof(buf
));
10656 ret
= dhd_iovar(dhd
, 0, "p2p", NULL
, 0, (char *)&buf
,
10657 sizeof(buf
), FALSE
);
10659 DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__
, ret
));
10663 /* By default, chip supports single chan concurrency,
10664 * now lets check for mchan
10666 ret
= DHD_FLAG_CONCURR_SINGLE_CHAN_MODE
;
10667 if (mchan_supported
)
10668 ret
|= DHD_FLAG_CONCURR_MULTI_CHAN_MODE
;
10669 if (FW_SUPPORTED(dhd
, rsdb
)) {
10670 ret
|= DHD_FLAG_RSDB_MODE
;
10672 #ifdef WL_SUPPORT_MULTIP2P
10673 if (FW_SUPPORTED(dhd
, mp2p
)) {
10674 ret
|= DHD_FLAG_MP2P_MODE
;
10676 #endif /* WL_SUPPORT_MULTIP2P */
10677 #if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
10681 #endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
10689 #ifdef SUPPORT_AP_POWERSAVE
/* rxchain power-save tuning defaults: packets-per-second threshold,
 * quiet time (s), and whether associated-STA check is performed */
10690 #define RXCHAIN_PWRSAVE_PPS 10
10691 #define RXCHAIN_PWRSAVE_QUIET_TIME 10
10692 #define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK 0
/*
 * dhd_set_ap_powersave() - toggle rxchain power-save for SoftAP mode.
 *
 * Enable path: set "rxchain_pwrsave_enable", then program the pps,
 * quiet-time and stas-assoc-check parameters from the RXCHAIN_* defaults.
 * Disable path: clear "rxchain_pwrsave_enable".
 * NOTE(review): @ifidx is not referenced by the statements visible here,
 * and the enable/disable branch structure is not visible — confirm
 * against the full source.
 */
10693 int dhd_set_ap_powersave(dhd_pub_t
*dhdp
, int ifidx
, int enable
)
10695 int32 pps
= RXCHAIN_PWRSAVE_PPS
;
10696 int32 quiet_time
= RXCHAIN_PWRSAVE_QUIET_TIME
;
10697 int32 stas_assoc_check
= RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK
;
10701 ret
= dhd_iovar(dhdp
, 0, "rxchain_pwrsave_enable", (char *)&enable
, sizeof(enable
),
10703 if (ret
!= BCME_OK
) {
10704 DHD_ERROR(("Failed to enable AP power save\n"));
10706 ret
= dhd_iovar(dhdp
, 0, "rxchain_pwrsave_pps", (char *)&pps
, sizeof(pps
), NULL
, 0,
10708 if (ret
!= BCME_OK
) {
10709 DHD_ERROR(("Failed to set pps\n"));
10711 ret
= dhd_iovar(dhdp
, 0, "rxchain_pwrsave_quiet_time", (char *)&quiet_time
,
10712 sizeof(quiet_time
), NULL
, 0, TRUE
);
10713 if (ret
!= BCME_OK
) {
10714 DHD_ERROR(("Failed to set quiet time\n"));
10716 ret
= dhd_iovar(dhdp
, 0, "rxchain_pwrsave_stas_assoc_check",
10717 (char *)&stas_assoc_check
, sizeof(stas_assoc_check
), NULL
, 0, TRUE
);
10718 if (ret
!= BCME_OK
) {
10719 DHD_ERROR(("Failed to set stas assoc check\n"));
/* disable path: same iovar, @enable carries the off value */
10722 ret
= dhd_iovar(dhdp
, 0, "rxchain_pwrsave_enable", (char *)&enable
, sizeof(enable
),
10724 if (ret
!= BCME_OK
) {
10725 DHD_ERROR(("Failed to disable AP power save\n"));
10731 #endif /* SUPPORT_AP_POWERSAVE */
10736 #if defined(WLADPS) || defined(WLADPS_PRIVATE_CMD)
/*
 * dhd_enable_adps() - turn Adaptive Power Save (ADPS) on/off per band.
 *
 * Builds a bcm_iov_buf_t (version WL_ADPS_IOV_VER, id WL_ADPS_IOV_MODE)
 * wrapping a wl_adps_params_v1_t payload, then issues the "adps" iovar
 * via WLC_SET_VAR once for each band (i = 1..MAX_BANDS).
 * BCME_UNSUPPORTED from firmware is reported as "adps is not supported".
 * NOTE(review): the assignments of @on into the payload, the loop's
 * per-band field, the kfree() of iov_buf and the return value are not
 * visible in this excerpt — confirm the allocation is released on all
 * paths in the full source.
 */
10738 dhd_enable_adps(dhd_pub_t
*dhd
, uint8 on
)
10744 bcm_iov_buf_t
*iov_buf
= NULL
;
10745 wl_adps_params_v1_t
*data
= NULL
;
10746 char buf
[WL_EVENTING_MASK_LEN
+ 12]; /* Room for "event_msgs" + '\0' + bitvec */
/* total iov buffer size = header up to ->data plus one params struct */
10748 len
= OFFSETOF(bcm_iov_buf_t
, data
) + sizeof(*data
);
10749 iov_buf
= kmalloc(len
, GFP_KERNEL
);
10750 if (iov_buf
== NULL
) {
10751 DHD_ERROR(("%s - failed to allocate %d bytes for iov_buf\n", __FUNCTION__
, len
));
10756 iov_buf
->version
= WL_ADPS_IOV_VER
;
10757 iov_buf
->len
= sizeof(*data
);
10758 iov_buf
->id
= WL_ADPS_IOV_MODE
;
/* payload lives directly after the iov header */
10760 data
= (wl_adps_params_v1_t
*)iov_buf
->data
;
10761 data
->version
= ADPS_SUB_IOV_VERSION_1
;
10762 data
->length
= sizeof(*data
);
/* program ADPS once per band */
10765 for (i
= 1; i
<= MAX_BANDS
; i
++) {
10767 bcm_mkiovar("adps", (char *)iov_buf
, len
, buf
, sizeof(buf
));
10768 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, buf
, sizeof(buf
), TRUE
, 0)) < 0) {
10769 if (ret
== BCME_UNSUPPORTED
) {
10770 DHD_ERROR(("%s adps is not supported\n", __FUNCTION__
));
10775 DHD_ERROR(("%s fail to set adps %s for band %d (%d)\n",
10776 __FUNCTION__
, on
? "On" : "Off", i
, ret
));
10788 #endif /* WLADPS || WLADPS_PRIVATE_CMD */
10791 dhd_preinit_ioctls(dhd_pub_t
*dhd
)
10794 char eventmask
[WL_EVENTING_MASK_LEN
];
10795 char iovbuf
[WL_EVENTING_MASK_LEN
+ 12]; /* Room for "event_msgs" + '\0' + bitvec */
10796 uint32 buf_key_b4_m4
= 1;
10798 eventmsgs_ext_t
*eventmask_msg
= NULL
;
10799 char* iov_buf
= NULL
;
10801 uint32 wnm_cap
= 0;
10802 #if defined(CUSTOM_AMPDU_BA_WSIZE)
10803 uint32 ampdu_ba_wsize
= 0;
10805 #if defined(CUSTOM_AMPDU_MPDU)
10806 int32 ampdu_mpdu
= 0;
10808 #if defined(CUSTOM_AMPDU_RELEASE)
10809 int32 ampdu_release
= 0;
10811 #if defined(CUSTOM_AMSDU_AGGSF)
10812 int32 amsdu_aggsf
= 0;
10814 shub_control_t shub_ctl
;
10816 #if defined(BCMSDIO) || defined(BCMDBUS)
10817 #ifdef PROP_TXSTATUS
10818 int wlfc_enable
= TRUE
;
10819 #ifndef DISABLE_11N
10820 uint32 hostreorder
= 1;
10822 #endif /* DISABLE_11N */
10823 #endif /* PROP_TXSTATUS */
10824 #endif /* BCMSDIO || BCMDBUS */
10825 #ifndef PCIE_FULL_DONGLE
10826 uint32 wl_ap_isolate
;
10827 #endif /* PCIE_FULL_DONGLE */
10828 uint32 frameburst
= CUSTOM_FRAMEBURST_SET
;
10829 uint wnm_bsstrans_resp
= 0;
10830 #ifdef SUPPORT_SET_CAC
10832 #endif /* SUPPORT_SET_CAC */
10833 #ifdef DHD_ENABLE_LPC
10835 #endif /* DHD_ENABLE_LPC */
10836 uint power_mode
= PM_FAST
;
10837 #if defined(BCMSDIO)
10838 uint32 dongle_align
= DHD_SDALIGN
;
10839 uint32 glom
= CUSTOM_GLOM_SETTING
;
10840 #endif /* defined(BCMSDIO) */
10841 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
10842 uint32 credall
= 1;
10844 uint bcn_timeout
= CUSTOM_BCN_TIMEOUT
;
10845 uint scancache_enab
= TRUE
;
10846 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
10847 uint32 bcn_li_bcn
= 1;
10848 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
10849 uint retry_max
= CUSTOM_ASSOC_RETRY_MAX
;
10850 #if defined(ARP_OFFLOAD_SUPPORT)
10853 int scan_assoc_time
= DHD_SCAN_ASSOC_ACTIVE_TIME
;
10854 int scan_unassoc_time
= DHD_SCAN_UNASSOC_ACTIVE_TIME
;
10855 int scan_passive_time
= DHD_SCAN_PASSIVE_TIME
;
10856 char buf
[WLC_IOCTL_SMLEN
];
10858 uint32 listen_interval
= CUSTOM_LISTEN_INTERVAL
; /* Default Listen Interval in Beacons */
10859 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
10860 wl_el_tag_params_t
*el_tag
= NULL
;
10861 #endif /* DHD_8021X_DUMP */
10864 int roam_trigger
[2] = {CUSTOM_ROAM_TRIGGER_SETTING
, WLC_BAND_ALL
};
10865 int roam_scan_period
[2] = {10, WLC_BAND_ALL
};
10866 int roam_delta
[2] = {CUSTOM_ROAM_DELTA_SETTING
, WLC_BAND_ALL
};
10867 #ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
10868 int roam_fullscan_period
= 60;
10869 #else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
10870 int roam_fullscan_period
= 120;
10871 #endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
10872 #ifdef DISABLE_BCNLOSS_ROAM
10873 uint roam_bcnloss_off
= 1;
10874 #endif /* DISABLE_BCNLOSS_ROAM */
10876 #ifdef DISABLE_BUILTIN_ROAM
10878 #endif /* DISABLE_BUILTIN_ROAM */
10879 #endif /* ROAM_ENABLE */
10881 #if defined(SOFTAP)
10884 #if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
10885 struct ether_addr p2p_ea
;
10887 #ifdef SOFTAP_UAPSD_OFF
10888 uint32 wme_apsd
= 0;
10889 #endif /* SOFTAP_UAPSD_OFF */
10890 #if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
10891 uint32 apsta
= 1; /* Enable APSTA mode */
10892 #elif defined(SOFTAP_AND_GC)
10895 #endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
10896 #ifdef GET_CUSTOM_MAC_ENABLE
10897 struct ether_addr ea_addr
;
10899 #endif /* GET_CUSTOM_MAC_ENABLE */
10903 #endif /* DISABLE_11N */
10907 #endif /* USE_WL_TXBF */
10908 #ifdef DISABLE_TXBFR
10909 uint32 txbf_bfr_cap
= 0;
10910 #endif /* DISABLE_TXBFR */
10911 #if defined(PROP_TXSTATUS)
10912 #ifdef USE_WFA_CERT_CONF
10914 #endif /* USE_WFA_CERT_CONF */
10915 #endif /* PROP_TXSTATUS */
10916 #if defined(SUPPORT_5G_1024QAM_VHT)
10917 uint32 vht_features
= 0; /* init to 0, will be set based on each support */
10919 #ifdef DISABLE_11N_PROPRIETARY_RATES
10920 uint32 ht_features
= 0;
10921 #endif /* DISABLE_11N_PROPRIETARY_RATES */
10922 #ifdef CUSTOM_PSPRETEND_THR
10923 uint32 pspretend_thr
= CUSTOM_PSPRETEND_THR
;
10925 #ifdef CUSTOM_EVENT_PM_WAKE
10926 uint32 pm_awake_thresh
= CUSTOM_EVENT_PM_WAKE
;
10927 #endif /* CUSTOM_EVENT_PM_WAKE */
10928 uint32 rsdb_mode
= 0;
10929 #ifdef ENABLE_TEMP_THROTTLING
10930 wl_temp_control_t temp_control
;
10931 #endif /* ENABLE_TEMP_THROTTLING */
10932 #ifdef DISABLE_PRUNED_SCAN
10933 uint32 scan_features
= 0;
10934 #endif /* DISABLE_PRUNED_SCAN */
10935 #ifdef PKT_FILTER_SUPPORT
10936 dhd_pkt_filter_enable
= TRUE
;
10938 dhd
->apf_set
= FALSE
;
10940 #endif /* PKT_FILTER_SUPPORT */
10942 dhd
->tdls_enable
= FALSE
;
10943 dhd_tdls_set_mode(dhd
, false);
10944 #endif /* WLTDLS */
10945 dhd
->suspend_bcn_li_dtim
= CUSTOM_SUSPEND_BCN_LI_DTIM
;
10946 #ifdef ENABLE_MAX_DTIM_IN_SUSPEND
10947 dhd
->max_dtim_enable
= TRUE
;
10949 dhd
->max_dtim_enable
= FALSE
;
10950 #endif /* ENABLE_MAX_DTIM_IN_SUSPEND */
10951 #ifdef CUSTOM_SET_OCLOFF
10952 dhd
->ocl_off
= FALSE
;
10953 #endif /* CUSTOM_SET_OCLOFF */
10954 DHD_TRACE(("Enter %s\n", __FUNCTION__
));
10956 #ifdef DHDTCPACK_SUPPRESS
10957 dhd_tcpack_suppress_set(dhd
, dhd
->conf
->tcpack_sup_mode
);
10961 #if defined(CUSTOM_COUNTRY_CODE) && defined(CUSTOMER_HW2)
10962 /* clear AP flags */
10963 dhd
->dhd_cflags
&= ~WLAN_PLAT_AP_FLAG
;
10964 #endif /* CUSTOM_COUNTRY_CODE && CUSTOMER_HW2 */
10966 #ifdef CUSTOMER_HW4_DEBUG
10967 if (!dhd_validate_chipid(dhd
)) {
10968 DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) is mismatched\n",
10969 __FUNCTION__
, dhd_bus_chip_id(dhd
)));
10970 #ifndef SUPPORT_MULTIPLE_CHIPS
10973 #endif /* !SUPPORT_MULTIPLE_CHIPS */
10975 #endif /* CUSTOMER_HW4_DEBUG */
10976 if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_MFG_MODE
) ||
10977 (op_mode
== DHD_FLAG_MFG_MODE
)) {
10978 dhd
->op_mode
= DHD_FLAG_MFG_MODE
;
10979 #ifdef DHD_PCIE_RUNTIMEPM
10980 /* Disable RuntimePM in mfg mode */
10981 DHD_DISABLE_RUNTIME_PM(dhd
);
10982 DHD_ERROR(("%s : Disable RuntimePM in Manufactring Firmware\n", __FUNCTION__
));
10983 #endif /* DHD_PCIE_RUNTIME_PM */
10984 /* Check and adjust IOCTL response timeout for Manufactring firmware */
10985 dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT
);
10986 DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
10989 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT
);
10990 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__
));
10992 #ifdef GET_CUSTOM_MAC_ENABLE
10993 ret
= wifi_platform_get_mac_addr(dhd
->info
->adapter
, hw_ether
);
10995 memset(buf
, 0, sizeof(buf
));
10996 bcopy(hw_ether
, ea_addr
.octet
, sizeof(struct ether_addr
));
10997 bcm_mkiovar("cur_etheraddr", (void *)&ea_addr
, ETHER_ADDR_LEN
, buf
, sizeof(buf
));
10998 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, buf
, sizeof(buf
), TRUE
, 0);
11000 memset(buf
, 0, sizeof(buf
));
11001 bcm_mkiovar("hw_ether", hw_ether
, sizeof(hw_ether
), buf
, sizeof(buf
));
11002 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, buf
, sizeof(buf
), TRUE
, 0);
11005 DHD_ERROR(("%s: can't set MAC address MAC="MACDBG
", error=%d\n",
11006 __FUNCTION__
, MAC2STRDBG(hw_ether
), ret
));
11007 for (i
=0; i
<sizeof(hw_ether
)-ETHER_ADDR_LEN
; i
++) {
11008 printf("0x%02x,", hw_ether
[i
+ETHER_ADDR_LEN
]);
11017 DHD_ERROR(("%s: can't get custom MAC address, ret=%d\n", __FUNCTION__
, ret
));
11021 #endif /* GET_CUSTOM_MAC_ENABLE */
11022 /* Get the default device MAC address directly from firmware */
11023 memset(buf
, 0, sizeof(buf
));
11024 bcm_mkiovar("cur_etheraddr", 0, 0, buf
, sizeof(buf
));
11025 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, buf
, sizeof(buf
),
11027 DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__
, ret
));
11031 /* Update public MAC address after reading from Firmware */
11032 memcpy(dhd
->mac
.octet
, buf
, ETHER_ADDR_LEN
);
11034 if ((ret
= dhd_apply_default_clm(dhd
, dhd
->clm_path
)) < 0) {
11035 DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__
));
11039 /* get a capabilities from firmware */
11041 uint32 cap_buf_size
= sizeof(dhd
->fw_capabilities
);
11042 memset(dhd
->fw_capabilities
, 0, cap_buf_size
);
11043 ret
= dhd_iovar(dhd
, 0, "cap", NULL
, 0, dhd
->fw_capabilities
, (cap_buf_size
- 1),
11046 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
11047 __FUNCTION__
, ret
));
11051 memmove(&dhd
->fw_capabilities
[1], dhd
->fw_capabilities
, (cap_buf_size
- 1));
11052 dhd
->fw_capabilities
[0] = ' ';
11053 dhd
->fw_capabilities
[cap_buf_size
- 2] = ' ';
11054 dhd
->fw_capabilities
[cap_buf_size
- 1] = '\0';
11057 if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_HOSTAP_MODE
) ||
11058 (op_mode
== DHD_FLAG_HOSTAP_MODE
)) {
11059 #ifdef SET_RANDOM_MAC_SOFTAP
11061 #endif /* SET_RANDOM_MAC_SOFTAP */
11062 dhd
->op_mode
= DHD_FLAG_HOSTAP_MODE
;
11063 #if defined(ARP_OFFLOAD_SUPPORT)
11066 #ifdef PKT_FILTER_SUPPORT
11067 dhd_pkt_filter_enable
= FALSE
;
11069 #ifdef SET_RANDOM_MAC_SOFTAP
11070 SRANDOM32((uint
)jiffies
);
11071 rand_mac
= RANDOM32();
11072 iovbuf
[0] = (unsigned char)(vendor_oui
>> 16) | 0x02; /* local admin bit */
11073 iovbuf
[1] = (unsigned char)(vendor_oui
>> 8);
11074 iovbuf
[2] = (unsigned char)vendor_oui
;
11075 iovbuf
[3] = (unsigned char)(rand_mac
& 0x0F) | 0xF0;
11076 iovbuf
[4] = (unsigned char)(rand_mac
>> 8);
11077 iovbuf
[5] = (unsigned char)(rand_mac
>> 16);
11079 ret
= dhd_iovar(dhd
, 0, "cur_etheraddr", (char *)&iovbuf
, ETHER_ADDR_LEN
, NULL
, 0,
11082 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__
, ret
));
11084 memcpy(dhd
->mac
.octet
, iovbuf
, ETHER_ADDR_LEN
);
11085 #endif /* SET_RANDOM_MAC_SOFTAP */
11086 #ifdef USE_DYNAMIC_F2_BLKSIZE
11087 dhdsdio_func_blocksize(dhd
, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY
);
11088 #endif /* USE_DYNAMIC_F2_BLKSIZE */
11089 #ifdef SUPPORT_AP_POWERSAVE
11090 dhd_set_ap_powersave(dhd
, 0, TRUE
);
11091 #endif /* SUPPORT_AP_POWERSAVE */
11092 #ifdef SOFTAP_UAPSD_OFF
11093 ret
= dhd_iovar(dhd
, 0, "wme_apsd", (char *)&wme_apsd
, sizeof(wme_apsd
), NULL
, 0,
11096 DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n",
11097 __FUNCTION__
, ret
));
11099 #endif /* SOFTAP_UAPSD_OFF */
11100 #if defined(CUSTOM_COUNTRY_CODE) && defined(CUSTOMER_HW2)
11101 /* set AP flag for specific country code of SOFTAP */
11102 dhd
->dhd_cflags
|= WLAN_PLAT_AP_FLAG
| WLAN_PLAT_NODFS_FLAG
;
11103 #endif /* CUSTOM_COUNTRY_CODE && CUSTOMER_HW2 */
11104 } else if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_MFG_MODE
) ||
11105 (op_mode
== DHD_FLAG_MFG_MODE
)) {
11106 #if defined(ARP_OFFLOAD_SUPPORT)
11108 #endif /* ARP_OFFLOAD_SUPPORT */
11109 #ifdef PKT_FILTER_SUPPORT
11110 dhd_pkt_filter_enable
= FALSE
;
11111 #endif /* PKT_FILTER_SUPPORT */
11112 dhd
->op_mode
= DHD_FLAG_MFG_MODE
;
11113 #ifdef USE_DYNAMIC_F2_BLKSIZE
11114 dhdsdio_func_blocksize(dhd
, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY
);
11115 #endif /* USE_DYNAMIC_F2_BLKSIZE */
11116 if (FW_SUPPORTED(dhd
, rsdb
)) {
11118 ret
= dhd_iovar(dhd
, 0, "rsdb_mode", (char *)&rsdb_mode
, sizeof(rsdb_mode
),
11121 DHD_ERROR(("%s Disable rsdb_mode is failed ret= %d\n",
11122 __FUNCTION__
, ret
));
11126 uint32 concurrent_mode
= 0;
11127 if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_P2P_MODE
) ||
11128 (op_mode
== DHD_FLAG_P2P_MODE
)) {
11129 #if defined(ARP_OFFLOAD_SUPPORT)
11132 #ifdef PKT_FILTER_SUPPORT
11133 dhd_pkt_filter_enable
= FALSE
;
11135 dhd
->op_mode
= DHD_FLAG_P2P_MODE
;
11136 } else if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_IBSS_MODE
) ||
11137 (op_mode
== DHD_FLAG_IBSS_MODE
)) {
11138 dhd
->op_mode
= DHD_FLAG_IBSS_MODE
;
11140 dhd
->op_mode
= DHD_FLAG_STA_MODE
;
11141 #if !defined(AP) && defined(WLP2P)
11142 if (dhd
->op_mode
!= DHD_FLAG_IBSS_MODE
&&
11143 (concurrent_mode
= dhd_get_concurrent_capabilites(dhd
))) {
11144 #if defined(ARP_OFFLOAD_SUPPORT)
11147 dhd
->op_mode
|= concurrent_mode
;
11150 /* Check if we are enabling p2p */
11151 if (dhd
->op_mode
& DHD_FLAG_P2P_MODE
) {
11152 ret
= dhd_iovar(dhd
, 0, "apsta", (char *)&apsta
, sizeof(apsta
), NULL
, 0,
11155 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__
, ret
));
11157 #if defined(SOFTAP_AND_GC)
11158 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_AP
,
11159 (char *)&ap_mode
, sizeof(ap_mode
), TRUE
, 0)) < 0) {
11160 DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__
, ret
));
11163 memcpy(&p2p_ea
, &dhd
->mac
, ETHER_ADDR_LEN
);
11164 ETHER_SET_LOCALADDR(&p2p_ea
);
11165 ret
= dhd_iovar(dhd
, 0, "p2p_da_override", (char *)&p2p_ea
, sizeof(p2p_ea
),
11168 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__
, ret
));
11170 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
11173 (void)concurrent_mode
;
11177 if (dhd
->conf
->sd_f2_blocksize
)
11178 dhdsdio_func_blocksize(dhd
, 2, dhd
->conf
->sd_f2_blocksize
);
11181 #if defined(RSDB_MODE_FROM_FILE)
11182 (void)dhd_rsdb_mode_from_file(dhd
);
11185 #ifdef DISABLE_PRUNED_SCAN
11186 if (FW_SUPPORTED(dhd
, rsdb
)) {
11187 ret
= dhd_iovar(dhd
, 0, "scan_features", (char *)&scan_features
,
11188 sizeof(scan_features
), iovbuf
, sizeof(iovbuf
), FALSE
);
11190 DHD_ERROR(("%s get scan_features is failed ret=%d\n",
11191 __FUNCTION__
, ret
));
11193 memcpy(&scan_features
, iovbuf
, 4);
11194 scan_features
&= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM
;
11195 ret
= dhd_iovar(dhd
, 0, "scan_features", (char *)&scan_features
,
11196 sizeof(scan_features
), NULL
, 0, TRUE
);
11198 DHD_ERROR(("%s set scan_features is failed ret=%d\n",
11199 __FUNCTION__
, ret
));
11203 #endif /* DISABLE_PRUNED_SCAN */
11205 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG
"\n",
11206 dhd
->op_mode
, MAC2STRDBG(dhd
->mac
.octet
)));
11207 #ifdef CUSTOMER_HW2
11208 #if defined(DHD_BLOB_EXISTENCE_CHECK)
11209 if (!dhd
->pub
.is_blob
)
11210 #endif /* DHD_BLOB_EXISTENCE_CHECK */
11212 /* get a ccode and revision for the country code */
11213 #if defined(CUSTOM_COUNTRY_CODE)
11214 get_customized_country_code(dhd
->info
->adapter
, dhd
->dhd_cspec
.country_abbrev
,
11215 &dhd
->dhd_cspec
, dhd
->dhd_cflags
);
11217 get_customized_country_code(dhd
->info
->adapter
, dhd
->dhd_cspec
.country_abbrev
,
11219 #endif /* CUSTOM_COUNTRY_CODE */
11221 #endif /* CUSTOMER_HW2 */
11223 #if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
11224 if (dhd
->op_mode
== DHD_FLAG_HOSTAP_MODE
)
11225 dhd
->info
->rxthread_enabled
= FALSE
;
11227 dhd
->info
->rxthread_enabled
= TRUE
;
11229 /* Set Country code */
11230 if (dhd
->dhd_cspec
.ccode
[0] != 0) {
11231 ret
= dhd_iovar(dhd
, 0, "country", (char *)&dhd
->dhd_cspec
, sizeof(wl_country_t
),
11234 DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__
));
11238 /* Set Listen Interval */
11239 ret
= dhd_iovar(dhd
, 0, "assoc_listen", (char *)&listen_interval
, sizeof(listen_interval
),
11242 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__
, ret
));
11244 #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
11245 #ifdef USE_WFA_CERT_CONF
11246 if (sec_get_param_wfa_cert(dhd
, SET_PARAM_ROAMOFF
, &roamvar
) == BCME_OK
) {
11247 DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__
, roamvar
));
11249 #endif /* USE_WFA_CERT_CONF */
11250 /* Disable built-in roaming to allowed ext supplicant to take care of roaming */
11251 dhd_iovar(dhd
, 0, "roam_off", (char *)&roamvar
, sizeof(roamvar
), NULL
, 0, TRUE
);
11252 #endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
11253 #if defined(ROAM_ENABLE)
11254 #ifdef DISABLE_BCNLOSS_ROAM
11255 dhd_iovar(dhd
, 0, "roam_bcnloss_off", (char *)&roam_bcnloss_off
, sizeof(roam_bcnloss_off
),
11257 #endif /* DISABLE_BCNLOSS_ROAM */
11258 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_ROAM_TRIGGER
, roam_trigger
,
11259 sizeof(roam_trigger
), TRUE
, 0)) < 0)
11260 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__
, ret
));
11261 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_ROAM_SCAN_PERIOD
, roam_scan_period
,
11262 sizeof(roam_scan_period
), TRUE
, 0)) < 0)
11263 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__
, ret
));
11264 if ((dhd_wl_ioctl_cmd(dhd
, WLC_SET_ROAM_DELTA
, roam_delta
,
11265 sizeof(roam_delta
), TRUE
, 0)) < 0)
11266 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__
, ret
));
11267 ret
= dhd_iovar(dhd
, 0, "fullroamperiod", (char *)&roam_fullscan_period
,
11268 sizeof(roam_fullscan_period
), NULL
, 0, TRUE
);
11270 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__
, ret
));
11271 #endif /* ROAM_ENABLE */
11273 #ifdef CUSTOM_EVENT_PM_WAKE
11274 ret
= dhd_iovar(dhd
, 0, "const_awake_thresh", (char *)&pm_awake_thresh
,
11275 sizeof(pm_awake_thresh
), NULL
, 0, TRUE
);
11277 DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__
, ret
));
11279 #endif /* CUSTOM_EVENT_PM_WAKE */
11281 #ifdef ENABLE_TDLS_AUTO_MODE
11282 /* by default TDLS on and auto mode on */
11283 _dhd_tdls_enable(dhd
, true, true, NULL
);
11285 /* by default TDLS on and auto mode off */
11286 _dhd_tdls_enable(dhd
, true, false, NULL
);
11287 #endif /* ENABLE_TDLS_AUTO_MODE */
11288 #endif /* WLTDLS */
11290 #ifdef DHD_ENABLE_LPC
11292 ret
= dhd_iovar(dhd
, 0, "lpc", (char *)&lpc
, sizeof(lpc
), NULL
, 0, TRUE
);
11294 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__
, ret
));
11296 if (ret
== BCME_NOTDOWN
) {
11298 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_DOWN
,
11299 (char *)&wl_down
, sizeof(wl_down
), TRUE
, 0);
11300 DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__
, ret
, lpc
));
11302 ret
= dhd_iovar(dhd
, 0, "lpc", (char *)&lpc
, sizeof(lpc
), NULL
, 0, TRUE
);
11303 DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__
, ret
));
11306 #endif /* DHD_ENABLE_LPC */
11309 #ifdef WLADPS_SEAK_AP_WAR
11310 dhd
->disabled_adps
= FALSE
;
11311 #endif /* WLADPS_SEAK_AP_WAR */
11312 if (dhd
->op_mode
& DHD_FLAG_STA_MODE
) {
11313 #ifdef ADPS_MODE_FROM_FILE
11314 dhd_adps_mode_from_file(dhd
);
11316 if ((ret
= dhd_enable_adps(dhd
, ADPS_ENABLE
)) != BCME_OK
) {
11317 DHD_ERROR(("%s dhd_enable_adps failed %d\n",
11318 __FUNCTION__
, ret
));
11320 #endif /* ADPS_MODE_FROM_FILE */
11322 #endif /* WLADPS */
11324 /* Set PowerSave mode */
11325 (void) dhd_wl_ioctl_cmd(dhd
, WLC_SET_PM
, (char *)&power_mode
, sizeof(power_mode
), TRUE
, 0);
11327 #if defined(BCMSDIO)
11328 /* Match Host and Dongle rx alignment */
11329 dhd_iovar(dhd
, 0, "bus:txglomalign", (char *)&dongle_align
, sizeof(dongle_align
),
11332 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
11333 /* enable credall to reduce the chance of no bus credit happened. */
11334 dhd_iovar(dhd
, 0, "bus:credall", (char *)&credall
, sizeof(credall
), NULL
, 0, TRUE
);
11337 #ifdef USE_WFA_CERT_CONF
11338 if (sec_get_param_wfa_cert(dhd
, SET_PARAM_BUS_TXGLOM_MODE
, &glom
) == BCME_OK
) {
11339 DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__
, glom
));
11341 #endif /* USE_WFA_CERT_CONF */
11342 if (glom
!= DEFAULT_GLOM_VALUE
) {
11343 DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__
, glom
));
11344 dhd_iovar(dhd
, 0, "bus:txglom", (char *)&glom
, sizeof(glom
), NULL
, 0, TRUE
);
11346 #endif /* defined(BCMSDIO) */
11348 /* Setup timeout if Beacons are lost and roam is off to report link down */
11349 dhd_iovar(dhd
, 0, "bcn_timeout", (char *)&bcn_timeout
, sizeof(bcn_timeout
), NULL
, 0, TRUE
);
11351 /* Setup assoc_retry_max count to reconnect target AP in dongle */
11352 dhd_iovar(dhd
, 0, "assoc_retry_max", (char *)&retry_max
, sizeof(retry_max
), NULL
, 0, TRUE
);
11354 #if defined(AP) && !defined(WLP2P)
11355 dhd_iovar(dhd
, 0, "apsta", (char *)&apsta
, sizeof(apsta
), NULL
, 0, TRUE
);
11357 #endif /* defined(AP) && !defined(WLP2P) */
11359 #ifdef MIMO_ANT_SETTING
11360 dhd_sel_ant_from_file(dhd
);
11361 #endif /* MIMO_ANT_SETTING */
11363 #if defined(SOFTAP)
11364 if (ap_fw_loaded
== TRUE
) {
11365 dhd_wl_ioctl_cmd(dhd
, WLC_SET_DTIMPRD
, (char *)&dtim
, sizeof(dtim
), TRUE
, 0);
11369 #if defined(KEEP_ALIVE)
11371 /* Set Keep Alive : be sure to use FW with -keepalive */
11374 #if defined(SOFTAP)
11375 if (ap_fw_loaded
== FALSE
)
11377 if (!(dhd
->op_mode
&
11378 (DHD_FLAG_HOSTAP_MODE
| DHD_FLAG_MFG_MODE
))) {
11379 if ((res
= dhd_keep_alive_onoff(dhd
)) < 0)
11380 DHD_ERROR(("%s set keeplive failed %d\n",
11381 __FUNCTION__
, res
));
11384 #endif /* defined(KEEP_ALIVE) */
11387 ret
= dhd_iovar(dhd
, 0, "txbf", (char *)&txbf
, sizeof(txbf
), NULL
, 0, TRUE
);
11389 DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__
, ret
));
11391 #endif /* USE_WL_TXBF */
11393 ret
= dhd_iovar(dhd
, 0, "scancache", (char *)&scancache_enab
, sizeof(scancache_enab
), NULL
,
11396 DHD_ERROR(("%s Set scancache failed %d\n", __FUNCTION__
, ret
));
11399 #ifdef DISABLE_TXBFR
11400 ret
= dhd_iovar(dhd
, 0, "txbf_bfr_cap", (char *)&txbf_bfr_cap
, sizeof(txbf_bfr_cap
), NULL
,
11403 DHD_ERROR(("%s Clear txbf_bfr_cap failed %d\n", __FUNCTION__
, ret
));
11405 #endif /* DISABLE_TXBFR */
11407 #ifdef USE_WFA_CERT_CONF
11408 #ifdef USE_WL_FRAMEBURST
11409 if (sec_get_param_wfa_cert(dhd
, SET_PARAM_FRAMEBURST
, &frameburst
) == BCME_OK
) {
11410 DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__
, frameburst
));
11412 #endif /* USE_WL_FRAMEBURST */
11413 #ifdef DISABLE_FRAMEBURST_VSDB
11414 g_frameburst
= frameburst
;
11415 #endif /* DISABLE_FRAMEBURST_VSDB */
11416 #endif /* USE_WFA_CERT_CONF */
11417 #ifdef DISABLE_WL_FRAMEBURST_SOFTAP
11418 /* Disable Framebursting for SofAP */
11419 if (dhd
->op_mode
& DHD_FLAG_HOSTAP_MODE
) {
11422 #endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
11423 /* Set frameburst to value */
11424 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_FAKEFRAG
, (char *)&frameburst
,
11425 sizeof(frameburst
), TRUE
, 0)) < 0) {
11426 DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__
, ret
));
11429 iov_buf
= (char*)kmalloc(WLC_IOCTL_SMLEN
, GFP_KERNEL
);
11430 if (iov_buf
== NULL
) {
11431 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN
));
11437 #if defined(CUSTOM_AMPDU_BA_WSIZE)
11438 /* Set ampdu ba wsize to 64 or 16 */
11439 #ifdef CUSTOM_AMPDU_BA_WSIZE
11440 ampdu_ba_wsize
= CUSTOM_AMPDU_BA_WSIZE
;
11442 if (ampdu_ba_wsize
!= 0) {
11443 ret
= dhd_iovar(dhd
, 0, "ampdu_ba_wsize", (char *)&du_ba_wsize
,
11444 sizeof(ampdu_ba_wsize
), NULL
, 0, TRUE
);
11446 DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
11447 __FUNCTION__
, ampdu_ba_wsize
, ret
));
11452 #ifdef ENABLE_TEMP_THROTTLING
11453 if (dhd
->op_mode
& DHD_FLAG_STA_MODE
) {
11454 memset(&temp_control
, 0, sizeof(temp_control
));
11455 temp_control
.enable
= 1;
11456 temp_control
.control_bit
= TEMP_THROTTLE_CONTROL_BIT
;
11457 ret
= dhd_iovar(dhd
, 0, "temp_throttle_control", (char *)&temp_control
,
11458 sizeof(temp_control
), NULL
, 0, TRUE
);
11460 DHD_ERROR(("%s Set temp_throttle_control to %d failed \n",
11461 __FUNCTION__
, ret
));
11464 #endif /* ENABLE_TEMP_THROTTLING */
11466 #if defined(CUSTOM_AMPDU_MPDU)
11467 ampdu_mpdu
= CUSTOM_AMPDU_MPDU
;
11468 if (ampdu_mpdu
!= 0 && (ampdu_mpdu
<= ampdu_ba_wsize
)) {
11469 ret
= dhd_iovar(dhd
, 0, "ampdu_mpdu", (char *)&du_mpdu
, sizeof(ampdu_mpdu
),
11472 DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
11473 __FUNCTION__
, CUSTOM_AMPDU_MPDU
, ret
));
11476 #endif /* CUSTOM_AMPDU_MPDU */
11478 #if defined(CUSTOM_AMPDU_RELEASE)
11479 ampdu_release
= CUSTOM_AMPDU_RELEASE
;
11480 if (ampdu_release
!= 0 && (ampdu_release
<= ampdu_ba_wsize
)) {
11481 ret
= dhd_iovar(dhd
, 0, "ampdu_release", (char *)&du_release
,
11482 sizeof(ampdu_release
), NULL
, 0, TRUE
);
11484 DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
11485 __FUNCTION__
, CUSTOM_AMPDU_RELEASE
, ret
));
11488 #endif /* CUSTOM_AMPDU_RELEASE */
11490 #if defined(CUSTOM_AMSDU_AGGSF)
11491 amsdu_aggsf
= CUSTOM_AMSDU_AGGSF
;
11492 if (amsdu_aggsf
!= 0) {
11493 ret
= dhd_iovar(dhd
, 0, "amsdu_aggsf", (char *)&amsdu_aggsf
, sizeof(amsdu_aggsf
),
11496 DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
11497 __FUNCTION__
, CUSTOM_AMSDU_AGGSF
, ret
));
11500 #endif /* CUSTOM_AMSDU_AGGSF */
11502 #if defined(SUPPORT_5G_1024QAM_VHT)
11503 #ifdef SUPPORT_5G_1024QAM_VHT
11504 if (dhd_get_chipid(dhd
) == BCM4361_CHIP_ID
) {
11505 vht_features
|= 0x6; /* 5G 1024 QAM support */
11507 #endif /* SUPPORT_5G_1024QAM_VHT */
11508 if (vht_features
) {
11509 ret
= dhd_iovar(dhd
, 0, "vht_features", (char *)&vht_features
, sizeof(vht_features
),
11512 DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__
, ret
));
11514 if (ret
== BCME_NOTDOWN
) {
11516 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_DOWN
,
11517 (char *)&wl_down
, sizeof(wl_down
), TRUE
, 0);
11518 DHD_ERROR(("%s vht_features fail WL_DOWN : %d,"
11519 " vht_features = 0x%x\n",
11520 __FUNCTION__
, ret
, vht_features
));
11522 ret
= dhd_iovar(dhd
, 0, "vht_features", (char *)&vht_features
,
11523 sizeof(vht_features
), NULL
, 0, TRUE
);
11524 DHD_ERROR(("%s vht_features set. ret --> %d\n", __FUNCTION__
, ret
));
11529 #ifdef DISABLE_11N_PROPRIETARY_RATES
11530 ret
= dhd_iovar(dhd
, 0, "ht_features", (char *)&ht_features
, sizeof(ht_features
), NULL
, 0,
11533 DHD_ERROR(("%s ht_features set failed %d\n", __FUNCTION__
, ret
));
11535 #endif /* DISABLE_11N_PROPRIETARY_RATES */
11536 #ifdef CUSTOM_PSPRETEND_THR
11537 /* Turn off MPC in AP mode */
11538 ret
= dhd_iovar(dhd
, 0, "pspretend_threshold", (char *)&pspretend_thr
,
11539 sizeof(pspretend_thr
), NULL
, 0, TRUE
);
11541 DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
11542 __FUNCTION__
, ret
));
11546 ret
= dhd_iovar(dhd
, 0, "buf_key_b4_m4", (char *)&buf_key_b4_m4
, sizeof(buf_key_b4_m4
),
11549 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__
, ret
));
11551 #ifdef SUPPORT_SET_CAC
11552 bcm_mkiovar("cac", (char *)&cac
, sizeof(cac
), iovbuf
, sizeof(iovbuf
));
11553 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0) {
11554 DHD_ERROR(("%s Failed to set cac to %d, %d\n", __FUNCTION__
, cac
, ret
));
11556 #endif /* SUPPORT_SET_CAC */
11558 /* Get the required details from dongle during preinit ioctl */
11559 dhd_ulp_preinit(dhd
);
11560 #endif /* DHD_ULP */
11562 /* Read event_msgs mask */
11563 ret
= dhd_iovar(dhd
, 0, "event_msgs", eventmask
, WL_EVENTING_MASK_LEN
, iovbuf
,
11564 sizeof(iovbuf
), FALSE
);
11566 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__
, ret
));
11569 bcopy(iovbuf
, eventmask
, WL_EVENTING_MASK_LEN
);
11571 /* Setup event_msgs */
11572 setbit(eventmask
, WLC_E_SET_SSID
);
11573 setbit(eventmask
, WLC_E_PRUNE
);
11574 setbit(eventmask
, WLC_E_AUTH
);
11575 setbit(eventmask
, WLC_E_AUTH_IND
);
11576 setbit(eventmask
, WLC_E_ASSOC
);
11577 setbit(eventmask
, WLC_E_REASSOC
);
11578 setbit(eventmask
, WLC_E_REASSOC_IND
);
11579 if (!(dhd
->op_mode
& DHD_FLAG_IBSS_MODE
))
11580 setbit(eventmask
, WLC_E_DEAUTH
);
11581 setbit(eventmask
, WLC_E_DEAUTH_IND
);
11582 setbit(eventmask
, WLC_E_DISASSOC_IND
);
11583 setbit(eventmask
, WLC_E_DISASSOC
);
11584 setbit(eventmask
, WLC_E_JOIN
);
11585 setbit(eventmask
, WLC_E_BSSID
);
11586 setbit(eventmask
, WLC_E_START
);
11587 setbit(eventmask
, WLC_E_ASSOC_IND
);
11588 setbit(eventmask
, WLC_E_PSK_SUP
);
11589 setbit(eventmask
, WLC_E_LINK
);
11590 setbit(eventmask
, WLC_E_MIC_ERROR
);
11591 setbit(eventmask
, WLC_E_ASSOC_REQ_IE
);
11592 setbit(eventmask
, WLC_E_ASSOC_RESP_IE
);
11593 #ifdef LIMIT_BORROW
11594 setbit(eventmask
, WLC_E_ALLOW_CREDIT_BORROW
);
11596 #ifndef WL_CFG80211
11597 setbit(eventmask
, WLC_E_PMKID_CACHE
);
11598 setbit(eventmask
, WLC_E_TXFAIL
);
11600 setbit(eventmask
, WLC_E_JOIN_START
);
11601 // setbit(eventmask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event
11603 setbit(eventmask
, WLC_E_SCAN_CONFIRM_IND
);
11605 #ifdef WLMEDIA_HTSF
11606 setbit(eventmask
, WLC_E_HTSFSYNC
);
11607 #endif /* WLMEDIA_HTSF */
11609 setbit(eventmask
, WLC_E_PFN_NET_FOUND
);
11610 setbit(eventmask
, WLC_E_PFN_BEST_BATCHING
);
11611 setbit(eventmask
, WLC_E_PFN_BSSID_NET_FOUND
);
11612 setbit(eventmask
, WLC_E_PFN_BSSID_NET_LOST
);
11613 #endif /* PNO_SUPPORT */
11614 /* enable dongle roaming event */
11615 setbit(eventmask
, WLC_E_ROAM
);
11617 setbit(eventmask
, WLC_E_TDLS_PEER_EVENT
);
11618 #endif /* WLTDLS */
11620 setbit(eventmask
, WLC_E_ESCAN_RESULT
);
11621 #endif /* WL_ESCAN */
11623 setbit(eventmask
, WLC_E_PROXD
);
11624 #endif /* RTT_SUPPORT */
11626 setbit(eventmask
, WLC_E_ESCAN_RESULT
);
11627 setbit(eventmask
, WLC_E_AP_STARTED
);
11628 setbit(eventmask
, WLC_E_ACTION_FRAME_RX
);
11629 if (dhd
->op_mode
& DHD_FLAG_P2P_MODE
) {
11630 setbit(eventmask
, WLC_E_P2P_DISC_LISTEN_COMPLETE
);
11632 #endif /* WL_CFG80211 */
11634 #if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE)
11635 if (dhd_logtrace_from_file(dhd
)) {
11636 setbit(eventmask
, WLC_E_TRACE
);
11638 clrbit(eventmask
, WLC_E_TRACE
);
11640 #elif defined(SHOW_LOGTRACE)
11641 setbit(eventmask
, WLC_E_TRACE
);
11643 clrbit(eventmask
, WLC_E_TRACE
);
11644 #endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */
11646 setbit(eventmask
, WLC_E_CSA_COMPLETE_IND
);
11648 setbit(eventmask
, WLC_E_PSTA_PRIMARY_INTF_IND
);
11650 #ifdef CUSTOM_EVENT_PM_WAKE
11651 setbit(eventmask
, WLC_E_EXCESS_PM_WAKE_EVENT
);
11652 #endif /* CUSTOM_EVENT_PM_WAKE */
11653 #ifdef DHD_LOSSLESS_ROAMING
11654 setbit(eventmask
, WLC_E_ROAM_PREP
);
11656 #if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
11657 dhd_update_flow_prio_map(dhd
, DHD_FLOW_PRIO_LLR_MAP
);
11658 #endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
11660 #if defined(BCMPCIE) && defined(EAPOL_PKT_PRIO)
11661 dhd_update_flow_prio_map(dhd
, DHD_FLOW_PRIO_LLR_MAP
);
11662 #endif /* defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) */
11664 #ifdef SUSPEND_EVENT
11665 bcopy(eventmask
, dhd
->conf
->resume_eventmask
, WL_EVENTING_MASK_LEN
);
11667 /* Write updated Event mask */
11668 ret
= dhd_iovar(dhd
, 0, "event_msgs", eventmask
, WL_EVENTING_MASK_LEN
, NULL
, 0, TRUE
);
11670 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__
, ret
));
11674 /* make up event mask ext message iovar for event larger than 128 */
11675 msglen
= ROUNDUP(WLC_E_LAST
, NBBY
)/NBBY
+ EVENTMSGS_EXT_STRUCT_SIZE
;
11676 eventmask_msg
= (eventmsgs_ext_t
*)kmalloc(msglen
, GFP_KERNEL
);
11677 if (eventmask_msg
== NULL
) {
11678 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen
));
11682 bzero(eventmask_msg
, msglen
);
11683 eventmask_msg
->ver
= EVENTMSGS_VER
;
11684 eventmask_msg
->len
= ROUNDUP(WLC_E_LAST
, NBBY
)/NBBY
;
11686 /* Read event_msgs_ext mask */
11687 ret2
= dhd_iovar(dhd
, 0, "event_msgs_ext", (char *)eventmask_msg
, msglen
, iov_buf
,
11688 WLC_IOCTL_SMLEN
, FALSE
);
11690 if (ret2
== 0) { /* event_msgs_ext must be supported */
11691 bcopy(iov_buf
, eventmask_msg
, msglen
);
11692 #ifdef RSSI_MONITOR_SUPPORT
11693 setbit(eventmask_msg
->mask
, WLC_E_RSSI_LQM
);
11694 #endif /* RSSI_MONITOR_SUPPORT */
11695 #ifdef GSCAN_SUPPORT
11696 setbit(eventmask_msg
->mask
, WLC_E_PFN_GSCAN_FULL_RESULT
);
11697 setbit(eventmask_msg
->mask
, WLC_E_PFN_SCAN_COMPLETE
);
11698 setbit(eventmask_msg
->mask
, WLC_E_PFN_SSID_EXT
);
11699 setbit(eventmask_msg
->mask
, WLC_E_ROAM_EXP_EVENT
);
11700 #endif /* GSCAN_SUPPORT */
11701 setbit(eventmask_msg
->mask
, WLC_E_RSSI_LQM
);
11702 #ifdef BT_WIFI_HANDOVER
11703 setbit(eventmask_msg
->mask
, WLC_E_BT_WIFI_HANDOVER_REQ
);
11704 #endif /* BT_WIFI_HANDOVER */
11706 setbit(eventmask_msg
->mask
, WLC_E_ROAM_PREP
);
11707 #endif /* DBG_PKT_MON */
11709 setbit(eventmask_msg
->mask
, WLC_E_ULP
);
11711 #ifdef ENABLE_TEMP_THROTTLING
11712 setbit(eventmask_msg
->mask
, WLC_E_TEMP_THROTTLE
);
11713 #endif /* ENABLE_TEMP_THROTTLING */
11715 /* Write updated Event mask */
11716 eventmask_msg
->ver
= EVENTMSGS_VER
;
11717 eventmask_msg
->command
= EVENTMSGS_SET_MASK
;
11718 eventmask_msg
->len
= ROUNDUP(WLC_E_LAST
, NBBY
)/NBBY
;
11719 ret
= dhd_iovar(dhd
, 0, "event_msgs_ext", (char *)eventmask_msg
, msglen
, NULL
, 0,
11722 DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__
, ret
));
11725 } else if (ret2
== BCME_UNSUPPORTED
|| ret2
== BCME_VERSION
) {
11726 /* Skip for BCME_UNSUPPORTED or BCME_VERSION */
11727 DHD_ERROR(("%s event_msgs_ext not support or version mismatch %d\n",
11728 __FUNCTION__
, ret2
));
11730 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__
, ret2
));
11735 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
11736 /* Enabling event log trace for EAP events */
11737 el_tag
= (wl_el_tag_params_t
*)kmalloc(sizeof(wl_el_tag_params_t
), GFP_KERNEL
);
11738 if (el_tag
== NULL
) {
11739 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n",
11740 (int)sizeof(wl_el_tag_params_t
)));
11744 el_tag
->tag
= EVENT_LOG_TAG_4WAYHANDSHAKE
;
11746 el_tag
->flags
= EVENT_LOG_TAG_FLAG_LOG
;
11747 bcm_mkiovar("event_log_tag_control", (char *)el_tag
,
11748 sizeof(*el_tag
), iovbuf
, sizeof(iovbuf
));
11749 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
11750 #endif /* DHD_8021X_DUMP */
11752 dhd_wl_ioctl_cmd(dhd
, WLC_SET_SCAN_CHANNEL_TIME
, (char *)&scan_assoc_time
,
11753 sizeof(scan_assoc_time
), TRUE
, 0);
11754 dhd_wl_ioctl_cmd(dhd
, WLC_SET_SCAN_UNASSOC_TIME
, (char *)&scan_unassoc_time
,
11755 sizeof(scan_unassoc_time
), TRUE
, 0);
11756 dhd_wl_ioctl_cmd(dhd
, WLC_SET_SCAN_PASSIVE_TIME
, (char *)&scan_passive_time
,
11757 sizeof(scan_passive_time
), TRUE
, 0);
11759 #ifdef ARP_OFFLOAD_SUPPORT
11760 /* Set and enable ARP offload feature for STA only */
11761 #if defined(SOFTAP)
11762 if (arpoe
&& !ap_fw_loaded
)
11767 dhd_arp_offload_enable(dhd
, TRUE
);
11768 dhd_arp_offload_set(dhd
, dhd_arp_mode
);
11770 dhd_arp_offload_enable(dhd
, FALSE
);
11771 dhd_arp_offload_set(dhd
, 0);
11773 dhd_arp_enable
= arpoe
;
11774 #endif /* ARP_OFFLOAD_SUPPORT */
11776 #ifdef PKT_FILTER_SUPPORT
11777 /* Setup default defintions for pktfilter , enable in suspend */
11778 if (dhd_master_mode
) {
11779 dhd
->pktfilter_count
= 6;
11780 dhd
->pktfilter
[DHD_BROADCAST_FILTER_NUM
] = NULL
;
11781 if (!FW_SUPPORTED(dhd
, pf6
)) {
11782 dhd
->pktfilter
[DHD_MULTICAST4_FILTER_NUM
] = NULL
;
11783 dhd
->pktfilter
[DHD_MULTICAST6_FILTER_NUM
] = NULL
;
11785 /* Immediately pkt filter TYPE 6 Discard IPv4/IPv6 Multicast Packet */
11786 dhd
->pktfilter
[DHD_MULTICAST4_FILTER_NUM
] = DISCARD_IPV4_MCAST
;
11787 dhd
->pktfilter
[DHD_MULTICAST6_FILTER_NUM
] = DISCARD_IPV6_MCAST
;
11789 /* apply APP pktfilter */
11790 dhd
->pktfilter
[DHD_ARP_FILTER_NUM
] = "105 0 0 12 0xFFFF 0x0806";
11792 /* Setup filter to allow only unicast */
11793 dhd
->pktfilter
[DHD_UNICAST_FILTER_NUM
] = "100 0 0 0 0x01 0x00";
11795 /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
11796 dhd
->pktfilter
[DHD_MDNS_FILTER_NUM
] = NULL
;
11798 dhd
->pktfilter
[DHD_BROADCAST_ARP_FILTER_NUM
] = NULL
;
11799 if (FW_SUPPORTED(dhd
, pf6
)) {
11800 /* Immediately pkt filter TYPE 6 Dicard Broadcast IP packet */
11801 dhd
->pktfilter
[DHD_IP4BCAST_DROP_FILTER_NUM
] =
11802 "107 1 6 IP4_H:16 0xf0 !0xe0 IP4_H:19 0xff 0xff";
11803 dhd
->pktfilter_count
= 8;
11806 #ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
11807 dhd
->pktfilter_count
= 4;
11808 /* Setup filter to block broadcast and NAT Keepalive packets */
11809 /* discard all broadcast packets */
11810 dhd
->pktfilter
[DHD_UNICAST_FILTER_NUM
] = "100 0 0 0 0xffffff 0xffffff";
11811 /* discard NAT Keepalive packets */
11812 dhd
->pktfilter
[DHD_BROADCAST_FILTER_NUM
] = "102 0 0 36 0xffffffff 0x11940009";
11813 /* discard NAT Keepalive packets */
11814 dhd
->pktfilter
[DHD_MULTICAST4_FILTER_NUM
] = "104 0 0 38 0xffffffff 0x11940009";
11815 dhd
->pktfilter
[DHD_MULTICAST6_FILTER_NUM
] = NULL
;
11816 #endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
11818 dhd_conf_discard_pkt_filter(dhd
);
11819 dhd_conf_add_pkt_filter(dhd
);
11821 #if defined(SOFTAP)
11822 if (ap_fw_loaded
) {
11823 dhd_enable_packet_filter(0, dhd
);
11825 #endif /* defined(SOFTAP) */
11826 dhd_set_packet_filter(dhd
);
11827 #endif /* PKT_FILTER_SUPPORT */
11829 ret
= dhd_iovar(dhd
, 0, "nmode", (char *)&nmode
, sizeof(nmode
), NULL
, 0, TRUE
);
11831 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__
, ret
));
11832 #endif /* DISABLE_11N */
11834 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
11835 dhd_iovar(dhd
, 0, "bcn_li_bcn", (char *)&bcn_li_bcn
, sizeof(bcn_li_bcn
), NULL
, 0, TRUE
);
11836 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
11837 /* query for 'clmver' to get clm version info from firmware */
11838 memset(buf
, 0, sizeof(buf
));
11839 ret
= dhd_iovar(dhd
, 0, "clmver", NULL
, 0, buf
, sizeof(buf
), FALSE
);
11841 DHD_ERROR(("%s failed %d\n", __FUNCTION__
, ret
));
11843 char *clmver_temp_buf
= NULL
;
11845 if ((clmver_temp_buf
= bcmstrstr(buf
, "Data:")) == NULL
) {
11846 DHD_ERROR(("Couldn't find \"Data:\"\n"));
11848 ptr
= (clmver_temp_buf
+ strlen("Data:"));
11849 if ((clmver_temp_buf
= bcmstrtok(&ptr
, "\n", 0)) == NULL
) {
11850 DHD_ERROR(("Couldn't find New line character\n"));
11852 memset(clm_version
, 0, CLM_VER_STR_LEN
);
11853 strncpy(clm_version
, clmver_temp_buf
,
11854 MIN(strlen(clmver_temp_buf
), CLM_VER_STR_LEN
- 1));
11859 /* query for 'ver' to get version info from firmware */
11860 memset(buf
, 0, sizeof(buf
));
11862 ret
= dhd_iovar(dhd
, 0, "ver", NULL
, 0, (char *)&buf
, sizeof(buf
), FALSE
);
11864 DHD_ERROR(("%s failed %d\n", __FUNCTION__
, ret
));
11866 bcmstrtok(&ptr
, "\n", 0);
11867 strncpy(fw_version
, buf
, FW_VER_STR_LEN
);
11868 fw_version
[FW_VER_STR_LEN
-1] = '\0';
11869 dhd_set_version_info(dhd
, buf
);
11870 #ifdef WRITE_WLANINFO
11871 sec_save_wlinfo(buf
, EPI_VERSION_STR
, dhd
->info
->nv_path
, clm_version
);
11872 #endif /* WRITE_WLANINFO */
11874 #ifdef GEN_SOFTAP_INFO_FILE
11875 sec_save_softap_info();
11876 #endif /* GEN_SOFTAP_INFO_FILE */
11878 #if defined(BCMSDIO)
11879 dhd_txglom_enable(dhd
, dhd
->conf
->bus_rxglom
);
11880 #endif /* defined(BCMSDIO) */
11882 #if defined(BCMSDIO) || defined(BCMDBUS)
11883 #ifdef PROP_TXSTATUS
11884 if (disable_proptx
||
11885 #ifdef PROP_TXSTATUS_VSDB
11886 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
11887 (dhd
->op_mode
!= DHD_FLAG_HOSTAP_MODE
&&
11888 dhd
->op_mode
!= DHD_FLAG_IBSS_MODE
) ||
11889 #endif /* PROP_TXSTATUS_VSDB */
11891 wlfc_enable
= FALSE
;
11893 ret
= dhd_conf_get_disable_proptx(dhd
);
11895 disable_proptx
= 0;
11896 wlfc_enable
= TRUE
;
11897 } else if (ret
>= 1) {
11898 disable_proptx
= 1;
11899 wlfc_enable
= FALSE
;
11900 /* terence 20161229: we should set ampdu_hostreorder=0 when disable_proptx=1 */
11904 #if defined(PROP_TXSTATUS)
11905 #ifdef USE_WFA_CERT_CONF
11906 if (sec_get_param_wfa_cert(dhd
, SET_PARAM_PROPTX
, &proptx
) == BCME_OK
) {
11907 DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__
, proptx
));
11908 wlfc_enable
= proptx
;
11910 #endif /* USE_WFA_CERT_CONF */
11911 #endif /* PROP_TXSTATUS */
11913 #ifndef DISABLE_11N
11914 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_DOWN
, (char *)&wl_down
, sizeof(wl_down
), TRUE
, 0);
11915 ret2
= dhd_iovar(dhd
, 0, "ampdu_hostreorder", (char *)&hostreorder
, sizeof(hostreorder
),
11918 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__
, ret2
));
11919 if (ret2
!= BCME_UNSUPPORTED
)
11922 if (ret
== BCME_NOTDOWN
) {
11924 ret2
= dhd_wl_ioctl_cmd(dhd
, WLC_DOWN
, (char *)&wl_down
,
11925 sizeof(wl_down
), TRUE
, 0);
11926 DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
11927 __FUNCTION__
, ret2
, hostreorder
));
11929 ret2
= dhd_iovar(dhd
, 0, "ampdu_hostreorder", (char *)&hostreorder
,
11930 sizeof(hostreorder
), NULL
, 0, TRUE
);
11931 DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__
, ret2
));
11932 if (ret2
!= BCME_UNSUPPORTED
)
11935 if (ret2
!= BCME_OK
)
11938 #endif /* DISABLE_11N */
11942 dhd_wlfc_init(dhd
);
11943 /* terence 20161229: enable ampdu_hostreorder if tlv enabled */
11944 dhd_conf_set_intiovar(dhd
, WLC_SET_VAR
, "ampdu_hostreorder", 1, 0, TRUE
);
11946 #ifndef DISABLE_11N
11947 else if (hostreorder
)
11948 dhd_wlfc_hostreorder_init(dhd
);
11949 #endif /* DISABLE_11N */
11951 /* terence 20161229: disable ampdu_hostreorder if PROP_TXSTATUS not defined */
11952 printf("%s: not define PROP_TXSTATUS\n", __FUNCTION__
);
11953 dhd_conf_set_intiovar(dhd
, WLC_SET_VAR
, "ampdu_hostreorder", 0, 0, TRUE
);
11954 #endif /* PROP_TXSTATUS */
11955 #endif /* BCMSDIO || BCMDBUS */
11956 #ifndef PCIE_FULL_DONGLE
11957 /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
11958 if (FW_SUPPORTED(dhd
, ap
)) {
11959 wl_ap_isolate
= AP_ISOLATE_SENDUP_ALL
;
11960 ret
= dhd_iovar(dhd
, 0, "ap_isolate", (char *)&wl_ap_isolate
, sizeof(wl_ap_isolate
),
11963 DHD_ERROR(("%s failed %d\n", __FUNCTION__
, ret
));
11965 #endif /* PCIE_FULL_DONGLE */
11967 if (!dhd
->pno_state
) {
11972 if (!dhd
->rtt_state
) {
11973 ret
= dhd_rtt_init(dhd
);
11975 DHD_ERROR(("%s failed to initialize RTT\n", __FUNCTION__
));
11980 dhd_interworking_enable(dhd
);
11983 #ifdef SUPPORT_SENSORHUB
11984 DHD_ERROR(("%s: SensorHub enabled %d\n",
11985 __FUNCTION__
, dhd
->info
->shub_enable
));
11986 ret2
= dhd_iovar(dhd
, 0, "shub", NULL
, 0,
11987 (char *)&shub_ctl
, sizeof(shub_ctl
), FALSE
);
11989 DHD_ERROR(("%s failed to get shub hub enable information %d\n",
11990 __FUNCTION__
, ret2
));
11991 dhd
->info
->shub_enable
= 0;
11993 dhd
->info
->shub_enable
= shub_ctl
.enable
;
11994 DHD_ERROR(("%s: checking sensorhub enable %d\n",
11995 __FUNCTION__
, dhd
->info
->shub_enable
));
11998 DHD_ERROR(("%s: SensorHub diabled %d\n",
11999 __FUNCTION__
, dhd
->info
->shub_enable
));
12000 dhd
->info
->shub_enable
= FALSE
;
12001 shub_ctl
.enable
= FALSE
;
12002 ret2
= dhd_iovar(dhd
, 0, "shub", (char *)&shub_ctl
, sizeof(shub_ctl
),
12005 DHD_ERROR(("%s failed to set ShubHub disable\n",
12008 #endif /* SUPPORT_SENSORHUB */
12011 #ifdef NDO_CONFIG_SUPPORT
12012 dhd
->ndo_enable
= FALSE
;
12013 dhd
->ndo_host_ip_overflow
= FALSE
;
12014 dhd
->ndo_max_host_ip
= NDO_MAX_HOST_IP_ENTRIES
;
12015 #endif /* NDO_CONFIG_SUPPORT */
12017 /* ND offload version supported */
12018 dhd
->ndo_version
= dhd_ndo_get_version(dhd
);
12019 if (dhd
->ndo_version
> 0) {
12020 DHD_INFO(("%s: ndo version %d\n", __FUNCTION__
, dhd
->ndo_version
));
12022 #ifdef NDO_CONFIG_SUPPORT
12023 /* enable Unsolicited NA filter */
12024 ret
= dhd_ndo_unsolicited_na_filter_enable(dhd
, 1);
12026 DHD_ERROR(("%s failed to enable Unsolicited NA filter\n", __FUNCTION__
));
12028 #endif /* NDO_CONFIG_SUPPORT */
12031 /* check dongle supports wbtext or not */
12032 dhd
->wbtext_support
= FALSE
;
12033 if (dhd_wl_ioctl_get_intiovar(dhd
, "wnm_bsstrans_resp", &wnm_bsstrans_resp
,
12034 WLC_GET_VAR
, FALSE
, 0) != BCME_OK
) {
12035 DHD_ERROR(("failed to get wnm_bsstrans_resp\n"));
12037 if (wnm_bsstrans_resp
== WL_BSSTRANS_POLICY_PRODUCT_WBTEXT
) {
12038 dhd
->wbtext_support
= TRUE
;
12041 /* driver can turn off wbtext feature through makefile */
12042 if (dhd
->wbtext_support
) {
12043 if (dhd_wl_ioctl_set_intiovar(dhd
, "wnm_bsstrans_resp",
12044 WL_BSSTRANS_POLICY_ROAM_ALWAYS
,
12045 WLC_SET_VAR
, FALSE
, 0) != BCME_OK
) {
12046 DHD_ERROR(("failed to disable WBTEXT\n"));
12049 #endif /* !WBTEXT */
12051 /* WNM capabilities */
12054 | WL_WNM_BSSTRANS
| WL_WNM_NOTIF
12057 | WL_WNM_BSSTRANS
| WL_WNM_MAXIDLE
12060 if (dhd_iovar(dhd
, 0, "wnm", (char *)&wnm_cap
, sizeof(wnm_cap
), NULL
, 0, TRUE
) < 0) {
12061 DHD_ERROR(("failed to set WNM capabilities\n"));
12064 dhd_conf_postinit_ioctls(dhd
);
12068 kfree(eventmask_msg
);
12071 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
12074 #endif /* DHD_8021X_DUMP */
/*
 * dhd_iovar() - marshal a named Broadcom iovar ("name" + packed parameters)
 * with bcm_mkiovar() and issue it to the dongle through dhd_wl_ioctl(),
 * as WLC_SET_VAR on the set path or WLC_GET_VAR on the get path.
 *
 * NOTE(review): this chunk is a garbled extraction — the return-type line,
 * local declarations, most error/cleanup lines (goto/exit, kfree) and the
 * closing brace were dropped (the embedded original line numbers jump, e.g.
 * 12082-12087). Comments below describe only what the surviving lines show.
 */
12080 dhd_iovar(dhd_pub_t
*pub
, int ifidx
, char *name
, char *param_buf
, uint param_len
, char *res_buf
,
12081 uint res_len
, int set
)
/* Reject oversized request/response buffers up front. */
12088 if (res_len
> WLC_IOCTL_MAXLEN
|| param_len
> WLC_IOCTL_MAXLEN
)
12089 return BCME_BADARG
;
/* Total marshalled size: name, its NUL terminator, then the payload. */
12091 input_len
= strlen(name
) + 1 + param_len
;
12092 if (input_len
> WLC_IOCTL_MAXLEN
)
12093 return BCME_BADARG
;
/* SET path (presumably guarded by an "if (set)" on a dropped line):
 * a SET must not carry a response buffer. */
12097 if (res_buf
|| res_len
!= 0) {
12098 DHD_ERROR(("%s: SET wrong arguemnet\n", __FUNCTION__
));
/* Scratch buffer for the marshalled iovar; the kzalloc failure branch
 * (original ~12103) was dropped by the extraction. */
12102 buf
= kzalloc(input_len
, GFP_KERNEL
);
12104 DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__
));
12108 ret
= bcm_mkiovar(name
, param_buf
, param_len
, buf
, input_len
);
/* Dispatch the SET to the dongle. */
12114 ioc
.cmd
= WLC_SET_VAR
;
12116 ioc
.len
= input_len
;
12119 ret
= dhd_wl_ioctl(pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
/* GET path: the caller must supply a non-empty response buffer. */
12121 if (!res_buf
|| !res_len
) {
12122 DHD_ERROR(("%s: GET failed. resp_buf NULL or length 0.\n", __FUNCTION__
));
/* If the response buffer is smaller than the marshalled iovar, work in a
 * temporary buffer and copy back only res_len bytes on success. */
12127 if (res_len
< input_len
) {
12128 DHD_INFO(("%s: res_len(%d) < input_len(%d)\n", __FUNCTION__
,
12129 res_len
, input_len
));
12130 buf
= kzalloc(input_len
, GFP_KERNEL
);
12132 DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__
));
12136 ret
= bcm_mkiovar(name
, param_buf
, param_len
, buf
, input_len
);
12142 ioc
.cmd
= WLC_GET_VAR
;
12144 ioc
.len
= input_len
;
12147 ret
= dhd_wl_ioctl(pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
/* Copy only the caller's res_len bytes back out of the temp buffer. */
12149 if (ret
== BCME_OK
) {
12150 memcpy(res_buf
, buf
, res_len
);
/* Response buffer is large enough: marshal directly into res_buf
 * (presumably the else branch of the res_len check above). */
12153 memset(res_buf
, 0, res_len
);
12154 ret
= bcm_mkiovar(name
, param_buf
, param_len
, res_buf
, res_len
);
12160 ioc
.cmd
= WLC_GET_VAR
;
12165 ret
= dhd_wl_ioctl(pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
/*
 * dhd_getiovar() - GET-only convenience wrapper: marshal "name" + cmd_buf
 * into the caller-provided response buffer (*resptr) and query the dongle
 * with WLC_GET_VAR via dhd_wl_ioctl().
 *
 * NOTE(review): garbled extraction — the return type, brace and error-check
 * lines, and the tail of the function (original lines ~12199 onward) are
 * missing from this view.
 */
12174 dhd_getiovar(dhd_pub_t
*pub
, int ifidx
, char *name
, char *cmd_buf
,
12175 uint cmd_len
, char **resptr
, uint resp_len
)
12177 int len
= resp_len
;
/* The iovar is marshalled directly into the caller's buffer. */
12179 char *buf
= *resptr
;
/* Bound the response size to the ioctl maximum. */
12181 if (resp_len
> WLC_IOCTL_MAXLEN
)
12182 return BCME_BADARG
;
12184 memset(buf
, 0, resp_len
);
/* bcm_mkiovar() returns 0 when the buffer is too small — the check
 * guarding this BCME_BUFTOOSHORT return sits on a dropped line. */
12186 ret
= bcm_mkiovar(name
, cmd_buf
, cmd_len
, buf
, len
);
12188 return BCME_BUFTOOSHORT
;
/* Prepare and fire the GET ioctl. */
12191 memset(&ioc
, 0, sizeof(ioc
));
12193 ioc
.cmd
= WLC_GET_VAR
;
12198 ret
= dhd_wl_ioctl(pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
/*
 * dhd_change_mtu() - validate and apply a new MTU on the net_device backing
 * interface 'ifidx'.
 * Returns BCME_NOTDOWN while the interface is running, BCME_BADARG for an
 * out-of-range MTU. (The success return and closing brace were dropped by
 * this extraction.)
 */
12204 int dhd_change_mtu(dhd_pub_t
*dhdp
, int new_mtu
, int ifidx
)
12206 struct dhd_info
*dhd
= dhdp
->info
;
12207 struct net_device
*dev
= NULL
;
12209 ASSERT(dhd
&& dhd
->iflist
[ifidx
]);
12210 dev
= dhd
->iflist
[ifidx
]->net
;
/* The MTU may only be changed while the interface is down. */
12213 if (netif_running(dev
)) {
12214 DHD_ERROR(("%s: Must be down to change its MTU\n", dev
->name
));
12215 return BCME_NOTDOWN
;
/* Permitted range; 1752 presumably leaves headroom above the Ethernet
 * 1500 for dongle encapsulation — TODO confirm rationale. */
12218 #define DHD_MIN_MTU 1500
12219 #define DHD_MAX_MTU 1752
12221 if ((new_mtu
< DHD_MIN_MTU
) || (new_mtu
> DHD_MAX_MTU
)) {
12222 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__
, new_mtu
));
12223 return BCME_BADARG
;
/* Apply the validated MTU to the net_device. */
12226 dev
->mtu
= new_mtu
;
12230 #ifdef ARP_OFFLOAD_SUPPORT
12231 /* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
/*
 * aoe_update_host_ipv4_table() - rebuild the dongle's ARP-offload (AOE)
 * host_ip table around one IPv4 address: snapshot the current table, clear
 * it in the dongle, then re-add the cached entries — inserting 'ipa' when
 * add=TRUE, or leaving it out (removal) when add=FALSE.
 *
 * NOTE(review): garbled extraction — the return type, local declarations,
 * the actual insertion statement (ipv4_buf[i] = ipa, original ~12257) and
 * several brace/error lines are missing from this view.
 */
12233 aoe_update_host_ipv4_table(dhd_pub_t
*dhd_pub
, u32 ipa
, bool add
, int idx
)
12235 u32 ipv4_buf
[MAX_IPV4_ENTRIES
]; /* temp save for AOE host_ip table */
12239 bzero(ipv4_buf
, sizeof(ipv4_buf
));
12241 /* display what we've got */
12242 ret
= dhd_arp_get_arp_hostip_table(dhd_pub
, ipv4_buf
, sizeof(ipv4_buf
), idx
);
12243 DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__
));
12245 dhd_print_buf(ipv4_buf
, 32, 4); /* max 8 IPs 4b each */
12247 /* now we saved hoste_ip table, clr it in the dongle AOE */
12248 dhd_aoe_hostip_clr(dhd_pub
, idx
);
12251 DHD_ERROR(("%s failed\n", __FUNCTION__
));
/* Walk the snapshot: when adding, drop ipa into the first free (zero)
 * slot; when removing, the slot matching ipa is skipped on re-add. */
12255 for (i
= 0; i
< MAX_IPV4_ENTRIES
; i
++) {
12256 if (add
&& (ipv4_buf
[i
] == 0)) {
12258 add
= FALSE
; /* added ipa to local table */
12259 DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
12261 } else if (ipv4_buf
[i
] == ipa
) {
12263 DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
12264 __FUNCTION__
, ipa
, i
));
/* Push every non-empty cached entry back into the dongle. */
12267 if (ipv4_buf
[i
] != 0) {
12268 /* add back host_ip entries from our local cache */
12269 dhd_arp_offload_add_ip(dhd_pub
, ipv4_buf
[i
], idx
);
12270 DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
12271 __FUNCTION__
, ipv4_buf
[i
], i
));
12275 /* see the resulting hostip table */
12276 dhd_arp_get_arp_hostip_table(dhd_pub
, ipv4_buf
, sizeof(ipv4_buf
), idx
);
12277 DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__
));
12278 dhd_print_buf(ipv4_buf
, 32, 4); /* max 8 IPs 4b each */
12283 * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
12284 * whenever there is an event related to an IP address.
12285 * ptr : kernel provided pointer to IP address that has changed
/*
 * dhd_inetaddr_notifier_call() - kernel inetaddr notifier callback: keeps
 * the dongle's ARP-offload host-IP table in sync when an IPv4 address is
 * added to or removed from one of our net_devices. Always returns
 * NOTIFY_DONE so the notifier chain continues.
 *
 * NOTE(review): garbled extraction — the 'ptr' parameter line, the
 * switch(event) statement with its NETDEV_UP/NETDEV_DOWN case labels
 * (presumably; confirm against the full source) and many braces were
 * dropped. Comments describe only the surviving lines.
 */
12287 static int dhd_inetaddr_notifier_call(struct notifier_block
*this,
12288 unsigned long event
,
12291 struct in_ifaddr
*ifa
= (struct in_ifaddr
*)ptr
;
12294 dhd_pub_t
*dhd_pub
;
/* Nothing to do when ARP offload is globally disabled. */
12297 if (!dhd_arp_enable
)
12298 return NOTIFY_DONE
;
12299 if (!ifa
|| !(ifa
->ifa_dev
->dev
))
12300 return NOTIFY_DONE
;
/* Ignore notifications for net_devices this driver does not own. */
12302 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
12303 /* Filter notifications meant for non Broadcom devices */
12304 if ((ifa
->ifa_dev
->dev
->netdev_ops
!= &dhd_ops_pri
) &&
12305 (ifa
->ifa_dev
->dev
->netdev_ops
!= &dhd_ops_virt
)) {
12306 #if defined(WL_ENABLE_P2P_IF)
12307 if (!wl_cfgp2p_is_ifops(ifa
->ifa_dev
->dev
->netdev_ops
))
12308 #endif /* WL_ENABLE_P2P_IF */
12309 return NOTIFY_DONE
;
12311 #endif /* LINUX_VERSION_CODE */
12313 dhd
= DHD_DEV_INFO(ifa
->ifa_dev
->dev
);
12315 return NOTIFY_DONE
;
12317 dhd_pub
= &dhd
->pub
;
/* With ARP-offload v1, map the notifying net_device back to our
 * interface index by scanning the iflist. */
12319 if (dhd_pub
->arp_version
== 1) {
12322 for (idx
= 0; idx
< DHD_MAX_IFS
; idx
++) {
12323 if (dhd
->iflist
[idx
] && dhd
->iflist
[idx
]->net
== ifa
->ifa_dev
->dev
)
12326 if (idx
< DHD_MAX_IFS
)
12327 DHD_TRACE(("ifidx : %p %s %d\n", dhd
->iflist
[idx
]->net
,
12328 dhd
->iflist
[idx
]->name
, dhd
->iflist
[idx
]->idx
));
12330 DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa
->ifa_label
));
/* Address-up arm: if the bus is not in DATA state, cache the address
 * as pending (applied later); otherwise push it into the AOE table. */
12337 DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
12338 __FUNCTION__
, ifa
->ifa_label
, ifa
->ifa_address
));
12340 if (dhd
->pub
.busstate
!= DHD_BUS_DATA
) {
12341 DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__
));
12342 if (dhd
->pend_ipaddr
) {
12343 DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
12344 __FUNCTION__
, dhd
->pend_ipaddr
));
12346 dhd
->pend_ipaddr
= ifa
->ifa_address
;
12350 #ifdef AOE_IP_ALIAS_SUPPORT
12351 DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
12353 aoe_update_host_ipv4_table(dhd_pub
, ifa
->ifa_address
, TRUE
, idx
);
12354 #endif /* AOE_IP_ALIAS_SUPPORT */
/* Address-down arm: drop the pending address and remove this IP from
 * the AOE table; for the primary STA interface the whole host-IP and
 * ARP caches appear to be cleared instead (else branch partly lost). */
12358 DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
12359 __FUNCTION__
, ifa
->ifa_label
, ifa
->ifa_address
));
12360 dhd
->pend_ipaddr
= 0;
12361 #ifdef AOE_IP_ALIAS_SUPPORT
12362 DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
12364 if ((dhd_pub
->op_mode
& DHD_FLAG_HOSTAP_MODE
) ||
12365 (ifa
->ifa_dev
->dev
!= dhd_linux_get_primary_netdev(dhd_pub
))) {
12366 aoe_update_host_ipv4_table(dhd_pub
, ifa
->ifa_address
, FALSE
, idx
);
12368 #endif /* AOE_IP_ALIAS_SUPPORT */
12370 dhd_aoe_hostip_clr(&dhd
->pub
, idx
);
12371 dhd_aoe_arp_clr(&dhd
->pub
, idx
);
/* Default arm: log and ignore any other inetaddr event. */
12376 DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
12377 __func__
, ifa
->ifa_label
, event
));
12380 return NOTIFY_DONE
;
12382 #endif /* ARP_OFFLOAD_SUPPORT */
12384 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
12385 /* Neighbor Discovery Offload: deferred handler */
/*
 * dhd_inet6_work_handler() - deferred-work handler for IPv6 address events
 * (DHD_WQ_WORK_IPV6_NDO): adds or removes host IPv6 addresses in the
 * dongle's Neighbor Discovery Offload (NDO) engine according to
 * ndo_work->event. Runs in work-queue context, so the dongle iovars it
 * issues may block safely.
 *
 * NOTE(review): garbled extraction — the return type, case labels of the
 * switch, several null checks and the kfree of ndo_work were dropped.
 * Comments describe only what the surviving lines show.
 */
12387 dhd_inet6_work_handler(void *dhd_info
, void *event_data
, u8 event
)
12389 struct ipv6_work_info_t
*ndo_work
= (struct ipv6_work_info_t
*)event_data
;
12390 dhd_info_t
*dhd
= (dhd_info_t
*)dhd_info
;
/* Guard clauses: invalid dhd_info, wrong work-queue event, or missing
 * ndo_work all bail out (the ifs/returns are partly on dropped lines). */
12395 DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__
));
12400 if (event
!= DHD_WQ_WORK_IPV6_NDO
) {
12401 DHD_ERROR(("%s: unexpected event\n", __FUNCTION__
));
12406 DHD_ERROR(("%s: ipv6 work info is not initialized\n", __FUNCTION__
));
/* Dispatch on the originating inet6addr notifier event. */
12410 switch (ndo_work
->event
) {
/* Add arm: legacy NDO needs an explicit enable first. */
12412 #ifndef NDO_CONFIG_SUPPORT
12413 DHD_TRACE(("%s: Enable NDO \n ", __FUNCTION__
));
12414 ret
= dhd_ndo_enable(dhdp
, TRUE
);
12416 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__
, ret
));
12418 #endif /* !NDO_CONFIG_SUPPORT */
12419 DHD_TRACE(("%s: Add a host ip for NDO\n", __FUNCTION__
));
/* Newer firmware takes a typed add; older takes the plain form. */
12420 if (dhdp
->ndo_version
> 0) {
12421 /* inet6 addr notifier called only for unicast address */
12422 ret
= dhd_ndo_add_ip_with_type(dhdp
, &ndo_work
->ipv6_addr
[0],
12423 WL_ND_IPV6_ADDR_TYPE_UNICAST
, ndo_work
->if_idx
);
12425 ret
= dhd_ndo_add_ip(dhdp
, &ndo_work
->ipv6_addr
[0],
12429 DHD_ERROR(("%s: Adding a host ip for NDO failed %d\n",
12430 __FUNCTION__
, ret
));
/* Remove arm: by-address removal on new firmware, whole-table clear
 * on old firmware. */
12434 if (dhdp
->ndo_version
> 0) {
12435 DHD_TRACE(("%s: Remove a host ip for NDO\n", __FUNCTION__
));
12436 ret
= dhd_ndo_remove_ip_by_addr(dhdp
,
12437 &ndo_work
->ipv6_addr
[0], ndo_work
->if_idx
);
12439 DHD_TRACE(("%s: Clear host ip table for NDO \n", __FUNCTION__
));
12440 ret
= dhd_ndo_remove_ip(dhdp
, ndo_work
->if_idx
);
12443 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
12444 __FUNCTION__
, ret
));
/* With NDO_CONFIG_SUPPORT, a removal may free room for addresses
 * that previously overflowed the host-IP table — resync them. */
12447 #ifdef NDO_CONFIG_SUPPORT
12448 if (dhdp
->ndo_host_ip_overflow
) {
12449 ret
= dhd_dev_ndo_update_inet6addr(
12450 dhd_idx2net(dhdp
, ndo_work
->if_idx
));
12451 if ((ret
< 0) && (ret
!= BCME_NORESOURCE
)) {
12452 DHD_ERROR(("%s: Updating host ip for NDO failed %d\n",
12453 __FUNCTION__
, ret
));
12457 #else /* !NDO_CONFIG_SUPPORT */
12458 DHD_TRACE(("%s: Disable NDO\n ", __FUNCTION__
));
12459 ret
= dhd_ndo_enable(dhdp
, FALSE
);
12461 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__
, ret
));
12464 #endif /* NDO_CONFIG_SUPPORT */
/* Default arm: unknown event. */
12468 DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__
));
12472 /* free ndo_work. alloced while scheduling the work */
12481 * Neighbor Discovery Offload: Called when an interface
12482 * is assigned with ipv6 address.
12483 * Handles only primary interface
/*
 * dhd_inet6addr_notifier_call() - kernel inet6addr notifier callback.
 * Filters for the primary Broadcom interface, checks the firmware 'ndoe'
 * (ND offload) capability, packages the event plus IPv6 address into a
 * kzalloc'd ipv6_work_info_t and defers it to dhd_inet6_work_handler() on
 * the driver work queue (the handler may block; the notifier must not —
 * hence GFP_ATOMIC here). Always returns NOTIFY_DONE.
 *
 * NOTE(review): garbled extraction — local declarations (dhd, dhdp, idx)
 * and several brace/NULL-check lines were dropped from this view.
 */
12485 int dhd_inet6addr_notifier_call(struct notifier_block
*this, unsigned long event
, void *ptr
)
12489 struct inet6_ifaddr
*inet6_ifa
= ptr
;
12490 struct ipv6_work_info_t
*ndo_info
;
12493 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
12494 /* Filter notifications meant for non Broadcom devices */
12495 if (inet6_ifa
->idev
->dev
->netdev_ops
!= &dhd_ops_pri
) {
12496 return NOTIFY_DONE
;
12498 #endif /* LINUX_VERSION_CODE */
12500 dhd
= DHD_DEV_INFO(inet6_ifa
->idev
->dev
);
12502 return NOTIFY_DONE
;
12506 /* Supports only primary interface */
12507 idx
= dhd_net2idx(dhd
, inet6_ifa
->idev
->dev
);
12509 return NOTIFY_DONE
;
12512 /* FW capability */
12513 if (!FW_SUPPORTED(dhdp
, ndoe
)) {
12514 return NOTIFY_DONE
;
/* Atomic allocation: notifier may run in a context that cannot sleep.
 * The work item is freed by the deferred handler. */
12517 ndo_info
= (struct ipv6_work_info_t
*)kzalloc(sizeof(struct ipv6_work_info_t
), GFP_ATOMIC
);
12519 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__
));
12520 return NOTIFY_DONE
;
12523 /* fill up ndo_info */
12524 ndo_info
->event
= event
;
12525 ndo_info
->if_idx
= idx
;
12526 memcpy(ndo_info
->ipv6_addr
, &inet6_ifa
->addr
, IPV6_ADDR_LEN
);
12528 /* defer the work to thread as it may block kernel */
12529 dhd_deferred_schedule_work(dhd
->dhd_deferred_wq
, (void *)ndo_info
, DHD_WQ_WORK_IPV6_NDO
,
12530 dhd_inet6_work_handler
, DHD_WQ_WORK_PRIORITY_LOW
);
12531 return NOTIFY_DONE
;
12533 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
12536 dhd_register_if(dhd_pub_t
*dhdp
, int ifidx
, bool need_rtnl_lock
)
12538 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
12540 struct net_device
*net
= NULL
;
12542 uint8 temp_addr
[ETHER_ADDR_LEN
] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
12544 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__
, ifidx
));
12546 if (dhd
== NULL
|| dhd
->iflist
[ifidx
] == NULL
) {
12547 DHD_ERROR(("%s: Invalid Interface\n", __FUNCTION__
));
12551 ASSERT(dhd
&& dhd
->iflist
[ifidx
]);
12552 ifp
= dhd
->iflist
[ifidx
];
12554 ASSERT(net
&& (ifp
->idx
== ifidx
));
12556 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
12557 ASSERT(!net
->open
);
12558 net
->get_stats
= dhd_get_stats
;
12559 net
->do_ioctl
= dhd_ioctl_entry
;
12560 net
->hard_start_xmit
= dhd_start_xmit
;
12561 net
->set_mac_address
= dhd_set_mac_address
;
12562 net
->set_multicast_list
= dhd_set_multicast_list
;
12563 net
->open
= net
->stop
= NULL
;
12565 ASSERT(!net
->netdev_ops
);
12566 net
->netdev_ops
= &dhd_ops_virt
;
12567 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
12569 /* Ok, link into the network layer... */
12572 * device functions for the primary interface only
12574 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
12575 net
->open
= dhd_open
;
12576 net
->stop
= dhd_stop
;
12578 net
->netdev_ops
= &dhd_ops_pri
;
12579 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
12580 if (!ETHER_ISNULLADDR(dhd
->pub
.mac
.octet
))
12581 memcpy(temp_addr
, dhd
->pub
.mac
.octet
, ETHER_ADDR_LEN
);
12584 * We have to use the primary MAC for virtual interfaces
12586 memcpy(temp_addr
, ifp
->mac_addr
, ETHER_ADDR_LEN
);
12588 * Android sets the locally administered bit to indicate that this is a
12589 * portable hotspot. This will not work in simultaneous AP/STA mode,
12590 * nor with P2P. Need to set the Donlge's MAC address, and then use that.
12592 if (!memcmp(temp_addr
, dhd
->iflist
[0]->mac_addr
,
12594 DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
12595 __func__
, net
->name
));
12596 temp_addr
[0] |= 0x02;
12600 net
->hard_header_len
= ETH_HLEN
+ dhd
->pub
.hdrlen
;
12601 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
12602 net
->ethtool_ops
= &dhd_ethtool_ops
;
12603 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
12605 #if defined(WL_WIRELESS_EXT)
12606 #if WIRELESS_EXT < 19
12607 net
->get_wireless_stats
= dhd_get_wireless_stats
;
12608 #endif /* WIRELESS_EXT < 19 */
12609 #if WIRELESS_EXT > 12
12610 net
->wireless_handlers
= &wl_iw_handler_def
;
12611 #endif /* WIRELESS_EXT > 12 */
12612 #endif /* defined(WL_WIRELESS_EXT) */
12614 dhd
->pub
.rxsz
= DBUS_RX_BUFFER_SIZE_DHD(net
);
12617 if (ifidx
>= 2 && dhdp
->conf
->fw_type
== FW_TYPE_MESH
) {
12618 temp_addr
[4] ^= 0x80;
12619 temp_addr
[4] += ifidx
;
12620 temp_addr
[5] += ifidx
;
12623 memcpy(net
->dev_addr
, temp_addr
, ETHER_ADDR_LEN
);
12626 printf("%s\n", dhd_version
);
12627 #ifdef WL_EXT_IAPSTA
12629 wl_ext_iapsta_attach_netdev(net
, ifidx
, ifp
->bssidx
);
12632 if (_dhd_set_mac_address(dhd
, ifidx
, net
->dev_addr
) == 0)
12633 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__
));
12635 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__
));
12638 if (need_rtnl_lock
)
12639 err
= register_netdev(net
);
12641 err
= register_netdevice(net
);
12644 DHD_ERROR(("couldn't register the net device [%s], err %d\n", net
->name
, err
));
12647 #ifdef WL_EXT_IAPSTA
12649 wl_ext_iapsta_attach_netdev(net
, ifidx
, ifp
->bssidx
);
12650 wl_ext_iapsta_attach_name(net
, ifidx
);
12655 printf("Register interface [%s] MAC: "MACDBG
"\n\n", net
->name
,
12656 #if defined(CUSTOMER_HW4_DEBUG)
12657 MAC2STRDBG(dhd
->pub
.mac
.octet
));
12659 MAC2STRDBG(net
->dev_addr
));
12660 #endif /* CUSTOMER_HW4_DEBUG */
12662 #if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
12663 // wl_iw_iscan_set_scan_broadcast_prep(net, 1);
12666 #if (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
12667 KERNEL_VERSION(2, 6, 27))) || defined(BCMDBUS))
12669 #if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD)
12670 up(&dhd_registration_sem
);
12671 #endif /* BCMLXSDMMC */
12672 if (!dhd_download_fw_on_driverload
) {
12674 wl_terminate_event_handler(net
);
12675 #endif /* WL_CFG80211 */
12676 #if defined(DHD_LB_RXP)
12677 __skb_queue_purge(&dhd
->rx_pend_queue
);
12678 #endif /* DHD_LB_RXP */
12680 #if defined(DHD_LB_TXP)
12681 skb_queue_purge(&dhd
->tx_pend_queue
);
12682 #endif /* DHD_LB_TXP */
12684 #ifdef SHOW_LOGTRACE
12685 /* Release the skbs from queue for WLC_E_TRACE event */
12686 dhd_event_logtrace_flush_queue(dhdp
);
12687 #endif /* SHOW_LOGTRACE */
12689 #ifdef DHDTCPACK_SUPPRESS
12690 dhd_tcpack_suppress_set(dhdp
, TCPACK_SUP_OFF
);
12691 #endif /* DHDTCPACK_SUPPRESS */
12692 dhd_net_bus_devreset(net
, TRUE
);
12694 dhd_net_bus_suspend(net
);
12695 #endif /* BCMLXSDMMC */
12696 wifi_platform_set_power(dhdp
->info
->adapter
, FALSE
, WIFI_TURNOFF_DELAY
);
12697 #if defined(BT_OVER_SDIO)
12698 dhd
->bus_user_count
--;
12699 #endif /* BT_OVER_SDIO */
12702 #endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */
12706 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
12709 net
->netdev_ops
= NULL
;
12715 dhd_bus_detach(dhd_pub_t
*dhdp
)
12719 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
12722 dhd
= (dhd_info_t
*)dhdp
->info
;
12726 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
12727 * calling stop again will cuase SD read/write errors.
12729 if (dhd
->pub
.busstate
!= DHD_BUS_DOWN
&& dhd_download_fw_on_driverload
) {
12730 /* Stop the protocol module */
12731 dhd_prot_stop(&dhd
->pub
);
12733 /* Stop the bus module */
12735 /* Force Dongle terminated */
12736 if (dhd_wl_ioctl_cmd(dhdp
, WLC_TERMINATED
, NULL
, 0, TRUE
, 0) < 0)
12737 DHD_ERROR(("%s Setting WLC_TERMINATED failed\n",
12739 dbus_stop(dhd
->pub
.bus
);
12740 dhd
->pub
.busstate
= DHD_BUS_DOWN
;
12742 dhd_bus_stop(dhd
->pub
.bus
, TRUE
);
12743 #endif /* BCMDBUS */
12746 #if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
12747 dhd_bus_oob_intr_unregister(dhdp
);
12754 void dhd_detach(dhd_pub_t
*dhdp
)
12757 unsigned long flags
;
12758 int timer_valid
= FALSE
;
12759 struct net_device
*dev
;
12761 struct bcm_cfg80211
*cfg
= NULL
;
12763 #ifdef HOFFLOAD_MODULES
12764 struct module_metadata
*hmem
= NULL
;
12769 dhd
= (dhd_info_t
*)dhdp
->info
;
12773 dev
= dhd
->iflist
[0]->net
;
12777 if (dev
->flags
& IFF_UP
) {
12778 /* If IFF_UP is still up, it indicates that
12779 * "ifconfig wlan0 down" hasn't been called.
12780 * So invoke dev_close explicitly here to
12781 * bring down the interface.
12783 DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n"));
12789 DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__
, dhd
->dhd_state
));
12792 if (!(dhd
->dhd_state
& DHD_ATTACH_STATE_DONE
)) {
12793 /* Give sufficient time for threads to start running in case
12794 * dhd_attach() has failed
12799 dhd_free_wet_info(&dhd
->pub
, dhd
->pub
.wet_info
);
12800 #endif /* DHD_WET */
12801 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
12802 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
12804 #ifdef PROP_TXSTATUS
12805 #ifdef DHD_WLFC_THREAD
12806 if (dhd
->pub
.wlfc_thread
) {
12807 kthread_stop(dhd
->pub
.wlfc_thread
);
12808 dhdp
->wlfc_thread_go
= TRUE
;
12809 wake_up_interruptible(&dhdp
->wlfc_wqhead
);
12811 dhd
->pub
.wlfc_thread
= NULL
;
12812 #endif /* DHD_WLFC_THREAD */
12813 #endif /* PROP_TXSTATUS */
12815 #ifdef DHD_TIMESYNC
12816 if (dhd
->dhd_state
& DHD_ATTACH_TIMESYNC_ATTACH_DONE
) {
12817 dhd_timesync_detach(dhdp
);
12819 #endif /* DHD_TIMESYNC */
12822 wl_cfg80211_down(dev
);
12824 #endif /* WL_CFG80211 */
12826 if (dhd
->dhd_state
& DHD_ATTACH_STATE_PROT_ATTACH
) {
12827 dhd_bus_detach(dhdp
);
12829 if (is_reboot
== SYS_RESTART
) {
12830 extern bcmdhd_wifi_platdata_t
*dhd_wifi_platdata
;
12831 if (dhd_wifi_platdata
&& !dhdp
->dongle_reset
) {
12832 dhdpcie_bus_clock_stop(dhdp
->bus
);
12833 wifi_platform_set_power(dhd_wifi_platdata
->adapters
,
12834 FALSE
, WIFI_TURNOFF_DELAY
);
12837 #endif /* BCMPCIE */
12838 #ifndef PCIE_FULL_DONGLE
12840 dhd_prot_detach(dhdp
);
12841 #endif /* !PCIE_FULL_DONGLE */
12844 #ifdef ARP_OFFLOAD_SUPPORT
12845 if (dhd_inetaddr_notifier_registered
) {
12846 dhd_inetaddr_notifier_registered
= FALSE
;
12847 unregister_inetaddr_notifier(&dhd_inetaddr_notifier
);
12849 #endif /* ARP_OFFLOAD_SUPPORT */
12850 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
12851 if (dhd_inet6addr_notifier_registered
) {
12852 dhd_inet6addr_notifier_registered
= FALSE
;
12853 unregister_inet6addr_notifier(&dhd_inet6addr_notifier
);
12855 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
12856 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
12857 if (dhd
->dhd_state
& DHD_ATTACH_STATE_EARLYSUSPEND_DONE
) {
12858 if (dhd
->early_suspend
.suspend
)
12859 unregister_early_suspend(&dhd
->early_suspend
);
12861 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
12863 #if defined(WL_WIRELESS_EXT)
12864 if (dhd
->dhd_state
& DHD_ATTACH_STATE_WL_ATTACH
) {
12865 /* Detatch and unlink in the iw */
12869 wl_escan_detach(dhdp
);
12870 #endif /* WL_ESCAN */
12871 #endif /* defined(WL_WIRELESS_EXT) */
12874 dhd_ulp_deinit(dhd
->pub
.osh
, dhdp
);
12875 #endif /* DHD_ULP */
12877 /* delete all interfaces, start with virtual */
12878 if (dhd
->dhd_state
& DHD_ATTACH_STATE_ADD_IF
) {
12882 /* Cleanup virtual interfaces */
12883 dhd_net_if_lock_local(dhd
);
12884 for (i
= 1; i
< DHD_MAX_IFS
; i
++) {
12885 if (dhd
->iflist
[i
]) {
12886 dhd_remove_if(&dhd
->pub
, i
, TRUE
);
12889 dhd_net_if_unlock_local(dhd
);
12891 /* delete primary interface 0 */
12892 ifp
= dhd
->iflist
[0];
12895 if (ifp
&& ifp
->net
) {
12897 cfg
= wl_get_cfg(ifp
->net
);
12899 /* in unregister_netdev case, the interface gets freed by net->destructor
12900 * (which is set to free_netdev)
12902 if (ifp
->net
->reg_state
== NETREG_UNINITIALIZED
) {
12903 free_netdev(ifp
->net
);
12905 argos_register_notifier_deinit();
12906 #ifdef SET_RPS_CPUS
12907 custom_rps_map_clear(ifp
->net
->_rx
);
12908 #endif /* SET_RPS_CPUS */
12909 netif_tx_disable(ifp
->net
);
12910 unregister_netdev(ifp
->net
);
12912 #ifdef PCIE_FULL_DONGLE
12913 ifp
->net
= DHD_NET_DEV_NULL
;
12916 #endif /* PCIE_FULL_DONGLE */
12919 dhd_wmf_cleanup(dhdp
, 0);
12920 #endif /* DHD_WMF */
12921 #ifdef DHD_L2_FILTER
12922 bcm_l2_filter_arp_table_update(dhdp
->osh
, ifp
->phnd_arp_table
, TRUE
,
12923 NULL
, FALSE
, dhdp
->tickcnt
);
12924 deinit_l2_filter_arp_table(dhdp
->osh
, ifp
->phnd_arp_table
);
12925 ifp
->phnd_arp_table
= NULL
;
12926 #endif /* DHD_L2_FILTER */
12929 dhd_if_del_sta_list(ifp
);
12931 MFREE(dhd
->pub
.osh
, ifp
, sizeof(*ifp
));
12932 dhd
->iflist
[0] = NULL
;
12936 /* Clear the watchdog timer */
12937 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
12938 timer_valid
= dhd
->wd_timer_valid
;
12939 dhd
->wd_timer_valid
= FALSE
;
12940 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
12942 del_timer_sync(&dhd
->timer
);
12943 DHD_DISABLE_RUNTIME_PM(&dhd
->pub
);
12946 tasklet_kill(&dhd
->tasklet
);
12948 if (dhd
->dhd_state
& DHD_ATTACH_STATE_THREADS_CREATED
) {
12949 #ifdef DHD_PCIE_RUNTIMEPM
12950 if (dhd
->thr_rpm_ctl
.thr_pid
>= 0) {
12951 PROC_STOP(&dhd
->thr_rpm_ctl
);
12953 #endif /* DHD_PCIE_RUNTIMEPM */
12954 if (dhd
->thr_wdt_ctl
.thr_pid
>= 0) {
12955 PROC_STOP(&dhd
->thr_wdt_ctl
);
12958 if (dhd
->rxthread_enabled
&& dhd
->thr_rxf_ctl
.thr_pid
>= 0) {
12959 PROC_STOP(&dhd
->thr_rxf_ctl
);
12962 if (dhd
->thr_dpc_ctl
.thr_pid
>= 0) {
12963 PROC_STOP(&dhd
->thr_dpc_ctl
);
12966 tasklet_kill(&dhd
->tasklet
);
12969 #endif /* BCMDBUS */
12972 if (dhd
->dhd_state
& DHD_ATTACH_STATE_LB_ATTACH_DONE
) {
12973 /* Clear the flag first to avoid calling the cpu notifier */
12974 dhd
->dhd_state
&= ~DHD_ATTACH_STATE_LB_ATTACH_DONE
;
12976 /* Kill the Load Balancing Tasklets */
12978 cancel_work_sync(&dhd
->rx_napi_dispatcher_work
);
12979 __skb_queue_purge(&dhd
->rx_pend_queue
);
12980 #endif /* DHD_LB_RXP */
12982 cancel_work_sync(&dhd
->tx_dispatcher_work
);
12983 tasklet_kill(&dhd
->tx_tasklet
);
12984 __skb_queue_purge(&dhd
->tx_pend_queue
);
12985 #endif /* DHD_LB_TXP */
12987 cancel_work_sync(&dhd
->tx_compl_dispatcher_work
);
12988 tasklet_kill(&dhd
->tx_compl_tasklet
);
12989 #endif /* DHD_LB_TXC */
12991 tasklet_kill(&dhd
->rx_compl_tasklet
);
12992 #endif /* DHD_LB_RXC */
12994 if (dhd
->cpu_notifier
.notifier_call
!= NULL
) {
12995 unregister_cpu_notifier(&dhd
->cpu_notifier
);
12997 dhd_cpumasks_deinit(dhd
);
12998 DHD_LB_STATS_DEINIT(&dhd
->pub
);
13000 #endif /* DHD_LB */
13002 DHD_SSSR_MEMPOOL_DEINIT(&dhd
->pub
);
13004 #ifdef DHD_LOG_DUMP
13005 dhd_log_dump_deinit(&dhd
->pub
);
13006 #endif /* DHD_LOG_DUMP */
13008 if (dhd
->dhd_state
& DHD_ATTACH_STATE_CFG80211
) {
13010 DHD_ERROR(("cfg NULL!\n"));
13013 wl_cfg80211_detach(cfg
);
13014 dhd_monitor_uninit();
13019 #ifdef DEBUGABILITY
13022 dhd_os_dbg_detach_pkt_monitor(dhdp
);
13023 dhd_os_spin_lock_deinit(dhd
->pub
.osh
, dhd
->pub
.dbg
->pkt_mon_lock
);
13024 #endif /* DBG_PKT_MON */
13025 dhd_os_dbg_detach(dhdp
);
13027 #endif /* DEBUGABILITY */
13028 #ifdef SHOW_LOGTRACE
13029 #ifdef DHD_PKT_LOGGING
13030 dhd_os_detach_pktlog(dhdp
);
13031 #endif /* DHD_PKT_LOGGING */
13032 /* Release the skbs from queue for WLC_E_TRACE event */
13033 dhd_event_logtrace_flush_queue(dhdp
);
13035 if (dhd
->dhd_state
& DHD_ATTACH_LOGTRACE_INIT
) {
13036 if (dhd
->event_data
.fmts
) {
13037 MFREE(dhd
->pub
.osh
, dhd
->event_data
.fmts
,
13038 dhd
->event_data
.fmts_size
);
13039 dhd
->event_data
.fmts
= NULL
;
13041 if (dhd
->event_data
.raw_fmts
) {
13042 MFREE(dhd
->pub
.osh
, dhd
->event_data
.raw_fmts
,
13043 dhd
->event_data
.raw_fmts_size
);
13044 dhd
->event_data
.raw_fmts
= NULL
;
13046 if (dhd
->event_data
.raw_sstr
) {
13047 MFREE(dhd
->pub
.osh
, dhd
->event_data
.raw_sstr
,
13048 dhd
->event_data
.raw_sstr_size
);
13049 dhd
->event_data
.raw_sstr
= NULL
;
13051 if (dhd
->event_data
.rom_raw_sstr
) {
13052 MFREE(dhd
->pub
.osh
, dhd
->event_data
.rom_raw_sstr
,
13053 dhd
->event_data
.rom_raw_sstr_size
);
13054 dhd
->event_data
.rom_raw_sstr
= NULL
;
13056 dhd
->dhd_state
&= ~DHD_ATTACH_LOGTRACE_INIT
;
13058 #endif /* SHOW_LOGTRACE */
13060 if (dhdp
->extended_trap_data
)
13062 MFREE(dhdp
->osh
, dhdp
->extended_trap_data
, BCMPCIE_EXT_TRAP_DATA_MAXLEN
);
13063 dhdp
->extended_trap_data
= NULL
;
13065 #endif /* BCMPCIE */
13067 if (dhdp
->pno_state
)
13068 dhd_pno_deinit(dhdp
);
13071 if (dhdp
->rtt_state
) {
13072 dhd_rtt_deinit(dhdp
);
13075 #if defined(CONFIG_PM_SLEEP)
13076 if (dhd_pm_notifier_registered
) {
13077 unregister_pm_notifier(&dhd
->pm_notifier
);
13078 dhd_pm_notifier_registered
= FALSE
;
13080 #endif /* CONFIG_PM_SLEEP */
13082 #ifdef DEBUG_CPU_FREQ
13084 free_percpu(dhd
->new_freq
);
13085 dhd
->new_freq
= NULL
;
13086 cpufreq_unregister_notifier(&dhd
->freq_trans
, CPUFREQ_TRANSITION_NOTIFIER
);
13088 DHD_TRACE(("wd wakelock count:%d\n", dhd
->wakelock_wd_counter
));
13089 #ifdef CONFIG_HAS_WAKELOCK
13090 dhd
->wakelock_wd_counter
= 0;
13091 wake_lock_destroy(&dhd
->wl_wdwake
);
13092 // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry
13093 wake_lock_destroy(&dhd
->wl_wifi
);
13094 #endif /* CONFIG_HAS_WAKELOCK */
13095 if (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
) {
13096 DHD_OS_WAKE_LOCK_DESTROY(dhd
);
13101 #ifdef DHDTCPACK_SUPPRESS
13102 /* This will free all MEM allocated for TCPACK SUPPRESS */
13103 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_OFF
);
13104 #endif /* DHDTCPACK_SUPPRESS */
13106 #ifdef PCIE_FULL_DONGLE
13107 dhd_flow_rings_deinit(dhdp
);
13109 dhd_prot_detach(dhdp
);
13112 #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
13113 dhd_free_tdls_peer_list(dhdp
);
13116 #ifdef HOFFLOAD_MODULES
13117 hmem
= &dhdp
->hmem
;
13118 dhd_free_module_memory(dhdp
->bus
, hmem
);
13119 #endif /* HOFFLOAD_MODULES */
13120 #if defined(BT_OVER_SDIO)
13121 mutex_destroy(&dhd
->bus_user_lock
);
13122 #endif /* BT_OVER_SDIO */
13123 #ifdef DUMP_IOCTL_IOV_LIST
13124 dhd_iov_li_delete(dhdp
, &(dhdp
->dump_iovlist_head
));
13125 #endif /* DUMP_IOCTL_IOV_LIST */
13127 /* memory waste feature list initilization */
13128 dhd_mw_list_delete(dhdp
, &(dhdp
->mw_list_head
));
13129 #endif /* DHD_DEBUG */
13131 dhd_del_monitor_if(dhd
, NULL
, DHD_WQ_WORK_IF_DEL
);
13132 #endif /* WL_MONITOR */
13134 /* Prefer adding de-init code above this comment unless necessary.
13135 * The idea is to cancel work queue, sysfs and flags at the end.
13137 dhd_deferred_work_deinit(dhd
->dhd_deferred_wq
);
13138 dhd
->dhd_deferred_wq
= NULL
;
13140 #ifdef SHOW_LOGTRACE
13141 /* Wait till event_log_dispatcher_work finishes */
13142 cancel_work_sync(&dhd
->event_log_dispatcher_work
);
13143 #endif /* SHOW_LOGTRACE */
13145 dhd_sysfs_exit(dhd
);
13146 dhd
->pub
.fw_download_done
= FALSE
;
13147 dhd_conf_detach(dhdp
);
13152 dhd_free(dhd_pub_t
*dhdp
)
13155 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
13159 for (i
= 0; i
< ARRAYSIZE(dhdp
->reorder_bufs
); i
++) {
13160 if (dhdp
->reorder_bufs
[i
]) {
13161 reorder_info_t
*ptr
;
13162 uint32 buf_size
= sizeof(struct reorder_info
);
13164 ptr
= dhdp
->reorder_bufs
[i
];
13166 buf_size
+= ((ptr
->max_idx
+ 1) * sizeof(void*));
13167 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
13168 i
, ptr
->max_idx
, buf_size
));
13170 MFREE(dhdp
->osh
, dhdp
->reorder_bufs
[i
], buf_size
);
13171 dhdp
->reorder_bufs
[i
] = NULL
;
13175 dhd_sta_pool_fini(dhdp
, DHD_MAX_STA
);
13177 dhd
= (dhd_info_t
*)dhdp
->info
;
13178 if (dhdp
->soc_ram
) {
13179 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
13180 DHD_OS_PREFREE(dhdp
, dhdp
->soc_ram
, dhdp
->soc_ram_length
);
13182 MFREE(dhdp
->osh
, dhdp
->soc_ram
, dhdp
->soc_ram_length
);
13183 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
13184 dhdp
->soc_ram
= NULL
;
13186 #ifdef CACHE_FW_IMAGES
13187 if (dhdp
->cached_fw
) {
13188 MFREE(dhdp
->osh
, dhdp
->cached_fw
, dhdp
->bus
->ramsize
);
13189 dhdp
->cached_fw
= NULL
;
13192 if (dhdp
->cached_nvram
) {
13193 MFREE(dhdp
->osh
, dhdp
->cached_nvram
, MAX_NVRAMBUF_SIZE
);
13194 dhdp
->cached_nvram
= NULL
;
13198 #ifdef REPORT_FATAL_TIMEOUTS
13199 deinit_dhd_timeouts(&dhd
->pub
);
13200 #endif /* REPORT_FATAL_TIMEOUTS */
13202 /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
13203 if (dhd
!= (dhd_info_t
*)dhd_os_prealloc(dhdp
,
13204 DHD_PREALLOC_DHD_INFO
, 0, FALSE
))
13205 MFREE(dhd
->pub
.osh
, dhd
, sizeof(*dhd
));
13212 dhd_clear(dhd_pub_t
*dhdp
)
13214 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
13218 #ifdef DHDTCPACK_SUPPRESS
13219 /* Clean up timer/data structure for any remaining/pending packet or timer. */
13220 dhd_tcpack_info_tbl_clean(dhdp
);
13221 #endif /* DHDTCPACK_SUPPRESS */
13222 for (i
= 0; i
< ARRAYSIZE(dhdp
->reorder_bufs
); i
++) {
13223 if (dhdp
->reorder_bufs
[i
]) {
13224 reorder_info_t
*ptr
;
13225 uint32 buf_size
= sizeof(struct reorder_info
);
13227 ptr
= dhdp
->reorder_bufs
[i
];
13229 buf_size
+= ((ptr
->max_idx
+ 1) * sizeof(void*));
13230 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
13231 i
, ptr
->max_idx
, buf_size
));
13233 MFREE(dhdp
->osh
, dhdp
->reorder_bufs
[i
], buf_size
);
13234 dhdp
->reorder_bufs
[i
] = NULL
;
13238 dhd_sta_pool_clear(dhdp
, DHD_MAX_STA
);
13240 if (dhdp
->soc_ram
) {
13241 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
13242 DHD_OS_PREFREE(dhdp
, dhdp
->soc_ram
, dhdp
->soc_ram_length
);
13244 MFREE(dhdp
->osh
, dhdp
->soc_ram
, dhdp
->soc_ram_length
);
13245 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
13246 dhdp
->soc_ram
= NULL
;
13252 dhd_module_cleanup(void)
13254 printf("%s: Enter\n", __FUNCTION__
);
13256 dhd_bus_unregister();
13260 dhd_wifi_platform_unregister_drv();
13261 #ifdef CUSTOMER_HW_AMLOGIC
13262 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
13263 wifi_teardown_dt();
13266 printf("%s: Exit\n", __FUNCTION__
);
13270 dhd_module_exit(void)
13272 atomic_set(&exit_in_progress
, 1);
13273 dhd_module_cleanup();
13274 unregister_reboot_notifier(&dhd_reboot_notifier
);
13275 dhd_destroy_to_notifier_skt();
13279 dhd_module_init(void)
13282 int retry
= POWERUP_MAX_RETRY
;
13284 printf("%s: in %s\n", __FUNCTION__
, dhd_version
);
13285 #ifdef CUSTOMER_HW_AMLOGIC
13286 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
13287 if (wifi_setup_dt()) {
13288 printf("wifi_dt : fail to setup dt\n");
13293 DHD_PERIM_RADIO_INIT();
13296 if (firmware_path
[0] != '\0') {
13297 strncpy(fw_bak_path
, firmware_path
, MOD_PARAM_PATHLEN
);
13298 fw_bak_path
[MOD_PARAM_PATHLEN
-1] = '\0';
13301 if (nvram_path
[0] != '\0') {
13302 strncpy(nv_bak_path
, nvram_path
, MOD_PARAM_PATHLEN
);
13303 nv_bak_path
[MOD_PARAM_PATHLEN
-1] = '\0';
13307 err
= dhd_wifi_platform_register_drv();
13309 register_reboot_notifier(&dhd_reboot_notifier
);
13312 DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
13313 __FUNCTION__
, retry
));
13314 strncpy(firmware_path
, fw_bak_path
, MOD_PARAM_PATHLEN
);
13315 firmware_path
[MOD_PARAM_PATHLEN
-1] = '\0';
13316 strncpy(nvram_path
, nv_bak_path
, MOD_PARAM_PATHLEN
);
13317 nvram_path
[MOD_PARAM_PATHLEN
-1] = '\0';
13321 dhd_create_to_notifier_skt();
13324 #ifdef CUSTOMER_HW_AMLOGIC
13325 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
13326 wifi_teardown_dt();
13329 DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__
));
13331 if (!dhd_download_fw_on_driverload
) {
13332 dhd_driver_init_done
= TRUE
;
13336 printf("%s: Exit err=%d\n", __FUNCTION__
, err
);
13341 dhd_reboot_callback(struct notifier_block
*this, unsigned long code
, void *unused
)
13343 DHD_TRACE(("%s: code = %ld\n", __FUNCTION__
, code
));
13344 if (code
== SYS_RESTART
) {
13347 #endif /* BCMPCIE */
13349 return NOTIFY_DONE
;
13353 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
13354 #if defined(CONFIG_DEFERRED_INITCALLS) && !defined(EXYNOS_PCIE_MODULE_PATCH)
13355 #if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
13356 defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8895) || \
13357 defined(CONFIG_ARCH_MSM8998)
13358 deferred_module_init_sync(dhd_module_init
);
13360 deferred_module_init(dhd_module_init
);
13361 #endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 ||
13362 * CONFIG_ARCH_MSM8996 || CONFIG_SOC_EXYNOS8895 || CONFIG_ARCH_MSM8998
13364 #elif defined(USE_LATE_INITCALL_SYNC)
13365 late_initcall_sync(dhd_module_init
);
13367 late_initcall(dhd_module_init
);
13368 #endif /* USE_LATE_INITCALL_SYNC */
13370 module_init(dhd_module_init
);
13371 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
13373 module_exit(dhd_module_exit
);
13376 * OS specific functions required to implement DHD driver in OS independent way
13379 dhd_os_proto_block(dhd_pub_t
*pub
)
13381 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13384 DHD_PERIM_UNLOCK(pub
);
13386 down(&dhd
->proto_sem
);
13388 DHD_PERIM_LOCK(pub
);
13396 dhd_os_proto_unblock(dhd_pub_t
*pub
)
13398 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13401 up(&dhd
->proto_sem
);
13409 dhd_os_dhdiovar_lock(dhd_pub_t
*pub
)
13411 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13414 mutex_lock(&dhd
->dhd_iovar_mutex
);
13419 dhd_os_dhdiovar_unlock(dhd_pub_t
*pub
)
13421 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13424 mutex_unlock(&dhd
->dhd_iovar_mutex
);
13429 dhd_os_get_ioctl_resp_timeout(void)
13431 return ((unsigned int)dhd_ioctl_timeout_msec
);
13435 dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec
)
13437 dhd_ioctl_timeout_msec
= (int)timeout_msec
;
13441 dhd_os_ioctl_resp_wait(dhd_pub_t
*pub
, uint
*condition
, bool resched
)
13443 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13444 int timeout
, timeout_tmp
= dhd_ioctl_timeout_msec
;
13446 if (!resched
&& pub
->conf
->ctrl_resched
>0 && pub
->conf
->dhd_ioctl_timeout_msec
>0) {
13447 timeout_tmp
= dhd_ioctl_timeout_msec
;
13448 dhd_ioctl_timeout_msec
= pub
->conf
->dhd_ioctl_timeout_msec
;
13451 /* Convert timeout in millsecond to jiffies */
13452 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13453 timeout
= msecs_to_jiffies(dhd_ioctl_timeout_msec
);
13455 timeout
= dhd_ioctl_timeout_msec
* HZ
/ 1000;
13458 DHD_PERIM_UNLOCK(pub
);
13460 timeout
= wait_event_timeout(dhd
->ioctl_resp_wait
, (*condition
), timeout
);
13462 if (!resched
&& pub
->conf
->ctrl_resched
>0 && pub
->conf
->dhd_ioctl_timeout_msec
>0) {
13463 dhd_ioctl_timeout_msec
= timeout_tmp
;
13466 DHD_PERIM_LOCK(pub
);
13472 dhd_os_ioctl_resp_wake(dhd_pub_t
*pub
)
13474 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
13476 wake_up(&dhd
->ioctl_resp_wait
);
13481 dhd_os_d3ack_wait(dhd_pub_t
*pub
, uint
*condition
)
13483 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13486 /* Convert timeout in millsecond to jiffies */
13487 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13488 timeout
= msecs_to_jiffies(dhd_ioctl_timeout_msec
);
13490 timeout
= dhd_ioctl_timeout_msec
* HZ
/ 1000;
13493 DHD_PERIM_UNLOCK(pub
);
13495 timeout
= wait_event_timeout(dhd
->d3ack_wait
, (*condition
), timeout
);
13497 DHD_PERIM_LOCK(pub
);
13504 dhd_os_ds_exit_wait(dhd_pub_t
*pub
, uint
*condition
)
13506 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13509 /* Convert timeout in millsecond to jiffies */
13510 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13511 timeout
= msecs_to_jiffies(ds_exit_timeout_msec
);
13513 timeout
= ds_exit_timeout_msec
* HZ
/ 1000;
13516 DHD_PERIM_UNLOCK(pub
);
13518 timeout
= wait_event_timeout(dhd
->ds_exit_wait
, (*condition
), timeout
);
13520 DHD_PERIM_LOCK(pub
);
13526 dhd_os_ds_exit_wake(dhd_pub_t
*pub
)
13528 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
13530 wake_up(&dhd
->ds_exit_wait
);
13534 #endif /* PCIE_INB_DW */
13537 dhd_os_d3ack_wake(dhd_pub_t
*pub
)
13539 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
13541 wake_up(&dhd
->d3ack_wait
);
13546 dhd_os_busbusy_wait_negation(dhd_pub_t
*pub
, uint
*condition
)
13548 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13551 /* Wait for bus usage contexts to gracefully exit within some timeout value
13552 * Set time out to little higher than dhd_ioctl_timeout_msec,
13553 * so that IOCTL timeout should not get affected.
13555 /* Convert timeout in millsecond to jiffies */
13556 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13557 timeout
= msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT
);
13559 timeout
= DHD_BUS_BUSY_TIMEOUT
* HZ
/ 1000;
13562 timeout
= wait_event_timeout(dhd
->dhd_bus_busy_state_wait
, !(*condition
), timeout
);
13568 * Wait until the condition *var == condition is met.
13569 * Returns 0 if the @condition evaluated to false after the timeout elapsed
13570 * Returns 1 if the @condition evaluated to true
13573 dhd_os_busbusy_wait_condition(dhd_pub_t
*pub
, uint
*var
, uint condition
)
13575 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13578 /* Convert timeout in millsecond to jiffies */
13579 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13580 timeout
= msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT
);
13582 timeout
= DHD_BUS_BUSY_TIMEOUT
* HZ
/ 1000;
13585 timeout
= wait_event_timeout(dhd
->dhd_bus_busy_state_wait
, (*var
== condition
), timeout
);
13591 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
13592 /* Fix compilation error for FC11 */
13596 dhd_os_busbusy_wake(dhd_pub_t
*pub
)
13598 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
13599 /* Call wmb() to make sure before waking up the other event value gets updated */
13601 wake_up(&dhd
->dhd_bus_busy_state_wait
);
13606 dhd_os_wd_timer_extend(void *bus
, bool extend
)
13609 dhd_pub_t
*pub
= bus
;
13610 dhd_info_t
*dhd
= (dhd_info_t
*)pub
->info
;
13613 dhd_os_wd_timer(bus
, WATCHDOG_EXTEND_INTERVAL
);
13615 dhd_os_wd_timer(bus
, dhd
->default_wd_interval
);
13616 #endif /* !BCMDBUS */
13621 dhd_os_wd_timer(void *bus
, uint wdtick
)
13624 dhd_pub_t
*pub
= bus
;
13625 dhd_info_t
*dhd
= (dhd_info_t
*)pub
->info
;
13626 unsigned long flags
;
13628 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
13631 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__
));
13635 DHD_GENERAL_LOCK(pub
, flags
);
13637 /* don't start the wd until fw is loaded */
13638 if (pub
->busstate
== DHD_BUS_DOWN
) {
13639 DHD_GENERAL_UNLOCK(pub
, flags
);
13643 /* Totally stop the timer */
13644 if (!wdtick
&& dhd
->wd_timer_valid
== TRUE
) {
13645 dhd
->wd_timer_valid
= FALSE
;
13646 DHD_GENERAL_UNLOCK(pub
, flags
);
13647 del_timer_sync(&dhd
->timer
);
13652 dhd_watchdog_ms
= (uint
)wdtick
;
13653 /* Re arm the timer, at last watchdog period */
13654 mod_timer(&dhd
->timer
, jiffies
+ msecs_to_jiffies(dhd_watchdog_ms
));
13655 dhd
->wd_timer_valid
= TRUE
;
13657 DHD_GENERAL_UNLOCK(pub
, flags
);
13658 #endif /* !BCMDBUS */
13661 #ifdef DHD_PCIE_RUNTIMEPM
13663 dhd_os_runtimepm_timer(void *bus
, uint tick
)
13665 dhd_pub_t
*pub
= bus
;
13666 dhd_info_t
*dhd
= (dhd_info_t
*)pub
->info
;
13667 unsigned long flags
;
13669 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
13672 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__
));
13676 DHD_GENERAL_LOCK(pub
, flags
);
13678 /* don't start the RPM until fw is loaded */
13679 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(pub
)) {
13680 DHD_GENERAL_UNLOCK(pub
, flags
);
13684 /* If tick is non-zero, the request is to start the timer */
13686 /* Start the timer only if its not already running */
13687 if (dhd
->rpm_timer_valid
== FALSE
) {
13688 mod_timer(&dhd
->rpm_timer
, jiffies
+ msecs_to_jiffies(dhd_runtimepm_ms
));
13689 dhd
->rpm_timer_valid
= TRUE
;
13692 /* tick is zero, we have to stop the timer */
13693 /* Stop the timer only if its running, otherwise we don't have to do anything */
13694 if (dhd
->rpm_timer_valid
== TRUE
) {
13695 dhd
->rpm_timer_valid
= FALSE
;
13696 DHD_GENERAL_UNLOCK(pub
, flags
);
13697 del_timer_sync(&dhd
->rpm_timer
);
13698 /* we have already released the lock, so just go to exit */
13703 DHD_GENERAL_UNLOCK(pub
, flags
);
13709 #endif /* DHD_PCIE_RUNTIMEPM */
13712 dhd_os_open_image(char *filename
)
13717 fp
= filp_open(filename
, O_RDONLY
, 0);
13719 * 2.6.11 (FC4) supports filp_open() but later revs don't?
13721 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
13729 if (!S_ISREG(file_inode(fp
)->i_mode
)) {
13730 DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__
, filename
));
13735 size
= i_size_read(file_inode(fp
));
13737 DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__
, filename
, size
));
13742 DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__
, filename
, size
));
13749 dhd_os_get_image_block(char *buf
, int len
, void *image
)
13751 struct file
*fp
= (struct file
*)image
;
13759 size
= i_size_read(file_inode(fp
));
13760 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
13761 rdlen
= kernel_read(fp
, buf
, MIN(len
, size
), &fp
->f_pos
);
13763 rdlen
= kernel_read(fp
, fp
->f_pos
, buf
, MIN(len
, size
));
13766 if (len
>= size
&& size
!= rdlen
) {
13770 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
13772 fp
->f_pos
+= rdlen
;
/*
 * Return the size in bytes of an image file previously opened with
 * dhd_os_open_image(), or 0 if @image is NULL.
 */
int
dhd_os_get_image_size(void *image)
{
	struct file *fp = (struct file *)image;
	int size;

	if (!image) {
		return 0;
	}

	size = i_size_read(file_inode(fp));

	return size;
}
13793 #if defined(BT_OVER_SDIO)
13795 dhd_os_gets_image(dhd_pub_t
*pub
, char *str
, int len
, void *image
)
13797 struct file
*fp
= (struct file
*)image
;
13800 char *str_end
= NULL
;
13805 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
13806 rd_len
= kernel_read(fp
, str
, len
, &fp
->f_pos
);
13808 rd_len
= kernel_read(fp
, fp
->f_pos
, str
, len
);
13810 str_end
= strnchr(str
, len
, '\n');
13811 if (str_end
== NULL
) {
13814 str_len
= (uint
)(str_end
- str
);
13816 /* Advance file pointer past the string length */
13817 fp
->f_pos
+= str_len
+ 1;
13818 bzero(str_end
, rd_len
- str_len
);
13823 #endif /* defined (BT_OVER_SDIO) */
13827 dhd_os_close_image(void *image
)
13830 filp_close((struct file
*)image
, NULL
);
13834 dhd_os_sdlock(dhd_pub_t
*pub
)
13838 dhd
= (dhd_info_t
*)(pub
->info
);
13841 spin_lock_bh(&dhd
->sdlock
);
13843 if (dhd_dpc_prio
>= 0)
13846 spin_lock_bh(&dhd
->sdlock
);
13847 #endif /* !BCMDBUS */
13851 dhd_os_sdunlock(dhd_pub_t
*pub
)
13855 dhd
= (dhd_info_t
*)(pub
->info
);
13858 spin_unlock_bh(&dhd
->sdlock
);
13860 if (dhd_dpc_prio
>= 0)
13863 spin_unlock_bh(&dhd
->sdlock
);
13864 #endif /* !BCMDBUS */
13868 dhd_os_sdlock_txq(dhd_pub_t
*pub
)
13872 dhd
= (dhd_info_t
*)(pub
->info
);
13874 spin_lock_irqsave(&dhd
->txqlock
, dhd
->txqlock_flags
);
13876 spin_lock_bh(&dhd
->txqlock
);
13877 #endif /* BCMDBUS */
13881 dhd_os_sdunlock_txq(dhd_pub_t
*pub
)
13885 dhd
= (dhd_info_t
*)(pub
->info
);
13887 spin_unlock_irqrestore(&dhd
->txqlock
, dhd
->txqlock_flags
);
13889 spin_unlock_bh(&dhd
->txqlock
);
13890 #endif /* BCMDBUS */
13894 dhd_os_sdlock_rxq(dhd_pub_t
*pub
)
13899 dhd
= (dhd_info_t
*)(pub
->info
);
13900 spin_lock_bh(&dhd
->rxqlock
);
13905 dhd_os_sdunlock_rxq(dhd_pub_t
*pub
)
13910 dhd
= (dhd_info_t
*)(pub
->info
);
13911 spin_unlock_bh(&dhd
->rxqlock
);
13916 dhd_os_rxflock(dhd_pub_t
*pub
)
13920 dhd
= (dhd_info_t
*)(pub
->info
);
13921 spin_lock_bh(&dhd
->rxf_lock
);
13926 dhd_os_rxfunlock(dhd_pub_t
*pub
)
13930 dhd
= (dhd_info_t
*)(pub
->info
);
13931 spin_unlock_bh(&dhd
->rxf_lock
);
13934 #ifdef DHDTCPACK_SUPPRESS
13936 dhd_os_tcpacklock(dhd_pub_t
*pub
)
13939 unsigned long flags
= 0;
13941 dhd
= (dhd_info_t
*)(pub
->info
);
13945 spin_lock_bh(&dhd
->tcpack_lock
);
13947 spin_lock_irqsave(&dhd
->tcpack_lock
, flags
);
13948 #endif /* BCMSDIO */
13955 dhd_os_tcpackunlock(dhd_pub_t
*pub
, unsigned long flags
)
13960 BCM_REFERENCE(flags
);
13961 #endif /* BCMSDIO */
13963 dhd
= (dhd_info_t
*)(pub
->info
);
13967 spin_unlock_bh(&dhd
->tcpack_lock
);
13969 spin_unlock_irqrestore(&dhd
->tcpack_lock
, flags
);
13970 #endif /* BCMSDIO */
13973 #endif /* DHDTCPACK_SUPPRESS */
13975 uint8
* dhd_os_prealloc(dhd_pub_t
*dhdpub
, int section
, uint size
, bool kmalloc_if_fail
)
13978 gfp_t flags
= CAN_SLEEP() ? GFP_KERNEL
: GFP_ATOMIC
;
13980 buf
= (uint8
*)wifi_platform_prealloc(dhdpub
->info
->adapter
, section
, size
);
13981 if (buf
== NULL
&& kmalloc_if_fail
)
13982 buf
= kmalloc(size
, flags
);
13987 void dhd_os_prefree(dhd_pub_t
*dhdpub
, void *addr
, uint size
)
13991 #if defined(WL_WIRELESS_EXT)
13992 struct iw_statistics
*
13993 dhd_get_wireless_stats(struct net_device
*dev
)
13996 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
13998 if (!dhd
->pub
.up
) {
14002 res
= wl_iw_get_wireless_stats(dev
, &dhd
->iw
.wstats
);
14005 return &dhd
->iw
.wstats
;
14009 #endif /* defined(WL_WIRELESS_EXT) */
14012 dhd_wl_host_event(dhd_info_t
*dhd
, int ifidx
, void *pktdata
, uint16 pktlen
,
14013 wl_event_msg_t
*event
, void **data
)
14017 unsigned long flags
= 0;
14018 #endif /* WL_CFG80211 */
14019 ASSERT(dhd
!= NULL
);
14021 #ifdef SHOW_LOGTRACE
14022 bcmerror
= wl_process_host_event(&dhd
->pub
, &ifidx
, pktdata
, pktlen
, event
, data
,
14025 bcmerror
= wl_process_host_event(&dhd
->pub
, &ifidx
, pktdata
, pktlen
, event
, data
,
14027 #endif /* SHOW_LOGTRACE */
14029 if (bcmerror
!= BCME_OK
)
14032 #if defined(WL_EXT_IAPSTA)
14033 wl_ext_iapsta_event(dhd
->iflist
[ifidx
]->net
, event
, *data
);
14034 #endif /* defined(WL_EXT_IAPSTA) */
14035 #if defined(WL_WIRELESS_EXT)
14036 if (event
->bsscfgidx
== 0) {
14038 * Wireless ext is on primary interface only
14041 ASSERT(dhd
->iflist
[ifidx
] != NULL
);
14042 ASSERT(dhd
->iflist
[ifidx
]->net
!= NULL
);
14044 if (dhd
->iflist
[ifidx
]->net
) {
14045 wl_iw_event(dhd
->iflist
[ifidx
]->net
, event
, *data
);
14048 #endif /* defined(WL_WIRELESS_EXT) */
14051 ASSERT(dhd
->iflist
[ifidx
] != NULL
);
14052 ASSERT(dhd
->iflist
[ifidx
]->net
!= NULL
);
14053 if (dhd
->iflist
[ifidx
]->net
) {
14054 spin_lock_irqsave(&dhd
->pub
.up_lock
, flags
);
14056 wl_cfg80211_event(dhd
->iflist
[ifidx
]->net
, event
, *data
);
14058 spin_unlock_irqrestore(&dhd
->pub
.up_lock
, flags
);
14060 #endif /* defined(WL_CFG80211) */
14065 /* send up locally generated event */
14067 dhd_sendup_event(dhd_pub_t
*dhdp
, wl_event_msg_t
*event
, void *data
)
14069 /* Just return from here */
14073 #ifdef LOG_INTO_TCPDUMP
14075 dhd_sendup_log(dhd_pub_t
*dhdp
, void *data
, int data_len
)
14077 struct sk_buff
*p
, *skb
;
14084 struct ether_header eth
;
14086 pktlen
= sizeof(eth
) + data_len
;
14089 if ((p
= PKTGET(dhdp
->osh
, pktlen
, FALSE
))) {
14090 ASSERT(ISALIGNED((uintptr
)PKTDATA(dhdp
->osh
, p
), sizeof(uint32
)));
14092 bcopy(&dhdp
->mac
, ð
.ether_dhost
, ETHER_ADDR_LEN
);
14093 bcopy(&dhdp
->mac
, ð
.ether_shost
, ETHER_ADDR_LEN
);
14094 ETHER_TOGGLE_LOCALADDR(ð
.ether_shost
);
14095 eth
.ether_type
= hton16(ETHER_TYPE_BRCM
);
14097 bcopy((void *)ð
, PKTDATA(dhdp
->osh
, p
), sizeof(eth
));
14098 bcopy(data
, PKTDATA(dhdp
->osh
, p
) + sizeof(eth
), data_len
);
14099 skb
= PKTTONATIVE(dhdp
->osh
, p
);
14100 skb_data
= skb
->data
;
14103 ifidx
= dhd_ifname2idx(dhd
, "wlan0");
14104 ifp
= dhd
->iflist
[ifidx
];
14106 ifp
= dhd
->iflist
[0];
14109 skb
->dev
= ifp
->net
;
14110 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
14111 skb
->data
= skb_data
;
14114 /* Strip header, count, deliver upward */
14115 skb_pull(skb
, ETH_HLEN
);
14117 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
14118 __FUNCTION__
, __LINE__
);
14119 /* Send the packet */
14120 if (in_interrupt()) {
14126 /* Could not allocate a sk_buf */
14127 DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__
));
14130 #endif /* LOG_INTO_TCPDUMP */
14132 void dhd_wait_for_event(dhd_pub_t
*dhd
, bool *lockvar
)
14134 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
14135 struct dhd_info
*dhdinfo
= dhd
->info
;
14137 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
14138 int timeout
= msecs_to_jiffies(IOCTL_RESP_TIMEOUT
);
14140 int timeout
= (IOCTL_RESP_TIMEOUT
/ 1000) * HZ
;
14141 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
14143 dhd_os_sdunlock(dhd
);
14144 wait_event_timeout(dhdinfo
->ctrl_wait
, (*lockvar
== FALSE
), timeout
);
14145 dhd_os_sdlock(dhd
);
14146 #endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
14150 void dhd_wait_event_wakeup(dhd_pub_t
*dhd
)
14152 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
14153 struct dhd_info
*dhdinfo
= dhd
->info
;
14154 if (waitqueue_active(&dhdinfo
->ctrl_wait
))
14155 wake_up(&dhdinfo
->ctrl_wait
);
14160 #if defined(BCMSDIO) || defined(BCMPCIE) || defined(BCMDBUS)
14162 dhd_net_bus_devreset(struct net_device
*dev
, uint8 flag
)
14166 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14168 if (flag
== TRUE
) {
14169 /* Issue wl down command before resetting the chip */
14170 if (dhd_wl_ioctl_cmd(&dhd
->pub
, WLC_DOWN
, NULL
, 0, TRUE
, 0) < 0) {
14171 DHD_TRACE(("%s: wl down failed\n", __FUNCTION__
));
14173 #ifdef PROP_TXSTATUS
14174 if (dhd
->pub
.wlfc_enabled
) {
14175 dhd_wlfc_deinit(&dhd
->pub
);
14177 #endif /* PROP_TXSTATUS */
14179 if (dhd
->pub
.pno_state
) {
14180 dhd_pno_deinit(&dhd
->pub
);
14184 if (dhd
->pub
.rtt_state
) {
14185 dhd_rtt_deinit(&dhd
->pub
);
14187 #endif /* RTT_SUPPORT */
14189 #if defined(DBG_PKT_MON) && !defined(DBG_PKT_MON_INIT_DEFAULT)
14190 dhd_os_dbg_detach_pkt_monitor(&dhd
->pub
);
14191 #endif /* DBG_PKT_MON */
14196 dhd_update_fw_nv_path(dhd
);
14197 /* update firmware and nvram path to sdio bus */
14198 dhd_bus_update_fw_nv_path(dhd
->pub
.bus
,
14199 dhd
->fw_path
, dhd
->nv_path
, dhd
->clm_path
, dhd
->conf_path
);
14201 #endif /* BCMSDIO */
14203 ret
= dhd_bus_devreset(&dhd
->pub
, flag
);
14205 DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__
, ret
));
14214 dhd_net_bus_suspend(struct net_device
*dev
)
14216 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14217 return dhd_bus_suspend(&dhd
->pub
);
14221 dhd_net_bus_resume(struct net_device
*dev
, uint8 stage
)
14223 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14224 return dhd_bus_resume(&dhd
->pub
, stage
);
14227 #endif /* BCMSDIO */
14228 #endif /* BCMSDIO || BCMPCIE || BCMDBUS */
14230 int net_os_set_suspend_disable(struct net_device
*dev
, int val
)
14232 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14236 ret
= dhd
->pub
.suspend_disable_flag
;
14237 dhd
->pub
.suspend_disable_flag
= val
;
14242 int net_os_set_suspend(struct net_device
*dev
, int val
, int force
)
14245 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14248 #ifdef CONFIG_MACH_UNIVERSAL7420
14249 #endif /* CONFIG_MACH_UNIVERSAL7420 */
14250 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
14251 ret
= dhd_set_suspend(val
, &dhd
->pub
);
14253 ret
= dhd_suspend_resume_helper(dhd
, val
, force
);
14256 wl_cfg80211_update_power_mode(dev
);
14262 int net_os_set_suspend_bcn_li_dtim(struct net_device
*dev
, int val
)
14264 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14267 dhd
->pub
.suspend_bcn_li_dtim
= val
;
14272 int net_os_set_max_dtim_enable(struct net_device
*dev
, int val
)
14274 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14277 DHD_ERROR(("%s: use MAX bcn_li_dtim in suspend %s\n",
14278 __FUNCTION__
, (val
? "Enable" : "Disable")));
14280 dhd
->pub
.max_dtim_enable
= TRUE
;
14282 dhd
->pub
.max_dtim_enable
= FALSE
;
14291 #ifdef PKT_FILTER_SUPPORT
14292 int net_os_rxfilter_add_remove(struct net_device
*dev
, int add_remove
, int num
)
14296 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
14297 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14299 if (!dhd_master_mode
)
14300 add_remove
= !add_remove
;
14301 DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__
, add_remove
, num
));
14302 if (!dhd
|| (num
== DHD_UNICAST_FILTER_NUM
)) {
14307 if (num
>= dhd
->pub
.pktfilter_count
) {
14311 ret
= dhd_packet_filter_add_remove(&dhd
->pub
, add_remove
, num
);
14312 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
14317 int dhd_os_enable_packet_filter(dhd_pub_t
*dhdp
, int val
)
14322 /* Packet filtering is set only if we still in early-suspend and
14323 * we need either to turn it ON or turn it OFF
14324 * We can always turn it OFF in case of early-suspend, but we turn it
14325 * back ON only if suspend_disable_flag was not set
14327 if (dhdp
&& dhdp
->up
) {
14328 if (dhdp
->in_suspend
) {
14329 if (!val
|| (val
&& !dhdp
->suspend_disable_flag
))
14330 dhd_enable_packet_filter(val
, dhdp
);
14336 /* function to enable/disable packet for Network device */
14337 int net_os_enable_packet_filter(struct net_device
*dev
, int val
)
14339 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14341 DHD_ERROR(("%s: val = %d\n", __FUNCTION__
, val
));
14342 return dhd_os_enable_packet_filter(&dhd
->pub
, val
);
14344 #endif /* PKT_FILTER_SUPPORT */
14347 dhd_dev_init_ioctl(struct net_device
*dev
)
14349 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14352 if ((ret
= dhd_sync_with_dongle(&dhd
->pub
)) < 0)
14360 dhd_dev_get_feature_set(struct net_device
*dev
)
14362 dhd_info_t
*ptr
= *(dhd_info_t
**)netdev_priv(dev
);
14363 dhd_pub_t
*dhd
= (&ptr
->pub
);
14364 int feature_set
= 0;
14366 if (FW_SUPPORTED(dhd
, sta
))
14367 feature_set
|= WIFI_FEATURE_INFRA
;
14368 if (FW_SUPPORTED(dhd
, dualband
))
14369 feature_set
|= WIFI_FEATURE_INFRA_5G
;
14370 if (FW_SUPPORTED(dhd
, p2p
))
14371 feature_set
|= WIFI_FEATURE_P2P
;
14372 if (dhd
->op_mode
& DHD_FLAG_HOSTAP_MODE
)
14373 feature_set
|= WIFI_FEATURE_SOFT_AP
;
14374 if (FW_SUPPORTED(dhd
, tdls
))
14375 feature_set
|= WIFI_FEATURE_TDLS
;
14376 if (FW_SUPPORTED(dhd
, vsdb
))
14377 feature_set
|= WIFI_FEATURE_TDLS_OFFCHANNEL
;
14378 if (FW_SUPPORTED(dhd
, nan
)) {
14379 feature_set
|= WIFI_FEATURE_NAN
;
14380 /* NAN is essential for d2d rtt */
14381 if (FW_SUPPORTED(dhd
, rttd2d
))
14382 feature_set
|= WIFI_FEATURE_D2D_RTT
;
14385 if (dhd
->rtt_supported
) {
14386 feature_set
|= WIFI_FEATURE_D2D_RTT
;
14387 feature_set
|= WIFI_FEATURE_D2AP_RTT
;
14389 #endif /* RTT_SUPPORT */
14390 #ifdef LINKSTAT_SUPPORT
14391 feature_set
|= WIFI_FEATURE_LINKSTAT
;
14392 #endif /* LINKSTAT_SUPPORT */
14395 if (dhd_is_pno_supported(dhd
)) {
14396 feature_set
|= WIFI_FEATURE_PNO
;
14397 #ifdef GSCAN_SUPPORT
14398 /* terence 20171115: remove to get GTS PASS
14399 * com.google.android.gts.wifi.WifiHostTest#testWifiScannerBatchTimestamp
14401 // feature_set |= WIFI_FEATURE_GSCAN;
14402 // feature_set |= WIFI_FEATURE_HAL_EPNO;
14403 #endif /* GSCAN_SUPPORT */
14405 #endif /* PNO_SUPPORT */
14406 #ifdef RSSI_MONITOR_SUPPORT
14407 if (FW_SUPPORTED(dhd
, rssi_mon
)) {
14408 feature_set
|= WIFI_FEATURE_RSSI_MONITOR
;
14410 #endif /* RSSI_MONITOR_SUPPORT */
14412 feature_set
|= WIFI_FEATURE_HOTSPOT
;
14414 #ifdef NDO_CONFIG_SUPPORT
14415 feature_set
|= WIFI_FEATURE_CONFIG_NDO
;
14416 #endif /* NDO_CONFIG_SUPPORT */
14418 feature_set
|= WIFI_FEATURE_MKEEP_ALIVE
;
14419 #endif /* KEEP_ALIVE */
14421 return feature_set
;
14425 dhd_dev_get_feature_set_matrix(struct net_device
*dev
, int num
)
14427 int feature_set_full
;
14430 feature_set_full
= dhd_dev_get_feature_set(dev
);
14432 /* Common feature set for all interface */
14433 ret
= (feature_set_full
& WIFI_FEATURE_INFRA
) |
14434 (feature_set_full
& WIFI_FEATURE_INFRA_5G
) |
14435 (feature_set_full
& WIFI_FEATURE_D2D_RTT
) |
14436 (feature_set_full
& WIFI_FEATURE_D2AP_RTT
) |
14437 (feature_set_full
& WIFI_FEATURE_RSSI_MONITOR
) |
14438 (feature_set_full
& WIFI_FEATURE_EPR
);
14440 /* Specific feature group for each interface */
14443 ret
|= (feature_set_full
& WIFI_FEATURE_P2P
) |
14444 /* Not supported yet */
14445 /* (feature_set_full & WIFI_FEATURE_NAN) | */
14446 (feature_set_full
& WIFI_FEATURE_TDLS
) |
14447 (feature_set_full
& WIFI_FEATURE_PNO
) |
14448 (feature_set_full
& WIFI_FEATURE_HAL_EPNO
) |
14449 (feature_set_full
& WIFI_FEATURE_BATCH_SCAN
) |
14450 (feature_set_full
& WIFI_FEATURE_GSCAN
) |
14451 (feature_set_full
& WIFI_FEATURE_HOTSPOT
) |
14452 (feature_set_full
& WIFI_FEATURE_ADDITIONAL_STA
);
14456 ret
|= (feature_set_full
& WIFI_FEATURE_P2P
);
14457 /* Not yet verified NAN with P2P */
14458 /* (feature_set_full & WIFI_FEATURE_NAN) | */
14462 ret
|= (feature_set_full
& WIFI_FEATURE_NAN
) |
14463 (feature_set_full
& WIFI_FEATURE_TDLS
) |
14464 (feature_set_full
& WIFI_FEATURE_TDLS_OFFCHANNEL
);
14468 ret
= WIFI_FEATURE_INVALID
;
14469 DHD_ERROR(("%s: Out of index(%d) for get feature set\n", __FUNCTION__
, num
));
14476 #ifdef CUSTOM_FORCE_NODFS_FLAG
14478 dhd_dev_set_nodfs(struct net_device
*dev
, u32 nodfs
)
14480 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14483 dhd
->pub
.dhd_cflags
|= WLAN_PLAT_NODFS_FLAG
;
14485 dhd
->pub
.dhd_cflags
&= ~WLAN_PLAT_NODFS_FLAG
;
14486 dhd
->pub
.force_country_change
= TRUE
;
14489 #endif /* CUSTOM_FORCE_NODFS_FLAG */
14491 #ifdef NDO_CONFIG_SUPPORT
14493 dhd_dev_ndo_cfg(struct net_device
*dev
, u8 enable
)
14495 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
14496 dhd_pub_t
*dhdp
= &dhd
->pub
;
14500 /* enable ND offload feature (will be enabled in FW on suspend) */
14501 dhdp
->ndo_enable
= TRUE
;
14503 /* Update changes of anycast address & DAD failed address */
14504 ret
= dhd_dev_ndo_update_inet6addr(dev
);
14505 if ((ret
< 0) && (ret
!= BCME_NORESOURCE
)) {
14506 DHD_ERROR(("%s: failed to update host ip addr: %d\n", __FUNCTION__
, ret
));
14510 /* disable ND offload feature */
14511 dhdp
->ndo_enable
= FALSE
;
14513 /* disable ND offload in FW */
14514 ret
= dhd_ndo_enable(dhdp
, 0);
14516 DHD_ERROR(("%s: failed to disable NDO: %d\n", __FUNCTION__
, ret
));
14522 /* #pragma used as a WAR to fix build failure,
14523 * ignore dropping of 'const' qualifier in 'list_entry' macro
14524 * this pragma disables the warning only for the following function
14526 #pragma GCC diagnostic push
14527 #pragma GCC diagnostic ignored "-Wcast-qual"
14530 dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev
*inet6
)
14532 struct inet6_ifaddr
*ifa
;
14533 struct ifacaddr6
*acaddr
= NULL
;
14534 int addr_count
= 0;
14537 read_lock_bh(&inet6
->lock
);
14539 /* Count valid unicast address */
14540 list_for_each_entry(ifa
, &inet6
->addr_list
, if_list
) {
14541 if ((ifa
->flags
& IFA_F_DADFAILED
) == 0) {
14546 /* Count anycast address */
14547 acaddr
= inet6
->ac_list
;
14550 acaddr
= acaddr
->aca_next
;
14554 read_unlock_bh(&inet6
->lock
);
14560 dhd_dev_ndo_update_inet6addr(struct net_device
*dev
)
14564 struct inet6_dev
*inet6
;
14565 struct inet6_ifaddr
*ifa
;
14566 struct ifacaddr6
*acaddr
= NULL
;
14567 struct in6_addr
*ipv6_addr
= NULL
;
14572 * this function evaluates host ip address in struct inet6_dev
14573 * unicast addr in inet6_dev->addr_list
14574 * anycast addr in inet6_dev->ac_list
14575 * while evaluating inet6_dev, read_lock_bh() is required to prevent
14576 * access on null(freed) pointer.
14580 inet6
= dev
->ip6_ptr
;
14582 DHD_ERROR(("%s: Invalid inet6_dev\n", __FUNCTION__
));
14586 dhd
= DHD_DEV_INFO(dev
);
14588 DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__
));
14593 if (dhd_net2idx(dhd
, dev
) != 0) {
14594 DHD_ERROR(("%s: Not primary interface\n", __FUNCTION__
));
14598 DHD_ERROR(("%s: Invalid net_device\n", __FUNCTION__
));
14602 /* Check host IP overflow */
14603 cnt
= dhd_dev_ndo_get_valid_inet6addr_count(inet6
);
14604 if (cnt
> dhdp
->ndo_max_host_ip
) {
14605 if (!dhdp
->ndo_host_ip_overflow
) {
14606 dhdp
->ndo_host_ip_overflow
= TRUE
;
14607 /* Disable ND offload in FW */
14608 DHD_INFO(("%s: Host IP overflow, disable NDO\n", __FUNCTION__
));
14609 ret
= dhd_ndo_enable(dhdp
, 0);
14616 * Allocate ipv6 addr buffer to store addresses to be added/removed.
14617 * driver need to lock inet6_dev while accessing structure. but, driver
14618 * cannot use ioctl while inet6_dev locked since it requires scheduling
14619 * hence, copy addresses to the buffer and do ioctl after unlock.
14621 ipv6_addr
= (struct in6_addr
*)MALLOC(dhdp
->osh
,
14622 sizeof(struct in6_addr
) * dhdp
->ndo_max_host_ip
);
14624 DHD_ERROR(("%s: failed to alloc ipv6 addr buffer\n", __FUNCTION__
));
14628 /* Find DAD failed unicast address to be removed */
14630 read_lock_bh(&inet6
->lock
);
14631 list_for_each_entry(ifa
, &inet6
->addr_list
, if_list
) {
14632 /* DAD failed unicast address */
14633 if ((ifa
->flags
& IFA_F_DADFAILED
) &&
14634 (cnt
< dhdp
->ndo_max_host_ip
)) {
14635 memcpy(&ipv6_addr
[cnt
], &ifa
->addr
, sizeof(struct in6_addr
));
14639 read_unlock_bh(&inet6
->lock
);
14641 /* Remove DAD failed unicast address */
14642 for (i
= 0; i
< cnt
; i
++) {
14643 DHD_INFO(("%s: Remove DAD failed addr\n", __FUNCTION__
));
14644 ret
= dhd_ndo_remove_ip_by_addr(dhdp
, (char *)&ipv6_addr
[i
], 0);
14650 /* Remove all anycast address */
14651 ret
= dhd_ndo_remove_ip_by_type(dhdp
, WL_ND_IPV6_ADDR_TYPE_ANYCAST
, 0);
14657 * if ND offload was disabled due to host ip overflow,
14658 * attempt to add valid unicast address.
14660 if (dhdp
->ndo_host_ip_overflow
) {
14661 /* Find valid unicast address */
14663 read_lock_bh(&inet6
->lock
);
14664 list_for_each_entry(ifa
, &inet6
->addr_list
, if_list
) {
14665 /* valid unicast address */
14666 if (!(ifa
->flags
& IFA_F_DADFAILED
) &&
14667 (cnt
< dhdp
->ndo_max_host_ip
)) {
14668 memcpy(&ipv6_addr
[cnt
], &ifa
->addr
,
14669 sizeof(struct in6_addr
));
14673 read_unlock_bh(&inet6
->lock
);
14675 /* Add valid unicast address */
14676 for (i
= 0; i
< cnt
; i
++) {
14677 ret
= dhd_ndo_add_ip_with_type(dhdp
,
14678 (char *)&ipv6_addr
[i
], WL_ND_IPV6_ADDR_TYPE_UNICAST
, 0);
14685 /* Find anycast address */
14687 read_lock_bh(&inet6
->lock
);
14688 acaddr
= inet6
->ac_list
;
14690 if (cnt
< dhdp
->ndo_max_host_ip
) {
14691 memcpy(&ipv6_addr
[cnt
], &acaddr
->aca_addr
, sizeof(struct in6_addr
));
14694 acaddr
= acaddr
->aca_next
;
14696 read_unlock_bh(&inet6
->lock
);
14698 /* Add anycast address */
14699 for (i
= 0; i
< cnt
; i
++) {
14700 ret
= dhd_ndo_add_ip_with_type(dhdp
,
14701 (char *)&ipv6_addr
[i
], WL_ND_IPV6_ADDR_TYPE_ANYCAST
, 0);
14707 /* Now All host IP addr were added successfully */
14708 if (dhdp
->ndo_host_ip_overflow
) {
14709 dhdp
->ndo_host_ip_overflow
= FALSE
;
14710 if (dhdp
->in_suspend
) {
14711 /* driver is in (early) suspend state, need to enable ND offload in FW */
14712 DHD_INFO(("%s: enable NDO\n", __FUNCTION__
));
14713 ret
= dhd_ndo_enable(dhdp
, 1);
14719 MFREE(dhdp
->osh
, ipv6_addr
, sizeof(struct in6_addr
) * dhdp
->ndo_max_host_ip
);
14724 #pragma GCC diagnostic pop
14726 #endif /* NDO_CONFIG_SUPPORT */
14729 /* Linux wrapper to call common dhd_pno_stop_for_ssid */
14731 dhd_dev_pno_stop_for_ssid(struct net_device
*dev
)
14733 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14735 return (dhd_pno_stop_for_ssid(&dhd
->pub
));
14738 /* Linux wrapper to call common dhd_pno_set_for_ssid */
14740 dhd_dev_pno_set_for_ssid(struct net_device
*dev
, wlc_ssid_ext_t
* ssids_local
, int nssid
,
14741 uint16 scan_fr
, int pno_repeat
, int pno_freq_expo_max
, uint16
*channel_list
, int nchan
)
14743 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14745 return (dhd_pno_set_for_ssid(&dhd
->pub
, ssids_local
, nssid
, scan_fr
,
14746 pno_repeat
, pno_freq_expo_max
, channel_list
, nchan
));
14749 /* Linux wrapper to call common dhd_pno_enable */
14751 dhd_dev_pno_enable(struct net_device
*dev
, int enable
)
14753 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14755 return (dhd_pno_enable(&dhd
->pub
, enable
));
14758 /* Linux wrapper to call common dhd_pno_set_for_hotlist */
14760 dhd_dev_pno_set_for_hotlist(struct net_device
*dev
, wl_pfn_bssid_t
*p_pfn_bssid
,
14761 struct dhd_pno_hotlist_params
*hotlist_params
)
14763 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14764 return (dhd_pno_set_for_hotlist(&dhd
->pub
, p_pfn_bssid
, hotlist_params
));
14766 /* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
14768 dhd_dev_pno_stop_for_batch(struct net_device
*dev
)
14770 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14771 return (dhd_pno_stop_for_batch(&dhd
->pub
));
14774 /* Linux wrapper to call common dhd_dev_pno_set_for_batch */
14776 dhd_dev_pno_set_for_batch(struct net_device
*dev
, struct dhd_pno_batch_params
*batch_params
)
14778 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14779 return (dhd_pno_set_for_batch(&dhd
->pub
, batch_params
));
14782 /* Linux wrapper to call common dhd_dev_pno_get_for_batch */
14784 dhd_dev_pno_get_for_batch(struct net_device
*dev
, char *buf
, int bufsize
)
14786 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14787 return (dhd_pno_get_for_batch(&dhd
->pub
, buf
, bufsize
, PNO_STATUS_NORMAL
));
14789 #endif /* PNO_SUPPORT */
14791 #if defined(PNO_SUPPORT)
14792 #ifdef GSCAN_SUPPORT
14794 dhd_dev_is_legacy_pno_enabled(struct net_device
*dev
)
14796 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
14798 return (dhd_is_legacy_pno_enabled(&dhd
->pub
));
14802 dhd_dev_set_epno(struct net_device
*dev
)
14804 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14808 return dhd_pno_set_epno(&dhd
->pub
);
14811 dhd_dev_flush_fw_epno(struct net_device
*dev
)
14813 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14817 return dhd_pno_flush_fw_epno(&dhd
->pub
);
14820 /* Linux wrapper to call common dhd_pno_set_cfg_gscan */
14822 dhd_dev_pno_set_cfg_gscan(struct net_device
*dev
, dhd_pno_gscan_cmd_cfg_t type
,
14823 void *buf
, bool flush
)
14825 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
14827 return (dhd_pno_set_cfg_gscan(&dhd
->pub
, type
, buf
, flush
));
14830 /* Linux wrapper to call common dhd_wait_batch_results_complete */
14832 dhd_dev_wait_batch_results_complete(struct net_device
*dev
)
14834 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
14836 return (dhd_wait_batch_results_complete(&dhd
->pub
));
14839 /* Linux wrapper to call common dhd_pno_lock_batch_results */
14841 dhd_dev_pno_lock_access_batch_results(struct net_device
*dev
)
14843 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
14845 return (dhd_pno_lock_batch_results(&dhd
->pub
));
14847 /* Linux wrapper to call common dhd_pno_unlock_batch_results */
14849 dhd_dev_pno_unlock_access_batch_results(struct net_device
*dev
)
14851 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
14853 return (dhd_pno_unlock_batch_results(&dhd
->pub
));
14856 /* Linux wrapper to call common dhd_pno_initiate_gscan_request */
14858 dhd_dev_pno_run_gscan(struct net_device
*dev
, bool run
, bool flush
)
14860 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
14862 return (dhd_pno_initiate_gscan_request(&dhd
->pub
, run
, flush
));
14865 /* Linux wrapper to call common dhd_pno_enable_full_scan_result */
14867 dhd_dev_pno_enable_full_scan_result(struct net_device
*dev
, bool real_time_flag
)
14869 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
14871 return (dhd_pno_enable_full_scan_result(&dhd
->pub
, real_time_flag
));
14874 /* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
14876 dhd_dev_hotlist_scan_event(struct net_device
*dev
,
14877 const void *data
, int *send_evt_bytes
, hotlist_type_t type
)
14879 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
14881 return (dhd_handle_hotlist_scan_evt(&dhd
->pub
, data
, send_evt_bytes
, type
));
14884 /* Linux wrapper to call common dhd_process_full_gscan_result */
14886 dhd_dev_process_full_gscan_result(struct net_device
*dev
,
14887 const void *data
, uint32 len
, int *send_evt_bytes
)
14889 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
14891 return (dhd_process_full_gscan_result(&dhd
->pub
, data
, len
, send_evt_bytes
));
14895 dhd_dev_gscan_hotlist_cache_cleanup(struct net_device
*dev
, hotlist_type_t type
)
14897 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
14899 dhd_gscan_hotlist_cache_cleanup(&dhd
->pub
, type
);
14905 dhd_dev_gscan_batch_cache_cleanup(struct net_device
*dev
)
14907 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
14909 return (dhd_gscan_batch_cache_cleanup(&dhd
->pub
));
14912 /* Linux wrapper to call common dhd_retreive_batch_scan_results */
14914 dhd_dev_retrieve_batch_scan(struct net_device
*dev
)
14916 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
14918 return (dhd_retreive_batch_scan_results(&dhd
->pub
));
14921 /* Linux wrapper to call common dhd_pno_process_epno_result */
14922 void * dhd_dev_process_epno_result(struct net_device
*dev
,
14923 const void *data
, uint32 event
, int *send_evt_bytes
)
14925 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
14927 return (dhd_pno_process_epno_result(&dhd
->pub
, data
, event
, send_evt_bytes
));
14931 dhd_dev_set_lazy_roam_cfg(struct net_device
*dev
,
14932 wlc_roam_exp_params_t
*roam_param
)
14934 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
14935 wl_roam_exp_cfg_t roam_exp_cfg
;
14939 return BCME_BADARG
;
14942 DHD_ERROR(("a_band_boost_thr %d a_band_penalty_thr %d\n",
14943 roam_param
->a_band_boost_threshold
, roam_param
->a_band_penalty_threshold
));
14944 DHD_ERROR(("a_band_boost_factor %d a_band_penalty_factor %d cur_bssid_boost %d\n",
14945 roam_param
->a_band_boost_factor
, roam_param
->a_band_penalty_factor
,
14946 roam_param
->cur_bssid_boost
));
14947 DHD_ERROR(("alert_roam_trigger_thr %d a_band_max_boost %d\n",
14948 roam_param
->alert_roam_trigger_threshold
, roam_param
->a_band_max_boost
));
14950 memcpy(&roam_exp_cfg
.params
, roam_param
, sizeof(*roam_param
));
14951 roam_exp_cfg
.version
= ROAM_EXP_CFG_VERSION
;
14952 roam_exp_cfg
.flags
= ROAM_EXP_CFG_PRESENT
;
14953 if (dhd
->pub
.lazy_roam_enable
) {
14954 roam_exp_cfg
.flags
|= ROAM_EXP_ENABLE_FLAG
;
14956 err
= dhd_iovar(&dhd
->pub
, 0, "roam_exp_params",
14957 (char *)&roam_exp_cfg
, sizeof(roam_exp_cfg
), NULL
, 0,
14960 DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__
, err
));
14966 dhd_dev_lazy_roam_enable(struct net_device
*dev
, uint32 enable
)
14969 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
14970 wl_roam_exp_cfg_t roam_exp_cfg
;
14972 memset(&roam_exp_cfg
, 0, sizeof(roam_exp_cfg
));
14973 roam_exp_cfg
.version
= ROAM_EXP_CFG_VERSION
;
14975 roam_exp_cfg
.flags
= ROAM_EXP_ENABLE_FLAG
;
14978 err
= dhd_iovar(&dhd
->pub
, 0, "roam_exp_params",
14979 (char *)&roam_exp_cfg
, sizeof(roam_exp_cfg
), NULL
, 0,
14982 DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__
, err
));
14984 dhd
->pub
.lazy_roam_enable
= (enable
!= 0);
14990 dhd_dev_set_lazy_roam_bssid_pref(struct net_device
*dev
,
14991 wl_bssid_pref_cfg_t
*bssid_pref
, uint32 flush
)
14995 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
14997 bssid_pref
->version
= BSSID_PREF_LIST_VERSION
;
14998 /* By default programming bssid pref flushes out old values */
14999 bssid_pref
->flags
= (flush
&& !bssid_pref
->count
) ? ROAM_EXP_CLEAR_BSSID_PREF
: 0;
15000 len
= sizeof(wl_bssid_pref_cfg_t
);
15001 len
+= (bssid_pref
->count
- 1) * sizeof(wl_bssid_pref_list_t
);
15002 err
= dhd_iovar(&(dhd
->pub
), 0, "roam_exp_bssid_pref", (char *)bssid_pref
,
15003 len
, NULL
, 0, TRUE
);
15004 if (err
!= BCME_OK
) {
15005 DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__
, err
));
15011 dhd_dev_set_blacklist_bssid(struct net_device
*dev
, maclist_t
*blacklist
,
15012 uint32 len
, uint32 flush
)
15015 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15019 err
= dhd_wl_ioctl_cmd(&(dhd
->pub
), WLC_SET_MACLIST
, (char *)blacklist
,
15021 if (err
!= BCME_OK
) {
15022 DHD_ERROR(("%s : WLC_SET_MACLIST failed %d\n", __FUNCTION__
, err
));
15026 /* By default programming blacklist flushes out old values */
15027 macmode
= (flush
&& !blacklist
) ? WLC_MACMODE_DISABLED
: WLC_MACMODE_DENY
;
15028 err
= dhd_wl_ioctl_cmd(&(dhd
->pub
), WLC_SET_MACMODE
, (char *)&macmode
,
15029 sizeof(macmode
), TRUE
, 0);
15030 if (err
!= BCME_OK
) {
15031 DHD_ERROR(("%s : WLC_SET_MACMODE failed %d\n", __FUNCTION__
, err
));
15037 dhd_dev_set_whitelist_ssid(struct net_device
*dev
, wl_ssid_whitelist_t
*ssid_whitelist
,
15038 uint32 len
, uint32 flush
)
15041 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15042 wl_ssid_whitelist_t whitelist_ssid_flush
;
15044 if (!ssid_whitelist
) {
15046 ssid_whitelist
= &whitelist_ssid_flush
;
15047 ssid_whitelist
->ssid_count
= 0;
15049 DHD_ERROR(("%s : Nothing to do here\n", __FUNCTION__
));
15050 return BCME_BADARG
;
15053 ssid_whitelist
->version
= SSID_WHITELIST_VERSION
;
15054 ssid_whitelist
->flags
= flush
? ROAM_EXP_CLEAR_SSID_WHITELIST
: 0;
15055 err
= dhd_iovar(&(dhd
->pub
), 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist
,
15056 len
, NULL
, 0, TRUE
);
15057 if (err
!= BCME_OK
) {
15058 DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__
, err
));
15062 #endif /* GSCAN_SUPPORT */
15064 #if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
15065 /* Linux wrapper to call common dhd_pno_get_gscan */
15067 dhd_dev_pno_get_gscan(struct net_device
*dev
, dhd_pno_gscan_cmd_cfg_t type
,
15068 void *info
, uint32
*len
)
15070 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15072 return (dhd_pno_get_gscan(&dhd
->pub
, type
, info
, len
));
15074 #endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
15077 #ifdef RSSI_MONITOR_SUPPORT
15079 dhd_dev_set_rssi_monitor_cfg(struct net_device
*dev
, int start
,
15080 int8 max_rssi
, int8 min_rssi
)
15083 wl_rssi_monitor_cfg_t rssi_monitor
;
15084 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15086 rssi_monitor
.version
= RSSI_MONITOR_VERSION
;
15087 rssi_monitor
.max_rssi
= max_rssi
;
15088 rssi_monitor
.min_rssi
= min_rssi
;
15089 rssi_monitor
.flags
= start
? 0: RSSI_MONITOR_STOP
;
15090 err
= dhd_iovar(&(dhd
->pub
), 0, "rssi_monitor", (char *)&rssi_monitor
,
15091 sizeof(rssi_monitor
), NULL
, 0, TRUE
);
15092 if (err
< 0 && err
!= BCME_UNSUPPORTED
) {
15093 DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__
, err
));
15097 #endif /* RSSI_MONITOR_SUPPORT */
15099 #ifdef DHDTCPACK_SUPPRESS
15100 int dhd_dev_set_tcpack_sup_mode_cfg(struct net_device
*dev
, uint8 enable
)
15103 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15105 err
= dhd_tcpack_suppress_set(&(dhd
->pub
), enable
);
15106 if (err
!= BCME_OK
) {
15107 DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__
, err
));
15111 #endif /* DHDTCPACK_SUPPRESS */
15114 dhd_dev_cfg_rand_mac_oui(struct net_device
*dev
, uint8
*oui
)
15116 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15117 dhd_pub_t
*dhdp
= &dhd
->pub
;
15119 if (!dhdp
|| !oui
) {
15120 DHD_ERROR(("NULL POINTER : %s\n",
15124 if (ETHER_ISMULTI(oui
)) {
15125 DHD_ERROR(("Expected unicast OUI\n"));
15128 uint8
*rand_mac_oui
= dhdp
->rand_mac_oui
;
15129 memcpy(rand_mac_oui
, oui
, DOT11_OUI_LEN
);
15130 DHD_ERROR(("Random MAC OUI to be used - %02x:%02x:%02x\n", rand_mac_oui
[0],
15131 rand_mac_oui
[1], rand_mac_oui
[2]));
15137 dhd_set_rand_mac_oui(dhd_pub_t
*dhd
)
15140 wl_pfn_macaddr_cfg_t wl_cfg
;
15141 uint8
*rand_mac_oui
= dhd
->rand_mac_oui
;
15143 memset(&wl_cfg
.macaddr
, 0, ETHER_ADDR_LEN
);
15144 memcpy(&wl_cfg
.macaddr
, rand_mac_oui
, DOT11_OUI_LEN
);
15145 wl_cfg
.version
= WL_PFN_MACADDR_CFG_VER
;
15146 if (ETHER_ISNULLADDR(&wl_cfg
.macaddr
)) {
15149 wl_cfg
.flags
= (WL_PFN_MAC_OUI_ONLY_MASK
| WL_PFN_SET_MAC_UNASSOC_MASK
);
15152 DHD_ERROR(("Setting rand mac oui to FW - %02x:%02x:%02x\n", rand_mac_oui
[0],
15153 rand_mac_oui
[1], rand_mac_oui
[2]));
15155 err
= dhd_iovar(dhd
, 0, "pfn_macaddr", (char *)&wl_cfg
, sizeof(wl_cfg
), NULL
, 0, TRUE
);
15157 DHD_ERROR(("%s : failed to execute pfn_macaddr %d\n", __FUNCTION__
, err
));
15164 /* Linux wrapper to call common dhd_rtt_set_cfg */
15166 dhd_dev_rtt_set_cfg(struct net_device
*dev
, void *buf
)
15168 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15170 return (dhd_rtt_set_cfg(&dhd
->pub
, buf
));
15174 dhd_dev_rtt_cancel_cfg(struct net_device
*dev
, struct ether_addr
*mac_list
, int mac_cnt
)
15176 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15178 return (dhd_rtt_stop(&dhd
->pub
, mac_list
, mac_cnt
));
15182 dhd_dev_rtt_register_noti_callback(struct net_device
*dev
, void *ctx
, dhd_rtt_compl_noti_fn noti_fn
)
15184 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15186 return (dhd_rtt_register_noti_callback(&dhd
->pub
, ctx
, noti_fn
));
15190 dhd_dev_rtt_unregister_noti_callback(struct net_device
*dev
, dhd_rtt_compl_noti_fn noti_fn
)
15192 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15194 return (dhd_rtt_unregister_noti_callback(&dhd
->pub
, noti_fn
));
15198 dhd_dev_rtt_capability(struct net_device
*dev
, rtt_capabilities_t
*capa
)
15200 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15202 return (dhd_rtt_capability(&dhd
->pub
, capa
));
15206 dhd_dev_rtt_avail_channel(struct net_device
*dev
, wifi_channel_info
*channel_info
)
15208 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15209 return (dhd_rtt_avail_channel(&dhd
->pub
, channel_info
));
15213 dhd_dev_rtt_enable_responder(struct net_device
*dev
, wifi_channel_info
*channel_info
)
15215 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15216 return (dhd_rtt_enable_responder(&dhd
->pub
, channel_info
));
15219 int dhd_dev_rtt_cancel_responder(struct net_device
*dev
)
15221 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15222 return (dhd_rtt_cancel_responder(&dhd
->pub
));
15224 #endif /* WL_CFG80211 */
15225 #endif /* RTT_SUPPORT */
15228 #define KA_TEMP_BUF_SIZE 512
15229 #define KA_FRAME_SIZE 300
15232 dhd_dev_start_mkeep_alive(dhd_pub_t
*dhd_pub
, uint8 mkeep_alive_id
, uint8
*ip_pkt
,
15233 uint16 ip_pkt_len
, uint8
* src_mac
, uint8
* dst_mac
, uint32 period_msec
)
15235 const int ETHERTYPE_LEN
= 2;
15238 wl_mkeep_alive_pkt_t mkeep_alive_pkt
;
15239 wl_mkeep_alive_pkt_t
*mkeep_alive_pktp
= NULL
;
15242 int res
= BCME_ERROR
;
15246 /* ether frame to have both max IP pkt (256 bytes) and ether header */
15247 char *pmac_frame
= NULL
;
15248 char *pmac_frame_begin
= NULL
;
15251 * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
15252 * dongle shall reject a mkeep_alive request.
15254 if (!dhd_support_sta_mode(dhd_pub
))
15257 DHD_TRACE(("%s execution\n", __FUNCTION__
));
15259 if ((pbuf
= kzalloc(KA_TEMP_BUF_SIZE
, GFP_KERNEL
)) == NULL
) {
15260 DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE
));
15265 if ((pmac_frame
= kzalloc(KA_FRAME_SIZE
, GFP_KERNEL
)) == NULL
) {
15266 DHD_ERROR(("failed to allocate mac_frame with size %d\n", KA_FRAME_SIZE
));
15270 pmac_frame_begin
= pmac_frame
;
15273 * Get current mkeep-alive status.
15275 res
= dhd_iovar(dhd_pub
, 0, "mkeep_alive", &mkeep_alive_id
, sizeof(mkeep_alive_id
), pbuf
,
15276 KA_TEMP_BUF_SIZE
, FALSE
);
15278 DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__
, res
));
15281 /* Check available ID whether it is occupied */
15282 mkeep_alive_pktp
= (wl_mkeep_alive_pkt_t
*) pbuf
;
15283 if (dtoh32(mkeep_alive_pktp
->period_msec
!= 0)) {
15284 DHD_ERROR(("%s: Get mkeep_alive failed, ID %u is in use.\n",
15285 __FUNCTION__
, mkeep_alive_id
));
15287 /* Current occupied ID info */
15288 DHD_ERROR(("%s: mkeep_alive\n", __FUNCTION__
));
15289 DHD_ERROR((" Id : %d\n"
15290 " Period: %d msec\n"
15293 mkeep_alive_pktp
->keep_alive_id
,
15294 dtoh32(mkeep_alive_pktp
->period_msec
),
15295 dtoh16(mkeep_alive_pktp
->len_bytes
)));
15297 for (i
= 0; i
< mkeep_alive_pktp
->len_bytes
; i
++) {
15298 DHD_ERROR(("%02x", mkeep_alive_pktp
->data
[i
]));
15302 res
= BCME_NOTFOUND
;
15307 /* Request the specified ID */
15308 memset(&mkeep_alive_pkt
, 0, sizeof(wl_mkeep_alive_pkt_t
));
15309 memset(pbuf
, 0, KA_TEMP_BUF_SIZE
);
15310 str
= "mkeep_alive";
15311 str_len
= strlen(str
);
15312 strncpy(pbuf
, str
, str_len
);
15313 pbuf
[str_len
] = '\0';
15315 mkeep_alive_pktp
= (wl_mkeep_alive_pkt_t
*) (pbuf
+ str_len
+ 1);
15316 mkeep_alive_pkt
.period_msec
= htod32(period_msec
);
15317 buf_len
= str_len
+ 1;
15318 mkeep_alive_pkt
.version
= htod16(WL_MKEEP_ALIVE_VERSION
);
15319 mkeep_alive_pkt
.length
= htod16(WL_MKEEP_ALIVE_FIXED_LEN
);
15322 mkeep_alive_pkt
.keep_alive_id
= mkeep_alive_id
;
15324 buf_len
+= WL_MKEEP_ALIVE_FIXED_LEN
;
15327 * Build up Ethernet Frame
15330 /* Mapping dest mac addr */
15331 memcpy(pmac_frame
, dst_mac
, ETHER_ADDR_LEN
);
15332 pmac_frame
+= ETHER_ADDR_LEN
;
15334 /* Mapping src mac addr */
15335 memcpy(pmac_frame
, src_mac
, ETHER_ADDR_LEN
);
15336 pmac_frame
+= ETHER_ADDR_LEN
;
15338 /* Mapping Ethernet type (ETHERTYPE_IP: 0x0800) */
15339 *(pmac_frame
++) = 0x08;
15340 *(pmac_frame
++) = 0x00;
15342 /* Mapping IP pkt */
15343 memcpy(pmac_frame
, ip_pkt
, ip_pkt_len
);
15344 pmac_frame
+= ip_pkt_len
;
15347 * Length of ether frame (assume to be all hexa bytes)
15348 * = src mac + dst mac + ether type + ip pkt len
15350 len_bytes
= ETHER_ADDR_LEN
*2 + ETHERTYPE_LEN
+ ip_pkt_len
;
15351 memcpy(mkeep_alive_pktp
->data
, pmac_frame_begin
, len_bytes
);
15352 buf_len
+= len_bytes
;
15353 mkeep_alive_pkt
.len_bytes
= htod16(len_bytes
);
15356 * Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
15357 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
15358 * guarantee that the buffer is properly aligned.
15360 memcpy((char *)mkeep_alive_pktp
, &mkeep_alive_pkt
, WL_MKEEP_ALIVE_FIXED_LEN
);
15362 res
= dhd_wl_ioctl_cmd(dhd_pub
, WLC_SET_VAR
, pbuf
, buf_len
, TRUE
, 0);
15364 kfree(pmac_frame_begin
);
15370 dhd_dev_stop_mkeep_alive(dhd_pub_t
*dhd_pub
, uint8 mkeep_alive_id
)
15373 wl_mkeep_alive_pkt_t mkeep_alive_pkt
;
15374 wl_mkeep_alive_pkt_t
*mkeep_alive_pktp
;
15375 int res
= BCME_ERROR
;
15379 * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
15380 * dongle shall reject a mkeep_alive request.
15382 if (!dhd_support_sta_mode(dhd_pub
))
15385 DHD_TRACE(("%s execution\n", __FUNCTION__
));
15388 * Get current mkeep-alive status. Skip ID 0 which is being used for NULL pkt.
15390 if ((pbuf
= kmalloc(KA_TEMP_BUF_SIZE
, GFP_KERNEL
)) == NULL
) {
15391 DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE
));
15395 res
= dhd_iovar(dhd_pub
, 0, "mkeep_alive", &mkeep_alive_id
,
15396 sizeof(mkeep_alive_id
), pbuf
, KA_TEMP_BUF_SIZE
, FALSE
);
15398 DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__
, res
));
15401 /* Check occupied ID */
15402 mkeep_alive_pktp
= (wl_mkeep_alive_pkt_t
*) pbuf
;
15403 DHD_INFO(("%s: mkeep_alive\n", __FUNCTION__
));
15404 DHD_INFO((" Id : %d\n"
15405 " Period: %d msec\n"
15408 mkeep_alive_pktp
->keep_alive_id
,
15409 dtoh32(mkeep_alive_pktp
->period_msec
),
15410 dtoh16(mkeep_alive_pktp
->len_bytes
)));
15412 for (i
= 0; i
< mkeep_alive_pktp
->len_bytes
; i
++) {
15413 DHD_INFO(("%02x", mkeep_alive_pktp
->data
[i
]));
15418 /* Make it stop if available */
15419 if (dtoh32(mkeep_alive_pktp
->period_msec
!= 0)) {
15420 DHD_INFO(("stop mkeep_alive on ID %d\n", mkeep_alive_id
));
15421 memset(&mkeep_alive_pkt
, 0, sizeof(wl_mkeep_alive_pkt_t
));
15423 mkeep_alive_pkt
.period_msec
= 0;
15424 mkeep_alive_pkt
.version
= htod16(WL_MKEEP_ALIVE_VERSION
);
15425 mkeep_alive_pkt
.length
= htod16(WL_MKEEP_ALIVE_FIXED_LEN
);
15426 mkeep_alive_pkt
.keep_alive_id
= mkeep_alive_id
;
15428 res
= dhd_iovar(dhd_pub
, 0, "mkeep_alive",
15429 (char *)&mkeep_alive_pkt
,
15430 WL_MKEEP_ALIVE_FIXED_LEN
, NULL
, 0, TRUE
);
15432 DHD_ERROR(("%s: ID %u does not exist.\n", __FUNCTION__
, mkeep_alive_id
));
15433 res
= BCME_NOTFOUND
;
15439 #endif /* KEEP_ALIVE */
15441 #if defined(PKT_FILTER_SUPPORT) && defined(APF)
15442 static void _dhd_apf_lock_local(dhd_info_t
*dhd
)
15444 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
15446 mutex_lock(&dhd
->dhd_apf_mutex
);
15451 static void _dhd_apf_unlock_local(dhd_info_t
*dhd
)
15453 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
15455 mutex_unlock(&dhd
->dhd_apf_mutex
);
15461 __dhd_apf_add_filter(struct net_device
*ndev
, uint32 filter_id
,
15462 u8
* program
, uint32 program_len
)
15464 dhd_info_t
*dhd
= DHD_DEV_INFO(ndev
);
15465 dhd_pub_t
*dhdp
= &dhd
->pub
;
15466 wl_pkt_filter_t
* pkt_filterp
;
15467 wl_apf_program_t
*apf_program
;
15469 u32 cmd_len
, buf_len
;
15472 char cmd
[] = "pkt_filter_add";
15474 ifidx
= dhd_net2idx(dhd
, ndev
);
15475 if (ifidx
== DHD_BAD_IF
) {
15476 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__
));
15480 cmd_len
= sizeof(cmd
);
15482 /* Check if the program_len is more than the expected len
15483 * and if the program is NULL return from here.
15485 if ((program_len
> WL_APF_PROGRAM_MAX_SIZE
) || (program
== NULL
)) {
15486 DHD_ERROR(("%s Invalid program_len: %d, program: %pK\n",
15487 __FUNCTION__
, program_len
, program
));
15490 buf_len
= cmd_len
+ WL_PKT_FILTER_FIXED_LEN
+
15491 WL_APF_PROGRAM_FIXED_LEN
+ program_len
;
15493 kflags
= in_atomic() ? GFP_ATOMIC
: GFP_KERNEL
;
15494 buf
= kzalloc(buf_len
, kflags
);
15495 if (unlikely(!buf
)) {
15496 DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__
, buf_len
));
15500 memcpy(buf
, cmd
, cmd_len
);
15502 pkt_filterp
= (wl_pkt_filter_t
*) (buf
+ cmd_len
);
15503 pkt_filterp
->id
= htod32(filter_id
);
15504 pkt_filterp
->negate_match
= htod32(FALSE
);
15505 pkt_filterp
->type
= htod32(WL_PKT_FILTER_TYPE_APF_MATCH
);
15507 apf_program
= &pkt_filterp
->u
.apf_program
;
15508 apf_program
->version
= htod16(WL_APF_INTERNAL_VERSION
);
15509 apf_program
->instr_len
= htod16(program_len
);
15510 memcpy(apf_program
->instrs
, program
, program_len
);
15512 ret
= dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, buf
, buf_len
, TRUE
, ifidx
);
15513 if (unlikely(ret
)) {
15514 DHD_ERROR(("%s: failed to add APF filter, id=%d, ret=%d\n",
15515 __FUNCTION__
, filter_id
, ret
));
15525 __dhd_apf_config_filter(struct net_device
*ndev
, uint32 filter_id
,
15526 uint32 mode
, uint32 enable
)
15528 dhd_info_t
*dhd
= DHD_DEV_INFO(ndev
);
15529 dhd_pub_t
*dhdp
= &dhd
->pub
;
15530 wl_pkt_filter_enable_t
* pkt_filterp
;
15532 u32 cmd_len
, buf_len
;
15535 char cmd
[] = "pkt_filter_enable";
15537 ifidx
= dhd_net2idx(dhd
, ndev
);
15538 if (ifidx
== DHD_BAD_IF
) {
15539 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__
));
15543 cmd_len
= sizeof(cmd
);
15544 buf_len
= cmd_len
+ sizeof(*pkt_filterp
);
15546 kflags
= in_atomic() ? GFP_ATOMIC
: GFP_KERNEL
;
15547 buf
= kzalloc(buf_len
, kflags
);
15548 if (unlikely(!buf
)) {
15549 DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__
, buf_len
));
15553 memcpy(buf
, cmd
, cmd_len
);
15555 pkt_filterp
= (wl_pkt_filter_enable_t
*) (buf
+ cmd_len
);
15556 pkt_filterp
->id
= htod32(filter_id
);
15557 pkt_filterp
->enable
= htod32(enable
);
15559 ret
= dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, buf
, buf_len
, TRUE
, ifidx
);
15560 if (unlikely(ret
)) {
15561 DHD_ERROR(("%s: failed to enable APF filter, id=%d, ret=%d\n",
15562 __FUNCTION__
, filter_id
, ret
));
15566 ret
= dhd_wl_ioctl_set_intiovar(dhdp
, "pkt_filter_mode", dhd_master_mode
,
15567 WLC_SET_VAR
, TRUE
, ifidx
);
15568 if (unlikely(ret
)) {
15569 DHD_ERROR(("%s: failed to set APF filter mode, id=%d, ret=%d\n",
15570 __FUNCTION__
, filter_id
, ret
));
15581 __dhd_apf_delete_filter(struct net_device
*ndev
, uint32 filter_id
)
15583 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(ndev
);
15584 dhd_pub_t
*dhdp
= &dhd
->pub
;
15587 ifidx
= dhd_net2idx(dhd
, ndev
);
15588 if (ifidx
== DHD_BAD_IF
) {
15589 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__
));
15593 ret
= dhd_wl_ioctl_set_intiovar(dhdp
, "pkt_filter_delete",
15594 htod32(filter_id
), WLC_SET_VAR
, TRUE
, ifidx
);
15595 if (unlikely(ret
)) {
15596 DHD_ERROR(("%s: failed to delete APF filter, id=%d, ret=%d\n",
15597 __FUNCTION__
, filter_id
, ret
));
15603 void dhd_apf_lock(struct net_device
*dev
)
15605 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15606 _dhd_apf_lock_local(dhd
);
15609 void dhd_apf_unlock(struct net_device
*dev
)
15611 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15612 _dhd_apf_unlock_local(dhd
);
15616 dhd_dev_apf_get_version(struct net_device
*ndev
, uint32
*version
)
15618 dhd_info_t
*dhd
= DHD_DEV_INFO(ndev
);
15619 dhd_pub_t
*dhdp
= &dhd
->pub
;
15622 if (!FW_SUPPORTED(dhdp
, apf
)) {
15623 DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__
));
15626 * Notify Android framework that APF is not supported by setting
15633 ifidx
= dhd_net2idx(dhd
, ndev
);
15634 if (ifidx
== DHD_BAD_IF
) {
15635 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__
));
15639 ret
= dhd_wl_ioctl_get_intiovar(dhdp
, "apf_ver", version
,
15640 WLC_GET_VAR
, FALSE
, ifidx
);
15641 if (unlikely(ret
)) {
15642 DHD_ERROR(("%s: failed to get APF version, ret=%d\n",
15643 __FUNCTION__
, ret
));
15650 dhd_dev_apf_get_max_len(struct net_device
*ndev
, uint32
*max_len
)
15652 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(ndev
);
15653 dhd_pub_t
*dhdp
= &dhd
->pub
;
15656 if (!FW_SUPPORTED(dhdp
, apf
)) {
15657 DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__
));
15662 ifidx
= dhd_net2idx(dhd
, ndev
);
15663 if (ifidx
== DHD_BAD_IF
) {
15664 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__
));
15668 ret
= dhd_wl_ioctl_get_intiovar(dhdp
, "apf_size_limit", max_len
,
15669 WLC_GET_VAR
, FALSE
, ifidx
);
15670 if (unlikely(ret
)) {
15671 DHD_ERROR(("%s: failed to get APF size limit, ret=%d\n",
15672 __FUNCTION__
, ret
));
15679 dhd_dev_apf_add_filter(struct net_device
*ndev
, u8
* program
,
15680 uint32 program_len
)
15682 dhd_info_t
*dhd
= DHD_DEV_INFO(ndev
);
15683 dhd_pub_t
*dhdp
= &dhd
->pub
;
15686 DHD_APF_LOCK(ndev
);
15688 /* delete, if filter already exists */
15689 if (dhdp
->apf_set
) {
15690 ret
= __dhd_apf_delete_filter(ndev
, PKT_FILTER_APF_ID
);
15691 if (unlikely(ret
)) {
15694 dhdp
->apf_set
= FALSE
;
15697 ret
= __dhd_apf_add_filter(ndev
, PKT_FILTER_APF_ID
, program
, program_len
);
15701 dhdp
->apf_set
= TRUE
;
15703 if (dhdp
->in_suspend
&& dhdp
->apf_set
&& !(dhdp
->op_mode
& DHD_FLAG_HOSTAP_MODE
)) {
15704 /* Driver is still in (early) suspend state, enable APF filter back */
15705 ret
= __dhd_apf_config_filter(ndev
, PKT_FILTER_APF_ID
,
15706 PKT_FILTER_MODE_FORWARD_ON_MATCH
, TRUE
);
15709 DHD_APF_UNLOCK(ndev
);
15715 dhd_dev_apf_enable_filter(struct net_device
*ndev
)
15717 dhd_info_t
*dhd
= DHD_DEV_INFO(ndev
);
15718 dhd_pub_t
*dhdp
= &dhd
->pub
;
15721 DHD_APF_LOCK(ndev
);
15723 if (dhdp
->apf_set
&& !(dhdp
->op_mode
& DHD_FLAG_HOSTAP_MODE
)) {
15724 ret
= __dhd_apf_config_filter(ndev
, PKT_FILTER_APF_ID
,
15725 PKT_FILTER_MODE_FORWARD_ON_MATCH
, TRUE
);
15728 DHD_APF_UNLOCK(ndev
);
15734 dhd_dev_apf_disable_filter(struct net_device
*ndev
)
15736 dhd_info_t
*dhd
= DHD_DEV_INFO(ndev
);
15737 dhd_pub_t
*dhdp
= &dhd
->pub
;
15740 DHD_APF_LOCK(ndev
);
15742 if (dhdp
->apf_set
) {
15743 ret
= __dhd_apf_config_filter(ndev
, PKT_FILTER_APF_ID
,
15744 PKT_FILTER_MODE_FORWARD_ON_MATCH
, FALSE
);
15747 DHD_APF_UNLOCK(ndev
);
15753 dhd_dev_apf_delete_filter(struct net_device
*ndev
)
15755 dhd_info_t
*dhd
= DHD_DEV_INFO(ndev
);
15756 dhd_pub_t
*dhdp
= &dhd
->pub
;
15759 DHD_APF_LOCK(ndev
);
15761 if (dhdp
->apf_set
) {
15762 ret
= __dhd_apf_delete_filter(ndev
, PKT_FILTER_APF_ID
);
15764 dhdp
->apf_set
= FALSE
;
15768 DHD_APF_UNLOCK(ndev
);
15772 #endif /* PKT_FILTER_SUPPORT && APF */
15774 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
15775 static void dhd_hang_process(void *dhd_info
, void *event_info
, u8 event
)
15778 struct net_device
*dev
;
15780 dhd
= (dhd_info_t
*)dhd_info
;
15781 dev
= dhd
->iflist
[0]->net
;
15785 * For HW2, dev_close need to be done to recover
15786 * from upper layer after hang. For Interposer skip
15787 * dev_close so that dhd iovars can be used to take
15788 * socramdump after crash, also skip for HW4 as
15789 * handling of hang event is different
15791 #if !defined(CUSTOMER_HW2_INTERPOSER)
15796 #if defined(WL_WIRELESS_EXT)
15797 wl_iw_send_priv_event(dev
, "HANG");
15799 #if defined(WL_CFG80211)
15800 wl_cfg80211_hang(dev
, WLAN_REASON_UNSPECIFIED
);
15805 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
15806 extern dhd_pub_t
*link_recovery
;
15807 void dhd_host_recover_link(void)
15809 DHD_ERROR(("****** %s ******\n", __FUNCTION__
));
15810 link_recovery
->hang_reason
= HANG_REASON_PCIE_LINK_DOWN
;
15811 dhd_bus_set_linkdown(link_recovery
, TRUE
);
15812 dhd_os_send_hang_message(link_recovery
);
15814 EXPORT_SYMBOL(dhd_host_recover_link
);
15815 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
15817 int dhd_os_send_hang_message(dhd_pub_t
*dhdp
)
15821 #if defined(DHD_HANG_SEND_UP_TEST)
15822 if (dhdp
->req_hang_type
) {
15823 DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
15824 __FUNCTION__
, dhdp
->req_hang_type
));
15825 dhdp
->req_hang_type
= 0;
15827 #endif /* DHD_HANG_SEND_UP_TEST */
15829 if (!dhdp
->hang_was_sent
) {
15830 #if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG)
15831 dhdp
->hang_counts
++;
15832 if (dhdp
->hang_counts
>= MAX_CONSECUTIVE_HANG_COUNTS
) {
15833 DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n",
15834 __func__
, dhdp
->hang_counts
));
15837 #endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
15838 #ifdef DHD_DEBUG_UART
15839 /* If PCIe lane has broken, execute the debug uart application
15840 * to gether a ramdump data from dongle via uart
15842 if (!dhdp
->info
->duart_execute
) {
15843 dhd_deferred_schedule_work(dhdp
->info
->dhd_deferred_wq
,
15844 (void *)dhdp
, DHD_WQ_WORK_DEBUG_UART_DUMP
,
15845 dhd_debug_uart_exec_rd
, DHD_WQ_WORK_PRIORITY_HIGH
);
15847 #endif /* DHD_DEBUG_UART */
15848 dhdp
->hang_was_sent
= 1;
15849 #ifdef BT_OVER_SDIO
15850 dhdp
->is_bt_recovery_required
= TRUE
;
15852 dhd_deferred_schedule_work(dhdp
->info
->dhd_deferred_wq
, (void *)dhdp
,
15853 DHD_WQ_WORK_HANG_MSG
, dhd_hang_process
, DHD_WQ_WORK_PRIORITY_HIGH
);
15854 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d s=%d\n", __FUNCTION__
,
15855 dhdp
->rxcnt_timeout
, dhdp
->txcnt_timeout
, dhdp
->busstate
));
15861 int net_os_send_hang_message(struct net_device
*dev
)
15863 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15867 /* Report FW problem when enabled */
15868 if (dhd
->pub
.hang_report
) {
15869 #ifdef BT_OVER_SDIO
15870 if (netif_running(dev
)) {
15871 #endif /* BT_OVER_SDIO */
15872 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
15873 ret
= dhd_os_send_hang_message(&dhd
->pub
);
15875 ret
= wl_cfg80211_hang(dev
, WLAN_REASON_UNSPECIFIED
);
15877 #ifdef BT_OVER_SDIO
15879 DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__
));
15880 bcmsdh_btsdio_process_dhd_hang_notification(!netif_running(dev
));
15881 #endif /* BT_OVER_SDIO */
15883 DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
15890 int net_os_send_hang_message_reason(struct net_device
*dev
, const char *string_num
)
15892 dhd_info_t
*dhd
= NULL
;
15893 dhd_pub_t
*dhdp
= NULL
;
15896 dhd
= DHD_DEV_INFO(dev
);
15901 if (!dhd
|| !dhdp
) {
15905 reason
= bcm_strtoul(string_num
, NULL
, 0);
15906 DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__
, reason
));
15908 if ((reason
<= HANG_REASON_MASK
) || (reason
>= HANG_REASON_MAX
)) {
15912 dhdp
->hang_reason
= reason
;
15914 return net_os_send_hang_message(dev
);
15916 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
15919 int dhd_net_wifi_platform_set_power(struct net_device
*dev
, bool on
, unsigned long delay_msec
)
15921 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15922 return wifi_platform_set_power(dhd
->adapter
, on
, delay_msec
);
15925 bool dhd_force_country_change(struct net_device
*dev
)
15927 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15929 if (dhd
&& dhd
->pub
.up
)
15930 return dhd
->pub
.force_country_change
;
15934 void dhd_get_customized_country_code(struct net_device
*dev
, char *country_iso_code
,
15935 wl_country_t
*cspec
)
15937 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15938 #if defined(DHD_BLOB_EXISTENCE_CHECK)
15939 if (!dhd
->pub
.is_blob
)
15940 #endif /* DHD_BLOB_EXISTENCE_CHECK */
15942 #if defined(CUSTOM_COUNTRY_CODE)
15943 get_customized_country_code(dhd
->adapter
, country_iso_code
, cspec
,
15944 dhd
->pub
.dhd_cflags
);
15946 get_customized_country_code(dhd
->adapter
, country_iso_code
, cspec
);
15947 #endif /* CUSTOM_COUNTRY_CODE */
15950 BCM_REFERENCE(dhd
);
15953 void dhd_bus_country_set(struct net_device
*dev
, wl_country_t
*cspec
, bool notify
)
15955 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15957 struct bcm_cfg80211
*cfg
= wl_get_cfg(dev
);
15960 if (dhd
&& dhd
->pub
.up
) {
15961 memcpy(&dhd
->pub
.dhd_cspec
, cspec
, sizeof(wl_country_t
));
15963 wl_update_wiphybands(cfg
, notify
);
15968 void dhd_bus_band_set(struct net_device
*dev
, uint band
)
15970 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15972 struct bcm_cfg80211
*cfg
= wl_get_cfg(dev
);
15974 if (dhd
&& dhd
->pub
.up
) {
15976 wl_update_wiphybands(cfg
, true);
15981 int dhd_net_set_fw_path(struct net_device
*dev
, char *fw
)
15983 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15985 if (!fw
|| fw
[0] == '\0')
15988 strncpy(dhd
->fw_path
, fw
, sizeof(dhd
->fw_path
) - 1);
15989 dhd
->fw_path
[sizeof(dhd
->fw_path
)-1] = '\0';
15991 #if defined(SOFTAP)
15992 if (strstr(fw
, "apsta") != NULL
) {
15993 DHD_INFO(("GOT APSTA FIRMWARE\n"));
15994 ap_fw_loaded
= TRUE
;
15996 DHD_INFO(("GOT STA FIRMWARE\n"));
15997 ap_fw_loaded
= FALSE
;
16003 void dhd_net_if_lock(struct net_device
*dev
)
16005 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16006 dhd_net_if_lock_local(dhd
);
16009 void dhd_net_if_unlock(struct net_device
*dev
)
16011 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16012 dhd_net_if_unlock_local(dhd
);
16015 static void dhd_net_if_lock_local(dhd_info_t
*dhd
)
16017 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16019 mutex_lock(&dhd
->dhd_net_if_mutex
);
16023 static void dhd_net_if_unlock_local(dhd_info_t
*dhd
)
16025 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16027 mutex_unlock(&dhd
->dhd_net_if_mutex
);
16031 static void dhd_suspend_lock(dhd_pub_t
*pub
)
16033 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16034 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16036 mutex_lock(&dhd
->dhd_suspend_mutex
);
16040 static void dhd_suspend_unlock(dhd_pub_t
*pub
)
16042 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16043 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16045 mutex_unlock(&dhd
->dhd_suspend_mutex
);
16049 unsigned long dhd_os_general_spin_lock(dhd_pub_t
*pub
)
16051 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16052 unsigned long flags
= 0;
16055 spin_lock_irqsave(&dhd
->dhd_lock
, flags
);
16060 void dhd_os_general_spin_unlock(dhd_pub_t
*pub
, unsigned long flags
)
16062 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16065 spin_unlock_irqrestore(&dhd
->dhd_lock
, flags
);
16068 /* Linux specific multipurpose spinlock API */
16070 dhd_os_spin_lock_init(osl_t
*osh
)
16072 /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
16073 /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
16074 /* and this results in kernel asserts in internal builds */
16075 spinlock_t
* lock
= MALLOC(osh
, sizeof(spinlock_t
) + 4);
16077 spin_lock_init(lock
);
16078 return ((void *)lock
);
16081 dhd_os_spin_lock_deinit(osl_t
*osh
, void *lock
)
16084 MFREE(osh
, lock
, sizeof(spinlock_t
) + 4);
16087 dhd_os_spin_lock(void *lock
)
16089 unsigned long flags
= 0;
16092 spin_lock_irqsave((spinlock_t
*)lock
, flags
);
16097 dhd_os_spin_unlock(void *lock
, unsigned long flags
)
16100 spin_unlock_irqrestore((spinlock_t
*)lock
, flags
);
16104 dhd_get_pend_8021x_cnt(dhd_info_t
*dhd
)
16106 return (atomic_read(&dhd
->pend_8021x_cnt
));
16109 #define MAX_WAIT_FOR_8021X_TX 100
16112 dhd_wait_pend8021x(struct net_device
*dev
)
16114 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16115 int timeout
= msecs_to_jiffies(10);
16116 int ntimes
= MAX_WAIT_FOR_8021X_TX
;
16117 int pend
= dhd_get_pend_8021x_cnt(dhd
);
16119 while (ntimes
&& pend
) {
16121 set_current_state(TASK_INTERRUPTIBLE
);
16122 DHD_PERIM_UNLOCK(&dhd
->pub
);
16123 schedule_timeout(timeout
);
16124 DHD_PERIM_LOCK(&dhd
->pub
);
16125 set_current_state(TASK_RUNNING
);
16128 pend
= dhd_get_pend_8021x_cnt(dhd
);
16132 atomic_set(&dhd
->pend_8021x_cnt
, 0);
16133 DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__
));
16138 #if defined(DHD_DEBUG)
16139 int write_file(const char * file_name
, uint32 flags
, uint8
*buf
, int size
)
16142 struct file
*fp
= NULL
;
16143 mm_segment_t old_fs
;
16145 /* change to KERNEL_DS address limit */
16149 /* open file to write */
16150 fp
= filp_open(file_name
, flags
, 0664);
16152 DHD_ERROR(("open file error, err = %ld\n", PTR_ERR(fp
)));
16157 /* Write buf to file */
16158 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
16159 ret
= kernel_write(fp
, buf
, size
, &pos
);
16161 ret
= vfs_write(fp
, buf
, size
, &pos
);
16164 DHD_ERROR(("write file error, err = %d\n", ret
));
16168 /* Sync file from filesystem to physical media */
16169 ret
= vfs_fsync(fp
, 0);
16171 DHD_ERROR(("sync file error, error = %d\n", ret
));
16177 /* close file before return */
16179 filp_close(fp
, current
->files
);
16181 /* restore previous address limit */
16190 dhd_convert_memdump_type_to_str(uint32 type
, char *buf
)
16192 char *type_str
= NULL
;
16195 case DUMP_TYPE_RESUMED_ON_TIMEOUT
:
16196 type_str
= "resumed_on_timeout";
16198 case DUMP_TYPE_D3_ACK_TIMEOUT
:
16199 type_str
= "D3_ACK_timeout";
16201 case DUMP_TYPE_DONGLE_TRAP
:
16202 type_str
= "Dongle_Trap";
16204 case DUMP_TYPE_MEMORY_CORRUPTION
:
16205 type_str
= "Memory_Corruption";
16207 case DUMP_TYPE_PKTID_AUDIT_FAILURE
:
16208 type_str
= "PKTID_AUDIT_Fail";
16210 case DUMP_TYPE_PKTID_INVALID
:
16211 type_str
= "PKTID_INVALID";
16213 case DUMP_TYPE_SCAN_TIMEOUT
:
16214 type_str
= "SCAN_timeout";
16216 case DUMP_TYPE_JOIN_TIMEOUT
:
16217 type_str
= "JOIN_timeout";
16219 case DUMP_TYPE_SCAN_BUSY
:
16220 type_str
= "SCAN_Busy";
16222 case DUMP_TYPE_BY_SYSDUMP
:
16223 type_str
= "BY_SYSDUMP";
16225 case DUMP_TYPE_BY_LIVELOCK
:
16226 type_str
= "BY_LIVELOCK";
16228 case DUMP_TYPE_AP_LINKUP_FAILURE
:
16229 type_str
= "BY_AP_LINK_FAILURE";
16231 case DUMP_TYPE_AP_ABNORMAL_ACCESS
:
16232 type_str
= "INVALID_ACCESS";
16234 case DUMP_TYPE_CFG_VENDOR_TRIGGERED
:
16235 type_str
= "CFG_VENDOR_TRIGGERED";
16237 case DUMP_TYPE_RESUMED_ON_TIMEOUT_RX
:
16238 type_str
= "ERROR_RX_TIMED_OUT";
16240 case DUMP_TYPE_RESUMED_ON_TIMEOUT_TX
:
16241 type_str
= "ERROR_TX_TIMED_OUT";
16243 case DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR
:
16244 type_str
= "BY_INVALID_RING_RDWR";
16246 case DUMP_TYPE_DONGLE_HOST_EVENT
:
16247 type_str
= "BY_DONGLE_HOST_EVENT";
16249 case DUMP_TYPE_TRANS_ID_MISMATCH
:
16250 type_str
= "BY_TRANS_ID_MISMATCH";
16252 case DUMP_TYPE_HANG_ON_IFACE_OP_FAIL
:
16253 type_str
= "HANG_IFACE_OP_FAIL";
16255 #ifdef SUPPORT_LINKDOWN_RECOVERY
16256 case DUMP_TYPE_READ_SHM_FAIL
:
16257 type_str
= "READ_SHM_FAIL";
16259 #endif /* SUPPORT_LINKDOWN_RECOVERY */
16261 type_str
= "Unknown_type";
16265 strncpy(buf
, type_str
, strlen(type_str
));
16266 buf
[strlen(type_str
)] = 0;
16270 write_dump_to_file(dhd_pub_t
*dhd
, uint8
*buf
, int size
, char *fname
)
16273 char memdump_path
[128];
16274 char memdump_type
[32];
16275 struct timeval curtime
;
16278 /* Init file name */
16279 memset(memdump_path
, 0, sizeof(memdump_path
));
16280 memset(memdump_type
, 0, sizeof(memdump_type
));
16281 do_gettimeofday(&curtime
);
16282 dhd_convert_memdump_type_to_str(dhd
->memdump_type
, memdump_type
);
16283 #ifdef CUSTOMER_HW4_DEBUG
16284 snprintf(memdump_path
, sizeof(memdump_path
), "%s%s_%s_%ld.%ld",
16285 DHD_COMMON_DUMP_PATH
, fname
, memdump_type
,
16286 (unsigned long)curtime
.tv_sec
, (unsigned long)curtime
.tv_usec
);
16287 file_mode
= O_CREAT
| O_WRONLY
| O_SYNC
;
16288 #elif defined(CUSTOMER_HW2)
16289 snprintf(memdump_path
, sizeof(memdump_path
), "%s%s_%s_%ld.%ld",
16290 "/data/misc/wifi/", fname
, memdump_type
,
16291 (unsigned long)curtime
.tv_sec
, (unsigned long)curtime
.tv_usec
);
16292 file_mode
= O_CREAT
| O_WRONLY
| O_SYNC
;
16293 #elif (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
16294 snprintf(memdump_path
, sizeof(memdump_path
), "%s%s_%s_%ld.%ld",
16295 "/data/misc/wifi/", fname
, memdump_type
,
16296 (unsigned long)curtime
.tv_sec
, (unsigned long)curtime
.tv_usec
);
16297 file_mode
= O_CREAT
| O_WRONLY
;
16299 snprintf(memdump_path
, sizeof(memdump_path
), "%s%s_%s_%ld.%ld",
16300 "/installmedia/", fname
, memdump_type
,
16301 (unsigned long)curtime
.tv_sec
, (unsigned long)curtime
.tv_usec
);
16302 /* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we are
16303 * calling BUG_ON immediately after collecting the socram dump.
16304 * So the file write operation should directly write the contents into the
16305 * file instead of caching it. O_TRUNC flag ensures that file will be re-written
16306 * instead of appending.
16308 file_mode
= O_CREAT
| O_WRONLY
| O_SYNC
;
16310 struct file
*fp
= filp_open(memdump_path
, file_mode
, 0664);
16311 /* Check if it is live Brix image having /installmedia, else use /data */
16313 DHD_ERROR(("open file %s, try /data/\n", memdump_path
));
16314 snprintf(memdump_path
, sizeof(memdump_path
), "%s%s_%s_%ld.%ld",
16315 "/data/", fname
, memdump_type
,
16316 (unsigned long)curtime
.tv_sec
, (unsigned long)curtime
.tv_usec
);
16318 filp_close(fp
, NULL
);
16321 #endif /* CUSTOMER_HW4_DEBUG */
16323 /* print SOCRAM dump file path */
16324 DHD_ERROR(("%s: file_path = %s\n", __FUNCTION__
, memdump_path
));
16327 ret
= write_file(memdump_path
, file_mode
, buf
, size
);
16331 #endif /* DHD_DEBUG */
16333 int dhd_os_wake_lock_timeout(dhd_pub_t
*pub
)
16335 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16336 unsigned long flags
;
16339 if (dhd
&& (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
16340 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
16341 ret
= dhd
->wakelock_rx_timeout_enable
> dhd
->wakelock_ctrl_timeout_enable
?
16342 dhd
->wakelock_rx_timeout_enable
: dhd
->wakelock_ctrl_timeout_enable
;
16343 #ifdef CONFIG_HAS_WAKELOCK
16344 if (dhd
->wakelock_rx_timeout_enable
)
16345 wake_lock_timeout(&dhd
->wl_rxwake
,
16346 msecs_to_jiffies(dhd
->wakelock_rx_timeout_enable
));
16347 if (dhd
->wakelock_ctrl_timeout_enable
)
16348 wake_lock_timeout(&dhd
->wl_ctrlwake
,
16349 msecs_to_jiffies(dhd
->wakelock_ctrl_timeout_enable
));
16351 dhd
->wakelock_rx_timeout_enable
= 0;
16352 dhd
->wakelock_ctrl_timeout_enable
= 0;
16353 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
16358 int net_os_wake_lock_timeout(struct net_device
*dev
)
16360 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16364 ret
= dhd_os_wake_lock_timeout(&dhd
->pub
);
16368 int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t
*pub
, int val
)
16370 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16371 unsigned long flags
;
16373 if (dhd
&& (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
16374 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
16375 if (val
> dhd
->wakelock_rx_timeout_enable
)
16376 dhd
->wakelock_rx_timeout_enable
= val
;
16377 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
16382 int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t
*pub
, int val
)
16384 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16385 unsigned long flags
;
16387 if (dhd
&& (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
16388 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
16389 if (val
> dhd
->wakelock_ctrl_timeout_enable
)
16390 dhd
->wakelock_ctrl_timeout_enable
= val
;
16391 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
16396 int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t
*pub
)
16398 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16399 unsigned long flags
;
16401 if (dhd
&& (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
16402 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
16403 dhd
->wakelock_ctrl_timeout_enable
= 0;
16404 #ifdef CONFIG_HAS_WAKELOCK
16405 if (wake_lock_active(&dhd
->wl_ctrlwake
))
16406 wake_unlock(&dhd
->wl_ctrlwake
);
16408 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
16413 int net_os_wake_lock_rx_timeout_enable(struct net_device
*dev
, int val
)
16415 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16419 ret
= dhd_os_wake_lock_rx_timeout_enable(&dhd
->pub
, val
);
16423 int net_os_wake_lock_ctrl_timeout_enable(struct net_device
*dev
, int val
)
16425 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16429 ret
= dhd_os_wake_lock_ctrl_timeout_enable(&dhd
->pub
, val
);
16434 #if defined(DHD_TRACE_WAKE_LOCK)
16435 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16436 #include <linux/hashtable.h>
16438 #include <linux/hash.h>
16439 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16442 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16443 /* Define 2^5 = 32 bucket size hash table */
16444 DEFINE_HASHTABLE(wklock_history
, 5);
16446 /* Define 2^5 = 32 bucket size hash table */
16447 struct hlist_head wklock_history
[32] = { [0 ... 31] = HLIST_HEAD_INIT
};
16448 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16450 int trace_wklock_onoff
= 1;
16451 typedef enum dhd_wklock_type
{
16458 struct wk_trace_record
{
16459 unsigned long addr
; /* Address of the instruction */
16460 dhd_wklock_t lock_type
; /* lock_type */
16461 unsigned long long counter
; /* counter information */
16462 struct hlist_node wklock_node
; /* hash node */
16465 static struct wk_trace_record
*find_wklock_entry(unsigned long addr
)
16467 struct wk_trace_record
*wklock_info
;
16468 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16469 hash_for_each_possible(wklock_history
, wklock_info
, wklock_node
, addr
)
16471 struct hlist_node
*entry
;
16472 int index
= hash_long(addr
, ilog2(ARRAY_SIZE(wklock_history
)));
16473 hlist_for_each_entry(wklock_info
, entry
, &wklock_history
[index
], wklock_node
)
16474 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16476 if (wklock_info
->addr
== addr
) {
16477 return wklock_info
;
16484 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16485 #define HASH_ADD(hashtable, node, key) \
16487 hash_add(hashtable, node, key); \
16490 #define HASH_ADD(hashtable, node, key) \
16492 int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
16493 hlist_add_head(node, &hashtable[index]); \
16495 #endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
/* Record one wakelock event for the caller's return address. Existing entries
 * are updated in place; new call-sites get a GFP_ATOMIC allocation (caller
 * holds wakelock_spinlock). For WAIVE/RESTORE the counter snapshots
 * dhd->wakelock_counter; otherwise it increments. Expects a local `dhd` in
 * the expansion scope.
 * NOTE(review): else-branches/braces dropped by extraction; reconstructed.
 */
#define STORE_WKLOCK_RECORD(wklock_type) \
	do { \
		struct wk_trace_record *wklock_info = NULL; \
		unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
		wklock_info = find_wklock_entry(func_addr); \
		if (wklock_info) { \
			if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
				wklock_info->counter = dhd->wakelock_counter; \
			} else { \
				wklock_info->counter++; \
			} \
		} else { \
			wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
			if (!wklock_info) {\
				printk("Can't allocate wk_trace_record \n"); \
			} else { \
				wklock_info->addr = func_addr; \
				wklock_info->lock_type = wklock_type; \
				if (wklock_type == DHD_WAIVE_LOCK || \
						wklock_type == DHD_RESTORE_LOCK) { \
					wklock_info->counter = dhd->wakelock_counter; \
				} else { \
					wklock_info->counter++; \
				} \
				HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
			} \
		} \
	} while (0)
16526 static inline void dhd_wk_lock_rec_dump(void)
16529 struct wk_trace_record
*wklock_info
;
16531 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16532 hash_for_each(wklock_history
, bkt
, wklock_info
, wklock_node
)
16534 struct hlist_node
*entry
= NULL
;
16535 int max_index
= ARRAY_SIZE(wklock_history
);
16536 for (bkt
= 0; bkt
< max_index
; bkt
++)
16537 hlist_for_each_entry(wklock_info
, entry
, &wklock_history
[bkt
], wklock_node
)
16538 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16540 switch (wklock_info
->lock_type
) {
16541 case DHD_WAKE_LOCK
:
16542 printk("wakelock lock : %pS lock_counter : %llu \n",
16543 (void *)wklock_info
->addr
, wklock_info
->counter
);
16545 case DHD_WAKE_UNLOCK
:
16546 printk("wakelock unlock : %pS, unlock_counter : %llu \n",
16547 (void *)wklock_info
->addr
, wklock_info
->counter
);
16549 case DHD_WAIVE_LOCK
:
16550 printk("wakelock waive : %pS before_waive : %llu \n",
16551 (void *)wklock_info
->addr
, wklock_info
->counter
);
16553 case DHD_RESTORE_LOCK
:
16554 printk("wakelock restore : %pS, after_waive : %llu \n",
16555 (void *)wklock_info
->addr
, wklock_info
->counter
);
16561 static void dhd_wk_lock_trace_init(struct dhd_info
*dhd
)
16563 unsigned long flags
;
16564 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
16566 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16568 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
16569 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16570 hash_init(wklock_history
);
16572 for (i
= 0; i
< ARRAY_SIZE(wklock_history
); i
++)
16573 INIT_HLIST_HEAD(&wklock_history
[i
]);
16574 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16575 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
16578 static void dhd_wk_lock_trace_deinit(struct dhd_info
*dhd
)
16581 struct wk_trace_record
*wklock_info
;
16582 struct hlist_node
*tmp
;
16583 unsigned long flags
;
16584 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
16585 struct hlist_node
*entry
= NULL
;
16586 int max_index
= ARRAY_SIZE(wklock_history
);
16587 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16589 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
16590 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16591 hash_for_each_safe(wklock_history
, bkt
, tmp
, wklock_info
, wklock_node
)
16593 for (bkt
= 0; bkt
< max_index
; bkt
++)
16594 hlist_for_each_entry_safe(wklock_info
, entry
, tmp
,
16595 &wklock_history
[bkt
], wklock_node
)
16596 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
16598 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16599 hash_del(&wklock_info
->wklock_node
);
16601 hlist_del_init(&wklock_info
->wklock_node
);
16602 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
16603 kfree(wklock_info
);
16605 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
16608 void dhd_wk_lock_stats_dump(dhd_pub_t
*dhdp
)
16610 dhd_info_t
*dhd
= (dhd_info_t
*)(dhdp
->info
);
16611 unsigned long flags
;
16613 printk(KERN_ERR
"DHD Printing wl_wake Lock/Unlock Record \r\n");
16614 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
16615 dhd_wk_lock_rec_dump();
16616 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
16620 #define STORE_WKLOCK_RECORD(wklock_type)
16621 #endif /* ! DHD_TRACE_WAKE_LOCK */
16623 int dhd_os_wake_lock(dhd_pub_t
*pub
)
16625 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16626 unsigned long flags
;
16629 if (dhd
&& (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
16630 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
16631 if (dhd
->wakelock_counter
== 0 && !dhd
->waive_wakelock
) {
16632 #ifdef CONFIG_HAS_WAKELOCK
16633 wake_lock(&dhd
->wl_wifi
);
16634 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
16635 dhd_bus_dev_pm_stay_awake(pub
);
16638 #ifdef DHD_TRACE_WAKE_LOCK
16639 if (trace_wklock_onoff
) {
16640 STORE_WKLOCK_RECORD(DHD_WAKE_LOCK
);
16642 #endif /* DHD_TRACE_WAKE_LOCK */
16643 dhd
->wakelock_counter
++;
16644 ret
= dhd
->wakelock_counter
;
16645 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
16651 void dhd_event_wake_lock(dhd_pub_t
*pub
)
16653 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16656 #ifdef CONFIG_HAS_WAKELOCK
16657 wake_lock(&dhd
->wl_evtwake
);
16658 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
16659 dhd_bus_dev_pm_stay_awake(pub
);
16665 dhd_pm_wake_lock_timeout(dhd_pub_t
*pub
, int val
)
16667 #ifdef CONFIG_HAS_WAKELOCK
16668 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16671 wake_lock_timeout(&dhd
->wl_pmwake
, msecs_to_jiffies(val
));
16673 #endif /* CONFIG_HAS_WAKE_LOCK */
16677 dhd_txfl_wake_lock_timeout(dhd_pub_t
*pub
, int val
)
16679 #ifdef CONFIG_HAS_WAKELOCK
16680 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16683 wake_lock_timeout(&dhd
->wl_txflwake
, msecs_to_jiffies(val
));
16685 #endif /* CONFIG_HAS_WAKE_LOCK */
16688 int net_os_wake_lock(struct net_device
*dev
)
16690 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16694 ret
= dhd_os_wake_lock(&dhd
->pub
);
16698 int dhd_os_wake_unlock(dhd_pub_t
*pub
)
16700 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16701 unsigned long flags
;
16704 dhd_os_wake_lock_timeout(pub
);
16705 if (dhd
&& (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
16706 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
16708 if (dhd
->wakelock_counter
> 0) {
16709 dhd
->wakelock_counter
--;
16710 #ifdef DHD_TRACE_WAKE_LOCK
16711 if (trace_wklock_onoff
) {
16712 STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK
);
16714 #endif /* DHD_TRACE_WAKE_LOCK */
16715 if (dhd
->wakelock_counter
== 0 && !dhd
->waive_wakelock
) {
16716 #ifdef CONFIG_HAS_WAKELOCK
16717 wake_unlock(&dhd
->wl_wifi
);
16718 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
16719 dhd_bus_dev_pm_relax(pub
);
16722 ret
= dhd
->wakelock_counter
;
16724 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
16729 void dhd_event_wake_unlock(dhd_pub_t
*pub
)
16731 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16734 #ifdef CONFIG_HAS_WAKELOCK
16735 wake_unlock(&dhd
->wl_evtwake
);
16736 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
16737 dhd_bus_dev_pm_relax(pub
);
16742 void dhd_pm_wake_unlock(dhd_pub_t
*pub
)
16744 #ifdef CONFIG_HAS_WAKELOCK
16745 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16748 /* if wl_pmwake is active, unlock it */
16749 if (wake_lock_active(&dhd
->wl_pmwake
)) {
16750 wake_unlock(&dhd
->wl_pmwake
);
16753 #endif /* CONFIG_HAS_WAKELOCK */
16756 void dhd_txfl_wake_unlock(dhd_pub_t
*pub
)
16758 #ifdef CONFIG_HAS_WAKELOCK
16759 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16762 /* if wl_txflwake is active, unlock it */
16763 if (wake_lock_active(&dhd
->wl_txflwake
)) {
16764 wake_unlock(&dhd
->wl_txflwake
);
16767 #endif /* CONFIG_HAS_WAKELOCK */
16770 int dhd_os_check_wakelock(dhd_pub_t
*pub
)
16772 #if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
16773 KERNEL_VERSION(2, 6, 36)))
16778 dhd
= (dhd_info_t
*)(pub
->info
);
16779 #endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
16781 #ifdef CONFIG_HAS_WAKELOCK
16782 /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
16783 if (dhd
&& (wake_lock_active(&dhd
->wl_wifi
) ||
16784 (wake_lock_active(&dhd
->wl_wdwake
))))
16786 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
16787 if (dhd
&& (dhd
->wakelock_counter
> 0) && dhd_bus_dev_pm_enabled(pub
))
16794 dhd_os_check_wakelock_all(dhd_pub_t
*pub
)
16796 #if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
16797 KERNEL_VERSION(2, 6, 36)))
16798 #if defined(CONFIG_HAS_WAKELOCK)
16799 int l1
, l2
, l3
, l4
, l7
, l8
, l9
;
16800 int l5
= 0, l6
= 0;
16801 int c
, lock_active
;
16802 #endif /* CONFIG_HAS_WAKELOCK */
16808 dhd
= (dhd_info_t
*)(pub
->info
);
16812 #endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
16814 #ifdef CONFIG_HAS_WAKELOCK
16815 c
= dhd
->wakelock_counter
;
16816 l1
= wake_lock_active(&dhd
->wl_wifi
);
16817 l2
= wake_lock_active(&dhd
->wl_wdwake
);
16818 l3
= wake_lock_active(&dhd
->wl_rxwake
);
16819 l4
= wake_lock_active(&dhd
->wl_ctrlwake
);
16820 l7
= wake_lock_active(&dhd
->wl_evtwake
);
16821 #ifdef BCMPCIE_OOB_HOST_WAKE
16822 l5
= wake_lock_active(&dhd
->wl_intrwake
);
16823 #endif /* BCMPCIE_OOB_HOST_WAKE */
16824 #ifdef DHD_USE_SCAN_WAKELOCK
16825 l6
= wake_lock_active(&dhd
->wl_scanwake
);
16826 #endif /* DHD_USE_SCAN_WAKELOCK */
16827 l8
= wake_lock_active(&dhd
->wl_pmwake
);
16828 l9
= wake_lock_active(&dhd
->wl_txflwake
);
16829 lock_active
= (l1
|| l2
|| l3
|| l4
|| l5
|| l6
|| l7
|| l8
|| l9
);
16831 /* Indicate to the Host to avoid going to suspend if internal locks are up */
16833 DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
16834 "ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d\n",
16835 __FUNCTION__
, c
, l1
, l2
, l3
, l4
, l5
, l6
, l7
, l8
, l9
));
16838 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
16839 if (dhd
&& (dhd
->wakelock_counter
> 0) && dhd_bus_dev_pm_enabled(pub
)) {
16842 #endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
16846 int net_os_wake_unlock(struct net_device
*dev
)
16848 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16852 ret
= dhd_os_wake_unlock(&dhd
->pub
);
16856 int dhd_os_wd_wake_lock(dhd_pub_t
*pub
)
16858 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16859 unsigned long flags
;
16863 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
16864 #ifdef CONFIG_HAS_WAKELOCK
16865 /* if wakelock_wd_counter was never used : lock it at once */
16866 if (!dhd
->wakelock_wd_counter
)
16867 wake_lock(&dhd
->wl_wdwake
);
16869 dhd
->wakelock_wd_counter
++;
16870 ret
= dhd
->wakelock_wd_counter
;
16871 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
16876 int dhd_os_wd_wake_unlock(dhd_pub_t
*pub
)
16878 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16879 unsigned long flags
;
16883 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
16884 if (dhd
->wakelock_wd_counter
) {
16885 dhd
->wakelock_wd_counter
= 0;
16886 #ifdef CONFIG_HAS_WAKELOCK
16887 wake_unlock(&dhd
->wl_wdwake
);
16890 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
16895 #ifdef BCMPCIE_OOB_HOST_WAKE
16897 dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t
*pub
, int val
)
16899 #ifdef CONFIG_HAS_WAKELOCK
16900 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16903 wake_lock_timeout(&dhd
->wl_intrwake
, msecs_to_jiffies(val
));
16905 #endif /* CONFIG_HAS_WAKELOCK */
16909 dhd_os_oob_irq_wake_unlock(dhd_pub_t
*pub
)
16911 #ifdef CONFIG_HAS_WAKELOCK
16912 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16915 /* if wl_intrwake is active, unlock it */
16916 if (wake_lock_active(&dhd
->wl_intrwake
)) {
16917 wake_unlock(&dhd
->wl_intrwake
);
16920 #endif /* CONFIG_HAS_WAKELOCK */
16922 #endif /* BCMPCIE_OOB_HOST_WAKE */
16924 #ifdef DHD_USE_SCAN_WAKELOCK
16926 dhd_os_scan_wake_lock_timeout(dhd_pub_t
*pub
, int val
)
16928 #ifdef CONFIG_HAS_WAKELOCK
16929 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16932 wake_lock_timeout(&dhd
->wl_scanwake
, msecs_to_jiffies(val
));
16934 #endif /* CONFIG_HAS_WAKELOCK */
16938 dhd_os_scan_wake_unlock(dhd_pub_t
*pub
)
16940 #ifdef CONFIG_HAS_WAKELOCK
16941 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16944 /* if wl_scanwake is active, unlock it */
16945 if (wake_lock_active(&dhd
->wl_scanwake
)) {
16946 wake_unlock(&dhd
->wl_scanwake
);
16949 #endif /* CONFIG_HAS_WAKELOCK */
16951 #endif /* DHD_USE_SCAN_WAKELOCK */
16953 /* waive wakelocks for operations such as IOVARs in suspend function, must be closed
16954 * by a paired function call to dhd_wakelock_restore. returns current wakelock counter
16956 int dhd_os_wake_lock_waive(dhd_pub_t
*pub
)
16958 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16959 unsigned long flags
;
16962 if (dhd
&& (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
16963 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
16965 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
16966 if (dhd
->waive_wakelock
== FALSE
) {
16967 #ifdef DHD_TRACE_WAKE_LOCK
16968 if (trace_wklock_onoff
) {
16969 STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK
);
16971 #endif /* DHD_TRACE_WAKE_LOCK */
16972 /* record current lock status */
16973 dhd
->wakelock_before_waive
= dhd
->wakelock_counter
;
16974 dhd
->waive_wakelock
= TRUE
;
16976 ret
= dhd
->wakelock_wd_counter
;
16977 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
16982 int dhd_os_wake_lock_restore(dhd_pub_t
*pub
)
16984 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16985 unsigned long flags
;
16990 if ((dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
) == 0)
16993 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
16995 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
16996 if (!dhd
->waive_wakelock
)
16999 dhd
->waive_wakelock
= FALSE
;
17000 /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
17001 * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
17002 * the lock in between, do the same by calling wake_unlock or pm_relax
17004 #ifdef DHD_TRACE_WAKE_LOCK
17005 if (trace_wklock_onoff
) {
17006 STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK
);
17008 #endif /* DHD_TRACE_WAKE_LOCK */
17010 if (dhd
->wakelock_before_waive
== 0 && dhd
->wakelock_counter
> 0) {
17011 #ifdef CONFIG_HAS_WAKELOCK
17012 wake_lock(&dhd
->wl_wifi
);
17013 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17014 dhd_bus_dev_pm_stay_awake(&dhd
->pub
);
17016 } else if (dhd
->wakelock_before_waive
> 0 && dhd
->wakelock_counter
== 0) {
17017 #ifdef CONFIG_HAS_WAKELOCK
17018 wake_unlock(&dhd
->wl_wifi
);
17019 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17020 dhd_bus_dev_pm_relax(&dhd
->pub
);
17023 dhd
->wakelock_before_waive
= 0;
17025 ret
= dhd
->wakelock_wd_counter
;
17026 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
17030 void dhd_os_wake_lock_init(struct dhd_info
*dhd
)
17032 DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__
));
17033 dhd
->wakelock_counter
= 0;
17034 dhd
->wakelock_rx_timeout_enable
= 0;
17035 dhd
->wakelock_ctrl_timeout_enable
= 0;
17036 #ifdef CONFIG_HAS_WAKELOCK
17037 // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry
17038 wake_lock_init(&dhd
->wl_rxwake
, WAKE_LOCK_SUSPEND
, "wlan_rx_wake");
17039 wake_lock_init(&dhd
->wl_ctrlwake
, WAKE_LOCK_SUSPEND
, "wlan_ctrl_wake");
17040 wake_lock_init(&dhd
->wl_evtwake
, WAKE_LOCK_SUSPEND
, "wlan_evt_wake");
17041 wake_lock_init(&dhd
->wl_pmwake
, WAKE_LOCK_SUSPEND
, "wlan_pm_wake");
17042 wake_lock_init(&dhd
->wl_txflwake
, WAKE_LOCK_SUSPEND
, "wlan_txfl_wake");
17043 #ifdef BCMPCIE_OOB_HOST_WAKE
17044 wake_lock_init(&dhd
->wl_intrwake
, WAKE_LOCK_SUSPEND
, "wlan_oob_irq_wake");
17045 #endif /* BCMPCIE_OOB_HOST_WAKE */
17046 #ifdef DHD_USE_SCAN_WAKELOCK
17047 wake_lock_init(&dhd
->wl_scanwake
, WAKE_LOCK_SUSPEND
, "wlan_scan_wake");
17048 #endif /* DHD_USE_SCAN_WAKELOCK */
17049 #endif /* CONFIG_HAS_WAKELOCK */
17050 #ifdef DHD_TRACE_WAKE_LOCK
17051 dhd_wk_lock_trace_init(dhd
);
17052 #endif /* DHD_TRACE_WAKE_LOCK */
/* Tear down every wakelock registered by dhd_os_wake_lock_init() and reset
 * the counters; also frees the trace hash when tracing is enabled.
 * NOTE(review): opening/closing braces dropped by extraction; reconstructed.
 */
void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
{
	DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
#ifdef CONFIG_HAS_WAKELOCK
	dhd->wakelock_counter = 0;
	dhd->wakelock_rx_timeout_enable = 0;
	dhd->wakelock_ctrl_timeout_enable = 0;
	// terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry
	wake_lock_destroy(&dhd->wl_rxwake);
	wake_lock_destroy(&dhd->wl_ctrlwake);
	wake_lock_destroy(&dhd->wl_evtwake);
	wake_lock_destroy(&dhd->wl_pmwake);
	wake_lock_destroy(&dhd->wl_txflwake);
#ifdef BCMPCIE_OOB_HOST_WAKE
	wake_lock_destroy(&dhd->wl_intrwake);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	wake_lock_destroy(&dhd->wl_scanwake);
#endif /* DHD_USE_SCAN_WAKELOCK */
#ifdef DHD_TRACE_WAKE_LOCK
	dhd_wk_lock_trace_deinit(dhd);
#endif /* DHD_TRACE_WAKE_LOCK */
#endif /* CONFIG_HAS_WAKELOCK */
}
17080 bool dhd_os_check_if_up(dhd_pub_t
*pub
)
17087 /* function to collect firmware, chip id and chip version info */
17088 void dhd_set_version_info(dhd_pub_t
*dhdp
, char *fw
)
17092 i
= snprintf(info_string
, sizeof(info_string
),
17093 " Driver: %s\n Firmware: %s\n CLM: %s ", EPI_VERSION_STR
, fw
, clm_version
);
17094 printf("%s\n", info_string
);
17099 i
= snprintf(&info_string
[i
], sizeof(info_string
) - i
,
17100 "\n Chip: %x Rev %x", dhd_conf_get_chip(dhdp
),
17101 dhd_conf_get_chiprev(dhdp
));
17104 int dhd_ioctl_entry_local(struct net_device
*net
, wl_ioctl_t
*ioc
, int cmd
)
17108 dhd_info_t
*dhd
= NULL
;
17110 if (!net
|| !DEV_PRIV(net
)) {
17111 DHD_ERROR(("%s invalid parameter\n", __FUNCTION__
));
17115 dhd
= DHD_DEV_INFO(net
);
17119 ifidx
= dhd_net2idx(dhd
, net
);
17120 if (ifidx
== DHD_BAD_IF
) {
17121 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__
));
17125 DHD_OS_WAKE_LOCK(&dhd
->pub
);
17126 DHD_PERIM_LOCK(&dhd
->pub
);
17128 ret
= dhd_wl_ioctl(&dhd
->pub
, ifidx
, ioc
, ioc
->buf
, ioc
->len
);
17129 dhd_check_hang(net
, &dhd
->pub
, ret
);
17131 DHD_PERIM_UNLOCK(&dhd
->pub
);
17132 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
17137 bool dhd_os_check_hang(dhd_pub_t
*dhdp
, int ifidx
, int ret
)
17139 struct net_device
*net
;
17141 net
= dhd_idx2net(dhdp
, ifidx
);
17143 DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__
, ifidx
));
17147 return dhd_check_hang(net
, dhdp
, ret
);
17150 /* Return instance */
17151 int dhd_get_instance(dhd_pub_t
*dhdp
)
17153 return dhdp
->info
->unit
;
17157 #ifdef PROP_TXSTATUS
17159 void dhd_wlfc_plat_init(void *dhd
)
17161 #ifdef USE_DYNAMIC_F2_BLKSIZE
17162 dhdsdio_func_blocksize((dhd_pub_t
*)dhd
, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY
);
17163 #endif /* USE_DYNAMIC_F2_BLKSIZE */
/* Platform hook for proptxstatus teardown: restore the default SDIO F2 block
 * size when dynamic sizing is enabled.
 */
void dhd_wlfc_plat_deinit(void *dhd)
{
#ifdef USE_DYNAMIC_F2_BLKSIZE
	dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
	return;
}
17175 bool dhd_wlfc_skip_fc(void * dhdp
, uint8 idx
)
17177 #ifdef SKIP_WLFC_ON_CONCURRENT
17180 struct net_device
* net
= dhd_idx2net((dhd_pub_t
*)dhdp
, idx
);
17182 /* enable flow control in vsdb mode */
17183 return !(wl_cfg80211_is_concurrent_mode(net
));
17185 return TRUE
; /* skip flow control */
17186 #endif /* WL_CFG80211 */
17190 #endif /* SKIP_WLFC_ON_CONCURRENT */
17193 #endif /* PROP_TXSTATUS */
17196 #include <linux/debugfs.h>
17198 typedef struct dhd_dbgfs
{
17199 struct dentry
*debugfs_dir
;
17200 struct dentry
*debugfs_mem
;
17205 dhd_dbgfs_t g_dbgfs
;
17207 extern uint32
dhd_readregl(void *bp
, uint32 addr
);
17208 extern uint32
dhd_writeregl(void *bp
, uint32 addr
, uint32 data
);
17211 dhd_dbg_state_open(struct inode
*inode
, struct file
*file
)
17213 file
->private_data
= inode
->i_private
;
17218 dhd_dbg_state_read(struct file
*file
, char __user
*ubuf
,
17219 size_t count
, loff_t
*ppos
)
17223 loff_t pos
= *ppos
;
17228 if (pos
>= g_dbgfs
.size
|| !count
)
17230 if (count
> g_dbgfs
.size
- pos
)
17231 count
= g_dbgfs
.size
- pos
;
17233 /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
17234 tmp
= dhd_readregl(g_dbgfs
.dhdp
->bus
, file
->f_pos
& (~3));
17236 ret
= copy_to_user(ubuf
, &tmp
, 4);
17241 *ppos
= pos
+ count
;
17249 dhd_debugfs_write(struct file
*file
, const char __user
*ubuf
, size_t count
, loff_t
*ppos
)
17251 loff_t pos
= *ppos
;
17257 if (pos
>= g_dbgfs
.size
|| !count
)
17259 if (count
> g_dbgfs
.size
- pos
)
17260 count
= g_dbgfs
.size
- pos
;
17262 ret
= copy_from_user(&buf
, ubuf
, sizeof(uint32
));
17266 /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
17267 dhd_writeregl(g_dbgfs
.dhdp
->bus
, file
->f_pos
& (~3), buf
);
17274 dhd_debugfs_lseek(struct file
*file
, loff_t off
, int whence
)
17283 pos
= file
->f_pos
+ off
;
17286 pos
= g_dbgfs
.size
- off
;
17288 return (pos
< 0 || pos
> g_dbgfs
.size
) ? -EINVAL
: (file
->f_pos
= pos
);
17291 static const struct file_operations dhd_dbg_state_ops
= {
17292 .read
= dhd_dbg_state_read
,
17293 .write
= dhd_debugfs_write
,
17294 .open
= dhd_dbg_state_open
,
17295 .llseek
= dhd_debugfs_lseek
17298 static void dhd_dbgfs_create(void)
17300 if (g_dbgfs
.debugfs_dir
) {
17301 g_dbgfs
.debugfs_mem
= debugfs_create_file("mem", 0644, g_dbgfs
.debugfs_dir
,
17302 NULL
, &dhd_dbg_state_ops
);
17306 void dhd_dbgfs_init(dhd_pub_t
*dhdp
)
17308 g_dbgfs
.dhdp
= dhdp
;
17309 g_dbgfs
.size
= 0x20000000; /* Allow access to various cores regs */
17311 g_dbgfs
.debugfs_dir
= debugfs_create_dir("dhd", 0);
17312 if (IS_ERR(g_dbgfs
.debugfs_dir
)) {
17313 g_dbgfs
.debugfs_dir
= NULL
;
17317 dhd_dbgfs_create();
17322 void dhd_dbgfs_remove(void)
17324 debugfs_remove(g_dbgfs
.debugfs_mem
);
17325 debugfs_remove(g_dbgfs
.debugfs_dir
);
17327 bzero((unsigned char *) &g_dbgfs
, sizeof(g_dbgfs
));
17329 #endif /* BCMDBGFS */
17331 #ifdef WLMEDIA_HTSF
17334 void dhd_htsf_addtxts(dhd_pub_t
*dhdp
, void *pktbuf
)
17336 dhd_info_t
*dhd
= (dhd_info_t
*)(dhdp
->info
);
17337 struct sk_buff
*skb
;
17339 uint16 dport
= 0, oldmagic
= 0xACAC;
17343 /* timestamp packet */
17345 p1
= (char*) PKTDATA(dhdp
->osh
, pktbuf
);
17347 if (PKTLEN(dhdp
->osh
, pktbuf
) > HTSF_MINLEN
) {
17348 /* memcpy(&proto, p1+26, 4); */
17349 memcpy(&dport
, p1
+40, 2);
17350 /* proto = ((ntoh32(proto))>> 16) & 0xFF; */
17351 dport
= ntoh16(dport
);
17354 /* timestamp only if icmp or udb iperf with port 5555 */
17355 /* if (proto == 17 && dport == tsport) { */
17356 if (dport
>= tsport
&& dport
<= tsport
+ 20) {
17358 skb
= (struct sk_buff
*) pktbuf
;
17360 htsf
= dhd_get_htsf(dhd
, 0);
17361 memset(skb
->data
+ 44, 0, 2); /* clear checksum */
17362 memcpy(skb
->data
+82, &oldmagic
, 2);
17363 memcpy(skb
->data
+84, &htsf
, 4);
17365 memset(&ts
, 0, sizeof(htsfts_t
));
17366 ts
.magic
= HTSFMAGIC
;
17367 ts
.prio
= PKTPRIO(pktbuf
);
17368 ts
.seqnum
= htsf_seqnum
++;
17369 ts
.c10
= get_cycles();
17371 ts
.endmagic
= HTSFENDMAGIC
;
17373 memcpy(skb
->data
+ HTSF_HOSTOFFSET
, &ts
, sizeof(ts
));
17377 static void dhd_dump_htsfhisto(histo_t
*his
, char *s
)
17379 int pktcnt
= 0, curval
= 0, i
;
17380 for (i
= 0; i
< (NUMBIN
-2); i
++) {
17382 printf("%d ", his
->bin
[i
]);
17383 pktcnt
+= his
->bin
[i
];
17385 printf(" max: %d TotPkt: %d neg: %d [%s]\n", his
->bin
[NUMBIN
-2], pktcnt
,
17386 his
->bin
[NUMBIN
-1], s
);
17390 void sorttobin(int value
, histo_t
*histo
)
17395 histo
->bin
[NUMBIN
-1]++;
17398 if (value
> histo
->bin
[NUMBIN
-2]) /* store the max value */
17399 histo
->bin
[NUMBIN
-2] = value
;
17401 for (i
= 0; i
< (NUMBIN
-2); i
++) {
17402 binval
+= 500; /* 500m s bins */
17403 if (value
<= binval
) {
17408 histo
->bin
[NUMBIN
-3]++;
17412 void dhd_htsf_addrxts(dhd_pub_t
*dhdp
, void *pktbuf
)
17414 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
17415 struct sk_buff
*skb
;
17418 int d1
, d2
, d3
, end2end
;
17422 skb
= PKTTONATIVE(dhdp
->osh
, pktbuf
);
17423 p1
= (char*)PKTDATA(dhdp
->osh
, pktbuf
);
17425 if (PKTLEN(osh
, pktbuf
) > HTSF_MINLEN
) {
17426 memcpy(&old_magic
, p1
+78, 2);
17427 htsf_ts
= (htsfts_t
*) (p1
+ HTSF_HOSTOFFSET
- 4);
17432 if (htsf_ts
->magic
== HTSFMAGIC
) {
17433 htsf_ts
->tE0
= dhd_get_htsf(dhd
, 0);
17434 htsf_ts
->cE0
= get_cycles();
17437 if (old_magic
== 0xACAC) {
17440 htsf
= dhd_get_htsf(dhd
, 0);
17441 memcpy(skb
->data
+92, &htsf
, sizeof(uint32
));
17443 memcpy(&ts
[tsidx
].t1
, skb
->data
+80, 16);
17445 d1
= ts
[tsidx
].t2
- ts
[tsidx
].t1
;
17446 d2
= ts
[tsidx
].t3
- ts
[tsidx
].t2
;
17447 d3
= ts
[tsidx
].t4
- ts
[tsidx
].t3
;
17448 end2end
= ts
[tsidx
].t4
- ts
[tsidx
].t1
;
17450 sorttobin(d1
, &vi_d1
);
17451 sorttobin(d2
, &vi_d2
);
17452 sorttobin(d3
, &vi_d3
);
17453 sorttobin(end2end
, &vi_d4
);
17455 if (end2end
> 0 && end2end
> maxdelay
) {
17456 maxdelay
= end2end
;
17457 maxdelaypktno
= tspktcnt
;
17458 memcpy(&maxdelayts
, &ts
[tsidx
], 16);
17460 if (++tsidx
>= TSMAX
)
17465 uint32
dhd_get_htsf(dhd_info_t
*dhd
, int ifidx
)
17467 uint32 htsf
= 0, cur_cycle
, delta
, delta_us
;
17468 uint32 factor
, baseval
, baseval2
;
17474 if (cur_cycle
> dhd
->htsf
.last_cycle
)
17475 delta
= cur_cycle
- dhd
->htsf
.last_cycle
;
17477 delta
= cur_cycle
+ (0xFFFFFFFF - dhd
->htsf
.last_cycle
);
17480 delta
= delta
>> 4;
17482 if (dhd
->htsf
.coef
) {
17483 /* times ten to get the first digit */
17484 factor
= (dhd
->htsf
.coef
*10 + dhd
->htsf
.coefdec1
);
17485 baseval
= (delta
*10)/factor
;
17486 baseval2
= (delta
*10)/(factor
+1);
17487 delta_us
= (baseval
- (((baseval
- baseval2
) * dhd
->htsf
.coefdec2
)) / 10);
17488 htsf
= (delta_us
<< 4) + dhd
->htsf
.last_tsf
+ HTSF_BUS_DELAY
;
17490 DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
17496 static void dhd_dump_latency(void)
17499 int d1
, d2
, d3
, d4
, d5
;
17501 printf("T1 T2 T3 T4 d1 d2 t4-t1 i \n");
17502 for (i
= 0; i
< TSMAX
; i
++) {
17503 d1
= ts
[i
].t2
- ts
[i
].t1
;
17504 d2
= ts
[i
].t3
- ts
[i
].t2
;
17505 d3
= ts
[i
].t4
- ts
[i
].t3
;
17506 d4
= ts
[i
].t4
- ts
[i
].t1
;
17507 d5
= ts
[max
].t4
-ts
[max
].t1
;
17508 if (d4
> d5
&& d4
> 0) {
17511 printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n",
17512 ts
[i
].t1
, ts
[i
].t2
, ts
[i
].t3
, ts
[i
].t4
,
17513 d1
, d2
, d3
, d4
, i
);
17516 printf("current idx = %d \n", tsidx
);
17518 printf("Highest latency %d pkt no.%d total=%d\n", maxdelay
, maxdelaypktno
, tspktcnt
);
17519 printf("%08X %08X %08X %08X \t%d %d %d %d\n",
17520 maxdelayts
.t1
, maxdelayts
.t2
, maxdelayts
.t3
, maxdelayts
.t4
,
17521 maxdelayts
.t2
- maxdelayts
.t1
,
17522 maxdelayts
.t3
- maxdelayts
.t2
,
17523 maxdelayts
.t4
- maxdelayts
.t3
,
17524 maxdelayts
.t4
- maxdelayts
.t1
);
17529 dhd_ioctl_htsf_get(dhd_info_t
*dhd
, int ifidx
)
17540 memset(&tsf_buf
, 0, sizeof(tsf_buf
));
17542 s1
= dhd_get_htsf(dhd
, 0);
17543 ret
= dhd_iovar(&dhd
->pub
, ifidx
, "tsf", NULL
, 0, buf
, sizeof(buf
), FALSE
);
17546 DHD_ERROR(("%s: tsf is not supported by device\n",
17547 dhd_ifname(&dhd
->pub
, ifidx
)));
17548 return -EOPNOTSUPP
;
17552 s2
= dhd_get_htsf(dhd
, 0);
17554 memcpy(&tsf_buf
, buf
, sizeof(tsf_buf
));
17555 printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
17556 tsf_buf
.high
, tsf_buf
.low
, s2
, dhd
->htsf
.coef
, dhd
->htsf
.coefdec1
,
17557 dhd
->htsf
.coefdec2
, s2
-tsf_buf
.low
);
17558 printf("lasttsf=%08X lastcycle=%08X\n", dhd
->htsf
.last_tsf
, dhd
->htsf
.last_cycle
);
17562 void htsf_update(dhd_info_t
*dhd
, void *data
)
17564 static ulong cur_cycle
= 0, prev_cycle
= 0;
17565 uint32 htsf
, tsf_delta
= 0;
17566 uint32 hfactor
= 0, cyc_delta
, dec1
= 0, dec2
, dec3
, tmp
;
17570 /* cycles_t in inlcude/mips/timex.h */
17574 prev_cycle
= cur_cycle
;
17577 if (cur_cycle
> prev_cycle
)
17578 cyc_delta
= cur_cycle
- prev_cycle
;
17582 cyc_delta
= cur_cycle
+ (0xFFFFFFFF - prev_cycle
);
17586 printf(" tsf update ata point er is null \n");
17588 memcpy(&prev_tsf
, &cur_tsf
, sizeof(tsf_t
));
17589 memcpy(&cur_tsf
, data
, sizeof(tsf_t
));
17591 if (cur_tsf
.low
== 0) {
17592 DHD_INFO((" ---- 0 TSF, do not update, return\n"));
17596 if (cur_tsf
.low
> prev_tsf
.low
)
17597 tsf_delta
= (cur_tsf
.low
- prev_tsf
.low
);
17599 DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
17600 cur_tsf
.low
, prev_tsf
.low
));
17601 if (cur_tsf
.high
> prev_tsf
.high
) {
17602 tsf_delta
= cur_tsf
.low
+ (0xFFFFFFFF - prev_tsf
.low
);
17603 DHD_INFO((" ---- Wrap around tsf coutner adjusted TSF=%08X\n", tsf_delta
));
17605 return; /* do not update */
17610 hfactor
= cyc_delta
/ tsf_delta
;
17611 tmp
= (cyc_delta
- (hfactor
* tsf_delta
))*10;
17612 dec1
= tmp
/tsf_delta
;
17613 dec2
= ((tmp
- dec1
*tsf_delta
)*10) / tsf_delta
;
17614 tmp
= (tmp
- (dec1
*tsf_delta
))*10;
17615 dec3
= ((tmp
- dec2
*tsf_delta
)*10) / tsf_delta
;
17633 htsf
= ((cyc_delta
* 10) / (hfactor
*10+dec1
)) + prev_tsf
.low
;
17634 dhd
->htsf
.coef
= hfactor
;
17635 dhd
->htsf
.last_cycle
= cur_cycle
;
17636 dhd
->htsf
.last_tsf
= cur_tsf
.low
;
17637 dhd
->htsf
.coefdec1
= dec1
;
17638 dhd
->htsf
.coefdec2
= dec2
;
17640 htsf
= prev_tsf
.low
;
17644 #endif /* WLMEDIA_HTSF */
#ifdef CUSTOM_SET_CPUCORE
/* Pin the DPC and RXF threads either to their dedicated cores (set==TRUE) or
 * back to the primary core, retrying up to MAX_RETRY_SET_CPUCORE times when a
 * target CPU is temporarily offline. Only acts in VHT80 channel state.
 * NOTE(review): set/retry branch structure and sleeps between retries were
 * dropped by extraction; reconstructed from upstream — verify against the
 * original tree.
 */
void dhd_set_cpucore(dhd_pub_t *dhd, int set)
{
	int e_dpc = 0, e_rxf = 0, retry_set = 0;

	if (!(dhd->chan_isvht80)) {
		DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
		return;
	}

	if (DPC_CPUCORE) {
		do {
			if (set == TRUE) {
				e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
					cpumask_of(DPC_CPUCORE));
			} else {
				e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
					cpumask_of(PRIMARY_CPUCORE));
			}
			if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
				DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
				return;
			}
			if (e_dpc < 0)
				OSL_SLEEP(1);
		} while (e_dpc < 0);
	}
	if (RXF_CPUCORE) {
		retry_set = 0;
		do {
			if (set == TRUE) {
				e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
					cpumask_of(RXF_CPUCORE));
			} else {
				e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
					cpumask_of(PRIMARY_CPUCORE));
			}
			if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
				DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
				return;
			}
			if (e_rxf < 0)
				OSL_SLEEP(1);
		} while (e_rxf < 0);
	}
#ifdef DHD_OF_SUPPORT
	interrupt_set_cpucore(set, DPC_CPUCORE, PRIMARY_CPUCORE);
#endif /* DHD_OF_SUPPORT */
	DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));

	return;
}
#endif /* CUSTOM_SET_CPUCORE */
17699 #ifdef DHD_MCAST_REGEN
17700 /* Get interface specific ap_isolate configuration */
17701 int dhd_get_mcast_regen_bss_enable(dhd_pub_t
*dhdp
, uint32 idx
)
17703 dhd_info_t
*dhd
= dhdp
->info
;
17706 ASSERT(idx
< DHD_MAX_IFS
);
17708 ifp
= dhd
->iflist
[idx
];
17710 return ifp
->mcast_regen_bss_enable
;
17713 /* Set interface specific mcast_regen configuration */
17714 int dhd_set_mcast_regen_bss_enable(dhd_pub_t
*dhdp
, uint32 idx
, int val
)
17716 dhd_info_t
*dhd
= dhdp
->info
;
17719 ASSERT(idx
< DHD_MAX_IFS
);
17721 ifp
= dhd
->iflist
[idx
];
17723 ifp
->mcast_regen_bss_enable
= val
;
17725 /* Disable rx_pkt_chain feature for interface, if mcast_regen feature
17728 dhd_update_rx_pkt_chainable_state(dhdp
, idx
);
17731 #endif /* DHD_MCAST_REGEN */
17733 /* Get interface specific ap_isolate configuration */
17734 int dhd_get_ap_isolate(dhd_pub_t
*dhdp
, uint32 idx
)
17736 dhd_info_t
*dhd
= dhdp
->info
;
17739 ASSERT(idx
< DHD_MAX_IFS
);
17741 ifp
= dhd
->iflist
[idx
];
17743 return ifp
->ap_isolate
;
17746 /* Set interface specific ap_isolate configuration */
17747 int dhd_set_ap_isolate(dhd_pub_t
*dhdp
, uint32 idx
, int val
)
17749 dhd_info_t
*dhd
= dhdp
->info
;
17752 ASSERT(idx
< DHD_MAX_IFS
);
17754 ifp
= dhd
->iflist
[idx
];
17757 ifp
->ap_isolate
= val
;
17762 #ifdef DHD_FW_COREDUMP
17763 #if defined(CONFIG_X86)
17764 #define MEMDUMPINFO_LIVE "/installmedia/.memdump.info"
17765 #define MEMDUMPINFO_INST "/data/.memdump.info"
17766 #endif /* CONFIG_X86 && OEM_ANDROID */
17768 #ifdef CUSTOMER_HW4_DEBUG
17769 #define MEMDUMPINFO PLATFORM_PATH".memdump.info"
17770 #elif defined(CUSTOMER_HW2)
17771 #define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
17772 #elif (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
17773 #define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
17775 #define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
17776 #endif /* CUSTOMER_HW4_DEBUG */
17778 void dhd_get_memdump_info(dhd_pub_t
*dhd
)
17780 struct file
*fp
= NULL
;
17781 uint32 mem_val
= DUMP_MEMFILE_MAX
;
17783 char *filepath
= MEMDUMPINFO
;
17785 /* Read memdump info from the file */
17786 fp
= filp_open(filepath
, O_RDONLY
, 0);
17788 DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__
, filepath
));
17789 #if defined(CONFIG_X86)
17790 /* Check if it is Live Brix Image */
17791 if (strcmp(filepath
, MEMDUMPINFO_LIVE
) != 0) {
17794 /* Try if it is Installed Brix Image */
17795 filepath
= MEMDUMPINFO_INST
;
17796 DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__
, filepath
));
17797 fp
= filp_open(filepath
, O_RDONLY
, 0);
17799 DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__
, filepath
));
17802 #else /* Non Brix Android platform */
17804 #endif /* CONFIG_X86 && OEM_ANDROID */
17807 /* Handle success case */
17808 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
17809 ret
= kernel_read(fp
, (char *)&mem_val
, 4, NULL
);
17811 ret
= kernel_read(fp
, 0, (char *)&mem_val
, 4);
17814 DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__
, ret
));
17815 filp_close(fp
, NULL
);
17819 mem_val
= bcm_atoi((char *)&mem_val
);
17821 filp_close(fp
, NULL
);
17823 #ifdef DHD_INIT_DEFAULT_MEMDUMP
17824 if (mem_val
== 0 || mem_val
== DUMP_MEMFILE_MAX
)
17825 mem_val
= DUMP_MEMFILE_BUGON
;
17826 #endif /* DHD_INIT_DEFAULT_MEMDUMP */
17829 #ifdef CUSTOMER_HW4_DEBUG
17830 dhd
->memdump_enabled
= (mem_val
< DUMP_MEMFILE_MAX
) ? mem_val
: DUMP_DISABLED
;
17832 dhd
->memdump_enabled
= (mem_val
< DUMP_MEMFILE_MAX
) ? mem_val
: DUMP_MEMFILE
;
17833 #endif /* CUSTOMER_HW4_DEBUG */
17835 DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__
, dhd
->memdump_enabled
));
17838 void dhd_schedule_memdump(dhd_pub_t
*dhdp
, uint8
*buf
, uint32 size
)
17840 dhd_dump_t
*dump
= NULL
;
17841 dump
= (dhd_dump_t
*)MALLOC(dhdp
->osh
, sizeof(dhd_dump_t
));
17842 if (dump
== NULL
) {
17843 DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__
));
17847 dump
->bufsize
= size
;
17849 #if defined(CONFIG_ARM64)
17850 DHD_ERROR(("%s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n", __FUNCTION__
,
17851 (uint64
)buf
, (uint64
)__virt_to_phys((ulong
)buf
), size
));
17852 #elif defined(__ARM_ARCH_7A__)
17853 DHD_ERROR(("%s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n", __FUNCTION__
,
17854 (uint32
)buf
, (uint32
)__virt_to_phys((ulong
)buf
), size
));
17855 #endif /* __ARM_ARCH_7A__ */
17856 if (dhdp
->memdump_enabled
== DUMP_MEMONLY
) {
17860 #ifdef DHD_LOG_DUMP
17861 if (dhdp
->memdump_type
!= DUMP_TYPE_BY_SYSDUMP
) {
17862 dhd_schedule_log_dump(dhdp
);
17864 #endif /* DHD_LOG_DUMP */
17865 dhd_deferred_schedule_work(dhdp
->info
->dhd_deferred_wq
, (void *)dump
,
17866 DHD_WQ_WORK_SOC_RAM_DUMP
, dhd_mem_dump
, DHD_WQ_WORK_PRIORITY_HIGH
);
17870 dhd_mem_dump(void *handle
, void *event_info
, u8 event
)
17872 dhd_info_t
*dhd
= handle
;
17873 dhd_dump_t
*dump
= event_info
;
17876 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__
));
17881 DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__
));
17885 if (write_dump_to_file(&dhd
->pub
, dump
->buf
, dump
->bufsize
, "mem_dump")) {
17886 DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__
));
17887 dhd
->pub
.memdump_success
= FALSE
;
17890 if (dhd
->pub
.memdump_enabled
== DUMP_MEMFILE_BUGON
&&
17891 #ifdef DHD_LOG_DUMP
17892 dhd
->pub
.memdump_type
!= DUMP_TYPE_BY_SYSDUMP
&&
17893 #endif /* DHD_LOG_DUMP */
17894 #ifdef DHD_DEBUG_UART
17895 dhd
->pub
.memdump_success
== TRUE
&&
17896 #endif /* DHD_DEBUG_UART */
17897 dhd
->pub
.memdump_type
!= DUMP_TYPE_CFG_VENDOR_TRIGGERED
) {
17899 #ifdef SHOW_LOGTRACE
17900 /* Wait till event_log_dispatcher_work finishes */
17901 cancel_work_sync(&dhd
->event_log_dispatcher_work
);
17902 #endif /* SHOW_LOGTRACE */
17906 MFREE(dhd
->pub
.osh
, dump
, sizeof(dhd_dump_t
));
17908 #endif /* DHD_FW_COREDUMP */
17910 #ifdef DHD_SSSR_DUMP
17913 dhd_sssr_dump(void *handle
, void *event_info
, u8 event
)
17915 dhd_info_t
*dhd
= handle
;
17918 char before_sr_dump
[128];
17919 char after_sr_dump
[128];
17922 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__
));
17928 for (i
= 0; i
< MAX_NUM_D11CORES
; i
++) {
17929 /* Init file name */
17930 memset(before_sr_dump
, 0, sizeof(before_sr_dump
));
17931 memset(after_sr_dump
, 0, sizeof(after_sr_dump
));
17933 snprintf(before_sr_dump
, sizeof(before_sr_dump
), "%s_%d_%s",
17934 "sssr_core", i
, "before_SR");
17935 snprintf(after_sr_dump
, sizeof(after_sr_dump
), "%s_%d_%s",
17936 "sssr_core", i
, "after_SR");
17938 if (dhdp
->sssr_d11_before
[i
] && dhdp
->sssr_d11_outofreset
[i
]) {
17939 if (write_dump_to_file(dhdp
, (uint8
*)dhdp
->sssr_d11_before
[i
],
17940 dhdp
->sssr_reg_info
.mac_regs
[i
].sr_size
, before_sr_dump
)) {
17941 DHD_ERROR(("%s: writing SSSR MAIN dump before to the file failed\n",
17945 if (dhdp
->sssr_d11_after
[i
] && dhdp
->sssr_d11_outofreset
[i
]) {
17946 if (write_dump_to_file(dhdp
, (uint8
*)dhdp
->sssr_d11_after
[i
],
17947 dhdp
->sssr_reg_info
.mac_regs
[i
].sr_size
, after_sr_dump
)) {
17948 DHD_ERROR(("%s: writing SSSR AUX dump after to the file failed\n",
17954 if (dhdp
->sssr_vasip_buf_before
) {
17955 if (write_dump_to_file(dhdp
, (uint8
*)dhdp
->sssr_vasip_buf_before
,
17956 dhdp
->sssr_reg_info
.vasip_regs
.vasip_sr_size
, "sssr_vasip_before_SR")) {
17957 DHD_ERROR(("%s: writing SSSR VASIP dump before to the file failed\n",
17962 if (dhdp
->sssr_vasip_buf_after
) {
17963 if (write_dump_to_file(dhdp
, (uint8
*)dhdp
->sssr_vasip_buf_after
,
17964 dhdp
->sssr_reg_info
.vasip_regs
.vasip_sr_size
, "sssr_vasip_after_SR")) {
17965 DHD_ERROR(("%s: writing SSSR VASIP dump after to the file failed\n",
17973 dhd_schedule_sssr_dump(dhd_pub_t
*dhdp
)
17975 dhd_deferred_schedule_work(dhdp
->info
->dhd_deferred_wq
, NULL
,
17976 DHD_WQ_WORK_SSSR_DUMP
, dhd_sssr_dump
, DHD_WQ_WORK_PRIORITY_HIGH
);
17978 #endif /* DHD_SSSR_DUMP */
17980 #ifdef DHD_LOG_DUMP
17982 dhd_log_dump(void *handle
, void *event_info
, u8 event
)
17984 dhd_info_t
*dhd
= handle
;
17987 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__
));
17991 if (do_dhd_log_dump(&dhd
->pub
)) {
17992 DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__
));
17997 void dhd_schedule_log_dump(dhd_pub_t
*dhdp
)
17999 dhd_deferred_schedule_work(dhdp
->info
->dhd_deferred_wq
,
18000 (void*)NULL
, DHD_WQ_WORK_DHD_LOG_DUMP
,
18001 dhd_log_dump
, DHD_WQ_WORK_PRIORITY_HIGH
);
18005 do_dhd_log_dump(dhd_pub_t
*dhdp
)
18007 int ret
= 0, i
= 0;
18008 struct file
*fp
= NULL
;
18009 mm_segment_t old_fs
;
18011 unsigned int wr_size
= 0;
18012 char dump_path
[128];
18013 struct timeval curtime
;
18015 unsigned long flags
= 0;
18016 struct dhd_log_dump_buf
*dld_buf
= &g_dld_buf
[0];
18018 const char *pre_strs
=
18019 "-------------------- General log ---------------------------\n";
18021 const char *post_strs
=
18022 "-------------------- Specific log --------------------------\n";
18028 DHD_ERROR(("DHD version: %s\n", dhd_version
));
18029 DHD_ERROR(("F/W version: %s\n", fw_version
));
18031 /* change to KERNEL_DS address limit */
18035 /* Init file name */
18036 memset(dump_path
, 0, sizeof(dump_path
));
18037 do_gettimeofday(&curtime
);
18038 snprintf(dump_path
, sizeof(dump_path
), "%s_%ld.%ld",
18039 DHD_COMMON_DUMP_PATH
"debug_dump",
18040 (unsigned long)curtime
.tv_sec
, (unsigned long)curtime
.tv_usec
);
18041 file_mode
= O_CREAT
| O_WRONLY
| O_SYNC
;
18043 DHD_ERROR(("debug_dump_path = %s\n", dump_path
));
18044 fp
= filp_open(dump_path
, file_mode
, 0664);
18047 DHD_ERROR(("open file error, err = %d\n", ret
));
18051 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
18052 ret
= kernel_write(fp
, pre_strs
, strlen(pre_strs
), &pos
);
18054 ret
= vfs_write(fp
, pre_strs
, strlen(pre_strs
), &pos
);
18057 DHD_ERROR(("write file error, err = %d\n", ret
));
18062 unsigned int buf_size
= (unsigned int)(dld_buf
->max
-
18063 (unsigned long)dld_buf
->buffer
);
18064 if (dld_buf
->wraparound
) {
18065 wr_size
= buf_size
;
18067 if (!dld_buf
->buffer
[0]) { /* print log if buf is empty. */
18068 DHD_ERROR_EX(("Buffer is empty. No event/log.\n"));
18070 wr_size
= (unsigned int)(dld_buf
->present
- dld_buf
->front
);
18073 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
18074 ret
= kernel_write(fp
, dld_buf
->buffer
, wr_size
, &pos
);
18076 ret
= vfs_write(fp
, dld_buf
->buffer
, wr_size
, &pos
);
18079 DHD_ERROR(("write file error, err = %d\n", ret
));
18083 /* re-init dhd_log_dump_buf structure */
18084 spin_lock_irqsave(&dld_buf
->lock
, flags
);
18085 dld_buf
->wraparound
= 0;
18086 dld_buf
->present
= dld_buf
->front
;
18087 dld_buf
->remain
= buf_size
;
18088 bzero(dld_buf
->buffer
, buf_size
);
18089 spin_unlock_irqrestore(&dld_buf
->lock
, flags
);
18092 if (++i
< DLD_BUFFER_NUM
) {
18093 dld_buf
= &g_dld_buf
[i
];
18098 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
18099 ret
= kernel_write(fp
, post_strs
, strlen(post_strs
), &pos
);
18101 ret
= vfs_write(fp
, post_strs
, strlen(post_strs
), &pos
);
18104 DHD_ERROR(("write file error, err = %d\n", ret
));
18110 #if defined(STAT_REPORT)
18111 if (!IS_ERR(fp
) && ret
>= 0) {
18112 wl_stat_report_file_save(dhdp
, fp
);
18114 #endif /* STAT_REPORT */
18117 filp_close(fp
, NULL
);
18123 #endif /* DHD_LOG_DUMP */
18126 #ifdef BCMASSERT_LOG
18127 #ifdef CUSTOMER_HW4_DEBUG
18128 #define ASSERTINFO PLATFORM_PATH".assert.info"
18129 #elif defined(CUSTOMER_HW2)
18130 #define ASSERTINFO "/data/misc/wifi/.assert.info"
18132 #define ASSERTINFO "/installmedia/.assert.info"
18133 #endif /* CUSTOMER_HW4_DEBUG */
18134 void dhd_get_assert_info(dhd_pub_t
*dhd
)
18136 struct file
*fp
= NULL
;
18137 char *filepath
= ASSERTINFO
;
18141 * Read assert info from the file
18142 * 0: Trigger Kernel crash by panic()
18143 * 1: Print out the logs and don't trigger Kernel panic. (default)
18144 * 2: Trigger Kernel crash by BUG()
18145 * File doesn't exist: Keep default value (1).
18147 fp
= filp_open(filepath
, O_RDONLY
, 0);
18149 DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__
, filepath
));
18151 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
18152 ssize_t ret
= kernel_read(fp
, (char *)&mem_val
, 4, NULL
);
18154 int ret
= kernel_read(fp
, 0, (char *)&mem_val
, 4);
18157 DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__
, ret
));
18159 mem_val
= bcm_atoi((char *)&mem_val
);
18160 DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__
, mem_val
));
18162 filp_close(fp
, NULL
);
18164 #ifdef CUSTOMER_HW4_DEBUG
18165 /* By default. set to 1, No Kernel Panic */
18166 g_assert_type
= (mem_val
>= 0) ? mem_val
: 1;
18168 /* By default. set to 0, Kernel Panic */
18169 g_assert_type
= (mem_val
>= 0) ? mem_val
: 0;
18172 #endif /* BCMASSERT_LOG */
18175 * This call is to get the memdump size so that,
18176 * halutil can alloc that much buffer in user space.
18179 dhd_os_socram_dump(struct net_device
*dev
, uint32
*dump_size
)
18182 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
18183 dhd_pub_t
*dhdp
= &dhd
->pub
;
18185 if (dhdp
->busstate
== DHD_BUS_DOWN
) {
18186 DHD_ERROR(("%s: bus is down\n", __FUNCTION__
));
18190 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp
)) {
18191 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
18192 __FUNCTION__
, dhdp
->busstate
, dhdp
->dhd_bus_busy_state
));
18196 ret
= dhd_common_socram_dump(dhdp
);
18197 if (ret
== BCME_OK
) {
18198 *dump_size
= dhdp
->soc_ram_length
;
18204 * This is to get the actual memdup after getting the memdump size
18207 dhd_os_get_socram_dump(struct net_device
*dev
, char **buf
, uint32
*size
)
18211 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
18212 dhd_pub_t
*dhdp
= &dhd
->pub
;
18216 if (dhdp
->soc_ram
) {
18217 if (orig_len
>= dhdp
->soc_ram_length
) {
18218 memcpy(*buf
, dhdp
->soc_ram
, dhdp
->soc_ram_length
);
18219 /* reset the storage of dump */
18220 memset(dhdp
->soc_ram
, 0, dhdp
->soc_ram_length
);
18221 *size
= dhdp
->soc_ram_length
;
18223 ret
= BCME_BUFTOOSHORT
;
18224 DHD_ERROR(("The length of the buffer is too short"
18225 " to save the memory dump with %d\n", dhdp
->soc_ram_length
));
18228 DHD_ERROR(("socram_dump is not ready to get\n"));
18229 ret
= BCME_NOTREADY
;
18235 dhd_os_get_version(struct net_device
*dev
, bool dhd_ver
, char **buf
, uint32 size
)
18240 return BCME_BADARG
;
18242 fw_str
= strstr(info_string
, "Firmware: ");
18243 if (fw_str
== NULL
) {
18247 memset(*buf
, 0, size
);
18249 strncpy(*buf
, dhd_version
, size
- 1);
18251 strncpy(*buf
, fw_str
, size
- 1);
18257 /* Returns interface specific WMF configuration */
18258 dhd_wmf_t
* dhd_wmf_conf(dhd_pub_t
*dhdp
, uint32 idx
)
18260 dhd_info_t
*dhd
= dhdp
->info
;
18263 ASSERT(idx
< DHD_MAX_IFS
);
18265 ifp
= dhd
->iflist
[idx
];
18268 #endif /* DHD_WMF */
18270 #if defined(TRAFFIC_MGMT_DWM)
18271 void traffic_mgmt_pkt_set_prio(dhd_pub_t
*dhdp
, void * pktbuf
)
18273 struct ether_header
*eh
;
18274 struct ethervlan_header
*evh
;
18275 uint8
*pktdata
, *ip_body
;
18279 pktdata
= (uint8
*)PKTDATA(dhdp
->osh
, pktbuf
);
18280 eh
= (struct ether_header
*) pktdata
;
18283 if (dhdp
->dhd_tm_dwm_tbl
.dhd_dwm_enabled
) {
18284 if (eh
->ether_type
== hton16(ETHER_TYPE_8021Q
)) {
18285 evh
= (struct ethervlan_header
*)eh
;
18286 if ((evh
->ether_type
== hton16(ETHER_TYPE_IP
)) ||
18287 (evh
->ether_type
== hton16(ETHER_TYPE_IPV6
))) {
18288 ip_body
= pktdata
+ sizeof(struct ethervlan_header
);
18290 } else if ((eh
->ether_type
== hton16(ETHER_TYPE_IP
)) ||
18291 (eh
->ether_type
== hton16(ETHER_TYPE_IPV6
))) {
18292 ip_body
= pktdata
+ sizeof(struct ether_header
);
18295 tos_tc
= IP_TOS46(ip_body
);
18296 dscp
= tos_tc
>> IPV4_TOS_DSCP_SHIFT
;
18299 if (dscp
< DHD_DWM_TBL_SIZE
) {
18300 dwm_filter
= dhdp
->dhd_tm_dwm_tbl
.dhd_dwm_tbl
[dscp
];
18301 if (DHD_TRF_MGMT_DWM_IS_FILTER_SET(dwm_filter
)) {
18302 PKTSETPRIO(pktbuf
, DHD_TRF_MGMT_DWM_PRIO(dwm_filter
));
18309 bool dhd_sta_associated(dhd_pub_t
*dhdp
, uint32 bssidx
, uint8
*mac
)
18311 return dhd_find_sta(dhdp
, bssidx
, mac
) ? TRUE
: FALSE
;
18314 #ifdef DHD_L2_FILTER
18316 dhd_get_ifp_arp_table_handle(dhd_pub_t
*dhdp
, uint32 bssidx
)
18318 dhd_info_t
*dhd
= dhdp
->info
;
18321 ASSERT(bssidx
< DHD_MAX_IFS
);
18323 ifp
= dhd
->iflist
[bssidx
];
18324 return ifp
->phnd_arp_table
;
18327 int dhd_get_parp_status(dhd_pub_t
*dhdp
, uint32 idx
)
18329 dhd_info_t
*dhd
= dhdp
->info
;
18332 ASSERT(idx
< DHD_MAX_IFS
);
18334 ifp
= dhd
->iflist
[idx
];
18337 return ifp
->parp_enable
;
18342 /* Set interface specific proxy arp configuration */
18343 int dhd_set_parp_status(dhd_pub_t
*dhdp
, uint32 idx
, int val
)
18345 dhd_info_t
*dhd
= dhdp
->info
;
18347 ASSERT(idx
< DHD_MAX_IFS
);
18348 ifp
= dhd
->iflist
[idx
];
18353 /* At present all 3 variables are being
18356 ifp
->parp_enable
= val
;
18357 ifp
->parp_discard
= val
;
18358 ifp
->parp_allnode
= val
;
18360 /* Flush ARP entries when disabled */
18361 if (val
== FALSE
) {
18362 bcm_l2_filter_arp_table_update(dhdp
->osh
, ifp
->phnd_arp_table
, TRUE
, NULL
,
18363 FALSE
, dhdp
->tickcnt
);
18368 bool dhd_parp_discard_is_enabled(dhd_pub_t
*dhdp
, uint32 idx
)
18370 dhd_info_t
*dhd
= dhdp
->info
;
18373 ASSERT(idx
< DHD_MAX_IFS
);
18375 ifp
= dhd
->iflist
[idx
];
18378 return ifp
->parp_discard
;
18382 dhd_parp_allnode_is_enabled(dhd_pub_t
*dhdp
, uint32 idx
)
18384 dhd_info_t
*dhd
= dhdp
->info
;
18387 ASSERT(idx
< DHD_MAX_IFS
);
18389 ifp
= dhd
->iflist
[idx
];
18393 return ifp
->parp_allnode
;
18396 int dhd_get_dhcp_unicast_status(dhd_pub_t
*dhdp
, uint32 idx
)
18398 dhd_info_t
*dhd
= dhdp
->info
;
18401 ASSERT(idx
< DHD_MAX_IFS
);
18403 ifp
= dhd
->iflist
[idx
];
18407 return ifp
->dhcp_unicast
;
18410 int dhd_set_dhcp_unicast_status(dhd_pub_t
*dhdp
, uint32 idx
, int val
)
18412 dhd_info_t
*dhd
= dhdp
->info
;
18414 ASSERT(idx
< DHD_MAX_IFS
);
18415 ifp
= dhd
->iflist
[idx
];
18419 ifp
->dhcp_unicast
= val
;
18423 int dhd_get_block_ping_status(dhd_pub_t
*dhdp
, uint32 idx
)
18425 dhd_info_t
*dhd
= dhdp
->info
;
18428 ASSERT(idx
< DHD_MAX_IFS
);
18430 ifp
= dhd
->iflist
[idx
];
18434 return ifp
->block_ping
;
18437 int dhd_set_block_ping_status(dhd_pub_t
*dhdp
, uint32 idx
, int val
)
18439 dhd_info_t
*dhd
= dhdp
->info
;
18441 ASSERT(idx
< DHD_MAX_IFS
);
18442 ifp
= dhd
->iflist
[idx
];
18446 ifp
->block_ping
= val
;
18447 /* Disable rx_pkt_chain feature for interface if block_ping option is
18450 dhd_update_rx_pkt_chainable_state(dhdp
, idx
);
18454 int dhd_get_grat_arp_status(dhd_pub_t
*dhdp
, uint32 idx
)
18456 dhd_info_t
*dhd
= dhdp
->info
;
18459 ASSERT(idx
< DHD_MAX_IFS
);
18461 ifp
= dhd
->iflist
[idx
];
18465 return ifp
->grat_arp
;
18468 int dhd_set_grat_arp_status(dhd_pub_t
*dhdp
, uint32 idx
, int val
)
18470 dhd_info_t
*dhd
= dhdp
->info
;
18472 ASSERT(idx
< DHD_MAX_IFS
);
18473 ifp
= dhd
->iflist
[idx
];
18477 ifp
->grat_arp
= val
;
18481 #endif /* DHD_L2_FILTER */
18484 #if defined(SET_RPS_CPUS)
18485 int dhd_rps_cpus_enable(struct net_device
*net
, int enable
)
18487 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
18490 char * RPS_CPU_SETBUF
;
18492 ifidx
= dhd_net2idx(dhd
, net
);
18493 if (ifidx
== DHD_BAD_IF
) {
18494 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__
));
18498 if (ifidx
== PRIMARY_INF
) {
18499 if (dhd
->pub
.op_mode
== DHD_FLAG_IBSS_MODE
) {
18500 DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__
));
18501 RPS_CPU_SETBUF
= RPS_CPUS_MASK_IBSS
;
18503 DHD_INFO(("%s : set for BSS.\n", __FUNCTION__
));
18504 RPS_CPU_SETBUF
= RPS_CPUS_MASK
;
18506 } else if (ifidx
== VIRTUAL_INF
) {
18507 DHD_INFO(("%s : set for P2P.\n", __FUNCTION__
));
18508 RPS_CPU_SETBUF
= RPS_CPUS_MASK_P2P
;
18510 DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__
, ifidx
));
18514 ifp
= dhd
->iflist
[ifidx
];
18517 DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__
, RPS_CPU_SETBUF
));
18518 custom_rps_map_set(ifp
->net
->_rx
, RPS_CPU_SETBUF
, strlen(RPS_CPU_SETBUF
));
18520 custom_rps_map_clear(ifp
->net
->_rx
);
18523 DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__
));
18529 int custom_rps_map_set(struct netdev_rx_queue
*queue
, char *buf
, size_t len
)
18531 struct rps_map
*old_map
, *map
;
18532 cpumask_var_t mask
;
18534 static DEFINE_SPINLOCK(rps_map_lock
);
18536 DHD_INFO(("%s : Entered.\n", __FUNCTION__
));
18538 if (!alloc_cpumask_var(&mask
, GFP_KERNEL
)) {
18539 DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__
));
18543 err
= bitmap_parse(buf
, len
, cpumask_bits(mask
), nr_cpumask_bits
);
18545 free_cpumask_var(mask
);
18546 DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__
));
18550 map
= kzalloc(max_t(unsigned int,
18551 RPS_MAP_SIZE(cpumask_weight(mask
)), L1_CACHE_BYTES
),
18554 free_cpumask_var(mask
);
18555 DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__
));
18560 for_each_cpu(cpu
, mask
) {
18561 map
->cpus
[i
++] = cpu
;
18569 free_cpumask_var(mask
);
18570 DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__
));
18574 spin_lock(&rps_map_lock
);
18575 old_map
= rcu_dereference_protected(queue
->rps_map
,
18576 lockdep_is_held(&rps_map_lock
));
18577 rcu_assign_pointer(queue
->rps_map
, map
);
18578 spin_unlock(&rps_map_lock
);
18581 static_key_slow_inc(&rps_needed
);
18584 kfree_rcu(old_map
, rcu
);
18585 static_key_slow_dec(&rps_needed
);
18587 free_cpumask_var(mask
);
18589 DHD_INFO(("%s : Done. mapping cpu nummber : %d\n", __FUNCTION__
, map
->len
));
18593 void custom_rps_map_clear(struct netdev_rx_queue
*queue
)
18595 struct rps_map
*map
;
18597 DHD_INFO(("%s : Entered.\n", __FUNCTION__
));
18599 map
= rcu_dereference_protected(queue
->rps_map
, 1);
18601 RCU_INIT_POINTER(queue
->rps_map
, NULL
);
18602 kfree_rcu(map
, rcu
);
18603 DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__
));
18610 #ifdef DHD_DEBUG_PAGEALLOC
18613 dhd_page_corrupt_cb(void *handle
, void *addr_corrupt
, size_t len
)
18615 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handle
;
18617 DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
18618 __FUNCTION__
, addr_corrupt
, (uint32
)len
));
18620 DHD_OS_WAKE_LOCK(dhdp
);
18621 prhex("Page Corruption:", addr_corrupt
, len
);
18622 dhd_dump_to_kernelog(dhdp
);
18623 #if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
18624 /* Load the dongle side dump to host memory and then BUG_ON() */
18625 dhdp
->memdump_enabled
= DUMP_MEMONLY
;
18626 dhdp
->memdump_type
= DUMP_TYPE_MEMORY_CORRUPTION
;
18627 dhd_bus_mem_dump(dhdp
);
18628 #endif /* BCMPCIE && DHD_FW_COREDUMP */
18629 DHD_OS_WAKE_UNLOCK(dhdp
);
18631 EXPORT_SYMBOL(dhd_page_corrupt_cb
);
18632 #endif /* DHD_DEBUG_PAGEALLOC */
18634 #if defined(BCMPCIE) && defined(DHD_PKTID_AUDIT_ENABLED)
18636 dhd_pktid_error_handler(dhd_pub_t
*dhdp
)
18638 DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__
));
18639 DHD_OS_WAKE_LOCK(dhdp
);
18640 dhd_dump_to_kernelog(dhdp
);
18641 #ifdef DHD_FW_COREDUMP
18642 /* Load the dongle side dump to host memory */
18643 if (dhdp
->memdump_enabled
== DUMP_DISABLED
) {
18644 dhdp
->memdump_enabled
= DUMP_MEMFILE
;
18646 dhdp
->memdump_type
= DUMP_TYPE_PKTID_AUDIT_FAILURE
;
18647 dhd_bus_mem_dump(dhdp
);
18648 #endif /* DHD_FW_COREDUMP */
18649 dhdp
->hang_reason
= HANG_REASON_PCIE_PKTID_ERROR
;
18650 dhd_os_check_hang(dhdp
, 0, -EREMOTEIO
);
18651 DHD_OS_WAKE_UNLOCK(dhdp
);
18653 #endif /* BCMPCIE && DHD_PKTID_AUDIT_ENABLED */
18655 struct net_device
*
18656 dhd_linux_get_primary_netdev(dhd_pub_t
*dhdp
)
18658 dhd_info_t
*dhd
= dhdp
->info
;
18660 if (dhd
->iflist
[0] && dhd
->iflist
[0]->net
)
18661 return dhd
->iflist
[0]->net
;
18666 #ifdef DHD_DHCP_DUMP
18668 dhd_dhcp_dump(char *ifname
, uint8
*pktdata
, bool tx
)
18670 struct bootp_fmt
*b
= (struct bootp_fmt
*) &pktdata
[ETHER_HDR_LEN
];
18671 struct iphdr
*h
= &b
->ip_header
;
18672 uint8
*ptr
, *opt
, *end
= (uint8
*) b
+ ntohs(b
->ip_header
.tot_len
);
18673 int dhcp_type
= 0, len
, opt_len
;
18675 /* check IP header */
18676 if (h
->ihl
!= 5 || h
->version
!= 4 || h
->protocol
!= IPPROTO_UDP
) {
18680 /* check UDP port for bootp (67, 68) */
18681 if (b
->udp_header
.source
!= htons(67) && b
->udp_header
.source
!= htons(68) &&
18682 b
->udp_header
.dest
!= htons(67) && b
->udp_header
.dest
!= htons(68)) {
18686 /* check header length */
18687 if (ntohs(h
->tot_len
) < ntohs(b
->udp_header
.len
) + sizeof(struct iphdr
)) {
18691 len
= ntohs(b
->udp_header
.len
) - sizeof(struct udphdr
);
18693 - (sizeof(*b
) - sizeof(struct iphdr
) - sizeof(struct udphdr
) - sizeof(b
->options
));
18695 /* parse bootp options */
18696 if (opt_len
>= 4 && !memcmp(b
->options
, bootp_magic_cookie
, 4)) {
18697 ptr
= &b
->options
[4];
18698 while (ptr
< end
&& *ptr
!= 0xff) {
18707 /* 53 is dhcp type */
18710 dhcp_type
= opt
[2];
18711 DHD_ERROR(("DHCP[%s] - %s [%s] [%s]\n",
18712 ifname
, dhcp_types
[dhcp_type
],
18713 tx
? "TX" : "RX", dhcp_ops
[b
->op
]));
18720 #endif /* DHD_DHCP_DUMP */
18722 #ifdef DHD_ICMP_DUMP
18724 dhd_icmp_dump(char *ifname
, uint8
*pktdata
, bool tx
)
18726 uint8
*pkt
= (uint8
*)&pktdata
[ETHER_HDR_LEN
];
18727 struct iphdr
*iph
= (struct iphdr
*)pkt
;
18728 struct icmphdr
*icmph
;
18730 /* check IP header */
18731 if (iph
->ihl
!= 5 || iph
->version
!= 4 || iph
->protocol
!= IP_PROT_ICMP
) {
18735 icmph
= (struct icmphdr
*)((uint8
*)pkt
+ sizeof(struct iphdr
));
18736 if (icmph
->type
== ICMP_ECHO
) {
18737 DHD_ERROR(("PING REQUEST[%s] [%s] : SEQNUM=%d\n",
18738 ifname
, tx
? "TX" : "RX", ntoh16(icmph
->un
.echo
.sequence
)));
18739 } else if (icmph
->type
== ICMP_ECHOREPLY
) {
18740 DHD_ERROR(("PING REPLY[%s] [%s] : SEQNUM=%d\n",
18741 ifname
, tx
? "TX" : "RX", ntoh16(icmph
->un
.echo
.sequence
)));
18743 DHD_ERROR(("ICMP [%s] [%s] : TYPE=%d, CODE=%d\n",
18744 ifname
, tx
? "TX" : "RX", icmph
->type
, icmph
->code
));
18747 #endif /* DHD_ICMP_DUMP */
18749 #ifdef SHOW_LOGTRACE
18751 dhd_get_read_buf_ptr(dhd_pub_t
*dhd_pub
, trace_buf_info_t
*trace_buf_info
)
18753 dhd_dbg_ring_status_t ring_status
;
18756 rlen
= dhd_dbg_ring_pull_single(dhd_pub
, FW_VERBOSE_RING_ID
, trace_buf_info
->buf
,
18757 TRACE_LOG_BUF_MAX_SIZE
, TRUE
);
18758 trace_buf_info
->size
= rlen
;
18759 trace_buf_info
->availability
= NEXT_BUF_NOT_AVAIL
;
18761 trace_buf_info
->availability
= BUF_NOT_AVAILABLE
;
18764 dhd_dbg_get_ring_status(dhd_pub
, FW_VERBOSE_RING_ID
, &ring_status
);
18765 if (ring_status
.written_bytes
!= ring_status
.read_bytes
) {
18766 trace_buf_info
->availability
= NEXT_BUF_AVAIL
;
18769 #endif /* SHOW_LOGTRACE */
18772 dhd_fw_download_status(dhd_pub_t
* dhd_pub
)
18774 return dhd_pub
->fw_download_done
;
18778 dhd_create_to_notifier_skt(void)
18780 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
18781 /* Kernel 3.7 onwards this API accepts only 3 arguments. */
18782 /* Kernel version 3.6 is a special case which accepts 4 arguments */
18783 nl_to_event_sk
= netlink_kernel_create(&init_net
, BCM_NL_USER
, &g_cfg
);
18784 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
18785 /* Kernel version 3.5 and below use this old API format */
18786 nl_to_event_sk
= netlink_kernel_create(&init_net
, BCM_NL_USER
, 0,
18787 dhd_process_daemon_msg
, NULL
, THIS_MODULE
);
18789 nl_to_event_sk
= netlink_kernel_create(&init_net
, BCM_NL_USER
, THIS_MODULE
, &g_cfg
);
18790 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */
18791 if (!nl_to_event_sk
)
18793 printf("Error creating socket.\n");
18796 DHD_INFO(("nl_to socket created successfully...\n"));
18801 dhd_destroy_to_notifier_skt(void)
18803 DHD_INFO(("Destroying nl_to socket\n"));
18804 if (nl_to_event_sk
) {
18805 netlink_kernel_release(nl_to_event_sk
);
18810 dhd_recv_msg_from_daemon(struct sk_buff
*skb
)
18812 struct nlmsghdr
*nlh
;
18813 bcm_to_info_t
*cmd
;
18815 nlh
= (struct nlmsghdr
*)skb
->data
;
18816 cmd
= (bcm_to_info_t
*)nlmsg_data(nlh
);
18817 if ((cmd
->magic
== BCM_TO_MAGIC
) && (cmd
->reason
== REASON_DAEMON_STARTED
)) {
18818 sender_pid
= ((struct nlmsghdr
*)(skb
->data
))->nlmsg_pid
;
18819 DHD_INFO(("DHD Daemon Started\n"));
18824 dhd_send_msg_to_daemon(struct sk_buff
*skb
, void *data
, int size
)
18826 struct nlmsghdr
*nlh
;
18827 struct sk_buff
*skb_out
;
18829 if (!nl_to_event_sk
) {
18830 DHD_INFO(("No socket available\n"));
18834 BCM_REFERENCE(skb
);
18835 if (sender_pid
== 0) {
18836 DHD_INFO(("Invalid PID 0\n"));
18840 if ((skb_out
= nlmsg_new(size
, 0)) == NULL
) {
18841 DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__
));
18844 nlh
= nlmsg_put(skb_out
, 0, 0, NLMSG_DONE
, size
, 0);
18845 NETLINK_CB(skb_out
).dst_group
= 0; /* Unicast */
18846 memcpy(nlmsg_data(nlh
), (char *)data
, size
);
18848 if ((nlmsg_unicast(nl_to_event_sk
, skb_out
, sender_pid
)) < 0) {
18849 DHD_INFO(("Error sending message\n"));
18856 dhd_process_daemon_msg(struct sk_buff
*skb
)
18858 bcm_to_info_t to_info
;
18860 to_info
.magic
= BCM_TO_MAGIC
;
18861 to_info
.reason
= REASON_DAEMON_STARTED
;
18862 to_info
.trap
= NO_TRAP
;
18864 dhd_recv_msg_from_daemon(skb
);
18865 dhd_send_msg_to_daemon(skb
, &to_info
, sizeof(to_info
));
18868 #ifdef REPORT_FATAL_TIMEOUTS
18870 dhd_send_trap_to_fw(dhd_pub_t
* pub
, int reason
, int trap
)
18872 bcm_to_info_t to_info
;
18874 to_info
.magic
= BCM_TO_MAGIC
;
18875 to_info
.reason
= reason
;
18876 to_info
.trap
= trap
;
18878 DHD_ERROR(("Sending Event reason:%d trap:%d\n", reason
, trap
));
18879 dhd_send_msg_to_daemon(NULL
, (void *)&to_info
, sizeof(bcm_to_info_t
));
18883 dhd_send_trap_to_fw_for_timeout(dhd_pub_t
* pub
, timeout_reasons_t reason
)
18886 int trap
= NO_TRAP
;
18888 case DHD_REASON_COMMAND_TO
:
18889 to_reason
= REASON_COMMAND_TO
;
18892 case DHD_REASON_JOIN_TO
:
18893 to_reason
= REASON_JOIN_TO
;
18895 case DHD_REASON_SCAN_TO
:
18896 to_reason
= REASON_SCAN_TO
;
18898 case DHD_REASON_OQS_TO
:
18899 to_reason
= REASON_OQS_TO
;
18903 to_reason
= REASON_UNKOWN
;
18905 dhd_send_trap_to_fw(pub
, to_reason
, trap
);
18907 #endif /* REPORT_FATAL_TIMEOUTS */
18909 #ifdef DHD_LOG_DUMP
18911 dhd_log_dump_init(dhd_pub_t
*dhd
)
18913 struct dhd_log_dump_buf
*dld_buf
;
18915 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
18916 int prealloc_idx
= DHD_PREALLOC_DHD_LOG_DUMP_BUF
;
18917 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
18919 for (i
= 0; i
< DLD_BUFFER_NUM
; i
++) {
18920 dld_buf
= &g_dld_buf
[i
];
18921 spin_lock_init(&dld_buf
->lock
);
18922 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
18923 dld_buf
->buffer
= DHD_OS_PREALLOC(dhd
, prealloc_idx
++, dld_buf_size
[i
]);
18925 dld_buf
->buffer
= kmalloc(dld_buf_size
[i
], GFP_KERNEL
);
18926 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
18928 if (!dld_buf
->buffer
) {
18929 dld_buf
->buffer
= kmalloc(dld_buf_size
[i
], GFP_KERNEL
);
18930 DHD_ERROR(("Try to allocate memory using kmalloc().\n"));
18932 if (!dld_buf
->buffer
) {
18933 DHD_ERROR(("Failed to allocate memory for dld_buf[%d].\n", i
));
18938 dld_buf
->wraparound
= 0;
18939 dld_buf
->max
= (unsigned long)dld_buf
->buffer
+ dld_buf_size
[i
];
18940 dld_buf
->present
= dld_buf
->front
= dld_buf
->buffer
;
18941 dld_buf
->remain
= dld_buf_size
[i
];
18942 dld_buf
->enable
= 1;
18947 for (i
= 0; i
< DLD_BUFFER_NUM
; i
++) {
18948 if (dld_buf
[i
].buffer
) {
18949 kfree(dld_buf
[i
].buffer
);
18955 dhd_log_dump_deinit(dhd_pub_t
*dhd
)
18957 struct dhd_log_dump_buf
*dld_buf
;
18960 for (i
= 0; i
< DLD_BUFFER_NUM
; i
++) {
18961 dld_buf
= &g_dld_buf
[i
];
18962 dld_buf
->enable
= 0;
18963 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
18964 DHD_OS_PREFREE(dhd
, dld_buf
->buffer
, dld_buf_size
[i
]);
18966 kfree(dld_buf
->buffer
);
18967 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
18972 dhd_log_dump_write(int type
, const char *fmt
, ...)
18975 char tmp_buf
[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
] = {0, };
18977 unsigned long flags
= 0;
18978 struct dhd_log_dump_buf
*dld_buf
= NULL
;
18982 case DLD_BUF_TYPE_GENERAL
:
18983 dld_buf
= &g_dld_buf
[type
];
18985 case DLD_BUF_TYPE_SPECIAL
:
18986 dld_buf
= &g_dld_buf
[type
];
18989 DHD_ERROR(("%s: Unknown DHD_LOG_DUMP_BUF_TYPE(%d).\n",
18990 __FUNCTION__
, type
));
18994 if (dld_buf
->enable
!= 1) {
18998 va_start(args
, fmt
);
19000 len
= vsnprintf(tmp_buf
, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
, fmt
, args
);
19001 /* Non ANSI C99 compliant returns -1,
19002 * ANSI compliant return len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
19008 if (len
>= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
) {
19009 len
= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
- 1;
19010 tmp_buf
[len
] = '\0';
19013 /* make a critical section to eliminate race conditions */
19014 spin_lock_irqsave(&dld_buf
->lock
, flags
);
19015 if (dld_buf
->remain
< len
) {
19016 dld_buf
->wraparound
= 1;
19017 dld_buf
->present
= dld_buf
->front
;
19018 dld_buf
->remain
= dld_buf_size
[type
];
19021 strncpy(dld_buf
->present
, tmp_buf
, len
);
19022 dld_buf
->remain
-= len
;
19023 dld_buf
->present
+= len
;
19024 spin_unlock_irqrestore(&dld_buf
->lock
, flags
);
19026 /* double check invalid memory operation */
19027 ASSERT((unsigned long)dld_buf
->present
<= dld_buf
->max
);
19032 dhd_log_dump_get_timestamp(void)
19034 static char buf
[16];
19036 unsigned long rem_nsec
;
19038 ts_nsec
= local_clock();
19039 rem_nsec
= do_div(ts_nsec
, 1000000000);
19040 snprintf(buf
, sizeof(buf
), "%5lu.%06lu",
19041 (unsigned long)ts_nsec
, rem_nsec
/ 1000);
19045 #endif /* DHD_LOG_DUMP */
19048 dhd_write_file(const char *filepath
, char *buf
, int buf_len
)
19050 struct file
*fp
= NULL
;
19051 mm_segment_t old_fs
;
19054 /* change to KERNEL_DS address limit */
19058 /* File is always created. */
19059 fp
= filp_open(filepath
, O_RDWR
| O_CREAT
, 0664);
19061 DHD_ERROR(("%s: Couldn't open file '%s' err %ld\n",
19062 __FUNCTION__
, filepath
, PTR_ERR(fp
)));
19065 if (fp
->f_mode
& FMODE_WRITE
) {
19066 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
19067 ret
= kernel_write(fp
, buf
, buf_len
, &fp
->f_pos
);
19069 ret
= vfs_write(fp
, buf
, buf_len
, &fp
->f_pos
);
19072 DHD_ERROR(("%s: Couldn't write file '%s'\n",
19073 __FUNCTION__
, filepath
));
19079 filp_close(fp
, NULL
);
19082 /* restore previous address limit */
19089 dhd_read_file(const char *filepath
, char *buf
, int buf_len
)
19091 struct file
*fp
= NULL
;
19092 mm_segment_t old_fs
;
19095 /* change to KERNEL_DS address limit */
19099 fp
= filp_open(filepath
, O_RDONLY
, 0);
19102 DHD_ERROR(("%s: File %s doesn't exist\n", __FUNCTION__
, filepath
));
19106 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
19107 ret
= kernel_read(fp
, buf
, buf_len
, NULL
);
19109 ret
= kernel_read(fp
, 0, buf
, buf_len
);
19111 filp_close(fp
, NULL
);
19113 /* restore previous address limit */
19116 /* Return the number of bytes read */
19118 /* Success to read */
19121 DHD_ERROR(("%s: Couldn't read the file %s, ret=%d\n",
19122 __FUNCTION__
, filepath
, ret
));
/*
 * Write @buf to @filepath and then read the file back to confirm the
 * write actually landed. @buf is clobbered by the verification read.
 * Returns the write error if the write failed, otherwise the result
 * of the verification read (0 on success).
 */
int
dhd_write_file_and_check(const char *filepath, char *buf, int buf_len)
{
	int ret = dhd_write_file(filepath, buf, buf_len);

	if (ret < 0) {
		return ret;
	}

	/* Read the file again and check if the file size is not zero */
	memset(buf, 0, buf_len);
	ret = dhd_read_file(filepath, buf, buf_len);

	return ret;
}
19147 #define DHD_LB_TXBOUND 64
19149 * Function that performs the TX processing on a given CPU
19152 dhd_lb_tx_process(dhd_info_t
*dhd
)
19154 struct sk_buff
*skb
;
19156 struct net_device
*net
;
19158 bool resched
= FALSE
;
19160 DHD_TRACE(("%s(): TX Processing \r\n", __FUNCTION__
));
19162 DHD_ERROR((" Null pointer DHD \r\n"));
19166 DHD_LB_STATS_PERCPU_ARR_INCR(dhd
->txp_percpu_run_cnt
);
19168 /* Base Loop to perform the actual Tx */
19170 skb
= skb_dequeue(&dhd
->tx_pend_queue
);
19172 DHD_TRACE(("Dequeued a Null Packet \r\n"));
19177 net
= DHD_LB_TX_PKTTAG_NETDEV((dhd_tx_lb_pkttag_fr_t
*)PKTTAG(skb
));
19178 ifidx
= DHD_LB_TX_PKTTAG_IFIDX((dhd_tx_lb_pkttag_fr_t
*)PKTTAG(skb
));
19180 BCM_REFERENCE(net
);
19181 DHD_TRACE(("Processing skb %p for net %p index %d \r\n", skb
,
19184 __dhd_sendpkt(&dhd
->pub
, ifidx
, skb
);
19186 if (cnt
>= DHD_LB_TXBOUND
) {
19193 DHD_INFO(("%s(): Processed %d packets \r\n", __FUNCTION__
, cnt
));
19199 dhd_lb_tx_handler(unsigned long data
)
19201 dhd_info_t
*dhd
= (dhd_info_t
*)data
;
19203 if (dhd_lb_tx_process(dhd
)) {
19204 dhd_tasklet_schedule(&dhd
->tx_tasklet
);
19208 #endif /* DHD_LB_TXP */
19210 /* ----------------------------------------------------------------------------
19211 * Infrastructure code for sysfs interface support for DHD
19213 * What is sysfs interface?
19214 * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt
19216 * Why sysfs interface?
19217 * This is the Linux standard way of changing/configuring Run Time parameters
19218 * for a driver. We can use this interface to control "linux" specific driver
19221 * -----------------------------------------------------------------------------
19224 #include <linux/sysfs.h>
19225 #include <linux/kobject.h>
19227 #if defined(DHD_TRACE_WAKE_LOCK)
19229 /* Function to show the history buffer */
19231 show_wklock_trace(struct dhd_info
*dev
, char *buf
)
19234 dhd_info_t
*dhd
= (dhd_info_t
*)dev
;
19239 dhd_wk_lock_stats_dump(&dhd
->pub
);
19243 /* Function to enable/disable wakelock trace */
19245 wklock_trace_onoff(struct dhd_info
*dev
, const char *buf
, size_t count
)
19247 unsigned long onoff
;
19248 unsigned long flags
;
19249 dhd_info_t
*dhd
= (dhd_info_t
*)dev
;
19251 onoff
= bcm_strtoul(buf
, NULL
, 10);
19252 if (onoff
!= 0 && onoff
!= 1) {
19256 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
19257 trace_wklock_onoff
= onoff
;
19258 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
19259 if (trace_wklock_onoff
) {
19260 printk("ENABLE WAKLOCK TRACE\n");
19262 printk("DISABLE WAKELOCK TRACE\n");
19265 return (ssize_t
)(onoff
+1);
19267 #endif /* DHD_TRACE_WAKE_LOCK */
19269 #if defined(DHD_LB_TXP)
19271 show_lbtxp(struct dhd_info
*dev
, char *buf
)
19274 unsigned long onoff
;
19275 dhd_info_t
*dhd
= (dhd_info_t
*)dev
;
19277 onoff
= atomic_read(&dhd
->lb_txp_active
);
19278 ret
= scnprintf(buf
, PAGE_SIZE
- 1, "%lu \n",
19284 lbtxp_onoff(struct dhd_info
*dev
, const char *buf
, size_t count
)
19286 unsigned long onoff
;
19287 dhd_info_t
*dhd
= (dhd_info_t
*)dev
;
19290 onoff
= bcm_strtoul(buf
, NULL
, 10);
19292 sscanf(buf
, "%lu", &onoff
);
19293 if (onoff
!= 0 && onoff
!= 1) {
19296 atomic_set(&dhd
->lb_txp_active
, onoff
);
19298 /* Since the scheme is changed clear the counters */
19299 for (i
= 0; i
< NR_CPUS
; i
++) {
19300 DHD_LB_STATS_CLR(dhd
->txp_percpu_run_cnt
[i
]);
19301 DHD_LB_STATS_CLR(dhd
->tx_start_percpu_run_cnt
[i
]);
19307 #endif /* DHD_LB_TXP */
19309 * Generic Attribute Structure for DHD.
19310 * If we have to add a new sysfs entry under /sys/bcm-dhd/, we have
19311 * to instantiate an object of type dhd_attr, populate it with
19312 * the required show/store functions (ex:- dhd_attr_cpumask_primary)
19313 * and add the object to default_attrs[] array, that gets registered
19314 * to the kobject of dhd (named bcm-dhd).
19318 struct attribute attr
;
19319 ssize_t(*show
)(struct dhd_info
*, char *);
19320 ssize_t(*store
)(struct dhd_info
*, const char *, size_t count
);
#if defined(DHD_TRACE_WAKE_LOCK)
/* /sys/bcm-dhd/wklock_trace: wake-lock trace dump / on-off control */
static struct dhd_attr dhd_attr_wklock =
	__ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff);
/* Fix: closing paren was missing in this #endif comment */
#endif /* defined(DHD_TRACE_WAKE_LOCK) */

#if defined(DHD_LB_TXP)
/* /sys/bcm-dhd/lbtxp: TX load-balancing on-off control */
static struct dhd_attr dhd_attr_lbtxp =
	__ATTR(lbtxp, 0660, show_lbtxp, lbtxp_onoff);
#endif /* DHD_LB_TXP */
19333 /* Attribute object that gets registered with "bcm-dhd" kobject tree */
19334 static struct attribute
*default_attrs
[] = {
19335 #if defined(DHD_TRACE_WAKE_LOCK)
19336 &dhd_attr_wklock
.attr
,
19337 #endif /* DHD_TRACE_WAKE_LOCK */
19338 #if defined(DHD_LB_TXP)
19339 &dhd_attr_lbtxp
.attr
,
19340 #endif /* DHD_LB_TXP */
/* Map the embedded kobject back to its containing dhd_info. */
#define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj)
/* Map a generic sysfs attribute to its wrapping dhd_attr. */
#define to_attr(a) container_of(a, struct dhd_attr, attr)
19348 * bcm-dhd kobject show function, the "attr" attribute specifices to which
19349 * node under "bcm-dhd" the show function is called.
19351 static ssize_t
dhd_show(struct kobject
*kobj
, struct attribute
*attr
, char *buf
)
19353 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
19354 #pragma GCC diagnostic push
19355 #pragma GCC diagnostic ignored "-Wcast-qual"
19357 dhd_info_t
*dhd
= to_dhd(kobj
);
19358 struct dhd_attr
*d_attr
= to_attr(attr
);
19359 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
19360 #pragma GCC diagnostic pop
19365 ret
= d_attr
->show(dhd
, buf
);
19373 * bcm-dhd kobject show function, the "attr" attribute specifices to which
19374 * node under "bcm-dhd" the store function is called.
19376 static ssize_t
dhd_store(struct kobject
*kobj
, struct attribute
*attr
,
19377 const char *buf
, size_t count
)
19379 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
19380 #pragma GCC diagnostic push
19381 #pragma GCC diagnostic ignored "-Wcast-qual"
19383 dhd_info_t
*dhd
= to_dhd(kobj
);
19384 struct dhd_attr
*d_attr
= to_attr(attr
);
19385 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
19386 #pragma GCC diagnostic pop
19391 ret
= d_attr
->store(dhd
, buf
, count
);
19399 static struct sysfs_ops dhd_sysfs_ops
= {
19401 .store
= dhd_store
,
19404 static struct kobj_type dhd_ktype
= {
19405 .sysfs_ops
= &dhd_sysfs_ops
,
19406 .default_attrs
= default_attrs
,
19409 /* Create a kobject and attach to sysfs interface */
19410 static int dhd_sysfs_init(dhd_info_t
*dhd
)
19415 DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__
));
19419 /* Initialize the kobject */
19420 ret
= kobject_init_and_add(&dhd
->dhd_kobj
, &dhd_ktype
, NULL
, "bcm-dhd");
19422 kobject_put(&dhd
->dhd_kobj
);
19423 DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__
));
19428 * We are always responsible for sending the uevent that the kobject
19429 * was added to the system.
19431 kobject_uevent(&dhd
->dhd_kobj
, KOBJ_ADD
);
19436 /* Done with the kobject and detach the sysfs interface */
19437 static void dhd_sysfs_exit(dhd_info_t
*dhd
)
19440 DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__
));
19444 /* Releae the kobject */
19445 if (dhd
->dhd_kobj
.state_initialized
)
19446 kobject_put(&dhd
->dhd_kobj
);
19449 #ifdef DHD_DEBUG_UART
19451 dhd_debug_uart_is_running(struct net_device
*dev
)
19453 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
19455 if (dhd
->duart_execute
) {
19463 dhd_debug_uart_exec_rd(void *handle
, void *event_info
, u8 event
)
19465 dhd_pub_t
*dhdp
= handle
;
19466 dhd_debug_uart_exec(dhdp
, "rd");
19470 dhd_debug_uart_exec(dhd_pub_t
*dhdp
, char *cmd
)
19474 char *argv
[] = {DHD_DEBUG_UART_EXEC_PATH
, cmd
, NULL
};
19475 char *envp
[] = {"HOME=/", "TERM=linux", "PATH=/sbin:/system/bin", NULL
};
19477 #ifdef DHD_FW_COREDUMP
19478 if (dhdp
->memdump_enabled
== DUMP_MEMFILE_BUGON
)
19481 if (dhdp
->hang_reason
== HANG_REASON_PCIE_LINK_DOWN
||
19482 #ifdef DHD_FW_COREDUMP
19483 dhdp
->memdump_success
== FALSE
||
19486 dhdp
->info
->duart_execute
= TRUE
;
19487 DHD_ERROR(("DHD: %s - execute %s %s\n",
19488 __FUNCTION__
, DHD_DEBUG_UART_EXEC_PATH
, cmd
));
19489 ret
= call_usermodehelper(argv
[0], argv
, envp
, UMH_WAIT_PROC
);
19490 DHD_ERROR(("DHD: %s - %s %s ret = %d\n",
19491 __FUNCTION__
, DHD_DEBUG_UART_EXEC_PATH
, cmd
, ret
));
19492 dhdp
->info
->duart_execute
= FALSE
;
19494 #ifdef DHD_LOG_DUMP
19495 if (dhdp
->memdump_type
!= DUMP_TYPE_BY_SYSDUMP
)
19503 #endif /* DHD_DEBUG_UART */
19505 #if defined(DHD_BLOB_EXISTENCE_CHECK)
19507 dhd_set_blob_support(dhd_pub_t
*dhdp
, char *fw_path
)
19510 char *filepath
= CONFIG_BCMDHD_CLM_PATH
;
19512 fp
= filp_open(filepath
, O_RDONLY
, 0);
19514 DHD_ERROR(("%s: ----- blob file dosen't exist -----\n", __FUNCTION__
));
19515 dhdp
->is_blob
= FALSE
;
19517 DHD_ERROR(("%s: ----- blob file exist -----\n", __FUNCTION__
));
19518 dhdp
->is_blob
= TRUE
;
19519 #if defined(CONCATE_BLOB)
19520 strncat(fw_path
, "_blob", strlen("_blob"));
19522 BCM_REFERENCE(fw_path
);
19523 #endif /* SKIP_CONCATE_BLOB */
19524 filp_close(fp
, NULL
);
19527 #endif /* DHD_BLOB_EXISTENCE_CHECK */
19529 #if defined(PCIE_FULL_DONGLE)
19530 /** test / loopback */
19532 dmaxfer_free_dmaaddr_handler(void *handle
, void *event_info
, u8 event
)
19534 dmaxref_mem_map_t
*dmmap
= (dmaxref_mem_map_t
*)event_info
;
19535 dhd_info_t
*dhd_info
= (dhd_info_t
*)handle
;
19536 dhd_pub_t
*dhdp
= &dhd_info
->pub
;
19538 if (event
!= DHD_WQ_WORK_DMA_LB_MEM_REL
) {
19539 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
19543 if ((dhd_info
== NULL
) || (dhdp
== NULL
)) {
19544 DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__
));
19548 if (dmmap
== NULL
) {
19549 DHD_ERROR(("%s: dmmap is null\n", __FUNCTION__
));
19552 dmaxfer_free_prev_dmaaddr(dhdp
, dmmap
);
19557 dhd_schedule_dmaxfer_free(dhd_pub_t
*dhdp
, dmaxref_mem_map_t
*dmmap
)
19559 dhd_info_t
*dhd_info
= dhdp
->info
;
19561 dhd_deferred_schedule_work(dhd_info
->dhd_deferred_wq
, (void *)dmmap
,
19562 DHD_WQ_WORK_DMA_LB_MEM_REL
, dmaxfer_free_dmaaddr_handler
, DHD_WQ_WORK_PRIORITY_LOW
);
19564 #endif /* PCIE_FULL_DONGLE */
19565 /* ---------------------------- End of sysfs implementation ------------------------------------- */
19566 #ifdef HOFFLOAD_MODULES
19568 dhd_linux_get_modfw_address(dhd_pub_t
*dhd
)
19570 const char* module_name
= NULL
;
19571 const struct firmware
*module_fw
;
19572 struct module_metadata
*hmem
= &dhd
->hmem
;
19574 if (dhd_hmem_module_string
[0] != '\0') {
19575 module_name
= dhd_hmem_module_string
;
19577 DHD_ERROR(("%s No module image name specified\n", __FUNCTION__
));
19580 if (request_firmware(&module_fw
, module_name
, dhd_bus_to_dev(dhd
->bus
))) {
19581 DHD_ERROR(("modules.img not available\n"));
19584 if (!dhd_alloc_module_memory(dhd
->bus
, module_fw
->size
, hmem
)) {
19585 release_firmware(module_fw
);
19588 memcpy(hmem
->data
, module_fw
->data
, module_fw
->size
);
19589 release_firmware(module_fw
);
19591 #endif /* HOFFLOAD_MODULES */
19593 #ifdef SET_PCIE_IRQ_CPU_CORE
19595 dhd_set_irq_cpucore(dhd_pub_t
*dhdp
, int set
)
19599 DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__
));
19604 DHD_ERROR(("%s : dhd->bus is NULL\n", __FUNCTION__
));
19608 if (dhdpcie_get_pcieirq(dhdp
->bus
, &irq
)) {
19612 set_irq_cpucore(irq
, set
);
19614 #endif /* SET_PCIE_IRQ_CPU_CORE */
19616 #if defined(DHD_HANG_SEND_UP_TEST)
19618 dhd_make_hang_with_reason(struct net_device
*dev
, const char *string_num
)
19620 dhd_info_t
*dhd
= NULL
;
19621 dhd_pub_t
*dhdp
= NULL
;
19622 uint reason
= HANG_REASON_MAX
;
19623 char buf
[WLC_IOCTL_SMLEN
] = {0, };
19624 uint32 fw_test_code
= 0;
19625 dhd
= DHD_DEV_INFO(dev
);
19631 if (!dhd
|| !dhdp
) {
19635 reason
= (uint
) bcm_strtoul(string_num
, NULL
, 0);
19636 DHD_ERROR(("Enter %s, reason=0x%x\n", __FUNCTION__
, reason
));
19639 if (dhdp
->req_hang_type
) {
19640 DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
19641 __FUNCTION__
, dhdp
->req_hang_type
));
19642 dhdp
->req_hang_type
= 0;
19645 DHD_ERROR(("%s, No requested HANG test\n", __FUNCTION__
));
19648 } else if ((reason
<= HANG_REASON_MASK
) || (reason
>= HANG_REASON_MAX
)) {
19649 DHD_ERROR(("Invalid HANG request, reason 0x%x\n", reason
));
19653 if (dhdp
->req_hang_type
!= 0) {
19654 DHD_ERROR(("Already HANG requested for test\n"));
19659 case HANG_REASON_IOCTL_RESP_TIMEOUT
:
19660 DHD_ERROR(("Make HANG!!!: IOCTL response timeout(0x%x)\n", reason
));
19661 dhdp
->req_hang_type
= reason
;
19662 fw_test_code
= 102; /* resumed on timeour */
19663 bcm_mkiovar("bus:disconnect", (void *)&fw_test_code
, 4, buf
, sizeof(buf
));
19664 dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, buf
, sizeof(buf
), TRUE
, 0);
19666 case HANG_REASON_DONGLE_TRAP
:
19667 DHD_ERROR(("Make HANG!!!: Dongle trap (0x%x)\n", reason
));
19668 dhdp
->req_hang_type
= reason
;
19669 fw_test_code
= 99; /* dongle trap */
19670 bcm_mkiovar("bus:disconnect", (void *)&fw_test_code
, 4, buf
, sizeof(buf
));
19671 dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, buf
, sizeof(buf
), TRUE
, 0);
19673 case HANG_REASON_D3_ACK_TIMEOUT
:
19674 DHD_ERROR(("Make HANG!!!: D3 ACK timeout (0x%x)\n", reason
));
19675 dhdp
->req_hang_type
= reason
;
19677 case HANG_REASON_BUS_DOWN
:
19678 DHD_ERROR(("Make HANG!!!: BUS down(0x%x)\n", reason
));
19679 dhdp
->req_hang_type
= reason
;
19681 case HANG_REASON_PCIE_LINK_DOWN
:
19682 case HANG_REASON_MSGBUF_LIVELOCK
:
19683 dhdp
->req_hang_type
= 0;
19684 DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason
));
19686 case HANG_REASON_IFACE_OP_FAILURE
:
19687 DHD_ERROR(("Make HANG!!!: P2P inrerface delete failure(0x%x)\n", reason
));
19688 dhdp
->req_hang_type
= reason
;
19690 case HANG_REASON_HT_AVAIL_ERROR
:
19691 dhdp
->req_hang_type
= 0;
19692 DHD_ERROR(("PCIe does not support requested HANG(0x%x)\n", reason
));
19694 case HANG_REASON_PCIE_RC_LINK_UP_FAIL
:
19695 DHD_ERROR(("Make HANG!!!:Link Up(0x%x)\n", reason
));
19696 dhdp
->req_hang_type
= reason
;
19699 dhdp
->req_hang_type
= 0;
19700 DHD_ERROR(("Unknown HANG request (0x%x)\n", reason
));
19704 #endif /* DHD_HANG_SEND_UP_TEST */
19705 #ifdef DHD_WAKE_STATUS
19707 dhd_get_wakecount(dhd_pub_t
*dhdp
)
19712 return dhd_bus_get_wakecount(dhdp
);
19713 #endif /* BCMDBUS */
19715 #endif /* DHD_WAKE_STATUS */
19717 #ifdef BCM_ASLR_HEAP
19719 dhd_get_random_number(void)
19722 get_random_bytes_arch(&rand
, sizeof(rand
));
19725 #endif /* BCM_ASLR_HEAP */
19727 #ifdef DHD_PKT_LOGGING
19729 dhd_pktlog_dump(void *handle
, void *event_info
, u8 event
)
19731 dhd_info_t
*dhd
= handle
;
19734 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__
));
19738 if (dhd_pktlog_write_file(&dhd
->pub
)) {
19739 DHD_ERROR(("%s: writing pktlog dump to the file failed\n", __FUNCTION__
));
19745 dhd_schedule_pktlog_dump(dhd_pub_t
*dhdp
)
19747 dhd_deferred_schedule_work(dhdp
->info
->dhd_deferred_wq
,
19748 (void*)NULL
, DHD_WQ_WORK_PKTLOG_DUMP
,
19749 dhd_pktlog_dump
, DHD_WQ_WORK_PRIORITY_HIGH
);
19751 #endif /* DHD_PKT_LOGGING */
19753 void *dhd_get_pub(struct net_device
*dev
)
19755 dhd_info_t
*dhdinfo
= *(dhd_info_t
**)netdev_priv(dev
);
19757 return (void *)&dhdinfo
->pub
;
19759 printf("%s: null dhdinfo\n", __FUNCTION__
);
19764 void *dhd_get_conf(struct net_device
*dev
)
19766 dhd_info_t
*dhdinfo
= *(dhd_info_t
**)netdev_priv(dev
);
19768 return (void *)dhdinfo
->pub
.conf
;
19770 printf("%s: null dhdinfo\n", __FUNCTION__
);
19775 bool dhd_os_wd_timer_enabled(void *bus
)
19777 dhd_pub_t
*pub
= bus
;
19778 dhd_info_t
*dhd
= (dhd_info_t
*)pub
->info
;
19780 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
19782 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__
));
19785 return dhd
->wd_timer_valid
;