# bcmdhd
-# 1. WL_IFACE_COMB_NUM_CHANNELS must be added if Android version is 4.4 with Kernel version 3.0~3.4,
-# otherwise please remove it.
# if not confiure pci mode, we use sdio mode as default
ifeq ($(CONFIG_BCMDHD_PCIE),)
$(info bcm SDIO driver configured)
CONFIG_DHD_USE_STATIC_BUF := y
endif
+
+ifeq ($(CONFIG_BCMDHD_SDIO),y)
+MODULE_NAME := dhd
+else
+MODULE_NAME := bcmdhd
+endif
+#CONFIG_BCMDHD := m
#CONFIG_BCMDHD_SDIO := y
#CONFIG_BCMDHD_PCIE := y
#CONFIG_BCMDHD_USB := y
+
+#CONFIG_BCMDHD_CUSB := y
CONFIG_BCMDHD_PROPTXSTATUS := y
+#CONFIG_BCMDHD_DEBUG := y
CONFIG_MACH_PLATFORM := y
#CONFIG_BCMDHD_DTS := y
export CONFIG_VTS_SUPPORT = y
DHDCFLAGS = -Wall -Wstrict-prototypes -Dlinux -DBCMDRIVER \
- -Wno-maybe-uninitialized -Werror \
- -DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DBCMFILEIMAGE \
+ -Wno-maybe-uninitialized -Werror \
+ -DBCMDONGLEHOST -DBCMDMA32 -DBCMFILEIMAGE \
-DDHDTHREAD -DDHD_DEBUG -DSHOW_EVENTS -DBCMDBG -DGET_OTP_MAC_ENABLE \
-DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT -DSUPPORT_PM2_ONLY \
-DKEEP_ALIVE -DPKT_FILTER_SUPPORT -DDHDTCPACK_SUPPRESS \
-DDHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT \
-DMULTIPLE_SUPPLICANT -DTSQ_MULTIPLIER -DMFP \
- -DWL_EXT_IAPSTA \
+ -DPOWERUP_MAX_RETRY=0 -DIFACE_HANG_FORCE_DEV_CLOSE -DWAIT_DEQUEUE \
+ -DWL_EXT_IAPSTA -DWL_ESCAN \
-DENABLE_INSMOD_NO_FW_LOAD \
-I$(src) -I$(src)/include
dhd_common.o dhd_ip.o dhd_linux_wq.o dhd_custom_gpio.o \
bcmevent.o hndpmu.o linux_osl.o wldev_common.o wl_android.o \
dhd_debug_linux.o dhd_debug.o dhd_mschdbg.o dhd_dbg_ring.o \
- hnd_pktq.o hnd_pktpool.o bcmxtlv.o linux_pkt.o bcmstdlib_s.o \
- dhd_config.o wl_android_ext.o
+ hnd_pktq.o hnd_pktpool.o bcmxtlv.o linux_pkt.o bcmstdlib_s.o frag.o \
+ dhd_linux_exportfs.o dhd_linux_pktdump.o \
+ dhd_config.o wl_event.o wl_android_ext.o wl_escan.o
#BCMDHD_SDIO
ifneq ($(CONFIG_BCMDHD_SDIO),)
DHDCFLAGS += -DBCMSDIO -DMMC_SDIO_ABORT -DBCMLXSDMMC -DUSE_SDIOFIFO_IOVAR \
-DSDTEST -DBDC -DDHD_USE_IDLECOUNT -DCUSTOM_SDIO_F2_BLKSIZE=256 \
- -DBCMSDIOH_TXGLOM -DBCMSDIOH_TXGLOM_EXT -DRXFRAME_THREAD
+ -DBCMSDIOH_TXGLOM -DBCMSDIOH_TXGLOM_EXT -DRXFRAME_THREAD \
+ -DBCMSDIO_RXLIM_POST
ifeq ($(CONFIG_BCMDHD_OOB),y)
DHDCFLAGS += -DOOB_INTR_ONLY -DCUSTOMER_OOB -DHW_OOB
ifeq ($(CONFIG_BCMDHD_DISABLE_WOWLAN),y)
else
DHDCFLAGS += -DSDIO_ISR_THREAD
endif
-
DHDOFILES += bcmsdh.o bcmsdh_linux.o bcmsdh_sdmmc.o bcmsdh_sdmmc_linux.o \
dhd_sdio.o dhd_cdc.o dhd_wlfc.o
endif
ifneq ($(CONFIG_BCMDHD_PCIE),)
DHDCFLAGS += -DPCIE_FULL_DONGLE -DBCMPCIE -DCUSTOM_DPC_PRIO_SETTING=-1 \
-DDONGLE_ENABLE_ISOLATION
+DHDCFLAGS += -DDHD_LB -DDHD_LB_RXP -DDHD_LB_STATS -DDHD_LB_TXP
+DHDCFLAGS += -DDHD_PKTID_AUDIT_ENABLED
ifneq ($(CONFIG_PCI_MSI),)
DHDCFLAGS += -DDHD_USE_MSI
endif
-
DHDOFILES += dhd_pcie.o dhd_pcie_linux.o pcie_core.o dhd_flowring.o \
- dhd_msgbuf.o
+ dhd_msgbuf.o dhd_linux_lb.o
endif
#BCMDHD_USB
DHDCFLAGS += -DBCMUSBDEV_COMPOSITE
DHDCFLAGS :=$(filter-out -DENABLE_INSMOD_NO_FW_LOAD,$(DHDCFLAGS))
endif
-
DHDOFILES += dbus.o dbus_usb.o dbus_usb_linux.o dhd_cdc.o dhd_wlfc.o
endif
endif
endif
+ifeq ($(CONFIG_64BIT),y)
+ DHDCFLAGS := $(filter-out -DBCMDMA32,$(DHDCFLAGS))
+ DHDCFLAGS += -DBCMDMA64OSL
+endif
+
#VTS_SUPPORT
ifeq ($(CONFIG_VTS_SUPPORT),y)
ifneq ($(CONFIG_CFG80211),)
-DHDCFLAGS += -DGSCAN_SUPPORT -DRTT_SUPPORT -DCUSTOM_FORCE_NODFS_FLAG \
- -DLINKSTAT_SUPPORT -DDEBUGABILITY -DDBG_PKT_MON -DPKT_FILTER_SUPPORT \
- -DAPF -DNDO_CONFIG_SUPPORT -DRSSI_MONITOR_SUPPORT -DDHD_WAKE_STATUS \
- -DCUSTOM_COUNTRY_CODE -DDHD_FW_COREDUMP -DEXPLICIT_DISCIF_CLEANUP
-
+DHDCFLAGS += -DGSCAN_SUPPORT -DRTT_SUPPORT -DLINKSTAT_SUPPORT \
+ -DCUSTOM_COUNTRY_CODE -DDHD_GET_VALID_CHANNELS \
+ -DDEBUGABILITY -DDBG_PKT_MON -DDHD_LOG_DUMP -DDHD_FW_COREDUMP \
+ -DAPF -DNDO_CONFIG_SUPPORT -DRSSI_MONITOR_SUPPORT -DDHD_WAKE_STATUS
DHDOFILES += dhd_rtt.o bcm_app_utils.o
endif
endif
+# For Debug
+ifneq ($(CONFIG_BCMDHD_DEBUG),)
+DHDCFLAGS += -DDEBUGFS_CFG80211
+DHDCFLAGS += -DSHOW_LOGTRACE -DDHD_LOG_DUMP -DDHD_FW_COREDUMP \
+ -DBCMASSERT_LOG -DSI_ERROR_ENFORCE
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+ DHDCFLAGS += -DEWP_EDL
+ DHDCFLAGS += -DDNGL_EVENT_SUPPORT
+ DHDCFLAGS += -DDHD_SSSR_DUMP
+endif
+endif
+
# MESH support for kernel 3.10 later
ifeq ($(CONFIG_WL_MESH),y)
DHDCFLAGS += -DWLMESH
+ifneq ($(CONFIG_CFG80211),)
+ DHDCFLAGS += -DWLMESH_CFG80211
+endif
ifneq ($(CONFIG_BCMDHD_PCIE),)
DHDCFLAGS += -DBCM_HOST_BUF -DDMA_HOST_BUFFER_LEN=0x80000
endif
DHDCFLAGS :=$(filter-out -DSET_RANDOM_MAC_SOFTAP,$(DHDCFLAGS))
endif
-ifeq ($(CONFIG_BCMDHD_SDIO),y)
-obj-$(CONFIG_BCMDHD) += dhd.o
-dhd-objs += $(DHDOFILES)
-else
-obj-$(CONFIG_BCMDHD) += bcmdhd.o
-bcmdhd-objs += $(DHDOFILES)
+ifeq ($(CONFIG_WL_EASYMESH),y)
+ DHDCFLAGS :=$(filter-out -DDHD_FW_COREDUMP,$(DHDCFLAGS))
+ DHDCFLAGS :=$(filter-out -DDHD_LOG_DUMP,$(DHDCFLAGS))
+DHDCFLAGS += -DWLEASYMESH -DWL_STATIC_IF -DWLDWDS -DFOURADDR_AUTO_BRG
endif
+obj-$(CONFIG_BCMDHD) += $(MODULE_NAME).o
+$(MODULE_NAME)-objs += $(DHDOFILES)
+
ifeq ($(CONFIG_MACH_PLATFORM),y)
DHDOFILES += dhd_gpio.o
ifeq ($(CONFIG_BCMDHD_DTS),y)
endif
ifneq ($(CONFIG_WIRELESS_EXT),)
- DHDOFILES += wl_iw.o wl_escan.o
- DHDCFLAGS += -DSOFTAP -DWL_WIRELESS_EXT -DUSE_IW -DWL_ESCAN
+ DHDOFILES += wl_iw.o
+ DHDCFLAGS += -DSOFTAP -DWL_WIRELESS_EXT -DUSE_IW
endif
ifneq ($(CONFIG_CFG80211),)
- DHDOFILES += wl_cfg80211.o wl_cfgp2p.o wl_linux_mon.o wl_cfg_btcoex.o wl_cfgvendor.o
+ DHDOFILES += wl_cfg80211.o wl_cfgscan.o wl_cfgp2p.o
+ DHDOFILES += wl_linux_mon.o wl_cfg_btcoex.o wl_cfgvendor.o
DHDOFILES += dhd_cfg80211.o
DHDCFLAGS += -DWL_CFG80211 -DWLP2P -DWL_CFG80211_STA_EVENT
DHDCFLAGS += -DWL_IFACE_COMB_NUM_CHANNELS
- DHDCFLAGS += -DCUSTOM_ROAM_TRIGGER_SETTING=-65
- DHDCFLAGS += -DCUSTOM_ROAM_DELTA_SETTING=15
- DHDCFLAGS += -DCUSTOM_KEEP_ALIVE_SETTING=28000
- DHDCFLAGS += -DCUSTOM_PNO_EVENT_LOCK_xTIME=7
+ DHDCFLAGS += -DCUSTOM_PNO_EVENT_LOCK_xTIME=10
DHDCFLAGS += -DWL_SUPPORT_AUTO_CHANNEL
DHDCFLAGS += -DWL_SUPPORT_BACKPORTED_KPATCHES
DHDCFLAGS += -DESCAN_RESULT_PATCH -DESCAN_BUF_OVERFLOW_MGMT
DHDCFLAGS += -DVSDB -DWL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
DHDCFLAGS += -DWLTDLS -DMIRACAST_AMPDU_SIZE=8
DHDCFLAGS += -DWL_VIRTUAL_APSTA
- DHDCFLAGS += -DPNO_SUPPORT
+ DHDCFLAGS += -DPNO_SUPPORT -DEXPLICIT_DISCIF_CLEANUP
+ DHDCFLAGS += -DDHD_USE_SCAN_WAKELOCK
+ DHDCFLAGS += -DWL_STATIC_IF
endif
EXTRA_CFLAGS = $(DHDCFLAGS)
ifeq ($(CONFIG_BCMDHD),m)
* Misc utility routines for accessing chip-specific features
* of the SiliconBackplane-based Broadcom chips.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: aiutils.c 769534 2018-06-26 21:19:11Z $
+ * $Id: aiutils.c 823201 2019-06-03 03:49:36Z $
*/
#include <bcm_cfg.h>
#include <typedefs.h>
case PCMCIA_BUS:
default:
- SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n", sih->bustype));
+ SI_ERROR(("Don't know how to do AXI enumeration on bus %d\n", sih->bustype));
ASSERT(0);
return;
}
asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
&addrl, &addrh, &sizel, &sizeh);
if (asd != 0) {
- sii->oob_router = addrl;
+ if ((sii->oob_router != 0) && (sii->oob_router != addrl)) {
+ sii->oob_router1 = addrl;
+ } else {
+ sii->oob_router = addrl;
+ }
}
}
if (cid != NS_CCB_CORE_ID &&
cid != PMU_CORE_ID && cid != GCI_CORE_ID && cid != SR_CORE_ID &&
- cid != HUB_CORE_ID)
+ cid != HUB_CORE_ID && cid != HND_OOBR_CORE_ID)
continue;
}
if (cc == NULL)
goto error;
+ BCM_REFERENCE(erombase);
erombase = R_REG(sii->osh, &cc->eromptr);
eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
return cores_info->coresba[cidx];
else if (baidx == CORE_BASE_ADDR_1)
return cores_info->coresba2[cidx];
- }
- else if (spidx == CORE_SLAVE_PORT_1) {
+ } else if (spidx == CORE_SLAVE_PORT_1) {
if (baidx == CORE_BASE_ADDR_0)
return cores_info->csp2ba[cidx];
}
__FUNCTION__, baidx, spidx));
return 0;
-
}
/* Return the size of the nth address space in the current core
return cores_info->coresba_size[cidx];
else if (baidx == CORE_BASE_ADDR_1)
return cores_info->coresba2_size[cidx];
- }
- else if (spidx == CORE_SLAVE_PORT_1) {
+ } else if (spidx == CORE_SLAVE_PORT_1) {
if (baidx == CORE_BASE_ADDR_0)
return cores_info->csp2ba_size[cidx];
}
}
#endif /* UCM_CORRUPTION_WAR */
OSL_DELAY(1);
-
}
void
* Contents are wifi-specific, used by any kernel or app-level
* software that might want wifi things as it grows.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Bloom filter support
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmbloom.c 714397 2017-08-04 08:24:38Z $
+ * $Id: bcmbloom.c 788740 2018-11-13 21:45:01Z $
*/
#include <typedefs.h>
pos = (*bp->hash[i])(bp->cb_ctx, i, tag, tag_len);
/* all bits must be set for a match */
+ CLANG_DIAGNOSTIC_PUSH_SUPPRESS_CAST()
if (isclr(buf, pos % BLOOM_BIT_LEN(buf_len))) {
+ CLANG_DIAGNOSTIC_POP()
err = BCME_NOTFOUND;
break;
}
/*
* bcmevent read-only data shared by kernel or app layers
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmevent.c 755881 2018-04-05 06:32:32Z $
+ * $Id: bcmevent.c 807989 2019-03-05 07:57:42Z $
*/
#include <typedefs.h>
#ifdef WLWNM
BCMEVENT_NAME(WLC_E_WNM_STA_SLEEP),
#endif /* WLWNM */
-#if defined(WL_PROXDETECT)
+#if defined(WL_PROXDETECT) || defined(RTT_SUPPORT)
BCMEVENT_NAME(WLC_E_PROXD),
#endif // endif
BCMEVENT_NAME(WLC_E_CCA_CHAN_QUAL),
#endif /* WL_NAN */
BCMEVENT_NAME(WLC_E_RPSNOA),
BCMEVENT_NAME(WLC_E_PHY_CAL),
+ BCMEVENT_NAME(WLC_E_WA_LQM),
};
const char *bcmevent_get_name(uint event_type)
* BCMSDH interface glue
* implement bcmsdh API for SDIOH driver
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
ASSERT(bcmsdh->init_success);
- if (bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)) {
+ if (bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, bcmsdh->force_sbwad_calc)) {
bcmsdh->regfail = TRUE; // terence 20130621: prevent dhd_dpc in dead lock
return 0xFFFFFFFF;
}
/*
* SDIO access interface for drivers - linux specific (pci only)
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
} bcmsdh_os_info_t;
/* debugging macros */
-#define SDLX_MSG(x)
+#define SDLX_MSG(x) printf x
/**
* Checks to see if vendor and device IDs match a supported SDIO Host Controller.
int bcmsdh_set_get_wake(bcmsdh_info_t *bcmsdh, int flag)
{
+#if defined(OOB_INTR_ONLY)
bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
unsigned long flags;
- int ret;
+#endif
+ int ret = 0;
+#if defined(OOB_INTR_ONLY)
spin_lock_irqsave(&bcmsdh_osinfo->oob_irq_spinlock, flags);
ret = bcmsdh->pkt_wake;
bcmsdh->pkt_wake = flag;
spin_unlock_irqrestore(&bcmsdh_osinfo->oob_irq_spinlock, flags);
+#endif
return ret;
}
#endif /* DHD_WAKE_STATUS */
SDLX_MSG(("%s: disable_irq_wake\n", __FUNCTION__));
bcmsdh_osinfo->oob_irq_wake_enabled = FALSE;
#else
- err = enable_irq_wake(bcmsdh_osinfo->oob_irq_num);
- if (err)
- SDLX_MSG(("%s: enable_irq_wake failed with %d\n", __FUNCTION__, err));
- else
- bcmsdh_osinfo->oob_irq_wake_enabled = TRUE;
+#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI)
+ if (device_may_wakeup(bcmsdh_osinfo->dev)) {
+#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */
+ err = enable_irq_wake(bcmsdh_osinfo->oob_irq_num);
+ if (err)
+ SDLX_MSG(("%s: enable_irq_wake failed with %d\n", __FUNCTION__, err));
+ else
+ bcmsdh_osinfo->oob_irq_wake_enabled = TRUE;
+#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI)
+ }
+#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */
#endif
return 0;
return;
}
if (bcmsdh_osinfo->oob_irq_wake_enabled) {
- err = disable_irq_wake(bcmsdh_osinfo->oob_irq_num);
- if (!err)
- bcmsdh_osinfo->oob_irq_wake_enabled = FALSE;
+#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI)
+ if (device_may_wakeup(bcmsdh_osinfo->dev)) {
+#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */
+ err = disable_irq_wake(bcmsdh_osinfo->oob_irq_num);
+ if (!err)
+ bcmsdh_osinfo->oob_irq_wake_enabled = FALSE;
+#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI)
+ }
+#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */
}
if (bcmsdh_osinfo->oob_irq_enabled) {
disable_irq(bcmsdh_osinfo->oob_irq_num);
/*
* BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Proprietary,Open:>>
*
- * $Id: bcmsdh_sdmmc.c 766268 2018-06-07 06:44:55Z $
+ * $Id: bcmsdh_sdmmc.c 782528 2018-09-28 12:15:40Z $
*/
#include <typedefs.h>
static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
#if defined(ENABLE_INSMOD_NO_FW_LOAD) && !defined(BUS_POWER_RESTORE)
extern int sdio_reset_comm(struct mmc_card *card);
-#else
-int sdio_reset_comm(struct mmc_card *card)
-{
- return 0;
-}
#endif
#ifdef GLOBAL_SDMMC_INSTANCE
extern PBCMSDH_SDMMC_INSTANCE gInstance;
/* Now set it */
si->client_block_size[func] = blksize;
+#ifdef USE_DYNAMIC_F2_BLKSIZE
if (si->func[func] == NULL) {
sd_err(("%s: SDIO Device not present\n", __FUNCTION__));
bcmerror = BCME_NORESOURCE;
sd_err(("%s: Failed to set F%d blocksize to %d(%d)\n",
__FUNCTION__, func, blksize, bcmerror));
sdio_release_host(si->func[func]);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
break;
}
#if defined(MMC_SDIO_ABORT)
int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
#endif // endif
- struct timespec now, before;
+ struct osl_timespec now, before;
if (sd_msglevel & SDH_COST_VAL)
- getnstimeofday(&before);
+ osl_do_gettimeofday(&before);
sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
}
}
#endif /* MMC_SDIO_ABORT */
+ /* to allow abort command through F1 */
+#if defined(SDIO_ISR_THREAD)
+ else if (regaddr == SDIOD_CCCR_INTR_EXTN) {
+ while (sdio_abort_retry--) {
+ if (sd->func[func]) {
+ sdio_claim_host(sd->func[func]);
+ /*
+ * this sdio_f0_writeb() can be replaced with
+ * another api depending upon MMC driver change.
+ * As of this time, this is temporaray one
+ */
+ sdio_writeb(sd->func[func],
+ *byte, regaddr, &err_ret);
+ sdio_release_host(sd->func[func]);
+ }
+ if (!err_ret)
+ break;
+ }
+ }
+#endif
else if (regaddr < 0xF0) {
sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
} else {
}
if (sd_msglevel & SDH_COST_VAL) {
- getnstimeofday(&now);
+ osl_do_gettimeofday(&now);
sd_cost(("%s: rw=%d len=1 cost=%lds %luus\n", __FUNCTION__,
rw, now.tv_sec-before.tv_sec, now.tv_nsec/1000-before.tv_nsec/1000));
}
#if defined(MMC_SDIO_ABORT)
int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
#endif // endif
- struct timespec now, before;
+ struct osl_timespec now, before;
if (sd_msglevel & SDH_COST_VAL)
- getnstimeofday(&before);
+ osl_do_gettimeofday(&before);
if (func == 0) {
sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
}
if (sd_msglevel & SDH_COST_VAL) {
- getnstimeofday(&now);
+ osl_do_gettimeofday(&now);
sd_cost(("%s: rw=%d, len=%d cost=%lds %luus\n", __FUNCTION__,
rw, nbytes, now.tv_sec-before.tv_sec, now.tv_nsec/1000 - before.tv_nsec/1000));
}
uint8 *localbuf = NULL;
uint local_plen = 0;
uint pkt_len = 0;
- struct timespec now, before;
+ struct osl_timespec now, before;
sd_trace(("%s: Enter\n", __FUNCTION__));
ASSERT(pkt);
DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
if (sd_msglevel & SDH_COST_VAL)
- getnstimeofday(&before);
+ osl_do_gettimeofday(&before);
blk_size = sd->client_block_size[func];
max_blk_count = min(host->max_blk_count, (uint)MAX_IO_RW_EXTENDED_BLK);
if (sg_count >= ARRAYSIZE(sd->sg_list)) {
sd_err(("%s: sg list entries(%u) exceed limit(%zu),"
" sd blk_size=%u\n",
- __FUNCTION__, sg_count, ARRAYSIZE(sd->sg_list), blk_size));
+ __FUNCTION__, sg_count, (size_t)ARRAYSIZE(sd->sg_list), blk_size));
return (SDIOH_API_RC_FAIL);
}
pdata += pkt_offset;
MFREE(sd->osh, localbuf, ttl_len);
if (sd_msglevel & SDH_COST_VAL) {
- getnstimeofday(&now);
+ osl_do_gettimeofday(&now);
sd_cost(("%s: rw=%d, ttl_len=%d, cost=%lds %luus\n", __FUNCTION__,
write, ttl_len, now.tv_sec-before.tv_sec, now.tv_nsec/1000-before.tv_nsec/1000));
}
{
bool fifo = (fix_inc == SDIOH_DATA_FIX);
int err_ret = 0;
- struct timespec now, before;
+ struct osl_timespec now, before;
sd_trace(("%s: Enter\n", __FUNCTION__));
ASSERT(buf);
if (sd_msglevel & SDH_COST_VAL)
- getnstimeofday(&before);
+ osl_do_gettimeofday(&before);
/* NOTE:
* For all writes, each packet length is aligned to 32 (or 4)
sd_trace(("%s: Exit\n", __FUNCTION__));
if (sd_msglevel & SDH_COST_VAL) {
- getnstimeofday(&now);
+ osl_do_gettimeofday(&now);
sd_cost(("%s: rw=%d, len=%d cost=%lds %luus\n", __FUNCTION__,
write, len, now.tv_sec-before.tv_sec, now.tv_nsec/1000 - before.tv_nsec/1000));
}
{
SDIOH_API_RC status;
void *tmppkt;
- struct timespec now, before;
+ struct osl_timespec now, before;
sd_trace(("%s: Enter\n", __FUNCTION__));
DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
if (sd_msglevel & SDH_COST_VAL)
- getnstimeofday(&before);
+ osl_do_gettimeofday(&before);
if (pkt) {
#ifdef BCMSDIOH_TXGLOM
PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE);
if (sd_msglevel & SDH_COST_VAL) {
- getnstimeofday(&now);
+ osl_do_gettimeofday(&now);
sd_cost(("%s: len=%d cost=%lds %luus\n", __FUNCTION__,
buf_len, now.tv_sec-before.tv_sec, now.tv_nsec/1000 - before.tv_nsec/1000));
}
2.6.27. The implementation prior to that is buggy, and needs broadcom's
patch for it
*/
+#if defined(ENABLE_INSMOD_NO_FW_LOAD) && !defined(BUS_POWER_RESTORE)
if ((ret = sdio_reset_comm(sd->func[0]->card))) {
sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
return ret;
- }
- else {
+ } else
+#endif
+ {
sd->num_funcs = 2;
sd->sd_blockmode = TRUE;
sd->use_client_ints = TRUE;
/*
* BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Proprietary,Open:>>
*
- * $Id: bcmsdh_sdmmc_linux.c 753315 2018-03-21 04:10:12Z $
+ * $Id: bcmsdh_sdmmc_linux.c 825481 2019-06-14 10:06:03Z $
*/
#include <typedefs.h>
#include <dhd_linux.h>
#include <bcmsdh_sdmmc.h>
#include <dhd_dbg.h>
+#include <bcmdevs.h>
#if !defined(SDIO_VENDOR_ID_BROADCOM)
#define SDIO_VENDOR_ID_BROADCOM 0x02d0
#define SDIO_DEVICE_ID_BROADCOM_DEFAULT 0x0000
-#if !defined(SDIO_DEVICE_ID_BROADCOM_4362)
-#define SDIO_DEVICE_ID_BROADCOM_4362 0x4362
-#endif // endif
-
extern void wl_cfg80211_set_parent_dev(void *dev);
extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
/* devices we support, null terminated */
static const struct sdio_device_id bcmsdh_sdmmc_ids[] = {
{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_DEFAULT) },
- { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4362) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM4362_CHIP_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43751_CHIP_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43752_CHIP_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43012_CHIP_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_CHIP_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_D11N_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_D11N2G_ID) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_D11N5G_ID) },
+ /* { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_ANY_ID) }, */
{ SDIO_DEVICE_CLASS(SDIO_CLASS_NONE) },
- { 0, 0, 0, 0 /* end: all zeroes */
- },
+ /* end: all zeroes */
+ { 0, 0, 0, 0},
};
MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids);
/*
* Broadcom SPI Host Controller Driver - Linux Per-port
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Broadcom BCMSDH to gSPI Protocol Conversion Layer
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Broadcom Secure Standard Library.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Driver O/S-independent utility routines
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmutils.c 760883 2018-05-03 22:57:07Z $
+ * $Id: bcmutils.c 813798 2019-04-08 10:20:21Z $
*/
#include <bcm_cfg.h>
total = 0;
for (; p; p = PKTNEXT(osh, p)) {
len = PKTLEN(osh, p);
- total += len;
+ total += (uint)len;
#ifdef BCMLFRAG
if (BCMLFRAG_ENAB()) {
if (PKTISFRAG(osh, p)) {
for (; p && offset; p = PKTNEXT(osh, p)) {
if (offset < (uint)PKTLEN(osh, p))
break;
- offset -= PKTLEN(osh, p);
+ offset -= (uint)PKTLEN(osh, p);
}
if (!p)
for (; p && offset; p = PKTNEXT(osh, p)) {
if (offset < (uint)PKTLEN(osh, p))
break;
- offset -= PKTLEN(osh, p);
+ offset -= (uint)PKTLEN(osh, p);
}
if (!p)
for (; p; p = PKTNEXT(osh, p)) {
pdata = (uint8 *) PKTDATA(osh, p);
pkt_off = offset - len;
- len += PKTLEN(osh, p);
+ len += (uint)PKTLEN(osh, p);
if (len > offset)
break;
}
return NULL;
for (; p; p = PKTNEXT(osh, p)) {
- len += PKTLEN(osh, p);
+ len += (uint)PKTLEN(osh, p);
if (len > offset)
break;
}
printf("%s:\n", msg);
for (p = p0; p; p = PKTNEXT(osh, p))
- prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p));
+ prhex(NULL, PKTDATA(osh, p), (uint)PKTLEN(osh, p));
}
#endif // endif
struct ether_header *eh;
struct ethervlan_header *evh;
uint8 *pktdata;
- int priority = 0;
- int rc = 0;
+ uint priority = 0;
+ uint rc = 0;
pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16)));
if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) {
uint16 vlan_tag;
- int vlan_prio, dscp_prio = 0;
+ uint vlan_prio, dscp_prio = 0;
evh = (struct ethervlan_header *)eh;
vlan_tag = ntoh16(evh->vlan_tag);
- vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
+ vlan_prio = (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
if ((evh->ether_type == hton16(ETHER_TYPE_IP)) ||
(evh->ether_type == hton16(ETHER_TYPE_IPV6))) {
uint8 *ip_body = pktdata + sizeof(struct ethervlan_header);
- uint8 tos_tc = IP_TOS46(ip_body);
- dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+ uint8 tos_tc = (uint8)IP_TOS46(ip_body);
+ dscp_prio = tos_tc >> IPV4_TOS_PREC_SHIFT;
}
/* DSCP priority gets precedence over 802.1P (vlan tag) */
} else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) ||
(eh->ether_type == hton16(ETHER_TYPE_IPV6))) {
uint8 *ip_body = pktdata + sizeof(struct ether_header);
- uint8 tos_tc = IP_TOS46(ip_body);
+ uint8 tos_tc = (uint8)IP_TOS46(ip_body);
uint8 dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT;
switch (dscp) {
case DSCP_EF:
+ case DSCP_VA:
priority = PRIO_8021D_VO;
break;
case DSCP_AF31:
case DSCP_AF32:
case DSCP_AF33:
+ case DSCP_CS3:
priority = PRIO_8021D_CL;
break;
case DSCP_AF21:
case DSCP_AF22:
case DSCP_AF23:
+ priority = PRIO_8021D_EE;
+ break;
case DSCP_AF11:
case DSCP_AF12:
case DSCP_AF13:
- priority = PRIO_8021D_EE;
+ case DSCP_CS2:
+ priority = PRIO_8021D_BE;
+ break;
+ case DSCP_CS6:
+ case DSCP_CS7:
+ priority = PRIO_8021D_NC;
break;
default:
- priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+ priority = tos_tc >> IPV4_TOS_PREC_SHIFT;
break;
}
rc |= PKTPRIO_DSCP;
}
- ASSERT(priority >= 0 && priority <= MAXPRIO);
- PKTSETPRIO(pkt, priority);
+ ASSERT(priority <= MAXPRIO);
+ PKTSETPRIO(pkt, (int)priority);
return (rc | priority);
}
uint rc = 0;
pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
- pktlen = PKTLEN(OSH_NULL, pkt);
+ pktlen = (uint)PKTLEN(OSH_NULL, pkt);
if (pktgetdscp(pktdata, pktlen, &dscp)) {
rc = PKTPRIO_DSCP;
user_priority = dscp2up(up_table, dscp);
- PKTSETPRIO(pkt, user_priority);
+ PKTSETPRIO(pkt, (int)user_priority);
}
return (rc | user_priority);
if (eh->ether_type == HTON16(ETHER_TYPE_IP)) {
ip_body = pktdata + sizeof(struct ether_header);
- *dscp = IP_DSCP46(ip_body);
+ *dscp = (uint8)IP_DSCP46(ip_body);
rc = TRUE;
}
else if (eh->ether_type == HTON16(ETHER_TYPE_8021Q)) {
if (pktlen >= sizeof(struct ethervlan_header) + IPV4_MIN_HEADER_LEN &&
evh->ether_type == HTON16(ETHER_TYPE_IP)) {
ip_body = pktdata + sizeof(struct ethervlan_header);
- *dscp = IP_DSCP46(ip_body);
+ *dscp = (uint8)IP_DSCP46(ip_body);
rc = TRUE;
}
}
uint8 *except_ptr = (uint8 *)qos_map_ie->data;
uint8 except_len = len - QOS_MAP_FIXED_LENGTH;
uint8 *range_ptr = except_ptr + except_len;
- int i;
+ uint8 i;
/* fill in ranges */
for (i = 0; i < QOS_MAP_FIXED_LENGTH; i += 2) {
BCMRAMFN(bcmerrorstr)(int bcmerror)
{
/* check if someone added a bcmerror code but forgot to add errorstring */
- ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1));
+ ASSERT((uint)ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1));
if (bcmerror > 0 || bcmerror < BCME_LAST) {
snprintf(bcm_undeferrstr, sizeof(bcm_undeferrstr), "Undefined error %d", bcmerror);
MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) ==
bcm_count_leading_zeros(bitmap));
bitix = (BCM_MWBMAP_BITS_WORD - 1)
- - bcm_count_leading_zeros(bitmap); /* use asm clz */
+ - (uint32)bcm_count_leading_zeros(bitmap); /* use asm clz */
wordix = BCM_MWBMAP_MULOP(wordix) + bitix;
/* Clear bit if wd count is 0, without conditional branch */
count = bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1;
#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
mwbmap_p->wd_count[wordix]--;
- count = mwbmap_p->wd_count[wordix];
+ count = (uint32)mwbmap_p->wd_count[wordix];
MWBMAP_ASSERT(count ==
(bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1));
#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
MWBMAP_ASSERT(count >= 0);
/* clear wd_bitmap bit if id_map count is 0 */
- bitmap = (count == 0) << bitix;
+ bitmap = ((uint32)(count == 0)) << BCM_MWBMAP_MODOP(bitix);
MWBMAP_DBG((
"Lvl1: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
bcm_count_leading_zeros(bitmap));
bitix = BCM_MWBMAP_MULOP(wordix)
+ (BCM_MWBMAP_BITS_WORD - 1)
- - bcm_count_leading_zeros(bitmap); /* use asm clz */
+ - (uint32)bcm_count_leading_zeros(bitmap); /* use asm clz */
mwbmap_p->ifree--; /* decrement system wide free count */
MWBMAP_ASSERT(mwbmap_p->ifree >= 0);
count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]);
#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
mwbmap_p->wd_count[bitix]--;
- count = mwbmap_p->wd_count[bitix];
+ count = (uint32)mwbmap_p->wd_count[bitix];
MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
MWBMAP_ASSERT(count >= 0);
- bitmap = (count == 0) << BCM_MWBMAP_MODOP(bitix);
+ bitmap = (uint32)(count == 0) << BCM_MWBMAP_MODOP(bitix);
MWBMAP_DBG(("Lvl1: bitix<%02lu> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
BCM_MWBMAP_MODOP(bitix), wordix, *bitmap_p, bitmap,
ASSERT(mwbmap_p->ifree >= 0);
- return mwbmap_p->ifree;
+ return (uint32)mwbmap_p->ifree;
}
/* Determine whether an index is inuse or free */
#if defined(BCM_MWBMAP_USE_CNTSETBITS)
count = bcm_cntsetbits(mwbmap_p->id_bitmap[ix]);
#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
- count = mwbmap_p->wd_count[ix];
+ count = (uint32)mwbmap_p->wd_count[ix];
MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[ix]));
#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
printf("\tIDMAP:%2u. 0x%08x %02u\t", ix, mwbmap_p->id_bitmap[ix], count);
#if defined(BCM_MWBMAP_USE_CNTSETBITS)
count = bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]);
#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
- count = mwbmap_p->wd_count[idmap_ix];
+ count = (uint32)mwbmap_p->wd_count[idmap_ix];
ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]));
#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
ASSERT(count != 0U);
id16_map_t * id16_map;
ASSERT(id16_map_hndl != NULL);
-
+ if (!id16_map_hndl) {
+ return ID16_INVALID;
+ }
id16_map = (id16_map_t *)id16_map_hndl;
ASSERT(id16_map->total > 0);
id16_map_t * id16_map;
ASSERT(id16_map_hndl != NULL);
-
+ if (!id16_map_hndl) {
+ goto done;
+ }
id16_map = (id16_map_t *)id16_map_hndl;
ASSERT(id16_map->stack_idx >= -1);
void
dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size)
{
- uint32 mem_size;
- mem_size = sizeof(dll_pool_t) + (elems_max * elem_size);
+ uint32 memsize;
+ memsize = sizeof(dll_pool_t) + (elems_max * elem_size);
if (pool)
- MFREE(osh, pool, mem_size);
+ MFREE(osh, pool, memsize);
}
dll_pool_t *
dll_pool_init(void * osh, uint16 elems_max, uint16 elem_size)
{
- uint32 mem_size, i;
+ uint32 memsize, i;
dll_pool_t * dll_pool_p;
dll_t * elem_p;
ASSERT(elem_size > sizeof(dll_t));
- mem_size = sizeof(dll_pool_t) + (elems_max * elem_size);
+ memsize = sizeof(dll_pool_t) + (elems_max * elem_size);
- if ((dll_pool_p = (dll_pool_t *)MALLOCZ(osh, mem_size)) == NULL) {
+ if ((dll_pool_p = (dll_pool_t *)MALLOCZ(osh, memsize)) == NULL) {
printf("dll_pool_init: elems_max<%u> elem_size<%u> malloc failure\n",
elems_max, elem_size);
ASSERT(0);
if ((r == -1) || (r >= (int)b->size)) {
b->size = 0;
} else {
- b->size -= r;
+ b->size -= (uint)r;
b->buf += r;
}
} else if (bcm_isprint((uchar)c)) {
*p++ = (char)c;
} else {
- p += snprintf(p, (endp - p), "\\x%02X", c);
+ p += snprintf(p, (size_t)(endp - p), "\\x%02X", c);
}
}
*p = '\0';
result = 0;
while (bcm_isxdigit(*cp) &&
- (value = bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10) < base) {
+ (value = (uint64)(bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10)) < base) {
result = result*base + value;
/* Detected overflow */
if (result < last_result && !minus) {
len = (int)strlen(haystack) - nlen + 1;
for (i = 0; i < len; i++)
- if (memcmp(needle, &haystack[i], nlen) == 0)
+ if (memcmp(needle, &haystack[i], (size_t)nlen) == 0)
return DISCARD_QUAL(&haystack[i], char);
return (NULL);
}
for (; *str; str++) {
if (map[*str >> 5] & (1 << (*str & 31))) {
if (tokdelim != NULL) {
- *tokdelim = *str;
+ *tokdelim = (char)*str;
}
*str++ = '\0';
char *ep;
for (;;) {
- ea->octet[i++] = (char) bcm_strtoul(p, &ep, 16);
+ ea->octet[i++] = (uint8) bcm_strtoul(p, &ep, 16);
p = ep;
if (!*p++ || i == 6)
break;
*/
if (datalen > 0) {
- memcpy(dst_tlv->data, data, datalen);
+ memcpy(dst_tlv->data, data, (size_t)datalen);
}
 /* update the output destination pointer to point past
/* if len + tlv hdr len is more than destlen, don't do anything
* just return the buffer untouched
*/
- if ((int)(datalen + BCM_TLV_HDR_SIZE) <= dst_maxlen) {
+ if ((int)(datalen + (int)BCM_TLV_HDR_SIZE) <= dst_maxlen) {
new_dst = bcm_write_tlv(type, data, datalen, dst);
}
GCC_DIAGNOSTIC_POP();
}
+/**
+ * Advance a const tlv buffer pointer and length up to the given tlv element pointer
+ * 'elt'. The function checks that elt is a valid tlv; the elt pointer and data
+ * are all in the range of the buffer/length.
+ *
+ * @param elt pointer to a valid bcm_tlv_t in the buffer
+ * @param buffer pointer to a tlv buffer
+ * @param buflen length of the buffer in bytes
+ *
+ * On return, if elt is not a tlv in the buffer bounds, the *buffer parameter
+ * will be set to NULL and *buflen parameter will be set to zero. Otherwise,
+ * *buffer will point to elt, and *buflen will have been adjusted by the
+ * difference between *buffer and elt.
+ */
+void
+bcm_tlv_buffer_advance_to(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen)
+{
+ uint new_buflen;
+ const uint8 *new_buffer;
+
+ new_buffer = (const uint8*)elt;
+
+ /* make sure the input buffer pointer is non-null, that (buffer + buflen) does not wrap,
+ * and that the elt pointer is in the range of [buffer, buffer + buflen]
+ */
+ if ((*buffer != NULL) &&
+ ((uintptr)*buffer < ((uintptr)*buffer + *buflen)) &&
+ (new_buffer >= *buffer) &&
+ (new_buffer < (*buffer + *buflen))) {
+ /* delta between buffer and new_buffer is <= *buflen, so truncating cast to uint
+ * from ptrdiff is ok
+ */
+ uint delta = (uint)(new_buffer - *buffer);
+
+ /* New buffer length is old len minus the delta from the buffer start to elt.
+	 * The check just above guarantees that the subtraction does not underflow.
+ */
+ new_buflen = *buflen - delta;
+
+ /* validate current elt */
+ if (bcm_valid_tlv(elt, new_buflen)) {
+ /* All good, so update the input/output parameters */
+ *buffer = new_buffer;
+ *buflen = new_buflen;
+ return;
+ }
+ }
+
+ /* something did not check out, clear out the buffer info */
+ *buffer = NULL;
+ *buflen = 0;
+
+ return;
+}
+
+/**
+ * Advance a const tlv buffer pointer and length past the given tlv element pointer
+ * 'elt'. The function checks that elt is a valid tlv; the elt pointer and data
+ * are all in the range of the buffer/length. The function also checks that the
+ * remaining buffer starts with a valid tlv.
+ *
+ * @param elt pointer to a valid bcm_tlv_t in the buffer
+ * @param buffer pointer to a tlv buffer
+ * @param buflen length of the buffer in bytes
+ *
+ * On return, if elt is not a tlv in the buffer bounds, or the remaining buffer
+ * following the elt does not begin with a tlv in the buffer bounds, the *buffer
+ * parameter will be set to NULL and *buflen parameter will be set to zero.
+ * Otherwise, *buffer will point to the first byte past elt, and *buflen will
+ * have the remaining buffer length.
+ */
+void
+bcm_tlv_buffer_advance_past(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen)
+{
+ /* Start by advancing the buffer up to the given elt */
+ bcm_tlv_buffer_advance_to(elt, buffer, buflen);
+
+ /* if that did not work, bail out */
+ if (*buflen == 0) {
+ return;
+ }
+
+#if defined(__COVERITY__)
+ /* The elt has been verified by bcm_tlv_buffer_advance_to() to be a valid element,
+ * so its elt->len is in the bounds of the buffer. The following check prevents
+ * Coverity from flagging the (elt->data + elt->len) statement below as using a
+ * tainted elt->len to index into array 'elt->data'.
+ */
+ if (elt->len > *buflen) {
+ return;
+ }
+#endif /* __COVERITY__ */
+
+ /* We know we are advanced up to a good tlv.
+ * Now just advance to the following tlv.
+ */
+ elt = (const bcm_tlv_t*)(elt->data + elt->len);
+
+ bcm_tlv_buffer_advance_to(elt, buffer, buflen);
+
+ return;
+}
+
/*
* Traverse a string of 1-byte tag/1-byte length/variable-length value
* triples, returning a pointer to the substring whose first element
bcm_parse_tlvs_min_bodylen(const void *buf, int buflen, uint key, int min_bodylen)
{
bcm_tlv_t * ret;
- ret = bcm_parse_tlvs(buf, buflen, key);
+ ret = bcm_parse_tlvs(buf, (uint)buflen, key);
if (ret == NULL || ret->len < min_bodylen) {
return NULL;
}
bit = bd->bitfield[i].bit;
if ((flags & mask) == bit) {
if (len > (int)strlen(name)) {
- slen = strlen(name);
- strncpy(buf, name, slen+1);
+ slen = (int)strlen(name);
+ strncpy(buf, name, (size_t)slen+1);
}
break;
}
} else if ((flags & bit) == 0)
continue;
flags &= ~bit;
- nlen = strlen(name);
+ nlen = (int)strlen(name);
slen += nlen;
/* count btwn flag space */
if (flags != 0)
if (len <= slen)
break;
/* copy NULL char but don't count it */
- strncpy(p, name, nlen + 1);
+ strncpy(p, name, (size_t)nlen + 1);
p += nlen;
/* copy btwn flag space and NULL char */
if (flags != 0)
for (i = 0; i < bdsz; i++) {
bit = bd[i].bit;
name = bd[i].name;
+ CLANG_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
if (isset(addr, bit)) {
- nlen = strlen(name);
+ CLANG_DIAGNOSTIC_POP();
+ nlen = (int)strlen(name);
slen += nlen;
/* need SPACE - for simplicity */
slen += 1;
more = TRUE;
break;
}
- memcpy(p, name, nlen);
+ memcpy(p, name, (size_t)nlen);
p += nlen;
p[0] = ' ';
p += 1;
p = line;
for (i = 0; i < nbytes; i++) {
if (i % 16 == 0) {
- nchar = snprintf(p, len, " %04x: ", i); /* line prefix */
+ nchar = snprintf(p, (size_t)len, " %04x: ", i); /* line prefix */
p += nchar;
len -= nchar;
}
if (len > 0) {
- nchar = snprintf(p, len, "%02x ", buf[i]);
+ nchar = snprintf(p, (size_t)len, "%02x ", buf[i]);
p += nchar;
len -= nchar;
}
const char *fmt;
fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+ /*
+ * The following call to snprintf generates a compiler warning
+ * due to -Wformat-nonliteral. However, the format string is coming
+ * from internal callers rather than external data input, and is a
+ * useful debugging tool serving a variety of diagnostics. Rather
+ * than expand code size by replicating multiple functions with different
+ * argument lists, or disabling the warning globally, we simply
+ * disable the warning for this one instance.
+ */
+ CLANG_DIAGNOSTIC_PUSH_SUPPRESS_FORMAT()
snprintf(buf, len, fmt, chipid);
+ CLANG_DIAGNOSTIC_POP()
return buf;
}
while (bufsize > 1) {
if (cur_ptr->nameandfmt == NULL)
break;
+
+ /*
+ * The following call to snprintf generates a compiler warning
+ * due to -Wformat-nonliteral. However, the format string is coming
+ * from internal callers rather than external data input, and is a
+ * useful debugging tool serving a variety of diagnostics. Rather
+ * than expand code size by replicating multiple functions with different
+	 * argument lists, or disabling the warning globally, we simply
+	 * disable the warning for this one instance.
+ */
+ CLANG_DIAGNOSTIC_PUSH_SUPPRESS_FORMAT()
len = snprintf(buf, bufsize, cur_ptr->nameandfmt,
- read_rtn(arg0, arg1, cur_ptr->offset));
+ read_rtn(arg0, arg1, cur_ptr->offset));
+ CLANG_DIAGNOSTIC_POP()
/* check for snprintf overflow or error */
if (len < 0 || (uint32)len >= bufsize)
- len = bufsize - 1;
+ len = (int)(bufsize - 1);
buf += len;
- bufsize -= len;
- filled_len += len;
+ bufsize -= (uint32)len;
+ filled_len += (uint32)len;
cur_ptr++;
}
return filled_len;
/* return the mW value scaled down to the correct factor of 10,
* adding in factor/2 to get proper rounding.
*/
- return ((nqdBm_to_mW_map[idx] + factor/2) / factor);
+ return (uint16)((nqdBm_to_mW_map[idx] + factor/2) / factor);
}
uint8
break;
nv_ver[n-1] = varbuf[n];
}
- printk("NVRAM version: %s\n", nv_ver);
+ printf("NVRAM version: %s\n", nv_ver);
}
for (n = 0; n < len; n++) {
bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum)
{
while (len > 1) {
- sum += (buf[0] << 8) | buf[1];
+ sum += (uint32)((buf[0] << 8) | buf[1]);
buf += 2;
len -= 2;
}
if (len > 0) {
- sum += (*buf) << 8;
+ sum += (uint32)((*buf) << 8);
}
while (sum >> 16) {
ptr += OFFSETOF(struct ipv4_hdr, hdr_chksum) + 2;
/* return calculated chksum */
- return ip_cksum(sum, ptr, ip_len - OFFSETOF(struct ipv4_hdr, src_ip));
+ return ip_cksum(sum, ptr, (uint32)((uint)ip_len - OFFSETOF(struct ipv4_hdr, src_ip)));
}
/* calculate TCP header checksum using partial sum */
ASSERT(tcp != NULL);
ASSERT(tcp_len >= TCP_MIN_HEADER_LEN);
+ if (!ip || !tcp || !(tcp_len >= TCP_MIN_HEADER_LEN))
+ return 0;
/* pseudo header cksum */
memset(&tcp_ps, 0, sizeof(tcp_ps));
memcpy(&tcp_ps.dst_ip, ip_hdr->dst_ip, IPV4_ADDR_LEN);
ASSERT(tcp != NULL);
ASSERT(tcp_len >= TCP_MIN_HEADER_LEN);
+ if (!ipv6 || !tcp || !(tcp_len >= TCP_MIN_HEADER_LEN))
+ return 0;
/* pseudo header cksum */
memset((char *)&ipv6_pseudo, 0, sizeof(ipv6_pseudo));
memcpy((char *)ipv6_pseudo.saddr, (char *)ipv6_hdr->saddr.addr,
/* all bits are in the same byte */
if (fbyte == lbyte) {
- mask = ((1 << nbits) - 1) << fbit;
+ mask = (uint8)(((1 << nbits) - 1) << fbit);
addr[fbyte] &= ~mask;
addr[fbyte] |= (uint8)(val << fbit);
return;
/* first partial byte */
if (fbit > 0) {
- mask = (0xff << fbit);
+ mask = (uint8)(0xff << fbit);
addr[fbyte] &= ~mask;
addr[fbyte] |= (uint8)(val << fbit);
val >>= (8 - fbit);
/* last partial byte */
if (rbits > 0) {
- mask = (1 << rbits) - 1;
+ mask = (uint8)((1 << rbits) - 1);
addr[lbyte] &= ~mask;
addr[lbyte] |= (uint8)(val >> (nbits - rbits));
lbyte --; /* last full byte */
/* all bits are in the same byte */
if (fbyte == lbyte) {
- mask = ((1 << nbits) - 1) << fbit;
+ mask = (uint8)(((1 << nbits) - 1) << fbit);
val = (addr[fbyte] & mask) >> fbit;
return val;
}
/* first partial byte */
if (fbit > 0) {
bits = 8 - fbit;
- mask = (0xff << fbit);
+ mask = (uint8)(0xFFu << fbit);
val |= (addr[fbyte] & mask) >> fbit;
fbyte ++; /* first full byte */
}
/* last partial byte */
if (rbits > 0) {
- mask = (1 << rbits) - 1;
- val |= (addr[lbyte] & mask) << (nbits - rbits);
+ mask = (uint8)((1 << rbits) - 1);
+ val |= (uint32)((addr[lbyte] & mask) << (nbits - rbits));
lbyte --; /* last full byte */
}
/* remaining full byte(s) */
for (byte = fbyte; byte <= lbyte; byte ++) {
- val |= (addr[byte] << (((byte - fbyte) << 3) + bits));
+ val |= (uint32)((addr[byte] << (((byte - fbyte) << 3) + bits)));
}
return val;
unsigned int *datalen)
{
char *p;
- int variable_heading_len, record_len, variable_record_len = strlen(variable) + 1;
+ int variable_heading_len, record_len, variable_record_len = (int)strlen(variable) + 1;
char *buf_end = varbuf + buflen;
p = strchr(variable, '=');
if (!p) {
/* Scanning NVRAM, record by record up to trailing 0 */
for (p = varbuf; *p; p += strlen(p) + 1) {
/* If given variable found - remove it */
- if (!strncmp(p, variable, variable_heading_len)) {
- record_len = strlen(p) + 1;
- memmove_s(p, buf_end - p, p + record_len, buf_end - (p + record_len));
+ if (!strncmp(p, variable, (size_t)variable_heading_len)) {
+ record_len = (int)strlen(p) + 1;
+ memmove_s(p, buf_end - p, p + record_len,
+ (size_t)(buf_end - (p + record_len)));
}
}
/* If buffer does not have space for given variable - return FALSE */
return FALSE;
}
/* Copy given variable to end of buffer */
- memmove_s(p, buf_end - p, variable, variable_record_len);
+ memmove_s(p, buf_end - p, variable, (size_t)variable_record_len);
/* Adding trailing 0 */
p[variable_record_len] = 0;
/* Setting optional output parameter - length of data in buffer */
* Contents are wifi-specific, used by any kernel or app-level
* software that might want wifi things as it grows.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmwifi_channels.c 695288 2017-04-19 17:20:39Z $
+ * $Id: bcmwifi_channels.c 806092 2019-02-21 08:19:13Z $
*/
#include <bcm_cfg.h>
return -1;
}
- return sb;
+ return (int)sb;
}
/* return primary20 channel given center channel and side band */
uint i;
for (i = 0; i < WF_NUM_5G_80M_CHANS; i ++) {
if (ch == wf_5g_80m_chans[i])
- return i;
+ return (int)i;
}
return -1;
if (!read_uint(&a, &num))
return 0;
/* if we are looking at a 'g', then the first number was a band */
- c = tolower((int)a[0]);
+ c = tolower(a[0]);
if (c == 'g') {
a++; /* consume the char */
if (!read_uint(&a, &pri_ch))
return 0;
- c = tolower((int)a[0]);
+ c = tolower(a[0]);
}
else {
/* first number is channel, use default for band */
* or '+80' if bw = 80, to make '80+80' bw.
*/
- c = tolower((int)a[0]);
+ c = (char)tolower((int)a[0]);
/* if we have a 2g/40 channel, we should have a l/u spec now */
if (chspec_band == WL_CHANSPEC_BAND_2G && bw == 40) {
sb = channel_to_sb(center_ch[i], pri_ch, bw);
if (sb >= 0) {
chspec_ch = center_ch[i];
- chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT;
+ chspec_sb = (uint)(sb << WL_CHANSPEC_CTL_SB_SHIFT);
break;
}
}
return 0;
}
- chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT;
+ chspec_sb = (uint)(sb << WL_CHANSPEC_CTL_SB_SHIFT);
}
- chspec = (chspec_ch | chspec_band | chspec_bw | chspec_sb);
+ chspec = (chanspec_t)(chspec_ch | chspec_band | chspec_bw | chspec_sb);
if (wf_chspec_malformed(chspec))
return 0;
return same_primary;
}
+/**
+ * Create a 20MHz chanspec for the given band.
+ *
+ * This function returns a 20MHz chanspec in the given band.
+ *
+ * @param channel 20MHz channel number
+ * @param band a chanspec band (e.g. WL_CHANSPEC_BAND_2G)
+ *
+ * @return Returns a 20MHz chanspec, or INVCHANSPEC in case of error.
+ */
+chanspec_t
+wf_create_20MHz_chspec(uint channel, chanspec_band_t band)
+{
+ chanspec_t chspec;
+
+ if (channel <= WL_CHANSPEC_CHAN_MASK &&
+ (band == WL_CHANSPEC_BAND_2G ||
+ band == WL_CHANSPEC_BAND_5G)) {
+ chspec = band | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE | channel;
+ if (!wf_chspec_valid(chspec)) {
+ chspec = INVCHANSPEC;
+ }
+ } else {
+ chspec = INVCHANSPEC;
+ }
+
+ return chspec;
+}
+
/**
* Return the primary 20MHz channel.
*
}
/* Create primary 40MHz chanspec */
- chspec40 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_40 |
+ chspec40 = (chanspec_t)(WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_40 |
sb | center_chan);
}
if ((freq < base) || (freq > base + 1000))
return -1;
- offset = freq - base;
+ offset = (int)(freq - base);
ch = offset / 5;
/* check that frequency is a 5MHz multiple from the base */
else if ((start_factor == WF_CHAN_FACTOR_2_4_G) && (ch == 14))
freq = 2484;
else
- freq = ch * 5 + start_factor / 2;
+ freq = (int)(ch * 5 + start_factor / 2);
return freq;
}
seg1 = chan0_id;
}
- chanspec = ((seg0 << WL_CHANSPEC_CHAN1_SHIFT) |
+ chanspec = (uint16)((seg0 << WL_CHANSPEC_CHAN1_SHIFT) |
(seg1 << WL_CHANSPEC_CHAN2_SHIFT) |
(sb << WL_CHANSPEC_CTL_SB_SHIFT) |
WL_CHANSPEC_BW_8080 |
center_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN1(chspec));
/* Create primary 80MHz chanspec */
- chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
+ chspec80 = (chanspec_t)(WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
}
else if (CHSPEC_IS160(chspec)) {
center_chan = CHSPEC_CHANNEL(chspec);
}
/* Create primary 80MHz chanspec */
- chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
+ chspec80 = (chanspec_t)(WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
}
else {
chspec80 = INVCHANSPEC;
center_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN2(chspec));
/* Create secondary 80MHz chanspec */
- chspec80 = (WL_CHANSPEC_BAND_5G |
+ chspec80 = (chanspec_t)(WL_CHANSPEC_BAND_5G |
WL_CHANSPEC_BW_80 |
WL_CHANSPEC_CTL_SB_LL |
center_chan);
}
/* Create secondary 80MHz chanspec */
- chspec80 = (WL_CHANSPEC_BAND_5G |
+ chspec80 = (chanspec_t)(WL_CHANSPEC_BAND_5G |
WL_CHANSPEC_BW_80 |
WL_CHANSPEC_CTL_SB_LL |
center_chan);
else {
/* for 20, 40, and 80 Mhz */
ch[0] = CHSPEC_CHANNEL(chspec);
- ch[1] = -1;
+ ch[1] = 0xFFu;
}
return;
uint8 pri_ch = (pext)[0] = wf_chspec_primary20_chan(t);
if (CHSPEC_IS20(chspec)) return; /* nothing more to do since 20MHz chspec */
/* 20MHz EXT */
- (pext)[1] = pri_ch + (IS_CTL_IN_L20(t) ? CH_20MHZ_APART : -CH_20MHZ_APART);
+ (pext)[1] = pri_ch + (uint8)(IS_CTL_IN_L20(t) ? CH_20MHZ_APART : -CH_20MHZ_APART);
if (CHSPEC_IS40(chspec)) return; /* nothing more to do since 40MHz chspec */
/* center 40MHz EXT */
- t = wf_channel2chspec(pri_ch + (IS_CTL_IN_L40(chspec) ?
- CH_40MHZ_APART : -CH_40MHZ_APART), WL_CHANSPEC_BW_40);
+ t = wf_channel2chspec((uint)(pri_ch + (IS_CTL_IN_L40(chspec) ?
+ CH_40MHZ_APART : -CH_40MHZ_APART)), WL_CHANSPEC_BW_40);
GET_ALL_SB(t, &((pext)[2])); /* get the 20MHz side bands in 40MHz EXT */
if (CHSPEC_IS80(chspec)) return; /* nothing more to do since 80MHz chspec */
t = CH80MHZ_CHSPEC(wf_chspec_secondary80_channel(chspec), WL_CHANSPEC_CTL_SB_LLL);
* This header file housing the define and function prototype use by
* both the wl driver, tools & Apps.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmwifi_channels.h 695288 2017-04-19 17:20:39Z $
+ * $Id: bcmwifi_channels.h 806092 2019-02-21 08:19:13Z $
*/
#ifndef _bcmwifi_channels_h_
/* A chanspec holds the channel number, band, bandwidth and primary 20MHz sideband */
typedef uint16 chanspec_t;
+typedef uint16 chanspec_band_t;
+typedef uint16 chanspec_bw_t;
+typedef uint16 chanspec_subband_t;
/* channel defines */
#define CH_80MHZ_APART 16
#define CH_20MHZ_APART 4
#define CH_10MHZ_APART 2
#define CH_5MHZ_APART 1 /* 2G band channels are 5 Mhz apart */
-#define CH_MAX_2G_CHANNEL 14 /* Max channel in 2G band */
+
+#define CH_MIN_2G_CHANNEL 1u /* Min channel in 2G band */
+#define CH_MAX_2G_CHANNEL 14u /* Max channel in 2G band */
+#define CH_MIN_2G_40M_CHANNEL 3u /* Min 40MHz center channel in 2G band */
+#define CH_MAX_2G_40M_CHANNEL 11u /* Max 40MHz center channel in 2G band */
/* maximum # channels the s/w supports */
#define MAXCHANNEL 224 /* max # supported channels. The max channel no is above,
#define WL_CHANSPEC_CTL_SB_UPPER WL_CHANSPEC_CTL_SB_LLU
#define WL_CHANSPEC_CTL_SB_NONE WL_CHANSPEC_CTL_SB_LLL
-#define WL_CHANSPEC_BW_MASK 0x3800
-#define WL_CHANSPEC_BW_SHIFT 11
-#define WL_CHANSPEC_BW_5 0x0000
-#define WL_CHANSPEC_BW_10 0x0800
-#define WL_CHANSPEC_BW_20 0x1000
-#define WL_CHANSPEC_BW_40 0x1800
-#define WL_CHANSPEC_BW_80 0x2000
-#define WL_CHANSPEC_BW_160 0x2800
-#define WL_CHANSPEC_BW_8080 0x3000
-
-#define WL_CHANSPEC_BAND_MASK 0xc000
-#define WL_CHANSPEC_BAND_SHIFT 14
-#define WL_CHANSPEC_BAND_2G 0x0000
-#define WL_CHANSPEC_BAND_3G 0x4000
-#define WL_CHANSPEC_BAND_4G 0x8000
-#define WL_CHANSPEC_BAND_5G 0xc000
-#define INVCHANSPEC 255
-#define MAX_CHANSPEC 0xFFFF
+#define WL_CHANSPEC_BW_MASK 0x3800u
+#define WL_CHANSPEC_BW_SHIFT 11u
+#define WL_CHANSPEC_BW_5 0x0000u
+#define WL_CHANSPEC_BW_10 0x0800u
+#define WL_CHANSPEC_BW_20 0x1000u
+#define WL_CHANSPEC_BW_40 0x1800u
+#define WL_CHANSPEC_BW_80 0x2000u
+#define WL_CHANSPEC_BW_160 0x2800u
+#define WL_CHANSPEC_BW_8080 0x3000u
+
+#define WL_CHANSPEC_BAND_MASK 0xc000u
+#define WL_CHANSPEC_BAND_SHIFT 14u
+#define WL_CHANSPEC_BAND_2G 0x0000u
+#define WL_CHANSPEC_BAND_3G 0x4000u
+#define WL_CHANSPEC_BAND_4G 0x8000u
+#define WL_CHANSPEC_BAND_5G 0xc000u
+#define INVCHANSPEC 255u
+#define MAX_CHANSPEC 0xFFFFu
#define WL_CHANNEL_BAND(ch) (((ch) <= CH_MAX_2G_CHANNEL) ? \
WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)
*/
extern const char *wf_chspec_to_bw_str(chanspec_t chspec);
+/**
+ * Create a 20MHz chanspec for the given band.
+ */
+chanspec_t wf_create_20MHz_chspec(uint channel, chanspec_band_t band);
+
/**
* Return the primary 20MHz chanspec.
*
/*
* Indices for 802.11 a/b/g/n/ac 1-3 chain symmetric transmit rates
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Common OS-independent driver header for rate management.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Driver O/S-independent utility routines
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmxtlv.c 700655 2017-05-20 06:09:06Z $
+ * $Id: bcmxtlv.c 788740 2018-11-13 21:45:01Z $
*/
#include <bcm_cfg.h>
int len;
lenp = (const uint8 *)&elt->len; /* nominal */
- if (opts & BCM_XTLV_OPTION_IDU8) --lenp;
+ if (opts & BCM_XTLV_OPTION_IDU8) {
+ --lenp;
+ }
- if (opts & BCM_XTLV_OPTION_LENU8)
+ if (opts & BCM_XTLV_OPTION_LENU8) {
len = *lenp;
- else
+ } else if (opts & BCM_XTLV_OPTION_LENBE) {
+ len = (uint32)hton16(elt->len);
+ } else {
len = ltoh16_ua(lenp);
+ }
return len;
}
bcm_xtlv_id(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts)
{
int id = 0;
- if (opts & BCM_XTLV_OPTION_IDU8)
+ if (opts & BCM_XTLV_OPTION_IDU8) {
id = *(const uint8 *)elt;
- else
+ } else if (opts & BCM_XTLV_OPTION_IDBE) {
+ id = (uint32)hton16(elt->id);
+ } else {
id = ltoh16_ua((const uint8 *)elt);
+ }
return id;
}
*lenp = (uint8)len;
data_buf = lenp + sizeof(uint8);
} else {
- ASSERT(!"Unexpected xtlv option");
+ bool Unexpected_xtlv_option = TRUE;
+ BCM_REFERENCE(Unexpected_xtlv_option);
+ ASSERT(!Unexpected_xtlv_option);
return;
}
return bcmerror;
}
-void
-dhd_set_path_params(struct dhd_bus *bus)
-{
- /* External conf takes precedence if specified */
- dhd_conf_preinit(bus->dhd);
-
- if (bus->dhd->conf_path[0] == '\0') {
- dhd_conf_set_path(bus->dhd, "config.txt", bus->dhd->conf_path, bus->nv_path);
- }
- if (bus->dhd->clm_path[0] == '\0') {
- dhd_conf_set_path(bus->dhd, "clm.blob", bus->dhd->clm_path, bus->fw_path);
- }
-#ifdef CONFIG_PATH_AUTO_SELECT
- dhd_conf_set_conf_name_by_chip(bus->dhd, bus->dhd->conf_path);
-#endif
-
- dhd_conf_read_config(bus->dhd, bus->dhd->conf_path);
-
- dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path);
- dhd_conf_set_nv_name_by_chip(bus->dhd, bus->nv_path);
- dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path);
-
- printf("Final fw_path=%s\n", bus->fw_path);
- printf("Final nv_path=%s\n", bus->nv_path);
- printf("Final clm_path=%s\n", bus->dhd->clm_path);
- printf("Final conf_path=%s\n", bus->dhd->conf_path);
-
-}
-
void
dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path,
char *pnv_path, char *pclm_path, char *pconf_path)
bus->dhd->clm_path = pclm_path;
bus->dhd->conf_path = pconf_path;
- dhd_set_path_params(bus);
+ dhd_conf_set_path_params(bus->dhd, NULL, NULL, bus->fw_path, bus->nv_path);
}
if (dbus_download_firmware(bus, bus->fw_path, bus->nv_path) != DBUS_OK)
goto fail;
#endif
+ } else {
+ goto fail;
}
}
} else {
* Provides type definitions and function prototypes used to link the
* DHD OS, bus, and protocol modules.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd.h 771671 2018-07-11 06:58:25Z $
+ * $Id: dhd.h 822756 2019-05-30 13:20:26Z $
*/
/****************
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK)
+#if defined(CONFIG_HAS_WAKELOCK)
#include <linux/wakelock.h>
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_HAS_WAKELOCK) */
+#endif /* defined CONFIG_HAS_WAKELOCK */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
#include <linux/sched/types.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
#endif /* PCIE_FULL_DONGLE */
#include <wlioctl.h>
+#include <bcmstdlib_s.h>
#include <dhdioctl.h>
#include <wlfc_proto.h>
#include <hnd_armtrap.h>
#include <pom.h>
#endif /* DHD_ERPOM */
+#include <dngl_stats.h>
+
#ifdef DEBUG_DPC_THREAD_WATCHDOG
#define MAX_RESCHED_CNT 600
#endif /* DEBUG_DPC_THREAD_WATCHDOG */
struct dhd_ioctl;
struct dhd_dbg;
struct dhd_ts;
+#ifdef DNGL_AXI_ERROR_LOGGING
+struct dhd_axi_error_dump;
+#endif /* DNGL_AXI_ERROR_LOGGING */
/* The level of bus communication with the dongle */
enum dhd_bus_state {
#define DHD_BUS_BUSY_IN_MEMDUMP 0x1000
#define DHD_BUS_BUSY_IN_SSSRDUMP 0x2000
#define DHD_BUS_BUSY_IN_LOGDUMP 0x4000
+#define DHD_BUS_BUSY_IN_HALDUMP 0x8000
#define DHD_BUS_BUSY_SET_IN_TX(dhdp) \
(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_TX
#define DHD_BUS_BUSY_SET_IN_CHECKDIED(dhdp) \
(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_CHECKDIED
#define DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp) \
- (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_MEMDUMP
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_MEMDUMP
#define DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp) \
- (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SSSRDUMP
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SSSRDUMP
#define DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp) \
- (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_LOGDUMP
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_LOGDUMP
+#define DHD_BUS_BUSY_SET_IN_HALDUMP(dhdp) \
+ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_HALDUMP
#define DHD_BUS_BUSY_CLEAR_IN_TX(dhdp) \
(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX
(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SSSRDUMP
#define DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp) \
(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_LOGDUMP
+#define DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp) \
+ (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_HALDUMP
#define DHD_BUS_BUSY_CHECK_IN_TX(dhdp) \
((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_TX)
((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_SSSRDUMP)
#define DHD_BUS_BUSY_CHECK_IN_LOGDUMP(dhdp) \
((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_LOGDUMP)
+#define DHD_BUS_BUSY_CHECK_IN_HALDUMP(dhdp) \
+ ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_HALDUMP)
#define DHD_BUS_BUSY_CHECK_IDLE(dhdp) \
((dhdp)->dhd_bus_busy_state == 0)
#define DHD_BUS_CHECK_REMOVE(dhdp) \
((dhdp)->busstate == DHD_BUS_REMOVE)
+/* IOVar flags for common error checks */
+#define DHD_IOVF_PWRREQ_BYPASS (1<<0) /* flags to prevent bp access during host sleep state */
+
#define MAX_MTU_SZ (1600u)
/* (u64)result = (u64)dividend / (u64)divisor */
})
#define SEC_USEC_FMT \
- "%015llu.%06u"
+ "%5llu.%06u"
+/* t: time in nanoseconds */
#define GET_SEC_USEC(t) \
- DIV_U64_BY_U32(t, USEC_PER_SEC), MOD_U64_BY_U32(t, USEC_PER_SEC)
+ DIV_U64_BY_U32(t, NSEC_PER_SEC), \
+ ((uint32)(MOD_U64_BY_U32(t, NSEC_PER_SEC) / (uint32)NSEC_PER_USEC))
/* Download Types */
typedef enum download_type {
#define DHD_SCAN_ASSOC_ACTIVE_TIME 40 /* ms: Embedded default Active setting from DHD */
#define DHD_SCAN_UNASSOC_ACTIVE_TIME 80 /* ms: Embedded def. Unassoc Active setting from DHD */
-#define DHD_SCAN_PASSIVE_TIME 130 /* ms: Embedded default Passive setting from DHD */
#define DHD_SCAN_HOME_TIME 45 /* ms: Embedded default Home time setting from DHD */
#define DHD_SCAN_HOME_AWAY_TIME 100 /* ms: Embedded default Home Away time setting from DHD */
+#ifndef CUSTOM_SCAN_PASSIVE_TIME
+#define DHD_SCAN_PASSIVE_TIME 130 /* ms: Embedded default Passive setting from DHD */
+#else
+#define DHD_SCAN_PASSIVE_TIME CUSTOM_SCAN_PASSIVE_TIME /* ms: Custom Passive setting from DHD */
+#endif /* CUSTOM_SCAN_PASSIVE_TIME */
#ifndef POWERUP_MAX_RETRY
#define POWERUP_MAX_RETRY 3 /* how many times we retry to power up the chip */
#ifdef DHD_DEBUG
#define DHD_JOIN_MAX_TIME_DEFAULT 10000 /* ms: Max time out for joining AP */
#define DHD_SCAN_DEF_TIMEOUT 10000 /* ms: Max time out for scan in progress */
-#endif // endif
+#endif /* DHD_DEBUG */
#ifndef CONFIG_BCMDHD_CLM_PATH
#define CONFIG_BCMDHD_CLM_PATH "/etc/wifi/bcmdhd_clm.blob"
extern char bus_api_revision[];
enum dhd_bus_wake_state {
- WAKE_LOCK_OFF,
- WAKE_LOCK_PRIV,
- WAKE_LOCK_DPC,
- WAKE_LOCK_IOCTL,
- WAKE_LOCK_DOWNLOAD,
- WAKE_LOCK_TMOUT,
- WAKE_LOCK_WATCHDOG,
- WAKE_LOCK_LINK_DOWN_TMOUT,
- WAKE_LOCK_PNO_FIND_TMOUT,
- WAKE_LOCK_SOFTAP_SET,
- WAKE_LOCK_SOFTAP_STOP,
- WAKE_LOCK_SOFTAP_START,
- WAKE_LOCK_SOFTAP_THREAD
+ WAKE_LOCK_OFF = 0,
+ WAKE_LOCK_PRIV = 1,
+ WAKE_LOCK_DPC = 2,
+ WAKE_LOCK_IOCTL = 3,
+ WAKE_LOCK_DOWNLOAD = 4,
+ WAKE_LOCK_TMOUT = 5,
+ WAKE_LOCK_WATCHDOG = 6,
+ WAKE_LOCK_LINK_DOWN_TMOUT = 7,
+ WAKE_LOCK_PNO_FIND_TMOUT = 8,
+ WAKE_LOCK_SOFTAP_SET = 9,
+ WAKE_LOCK_SOFTAP_STOP = 10,
+ WAKE_LOCK_SOFTAP_START = 11,
+ WAKE_LOCK_SOFTAP_THREAD = 12
};
enum dhd_prealloc_index {
- DHD_PREALLOC_PROT = 0,
- DHD_PREALLOC_RXBUF,
- DHD_PREALLOC_DATABUF,
- DHD_PREALLOC_OSL_BUF,
-#if defined(STATIC_WL_PRIV_STRUCT)
- DHD_PREALLOC_WIPHY_ESCAN0 = 5,
-#ifdef DUAL_ESCAN_RESULT_BUFFER
- DHD_PREALLOC_WIPHY_ESCAN1,
-#endif /* DUAL_ESCAN_RESULT_BUFFER */
-#endif /* STATIC_WL_PRIV_STRUCT */
- DHD_PREALLOC_DHD_INFO = 7,
- DHD_PREALLOC_DHD_WLFC_INFO = 8,
- DHD_PREALLOC_IF_FLOW_LKUP = 9,
+ DHD_PREALLOC_PROT = 0,
+ DHD_PREALLOC_RXBUF = 1,
+ DHD_PREALLOC_DATABUF = 2,
+ DHD_PREALLOC_OSL_BUF = 3,
+ DHD_PREALLOC_SKB_BUF = 4,
+ DHD_PREALLOC_WIPHY_ESCAN0 = 5,
+ DHD_PREALLOC_WIPHY_ESCAN1 = 6,
+ DHD_PREALLOC_DHD_INFO = 7,
+ DHD_PREALLOC_DHD_WLFC_INFO = 8,
+ DHD_PREALLOC_IF_FLOW_LKUP = 9,
/* 10 */
- DHD_PREALLOC_MEMDUMP_RAM = 11,
- DHD_PREALLOC_DHD_WLFC_HANGER = 12,
- DHD_PREALLOC_PKTID_MAP = 13,
- DHD_PREALLOC_PKTID_MAP_IOCTL = 14,
- DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15,
- DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16,
- DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17,
+ DHD_PREALLOC_MEMDUMP_RAM = 11,
+ DHD_PREALLOC_DHD_WLFC_HANGER = 12,
+ DHD_PREALLOC_PKTID_MAP = 13,
+ DHD_PREALLOC_PKTID_MAP_IOCTL = 14,
+ DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15,
+ DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16,
+ DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17,
DHD_PREALLOC_STAT_REPORT_BUF = 18,
- DHD_PREALLOC_WL_ESCAN_INFO = 19,
+ DHD_PREALLOC_WL_ESCAN = 19,
DHD_PREALLOC_FW_VERBOSE_RING = 20,
DHD_PREALLOC_FW_EVENT_RING = 21,
DHD_PREALLOC_DHD_EVENT_RING = 22,
};
enum dhd_dongledump_mode {
- DUMP_DISABLED = 0,
- DUMP_MEMONLY,
- DUMP_MEMFILE,
- DUMP_MEMFILE_BUGON,
- DUMP_MEMFILE_MAX
+ DUMP_DISABLED = 0,
+ DUMP_MEMONLY = 1,
+ DUMP_MEMFILE = 2,
+ DUMP_MEMFILE_BUGON = 3,
+ DUMP_MEMFILE_MAX = 4
};
enum dhd_dongledump_type {
- DUMP_TYPE_RESUMED_ON_TIMEOUT = 1,
- DUMP_TYPE_D3_ACK_TIMEOUT,
- DUMP_TYPE_DONGLE_TRAP,
- DUMP_TYPE_MEMORY_CORRUPTION,
- DUMP_TYPE_PKTID_AUDIT_FAILURE,
- DUMP_TYPE_PKTID_INVALID,
- DUMP_TYPE_SCAN_TIMEOUT,
- DUMP_TYPE_SCAN_BUSY,
- DUMP_TYPE_BY_SYSDUMP,
- DUMP_TYPE_BY_LIVELOCK,
- DUMP_TYPE_AP_LINKUP_FAILURE,
- DUMP_TYPE_AP_ABNORMAL_ACCESS,
- DUMP_TYPE_CFG_VENDOR_TRIGGERED,
- DUMP_TYPE_RESUMED_ON_TIMEOUT_TX,
- DUMP_TYPE_RESUMED_ON_TIMEOUT_RX,
- DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR,
- DUMP_TYPE_TRANS_ID_MISMATCH,
- DUMP_TYPE_IFACE_OP_FAILURE,
-#ifdef DEBUG_DNGL_INIT_FAIL
- DUMP_TYPE_DONGLE_INIT_FAILURE,
-#endif /* DEBUG_DNGL_INIT_FAIL */
- DUMP_TYPE_DONGLE_HOST_EVENT,
- DUMP_TYPE_SMMU_FAULT,
- DUMP_TYPE_RESUMED_UNKNOWN,
-#ifdef DHD_ERPOM
- DUMP_TYPE_DUE_TO_BT,
-#endif /* DHD_ERPOM */
- DUMP_TYPE_LOGSET_BEYOND_RANGE,
- DUMP_TYPE_BY_USER
+ DUMP_TYPE_RESUMED_ON_TIMEOUT = 1,
+ DUMP_TYPE_D3_ACK_TIMEOUT = 2,
+ DUMP_TYPE_DONGLE_TRAP = 3,
+ DUMP_TYPE_MEMORY_CORRUPTION = 4,
+ DUMP_TYPE_PKTID_AUDIT_FAILURE = 5,
+ DUMP_TYPE_PKTID_INVALID = 6,
+ DUMP_TYPE_SCAN_TIMEOUT = 7,
+ DUMP_TYPE_SCAN_BUSY = 8,
+ DUMP_TYPE_BY_SYSDUMP = 9,
+ DUMP_TYPE_BY_LIVELOCK = 10,
+ DUMP_TYPE_AP_LINKUP_FAILURE = 11,
+ DUMP_TYPE_AP_ABNORMAL_ACCESS = 12,
+ DUMP_TYPE_CFG_VENDOR_TRIGGERED = 13,
+ DUMP_TYPE_RESUMED_ON_TIMEOUT_TX = 14,
+ DUMP_TYPE_RESUMED_ON_TIMEOUT_RX = 15,
+ DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR = 16,
+ DUMP_TYPE_TRANS_ID_MISMATCH = 17,
+ DUMP_TYPE_IFACE_OP_FAILURE = 18,
+ DUMP_TYPE_DONGLE_INIT_FAILURE = 19,
+ DUMP_TYPE_READ_SHM_FAIL = 20,
+ DUMP_TYPE_DONGLE_HOST_EVENT = 21,
+ DUMP_TYPE_SMMU_FAULT = 22,
+ DUMP_TYPE_RESUMED_UNKNOWN = 23,
+ DUMP_TYPE_DUE_TO_BT = 24,
+ DUMP_TYPE_LOGSET_BEYOND_RANGE = 25,
+ DUMP_TYPE_BY_USER = 26,
+ DUMP_TYPE_CTO_RECOVERY = 27,
+ DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR = 28,
+ DUMP_TYPE_PROXD_TIMEOUT = 29,
+ DUMP_TYPE_PKTID_POOL_DEPLETED = 30
};
enum dhd_hang_reason {
- HANG_REASON_MASK = 0x8000,
- HANG_REASON_IOCTL_RESP_TIMEOUT = 0x8001,
- HANG_REASON_DONGLE_TRAP = 0x8002,
- HANG_REASON_D3_ACK_TIMEOUT = 0x8003,
- HANG_REASON_BUS_DOWN = 0x8004,
- HANG_REASON_MSGBUF_LIVELOCK = 0x8006,
- HANG_REASON_IFACE_DEL_FAILURE = 0x8007,
- HANG_REASON_HT_AVAIL_ERROR = 0x8008,
- HANG_REASON_PCIE_RC_LINK_UP_FAIL = 0x8009,
- HANG_REASON_PCIE_PKTID_ERROR = 0x800A,
- HANG_REASON_IFACE_ADD_FAILURE = 0x800B,
- HANG_REASON_PCIE_LINK_DOWN = 0x8805,
- HANG_REASON_INVALID_EVENT_OR_DATA = 0x8806,
- HANG_REASON_UNKNOWN = 0x8807,
- HANG_REASON_MAX = 0x8808
+ HANG_REASON_MASK = 0x8000,
+ HANG_REASON_IOCTL_RESP_TIMEOUT = 0x8001,
+ HANG_REASON_DONGLE_TRAP = 0x8002,
+ HANG_REASON_D3_ACK_TIMEOUT = 0x8003,
+ HANG_REASON_BUS_DOWN = 0x8004,
+ HANG_REASON_MSGBUF_LIVELOCK = 0x8006,
+ HANG_REASON_IFACE_DEL_FAILURE = 0x8007,
+ HANG_REASON_HT_AVAIL_ERROR = 0x8008,
+ HANG_REASON_PCIE_RC_LINK_UP_FAIL = 0x8009,
+ HANG_REASON_PCIE_PKTID_ERROR = 0x800A,
+ HANG_REASON_IFACE_ADD_FAILURE = 0x800B,
+ HANG_REASON_IOCTL_RESP_TIMEOUT_SCHED_ERROR = 0x800C,
+ HANG_REASON_D3_ACK_TIMEOUT_SCHED_ERROR = 0x800D,
+ HANG_REASON_SEQUENTIAL_PRIVCMD_ERROR = 0x800E,
+ HANG_REASON_SCAN_BUSY = 0x800F,
+ HANG_REASON_BSS_UP_FAILURE = 0x8010,
+ HANG_REASON_BSS_DOWN_FAILURE = 0x8011,
+ HANG_REASON_PCIE_LINK_DOWN_RC_DETECT = 0x8805,
+ HANG_REASON_INVALID_EVENT_OR_DATA = 0x8806,
+ HANG_REASON_UNKNOWN = 0x8807,
+ HANG_REASON_PCIE_LINK_DOWN_EP_DETECT = 0x8808,
+ HANG_REASON_PCIE_CTO_DETECT = 0x8809,
+ HANG_REASON_MAX = 0x880A
};
#define WLC_E_DEAUTH_MAX_REASON 0x0FFF
};
#define VENDOR_SEND_HANG_EXT_INFO_LEN (800 + 1)
+
+#ifdef DHD_EWPR_VER2
+#define VENDOR_SEND_HANG_EXT_INFO_VER 20181111
+#else
#define VENDOR_SEND_HANG_EXT_INFO_VER 20170905
+#endif // endif
+
#define HANG_INFO_TRAP_T_NAME_MAX 6
#define HANG_INFO_TRAP_T_REASON_IDX 0
#define HANG_INFO_TRAP_T_SUBTYPE_IDX 2
#define HANG_KEY_DEL ' '
#define HANG_RAW_DEL '_'
+#ifdef DHD_EWPR_VER2
+#define HANG_INFO_BIGDATA_EXTRA_KEY 4
+#define HANG_INFO_TRAP_T_EXTRA_KEY_IDX 5
+#endif // endif
+
/* Packet alignment for most efficient SDIO (can change based on platform) */
#ifndef DHD_SDALIGN
#define DHD_SDALIGN 32
#endif /* defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
#ifdef DHD_LOG_DUMP
+#define DUMP_SSSR_ATTR_START 2
+#define DUMP_SSSR_ATTR_COUNT 6
+
+typedef enum {
+ SSSR_C0_D11_BEFORE = 0,
+ SSSR_C0_D11_AFTER = 1,
+ SSSR_C1_D11_BEFORE = 2,
+ SSSR_C1_D11_AFTER = 3,
+ SSSR_DIG_BEFORE = 4,
+ SSSR_DIG_AFTER = 5
+} EWP_SSSR_DUMP;
+
+typedef enum {
+ DLD_BUF_TYPE_GENERAL = 0,
+ DLD_BUF_TYPE_PRESERVE = 1,
+ DLD_BUF_TYPE_SPECIAL = 2,
+ DLD_BUF_TYPE_ECNTRS = 3,
+ DLD_BUF_TYPE_FILTER = 4,
+ DLD_BUF_TYPE_ALL = 5
+} log_dump_type_t;
+
#define LOG_DUMP_MAGIC 0xDEB3DEB3
#define HEALTH_CHK_BUF_SIZE 256
#define ECNTR_RING_NAME "ewp_ecntr_ring"
#endif /* EWP_ECNTRS_LOGGING */
+#ifdef EWP_RTT_LOGGING
+#define RTT_RING_ID 0xADCD
+#define RTT_RING_NAME "ewp_rtt_ring"
+#endif /* EWP_RTT_LOGGING */
+
#if defined(DEBUGABILITY) && defined(EWP_ECNTRS_LOGGING)
#error "Duplicate rings will be created since both the features are enabled"
#endif /* DEBUGABILITY && EWP_ECNTRS_LOGGING */
LOG_DUMP_SECTION_HEALTH_CHK,
LOG_DUMP_SECTION_PRESERVE,
LOG_DUMP_SECTION_COOKIE,
- LOG_DUMP_SECTION_FLOWRING
+ LOG_DUMP_SECTION_FLOWRING,
+ LOG_DUMP_SECTION_STATUS,
+ LOG_DUMP_SECTION_RTT
} log_dump_section_type_t;
/* Each section in the debug_dump log file shall begin with a header */
extern void dhd_log_dump_write(int type, char *binary_data,
int binary_len, const char *fmt, ...);
-extern char *dhd_log_dump_get_timestamp(void);
-bool dhd_log_dump_ecntr_enabled(void);
#endif /* DHD_LOG_DUMP */
/* DEBUG_DUMP SUB COMMAND */
#define DHD_DUMP_SUBSTR_UNWANTED "_unwanted"
#define DHD_DUMP_SUBSTR_DISCONNECTED "_disconnected"
+#ifdef DNGL_AXI_ERROR_LOGGING
+#define DHD_DUMP_AXI_ERROR_FILENAME "axi_error"
+#define DHD_DUMP_HAL_FILENAME_SUFFIX "_hal"
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
extern void get_debug_dump_time(char *str);
extern void clear_debug_dump_time(char *str);
-#define WL_MAX_PRESERVE_BUFFER (NUM_EVENT_LOG_SETS)
-#define FW_LOGSET_MASK_ALL 0xFF
+#define FW_LOGSET_MASK_ALL 0xFFFFu
+
+#ifdef WL_MONITOR
+#define MONPKT_EXTRA_LEN 48u
+#endif /* WL_MONITOR */
+
+#define DHDIF_FWDER(dhdif) FALSE
#define DHD_COMMON_DUMP_PATH "/data/misc/wifi/"
};
#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
+/* Enable Reserve STA flowrings only for Android */
+#define DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
+
+typedef enum dhd_induce_error_states
+{
+ DHD_INDUCE_ERROR_CLEAR = 0x0,
+ DHD_INDUCE_IOCTL_TIMEOUT = 0x1,
+ DHD_INDUCE_D3_ACK_TIMEOUT = 0x2,
+ DHD_INDUCE_LIVELOCK = 0x3,
+ DHD_INDUCE_DROP_OOB_IRQ = 0x4,
+ DHD_INDUCE_DROP_AXI_SIG = 0x5,
+ DHD_INDUCE_ERROR_MAX = 0x6
+} dhd_induce_error_states_t;
+
+#ifdef DHD_HP2P
+#define MAX_TX_HIST_BIN 16
+#define MAX_RX_HIST_BIN 10
+#define MAX_HP2P_FLOWS 16
+#define HP2P_PRIO 7
+#define HP2P_PKT_THRESH 48
+#define HP2P_TIME_THRESH 200
+#define HP2P_PKT_EXPIRY 40
+#define HP2P_TIME_SCALE 32
+
+typedef struct hp2p_info {
+ void *dhd_pub;
+ uint16 flowid;
+ bool hrtimer_init;
+ void *ring;
+ struct tasklet_hrtimer timer;
+ uint64 num_pkt_limit;
+ uint64 num_timer_limit;
+ uint64 num_timer_start;
+ uint64 tx_t0[MAX_TX_HIST_BIN];
+ uint64 tx_t1[MAX_TX_HIST_BIN];
+ uint64 rx_t0[MAX_RX_HIST_BIN];
+} hp2p_info_t;
+#endif /* DHD_HP2P */
+
+typedef enum {
+ FW_UNLOADED = 0,
+ FW_DOWNLOAD_IN_PROGRESS = 1,
+ FW_DOWNLOAD_DONE = 2
+} fw_download_status_t;
+
/**
* Common structure for module and instance linkage.
* Instantiated once per hardware (dongle) instance that this DHD manages.
uint maxctl; /* Max size rxctl request from proto to bus */
uint rxsz; /* Rx buffer size bus module should use */
uint8 wme_dp; /* wme discard priority */
-
+#ifdef DNGL_AXI_ERROR_LOGGING
+ uint32 axierror_logbuf_addr;
+ bool axi_error;
+ struct dhd_axi_error_dump *axi_err_dump;
+#endif /* DNGL_AXI_ERROR_LOGGING */
/* Dongle media info */
bool iswl; /* Dongle-resident driver is wl */
ulong drv_version; /* Version of dongle-resident driver */
*/
/* #define WL_ENABLE_P2P_IF 1 */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
- struct mutex wl_start_stop_lock; /* lock/unlock for Android start/stop */
- struct mutex wl_softap_lock; /* lock/unlock for any SoftAP/STA settings */
-#endif // endif
+ struct mutex wl_start_stop_lock; /* lock/unlock for Android start/stop */
+ struct mutex wl_softap_lock; /* lock/unlock for any SoftAP/STA settings */
#ifdef PROP_TXSTATUS
bool wlfc_enabled;
#endif // endif
bool dongle_isolation;
bool is_pcie_watchdog_reset;
+
+/* Begin - Variables to track Bus Errors */
bool dongle_trap_occured; /* flag for sending HANG event to upper layer */
bool iovar_timeout_occured; /* flag to indicate iovar resumed on timeout */
+ bool is_sched_error; /* flag to indicate timeout due to scheduling issue */
#ifdef PCIE_FULL_DONGLE
bool d3ack_timeout_occured; /* flag to indicate d3ack resumed on timeout */
bool livelock_occured; /* flag to indicate livelock occured */
+ bool pktid_audit_failed; /* flag to indicate pktid audit failure */
#endif /* PCIE_FULL_DONGLE */
+ bool iface_op_failed; /* flag to indicate interface operation failed */
+ bool scan_timeout_occurred; /* flag to indicate scan has timedout */
+ bool scan_busy_occurred; /* flag to indicate scan busy occurred */
#ifdef BT_OVER_SDIO
bool is_bt_recovery_required;
#endif // endif
-#ifdef DHD_MAP_LOGGING
bool smmu_fault_occurred; /* flag to indicate SMMU Fault */
-#endif /* DHD_MAP_LOGGING */
+ /*
+ * Add any new variables to track Bus errors above
+ * this line. Also ensure that the variable is
+ * cleared from dhd_clear_bus_errors
+ */
+/* End - Variables to track Bus Errors */
+
int hang_was_sent;
+ int hang_was_pending;
int rxcnt_timeout; /* counter rxcnt timeout to send HANG */
int txcnt_timeout; /* counter txcnt timeout to send HANG */
#ifdef BCMPCIE
#endif /* DHDTCPACK_SUPPRESS */
#if defined(ARP_OFFLOAD_SUPPORT)
uint32 arp_version;
+ bool hmac_updated;
#endif // endif
#if defined(BCMSUP_4WAY_HANDSHAKE)
bool fw_4way_handshake; /* Whether firmware will to do the 4way handshake. */
void *if_flow_lkup; /* per interface flowid lkup hash table */
void *flowid_lock; /* per os lock for flowid info protection */
void *flowring_list_lock; /* per os lock for flowring list protection */
+ uint8 max_multi_client_flow_rings;
+ uint8 multi_client_flow_rings;
uint32 num_flow_rings;
cumm_ctr_t cumm_ctr; /* cumm queue length placeholder */
cumm_ctr_t l2cumm_ctr; /* level 2 cumm queue length placeholder */
bool dma_h2d_ring_upd_support;
bool dma_ring_upd_overwrite; /* host overwrites support setting */
+ bool hwa_enable;
+ uint hwa_inited;
+
bool idma_enable;
uint idma_inited;
#endif /* DHD_L2_FILTER */
#ifdef DHD_SSSR_DUMP
bool sssr_inited;
+ bool sssr_dump_collected; /* Flag to indicate sssr dump is collected */
sssr_reg_info_v1_t sssr_reg_info;
uint8 *sssr_mempool;
uint *sssr_d11_before[MAX_NUM_D11CORES];
uint *sssr_dig_buf_before;
uint *sssr_dig_buf_after;
uint32 sssr_dump_mode;
+ bool collect_sssr; /* Flag to indicate SSSR dump is required */
#endif /* DHD_SSSR_DUMP */
uint8 *soc_ram;
uint32 soc_ram_length;
bool h2d_phase_supported;
bool force_dongletrap_on_bad_h2d_phase;
uint32 dongle_trap_data;
- bool fw_download_done;
+ fw_download_status_t fw_download_status;
trap_t last_trap_info; /* trap info from the last trap */
uint8 rand_mac_oui[DOT11_OUI_LEN];
#ifdef DHD_LOSSLESS_ROAMING
#ifdef EWP_ECNTRS_LOGGING
void *ecntr_dbg_ring;
#endif // endif
+#ifdef EWP_RTT_LOGGING
+ void *rtt_dbg_ring;
+#endif // endif
#ifdef DNGL_EVENT_SUPPORT
uint8 health_chk_event_data[HEALTH_CHK_BUF_SIZE];
#endif // endif
uint pcie_txs_metadata_enable;
uint wbtext_policy; /* wbtext policy of dongle */
bool wbtext_support; /* for product policy only */
-#ifdef SHOW_LOGTRACE
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
- struct mutex dhd_trace_lock;
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
-#endif /* SHOW_LOGTRACE */
-
bool max_dtim_enable; /* use MAX bcn_li_dtim value in suspend mode */
tput_test_t tput_data;
uint64 tput_start_ts;
struct _dhd_dump_file_manage *dump_file_manage;
#endif /* DHD_DUMP_MNGR */
int debug_dump_subcmd;
+ uint64 debug_dump_time_sec;
bool hscb_enable;
wait_queue_head_t tx_completion_wait;
uint32 batch_tx_pkts_cmpl;
bool dongle_edl_support;
dhd_dma_buf_t edl_ring_mem;
#endif /* EWP_EDL */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
struct mutex ndev_op_sync;
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
bool debug_buf_dest_support;
uint32 debug_buf_dest_stat[DEBUG_BUF_DEST_MAX];
*/
uint32 dhd_rte_time_sync_ms;
#endif /* DHD_H2D_LOG_TIME_SYNC */
+ int wlc_ver_major;
+ int wlc_ver_minor;
+#ifdef DHD_STATUS_LOGGING
+ void *statlog;
+#endif /* DHD_STATUS_LOGGING */
+#ifdef DHD_HP2P
+ bool hp2p_enable;
+ bool hp2p_infra_enable;
+ bool hp2p_capable;
+ bool hp2p_ts_capable;
+ uint16 pkt_thresh;
+ uint16 time_thresh;
+ uint16 pkt_expiry;
+ hp2p_info_t hp2p_info[MAX_HP2P_FLOWS];
+ bool hp2p_ring_active;
+#endif /* DHD_HP2P */
+#ifdef DHD_DB0TS
+ bool db0ts_capable;
+#endif /* DHD_DB0TS */
+ bool event_log_max_sets_queried;
+ uint32 event_log_max_sets;
+ uint16 dhd_induce_error;
+#ifdef CONFIG_SILENT_ROAM
+ bool sroam_turn_on; /* Silent roam monitor enable flags */
+ bool sroamed; /* Silent roam monitor check flags */
+#endif /* CONFIG_SILENT_ROAM */
+ bool extdtxs_in_txcpl;
+ bool hostrdy_after_init;
+#ifdef SUPPORT_SET_TID
+ uint8 tid_mode;
+ uint32 target_uid;
+ uint8 target_tid;
+#endif /* SUPPORT_SET_TID */
+#ifdef DHD_PKTDUMP_ROAM
+ void *pktcnts;
+#endif /* DHD_PKTDUMP_ROAM */
+ bool disable_dtim_in_suspend; /* Disable set bcn_li_dtim in suspend */
char *clm_path; /* module_param: path to clm vars file */
char *conf_path; /* module_param: path to config vars file */
struct dhd_conf *conf; /* Bus module handle */
void *adapter; /* adapter information, interrupt, fw path etc. */
+ void *event_params;
#ifdef BCMDBUS
bool dhd_remove;
#endif /* BCMDBUS */
-#if defined(WL_WIRELESS_EXT)
-#if defined(WL_ESCAN)
- void *escan;
-#else
- void *iscan;
+#ifdef WL_ESCAN
+ struct wl_escan_info *escan;
#endif
+#if defined(WL_WIRELESS_EXT)
+ void *wext_info;
#endif
#ifdef WL_EXT_IAPSTA
void *iapsta_params;
+#endif
+ int hostsleep;
+#ifdef SENDPROB
+ bool recv_probereq;
#endif
} dhd_pub_t;
void *dmah; /* dma mapper handle */
void *secdma; /* secure dma sec_cma_info handle */
#endif /* !DHD_PCIE_PKTID */
-#ifdef TX_STATUS_LATENCY_STATS
+#if defined(TX_STATUS_LATENCY_STATS)
uint64 q_time_us; /* time when tx pkt queued to flowring */
-#endif /* TX_STATUS_LATENCY_STATS */
+#endif // endif
} dhd_pkttag_fd_t;
/* Packet Tag for DHD PCIE Full Dongle */
#define DHD_PKT_SET_SECDMA(pkt, pkt_secdma) \
DHD_PKTTAG_FD(pkt)->secdma = (void *)(pkt_secdma)
-#ifdef TX_STATUS_LATENCY_STATS
+#if defined(TX_STATUS_LATENCY_STATS)
#define DHD_PKT_GET_QTIME(pkt) ((DHD_PKTTAG_FD(pkt))->q_time_us)
#define DHD_PKT_SET_QTIME(pkt, pkt_q_time_us) \
DHD_PKTTAG_FD(pkt)->q_time_us = (uint64)(pkt_q_time_us)
-#endif /* TX_STATUS_LATENCY_STATS */
+#endif // endif
#endif /* PCIE_FULL_DONGLE */
#if defined(BCMWDF)
WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(dhd_workitem_context_t, dhd_get_dhd_workitem_context)
#endif /* (BCMWDF) */
- #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+ #if defined(CONFIG_PM_SLEEP)
#define DHD_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
#define _DHD_PM_RESUME_WAIT(a, b) do {\
} \
} while (0)
- #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+ #endif /* CONFIG_PM_SLEEP */
#ifndef OSL_SLEEP
#define OSL_SLEEP(ms) OSL_DELAY(ms*1000)
inline static void MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t * dhdp)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_init(&dhdp->wl_softap_lock);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
}
inline static void MUTEX_LOCK_SOFTAP_SET(dhd_pub_t * dhdp)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_lock(&dhdp->wl_softap_lock);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
}
inline static void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_unlock(&dhdp->wl_softap_lock);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
}
#ifdef DHD_DEBUG_WAKE_LOCK
#define OOB_WAKE_LOCK_TIMEOUT 500
extern void dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val);
extern void dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub);
-extern int dhdpcie_get_oob_irq_num(struct dhd_bus *bus);
+
#define DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(pub, val) dhd_os_oob_irq_wake_lock_timeout(pub, val)
#define DHD_OS_OOB_IRQ_WAKE_UNLOCK(pub) dhd_os_oob_irq_wake_unlock(pub)
#endif /* BCMPCIE_OOB_HOST_WAKE */
, void *adapter
#endif
);
+extern int dhd_attach_net(dhd_pub_t *dhdp, bool need_rtnl_lock);
#if defined(WLP2P) && defined(WL_CFG80211)
/* To allow attach/detach calls corresponding to p2p0 interface */
extern int dhd_attach_p2p(dhd_pub_t *);
/* Notify tx completion */
extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success);
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+extern void dhd_eap_txcomplete(dhd_pub_t *dhdp, void *txp, bool success, int ifidx);
+extern void dhd_cleanup_m4_state_work(dhd_pub_t *dhdp, int ifidx);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
extern void dhd_bus_wakeup_work(dhd_pub_t *dhdp);
void dhd_os_logdump_unlock(dhd_pub_t *pub);
extern int dhd_os_proto_block(dhd_pub_t * pub);
extern int dhd_os_proto_unblock(dhd_pub_t * pub);
-extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition, bool resched);
+extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition);
extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub);
extern unsigned int dhd_os_get_ioctl_resp_timeout(void);
extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec);
/* Bound and delay are fine tuned after several experiments and these
* are the best case values to handle bombarding of console logs.
*/
-#define DHD_EVENT_LOGTRACE_BOUND 1
-#define DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS 200
+#define DHD_EVENT_LOGTRACE_BOUND 10
+/* since FW has event log rate health check (EVENT_LOG_RATE_HC) we can reduce
+ * the reschedule delay to 10ms
+*/
+#define DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS 10u
extern int dhd_os_read_file(void *file, char *buf, uint32 size);
extern int dhd_os_seek_file(void *file, int64 offset);
void dhd_sendup_info_buf(dhd_pub_t *dhdp, uint8 *msg);
#endif /* SHOW_LOGTRACE */
int dhd_os_write_file_posn(void *fp, unsigned long *posn,
void *buf, unsigned long buflen);
-
-#if defined(PCIE_FULL_DONGLE)
-extern void dhd_pcie_backplane_access_lock(dhd_pub_t * pub);
-extern void dhd_pcie_backplane_access_unlock(dhd_pub_t * pub);
-#endif /* defined(PCIE_FULL_DONGLE) */
+int dhd_msix_message_set(dhd_pub_t *dhdp, uint table_entry,
+ uint message_number, bool unmask);
extern void
dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr);
void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size);
#endif /* DHD_FW_COREDUMP */
-void dhd_schedule_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode);
+void dhd_write_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode);
+#ifdef DNGL_AXI_ERROR_LOGGING
+void dhd_schedule_axi_error_dump(dhd_pub_t *dhdp, void *type);
+#endif /* DNGL_AXI_ERROR_LOGGING */
+#ifdef BCMPCIE
+void dhd_schedule_cto_recovery(dhd_pub_t *dhdp);
+#endif /* BCMPCIE */
#ifdef PKT_FILTER_SUPPORT
#define DHD_UNICAST_FILTER_NUM 0
#define MAX_PKTFLT_FIXED_PATTERN_SIZE 32
#define MAX_PKTFLT_FIXED_BUF_SIZE \
(WL_PKT_FILTER_FIXED_LEN + MAX_PKTFLT_FIXED_PATTERN_SIZE * 2)
+#define MAXPKT_ARG 16
#endif /* PKT_FILTER_SUPPORT */
#if defined(BCMPCIE)
} dhd_event_log_t;
#endif /* SHOW_LOGTRACE */
-#if defined(DHD_NON_DMA_M2M_CORRUPTION)
-#define PCIE_DMAXFER_LPBK_LENGTH 4096
-typedef struct dhd_pcie_dmaxfer_lpbk {
- union {
- uint32 length;
- uint32 status;
- } u;
- uint32 srcdelay;
- uint32 destdelay;
- uint32 lpbkmode;
- uint32 wait;
- uint32 core;
-} dhd_pcie_dmaxfer_lpbk_t;
-#endif /* DHD_NON_DMA_M2M_CORRUPTION */
-enum d11_lpbk_type {
- M2M_DMA_LPBK = 0,
- D11_LPBK = 1,
- BMC_LPBK = 2,
- M2M_NON_DMA_LPBK = 3,
- D11_HOST_MEM_LPBK = 4,
- BMC_HOST_MEM_LPBK = 5,
- MAX_LPBK = 6
-};
-
#ifdef KEEP_ALIVE
extern int dhd_dev_start_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id, uint8 *ip_pkt,
uint16 ip_pkt_len, uint8* src_mac_addr, uint8* dst_mac_addr, uint32 period_msec);
/* Roaming mode control */
extern uint dhd_radio_up;
+/* TCM verification control */
+extern uint dhd_tcm_test_enable;
+
/* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */
extern int dhd_idletime;
#ifdef DHD_USE_IDLECOUNT
#define VENDOR_PATH ""
#endif /* ANDROID_PLATFORM_VERSION */
+#if defined(ANDROID_PLATFORM_VERSION)
+#if (ANDROID_PLATFORM_VERSION < 9)
+#ifdef WL_STATIC_IF
+#undef WL_STATIC_IF
+#endif /* WL_STATIC_IF */
+#ifdef WL_STATIC_IFNAME_PREFIX
+#undef WL_STATIC_IFNAME_PREFIX
+#endif /* WL_STATIC_IFNAME_PREFIX */
+#endif /* ANDROID_PLATFORM_VERSION < 9 */
+#endif /* ANDROID_PLATFORM_VERSION */
+
#if defined(DHD_LEGACY_FILE_PATH)
#define PLATFORM_PATH "/data/"
#elif defined(PLATFORM_SLP)
#if defined(ANDROID_PLATFORM_VERSION)
#if (ANDROID_PLATFORM_VERSION >= 9)
#define PLATFORM_PATH "/data/vendor/conn/"
+#define DHD_MAC_ADDR_EXPORT
+#define DHD_ADPS_BAM_EXPORT
+#define DHD_EXPORT_CNTL_FILE
+#define DHD_SOFTAP_DUAL_IF_INFO
+#define DHD_SEND_HANG_PRIVCMD_ERRORS
#else
#define PLATFORM_PATH "/data/misc/conn/"
#endif /* ANDROID_PLATFORM_VERSION >= 9 */
#endif /* ANDROID_PLATFORM_VERSION */
#endif /* DHD_LEGACY_FILE_PATH */
+#ifdef DHD_MAC_ADDR_EXPORT
+extern struct ether_addr sysfs_mac_addr;
+#endif /* DHD_MAC_ADDR_EXPORT */
+
/* Flag to indicate if we should download firmware on driver load */
extern uint dhd_download_fw_on_driverload;
#ifndef BCMDBUS
extern int dhd_write_file(const char *filepath, char *buf, int buf_len);
extern int dhd_read_file(const char *filepath, char *buf, int buf_len);
extern int dhd_write_file_and_check(const char *filepath, char *buf, int buf_len);
+extern int dhd_file_delete(char *path);
#ifdef READ_MACADDR
extern int dhd_set_macaddr_from_file(dhd_pub_t *dhdp);
static INLINE int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
#endif /* WRITE_MACADDR */
#ifdef USE_CID_CHECK
+#define MAX_VNAME_LEN 64
+#ifdef DHD_EXPORT_CNTL_FILE
+extern char cidinfostr[MAX_VNAME_LEN];
+#endif /* DHD_EXPORT_CNTL_FILE */
extern int dhd_check_module_cid(dhd_pub_t *dhdp);
extern char *dhd_get_cid_info(unsigned char *vid, int vid_length);
#else
int dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen);
int dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size);
-
+void dhd_get_memdump_filename(struct net_device *ndev, char *memdump_path, int len, char *fname);
uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail);
void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size);
PARAM_LAST_VALUE
};
extern int sec_get_param_wfa_cert(dhd_pub_t *dhd, int mode, uint* read_val);
+#ifdef DHD_EXPORT_CNTL_FILE
+#define VALUENOTSET 0xFFFFFFFFu
+extern uint32 bus_txglom;
+extern uint32 roam_off;
+#ifdef USE_WL_FRAMEBURST
+extern uint32 frameburst;
+#endif /* USE_WL_FRAMEBURST */
+#ifdef USE_WL_TXBF
+extern uint32 txbf;
+#endif /* USE_WL_TXBF */
+#ifdef PROP_TXSTATUS
+extern uint32 proptx;
+#endif /* PROP_TXSTATUS */
+#endif /* DHD_EXPORT_CNTL_FILE */
#endif /* USE_WFA_CERT_CONF */
#define dhd_add_flowid(pub, ifidx, ac_prio, ea, flowid) do {} while (0)
#define DHD_BUS_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
#define DHD_BUS_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags))
+/* Enable DHD backplane spin lock/unlock */
+#define DHD_BACKPLANE_ACCESS_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
+#define DHD_BACKPLANE_ACCESS_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags))
+
#define DHD_BUS_INB_DW_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
#define DHD_BUS_INB_DW_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags))
void custom_rps_map_clear(struct netdev_rx_queue *queue);
#define PRIMARY_INF 0
#define VIRTUAL_INF 1
-#if defined(CONFIG_MACH_UNIVERSAL5433) || defined(CONFIG_MACH_UNIVERSAL7420) || \
- defined(CONFIG_SOC_EXYNOS8890)
+#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890)
#define RPS_CPUS_MASK "10"
#define RPS_CPUS_MASK_P2P "10"
#define RPS_CPUS_MASK_IBSS "10"
#define RPS_CPUS_MASK "6"
#define RPS_CPUS_MASK_P2P "6"
#define RPS_CPUS_MASK_IBSS "6"
-#endif /* CONFIG_MACH_UNIVERSAL5433 || CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 */
+#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 */
#endif // endif
int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component,
#if defined(DHD_LB_STATS)
#include <bcmutils.h>
extern void dhd_lb_stats_init(dhd_pub_t *dhd);
+extern void dhd_lb_stats_deinit(dhd_pub_t *dhd);
extern void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count);
extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count);
#define DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhdp) DHD_LB_STATS_NOOP
#define DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhdp) DHD_LB_STATS_NOOP
#endif /* !DHD_LB_STATS */
-#ifdef DHD_LB_IRQSET
-extern void dhd_irq_set_affinity(dhd_pub_t *dhdp);
-#endif /* DHD_LB_IRQSET */
#ifdef DHD_SSSR_DUMP
-#define DHD_SSSR_MEMPOOL_SIZE (1024 * 1024) /* 1MB size */
+#define DHD_SSSR_MEMPOOL_SIZE (2 * 1024 * 1024) /* 2MB size */
/* used in sssr_dump_mode */
#define SSSR_DUMP_MODE_SSSR 0 /* dump both *before* and *after* files */
extern void dhd_sssr_mempool_deinit(dhd_pub_t *dhd);
extern int dhd_sssr_dump_init(dhd_pub_t *dhd);
extern void dhd_sssr_dump_deinit(dhd_pub_t *dhd);
+extern int dhdpcie_sssr_dump(dhd_pub_t *dhd);
+extern void dhd_sssr_print_filepath(dhd_pub_t *dhd, char *path);
+
#define DHD_SSSR_MEMPOOL_INIT(dhdp) dhd_sssr_mempool_init(dhdp)
#define DHD_SSSR_MEMPOOL_DEINIT(dhdp) dhd_sssr_mempool_deinit(dhdp)
#define DHD_SSSR_DUMP_INIT(dhdp) dhd_sssr_dump_init(dhdp)
#define DHD_SSSR_DUMP_DEINIT(dhdp) dhd_sssr_dump_deinit(dhdp)
+#define DHD_SSSR_PRINT_FILEPATH(dhdp, path) dhd_sssr_print_filepath(dhdp, path)
#else
-#define DHD_SSSR_MEMPOOL_INIT(dhdp) do { /* noop */ } while (0)
-#define DHD_SSSR_MEMPOOL_DEINIT(dhdp) do { /* noop */ } while (0)
-#define DHD_SSSR_DUMP_INIT(dhdp) do { /* noop */ } while (0)
-#define DHD_SSSR_DUMP_DEINIT(dhdp) do { /* noop */ } while (0)
+#define DHD_SSSR_MEMPOOL_INIT(dhdp) do { /* noop */ } while (0)
+#define DHD_SSSR_MEMPOOL_DEINIT(dhdp) do { /* noop */ } while (0)
+#define DHD_SSSR_DUMP_INIT(dhdp) do { /* noop */ } while (0)
+#define DHD_SSSR_DUMP_DEINIT(dhdp) do { /* noop */ } while (0)
+#define DHD_SSSR_PRINT_FILEPATH(dhdp, path) do { /* noop */ } while (0)
#endif /* DHD_SSSR_DUMP */
-#ifdef SHOW_LOGTRACE
-void dhd_get_read_buf_ptr(dhd_pub_t *dhd_pub, trace_buf_info_t *read_buf_info);
-#endif /* SHOW_LOGTRACE */
-
#ifdef BCMPCIE
extern int dhd_prot_debug_info_print(dhd_pub_t *dhd);
extern bool dhd_bus_skip_clm(dhd_pub_t *dhdp);
+extern void dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd);
+extern bool dhd_pcie_dump_int_regs(dhd_pub_t *dhd);
#else
#define dhd_prot_debug_info_print(x)
static INLINE bool dhd_bus_skip_clm(dhd_pub_t *dhd_pub)
{ return 0; }
#endif /* BCMPCIE */
-bool dhd_fw_download_status(dhd_pub_t * dhd_pub);
+fw_download_status_t dhd_fw_download_status(dhd_pub_t * dhd_pub);
void dhd_show_kirqstats(dhd_pub_t *dhd);
/* Bitmask used for Join Timeout */
void dhd_pktid_error_handler(dhd_pub_t *dhdp);
#endif /* DHD_PKTID_AUDIT_ENABLED */
+#ifdef DHD_MAP_PKTID_LOGGING
+extern void dhd_pktid_logging_dump(dhd_pub_t *dhdp);
+#endif /* DHD_MAP_PKTID_LOGGING */
+
#define DHD_DISABLE_RUNTIME_PM(dhdp)
#define DHD_ENABLE_RUNTIME_PM(dhdp)
extern bool dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info);
extern void dhd_prot_dump_ring_ptrs(void *prot_info);
-/*
- * Enable this macro if you want to track the calls to wake lock
- * This records can be printed using the following command
- * cat /sys/bcm-dhd/wklock_trace
- * DHD_TRACE_WAKE_LOCK supports over linux 2.6.0 version
- */
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
-#undef DHD_TRACE_WAKE_LOCK
-#endif /* KERNEL_VER < KERNEL_VERSION(2, 6, 0) */
-
#if defined(DHD_TRACE_WAKE_LOCK)
void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp);
#endif // endif
extern bool dhd_query_bus_erros(dhd_pub_t *dhdp);
+void dhd_clear_bus_errors(dhd_pub_t *dhdp);
#if defined(CONFIG_64BIT)
#define DHD_SUPPORT_64BIT
uint16 dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp);
#ifdef SET_PCIE_IRQ_CPU_CORE
-extern void dhd_set_irq_cpucore(dhd_pub_t *dhdp, int set);
-extern void set_irq_cpucore(unsigned int irq, int set);
+enum {
+ PCIE_IRQ_AFFINITY_OFF = 0,
+ PCIE_IRQ_AFFINITY_BIG_CORE_ANY,
+ PCIE_IRQ_AFFINITY_BIG_CORE_EXYNOS,
+ PCIE_IRQ_AFFINITY_LAST
+};
+extern void dhd_set_irq_cpucore(dhd_pub_t *dhdp, int affinity_cmd);
#endif /* SET_PCIE_IRQ_CPU_CORE */
#ifdef DHD_WAKE_STATUS
#endif /* DHD_BLOB_EXISTENCE_CHECK */
/* configuration of ecounters. API's tp start/stop. currently supported only for linux */
+extern int dhd_ecounter_configure(dhd_pub_t *dhd, bool enable);
extern int dhd_start_ecounters(dhd_pub_t *dhd);
extern int dhd_stop_ecounters(dhd_pub_t *dhd);
extern int dhd_start_event_ecounters(dhd_pub_t *dhd);
void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type);
void dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd);
int dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file,
- unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr);
-int dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, unsigned long *f_pos);
+ unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr, char *text_hdr,
+ uint32 sec_type);
+int dhd_dump_debug_ring(dhd_pub_t *dhdp, void *ring_ptr, const void *user_buf,
+ log_dump_section_hdr_t *sec_hdr, char *text_hdr, int buflen, uint32 sec_type);
+int dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp,
+ const void *user_buf, unsigned long *f_pos);
+int dhd_log_dump_cookie(dhd_pub_t *dhdp, const void *user_buf);
+uint32 dhd_log_dump_cookie_len(dhd_pub_t *dhdp);
int dhd_logdump_cookie_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size);
void dhd_logdump_cookie_deinit(dhd_pub_t *dhdp);
void dhd_logdump_cookie_save(dhd_pub_t *dhdp, char *cookie, char *type);
int dhd_logdump_cookie_get(dhd_pub_t *dhdp, char *ret_cookie, uint32 buf_size);
int dhd_logdump_cookie_count(dhd_pub_t *dhdp);
-#endif /* DHD_LOG_DUMP */
+int dhd_get_dld_log_dump(void *dev, dhd_pub_t *dhdp, const void *user_buf, void *fp,
+ uint32 len, int type, void *pos);
+int dhd_print_ext_trap_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos);
+int dhd_print_dump_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos);
+int dhd_print_cookie_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos);
+int dhd_print_health_chk_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos);
+int dhd_print_time_str(const void *user_buf, void *fp, uint32 len, void *pos);
+#ifdef DHD_DUMP_PCIE_RINGS
+int dhd_print_flowring_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos);
+uint32 dhd_get_flowring_len(void *ndev, dhd_pub_t *dhdp);
+#endif /* DHD_DUMP_PCIE_RINGS */
+#ifdef DHD_STATUS_LOGGING
+extern int dhd_print_status_log_data(void *dev, dhd_pub_t *dhdp,
+ const void *user_buf, void *fp, uint32 len, void *pos);
+extern uint32 dhd_get_status_log_len(void *ndev, dhd_pub_t *dhdp);
+#endif /* DHD_STATUS_LOGGING */
+int dhd_print_ecntrs_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos);
+int dhd_print_rtt_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos);
+int dhd_get_debug_dump_file_name(void *dev, dhd_pub_t *dhdp,
+ char *dump_path, int size);
+#if defined(BCMPCIE)
+uint32 dhd_get_ext_trap_len(void *ndev, dhd_pub_t *dhdp);
+#endif
+uint32 dhd_get_time_str_len(void);
+uint32 dhd_get_health_chk_len(void *ndev, dhd_pub_t *dhdp);
+uint32 dhd_get_dhd_dump_len(void *ndev, dhd_pub_t *dhdp);
+uint32 dhd_get_cookie_log_len(void *ndev, dhd_pub_t *dhdp);
+uint32 dhd_get_ecntrs_len(void *ndev, dhd_pub_t *dhdp);
+uint32 dhd_get_rtt_len(void *ndev, dhd_pub_t *dhdp);
+uint32 dhd_get_dld_len(int log_type);
+void dhd_init_sec_hdr(log_dump_section_hdr_t *sec_hdr);
+extern char *dhd_log_dump_get_timestamp(void);
+bool dhd_log_dump_ecntr_enabled(void);
+bool dhd_log_dump_rtt_enabled(void);
+void dhd_nla_put_sssr_dump_len(void *ndev, uint32 *arr_len);
+int dhd_get_debug_dump(void *dev, const void *user_buf, uint32 len, int type);
+int
+dhd_sssr_dump_d11_buf_before(void *dev, const void *user_buf, uint32 len, int core);
+int
+dhd_sssr_dump_d11_buf_after(void *dev, const void *user_buf, uint32 len, int core);
+int
+dhd_sssr_dump_dig_buf_before(void *dev, const void *user_buf, uint32 len);
+int
+dhd_sssr_dump_dig_buf_after(void *dev, const void *user_buf, uint32 len);
+
+#ifdef DNGL_AXI_ERROR_LOGGING
+extern int dhd_os_get_axi_error_dump(void *dev, const void *user_buf, uint32 len);
+extern int dhd_os_get_axi_error_dump_size(struct net_device *dev);
+extern void dhd_os_get_axi_error_filename(struct net_device *dev, char *dump_path, int len);
+#endif /* DNGL_AXI_ERROR_LOGGING */
+#endif /* DHD_LOG_DUMP */
+int dhd_export_debug_data(void *mem_buf, void *fp, const void *user_buf, int buf_len, void *pos);
#define DHD_PCIE_CONFIG_SAVE(bus) pci_save_state(bus->dev)
#define DHD_PCIE_CONFIG_RESTORE(bus) pci_restore_state(bus->dev)
/* ========= RING API functions : exposed to others ============= */
#define DHD_RING_TYPE_FIXED 1
+#define DHD_RING_TYPE_SINGLE_IDX 2
uint32 dhd_ring_get_hdr_size(void);
-void *dhd_ring_init(uint8 *buf, uint32 buf_size, uint32 elem_size, uint32 elem_cnt);
-void dhd_ring_deinit(void *_ring);
+void *dhd_ring_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size, uint32 elem_size,
+ uint32 elem_cnt, uint32 type);
+void dhd_ring_deinit(dhd_pub_t *dhdp, void *_ring);
void *dhd_ring_get_first(void *_ring);
void dhd_ring_free_first(void *_ring);
+void dhd_ring_set_read_idx(void *_ring, uint32 read_idx);
+void dhd_ring_set_write_idx(void *_ring, uint32 write_idx);
+uint32 dhd_ring_get_read_idx(void *_ring);
+uint32 dhd_ring_get_write_idx(void *_ring);
void *dhd_ring_get_last(void *_ring);
void *dhd_ring_get_next(void *_ring, void *cur);
void *dhd_ring_get_prev(void *_ring, void *cur);
void *dhd_ring_lock_get_last(void *_ring);
int dhd_ring_lock_get_count(void *_ring);
void dhd_ring_lock_free_first(void *ring);
+void dhd_ring_whole_lock(void *ring);
+void dhd_ring_whole_unlock(void *ring);
#define DHD_DUMP_TYPE_NAME_SIZE 32
#define DHD_DUMP_FILE_PATH_SIZE 256
extern void dhd_dump_file_manage_enqueue(dhd_pub_t *dhd, char *dump_path, char *fname);
#endif /* DHD_DUMP_MNGR */
+#ifdef PKT_FILTER_SUPPORT
+extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
+#endif // endif
+
#ifdef DHD_DUMP_PCIE_RINGS
-extern int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, unsigned long *file_posn);
+extern int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, const void *user_buf,
+ unsigned long *file_posn, bool file_write);
#endif /* DHD_DUMP_PCIE_RINGS */
#ifdef EWP_EDL
#endif /* DHD_H2D_LOG_TIME_SYNC */
extern void dhd_cleanup_if(struct net_device *net);
+#ifdef DNGL_AXI_ERROR_LOGGING
+extern void dhd_axi_error(dhd_pub_t *dhd);
+#ifdef DHD_USE_WQ_FOR_DNGL_AXI_ERROR
+extern void dhd_axi_error_dispatch(dhd_pub_t *dhdp);
+#endif /* DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#ifdef DHD_HP2P
+extern unsigned long dhd_os_hp2plock(dhd_pub_t *pub);
+extern void dhd_os_hp2punlock(dhd_pub_t *pub, unsigned long flags);
+#endif /* DHD_HP2P */
+extern struct dhd_if * dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
+
+#ifdef DHD_STATUS_LOGGING
+#include <dhd_statlog.h>
+#else
+#define ST(x) 0
+#define STDIR(x) 0
+#define DHD_STATLOG_CTRL(dhdp, stat, ifidx, reason) \
+ do { /* noop */ } while (0)
+#define DHD_STATLOG_DATA(dhdp, stat, ifidx, dir, cond) \
+ do { BCM_REFERENCE(cond); } while (0)
+#define DHD_STATLOG_DATA_RSN(dhdp, stat, ifidx, dir, reason) \
+ do { /* noop */ } while (0)
+#endif /* DHD_STATUS_LOGGING */
+
+#ifdef CONFIG_SILENT_ROAM
+extern int dhd_sroam_set_mon(dhd_pub_t *dhd, bool set);
+typedef wlc_sroam_info_v1_t wlc_sroam_info_t;
+#endif /* CONFIG_SILENT_ROAM */
+
+#ifdef SUPPORT_SET_TID
+enum dhd_set_tid_mode {
+	/* Disable changing TID */
+ SET_TID_OFF = 0,
+ /* Change TID for all UDP frames */
+ SET_TID_ALL_UDP,
+ /* Change TID for UDP frames based on UID */
+ SET_TID_BASED_ON_UID
+};
+extern void dhd_set_tid_based_on_uid(dhd_pub_t *dhdp, void *pkt);
+#endif /* SUPPORT_SET_TID */
+
+#ifdef DHD_DUMP_FILE_WRITE_FROM_KERNEL
+#define FILE_NAME_HAL_TAG ""
+#else
+#define FILE_NAME_HAL_TAG "_hal" /* The tag name concatenated by HAL */
+#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+
+#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
+extern int dhd_control_he_enab(dhd_pub_t * dhd, uint8 he_enab);
+extern uint8 control_he_enab;
+#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
#endif /* _dhd_h_ */
* Provides type definitions and function prototypes used to link the
* DHD OS, bus, and protocol modules.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_bus.h 770457 2018-07-03 08:45:49Z $
+ * $Id: dhd_bus.h 814378 2019-04-11 02:21:31Z $
*/
#ifndef _dhd_bus_h_
* Exported from dhd bus module (dhd_usb, dhd_sdio)
*/
+/* global variable for the bus */
+extern struct dhd_bus *g_dhd_bus;
+
/* Indicate (dis)interest in finding dongles. */
extern int dhd_bus_register(void);
extern void dhd_bus_unregister(void);
extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp);
#endif // endif
+#ifdef BCMPCIE
+extern void dhdpcie_cto_recovery_handler(dhd_pub_t *dhd);
+#endif /* BCMPCIE */
+
/* Send/receive a control message to/from the dongle.
* Expects caller to enforce a single outstanding transaction.
*/
#if defined(DHD_FW_COREDUMP) && (defined(BCMPCIE) || defined(BCMSDIO))
extern int dhd_bus_mem_dump(dhd_pub_t *dhd);
+extern int dhd_bus_get_mem_dump(dhd_pub_t *dhdp);
#else
#define dhd_bus_mem_dump(x)
+#define dhd_bus_get_mem_dump(x)
#endif /* DHD_FW_COREDUMP && (BCMPCIE || BCMSDIO) */
#ifdef BCMPCIE
extern void dhd_bus_flow_ring_flush_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
extern uint32 dhd_bus_max_h2d_queues(struct dhd_bus *bus);
extern int dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs);
-extern void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val);
#ifdef IDLE_TX_FLOW_MGMT
extern void dhd_bus_flow_ring_resume_response(struct dhd_bus *bus, uint16 flowid, int32 status);
extern int dhd_bus_release_dongle(struct dhd_bus *bus);
extern int dhd_bus_request_irq(struct dhd_bus *bus);
extern int dhdpcie_get_pcieirq(struct dhd_bus *bus, unsigned int *irq);
+extern void dhd_bus_aer_config(struct dhd_bus *bus);
extern struct device * dhd_bus_to_dev(struct dhd_bus *bus);
-extern void dhdpcie_cto_init(struct dhd_bus *bus, bool enable);
+extern int dhdpcie_cto_init(struct dhd_bus *bus, bool enable);
+extern int dhdpcie_cto_cfg_init(struct dhd_bus *bus, bool enable);
+
extern void dhdpcie_ssreset_dis_enum_rst(struct dhd_bus *bus);
#ifdef DHD_FW_COREDUMP
-extern struct dhd_bus *g_dhd_bus;
extern int dhd_dongle_mem_dump(void);
#endif /* DHD_FW_COREDUMP */
#ifdef BCMPCIE
extern void dhd_bus_dump_console_buffer(struct dhd_bus *bus);
extern void dhd_bus_intr_count_dump(dhd_pub_t *dhdp);
+extern void dhd_bus_set_dpc_sched_time(dhd_pub_t *dhdp);
+extern bool dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp);
+extern int dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type);
+extern bool dhd_bus_check_driver_up(void);
+extern int dhd_bus_get_cto(dhd_pub_t *dhdp);
+extern void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val);
+extern int dhd_bus_get_linkdown(dhd_pub_t *dhdp);
#else
#define dhd_bus_dump_console_buffer(x)
static INLINE void dhd_bus_intr_count_dump(dhd_pub_t *dhdp) { UNUSED_PARAMETER(dhdp); }
+static INLINE void dhd_bus_set_dpc_sched_time(dhd_pub_t *dhdp) { }
+static INLINE bool dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp) { return 0; }
+static INLINE int dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type) { return 0; }
+static INLINE bool dhd_bus_check_driver_up(void) { return FALSE; }
+static INLINE void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val) { }
+static INLINE int dhd_bus_get_linkdown(dhd_pub_t *dhdp) { return 0; }
+static INLINE int dhd_bus_get_cto(dhd_pub_t *dhdp) { return 0; }
#endif /* BCMPCIE */
#if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS)
extern bool dhd_bus_aspm_enable_rc_ep(struct dhd_bus *bus, bool enable);
extern void dhd_bus_l1ss_enable_rc_ep(struct dhd_bus *bus, bool enable);
+bool dhd_bus_is_multibp_capable(struct dhd_bus *bus);
+
#ifdef BCMPCIE
extern void dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp);
extern void dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd);
extern bool dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus);
#ifdef DHD_SSSR_DUMP
-extern int dhd_bus_sssr_dump(dhd_pub_t *dhd);
-
extern int dhd_bus_fis_trigger(dhd_pub_t *dhd);
extern int dhd_bus_fis_dump(dhd_pub_t *dhd);
-
#endif /* DHD_SSSR_DUMP */
+
#ifdef PCIE_FULL_DONGLE
extern int dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, int32 int_val);
#endif /* PCIE_FULL_DONGLE */
#ifdef DHD_USE_BP_RESET
extern int dhd_bus_perform_bp_reset(struct dhd_bus *bus);
#endif /* DHD_USE_BP_RESET */
+
+extern void dhd_bwm_bt_quiesce(struct dhd_bus *bus);
+extern void dhd_bwm_bt_resume(struct dhd_bus *bus);
#endif /* _dhd_bus_h_ */
/*
 * Broadcom logging system - Empty implementation
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* DHD Protocol Module for CDC and BDC.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
#endif /* BCMDBUS */
#ifdef BCMDBUS
- timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed, false);
+ timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed);
if ((!timeout) || (!prot->ctl_completed)) {
DHD_ERROR(("Txctl timeout %d ctl_completed %d\n",
timeout, prot->ctl_completed));
	/* interrupt polling is successfully submitted. Wait for dongle to send
* interrupt
*/
- timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed, false);
+ timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed);
if (!timeout) {
DHD_ERROR(("intr poll wait timed out\n"));
}
DHD_OS_IOCTL_RESP_UNLOCK(dhd);
goto done;
}
- timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed, false);
+ timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed);
if ((!timeout) || (!prot->ctl_completed)) {
DHD_ERROR(("Rxctl timeout %d ctl_completed %d\n",
timeout, prot->ctl_completed));
/*
* Linux cfg80211 driver - Dongle Host Driver (DHD) related
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_cfg80211.c 771186 2018-07-09 09:14:04Z $
+ * $Id: dhd_cfg80211.c 807961 2019-03-05 05:47:47Z $
*/
#include <linux/vmalloc.h>
void wl_cfg80211_cleanup_if(struct net_device *net)
{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+ BCM_REFERENCE(cfg);
dhd_cleanup_if(net);
}
/*
* Linux cfg80211 driver - Dongle Host Driver (DHD) related
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Broadcom Dongle Host Driver (DHD), common DHD core.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_common.c 771671 2018-07-11 06:58:25Z $
+ * $Id: dhd_common.c 826445 2019-06-20 04:47:47Z $
*/
#include <typedefs.h>
#include <osl.h>
#include <epivers.h>
#include <bcmutils.h>
+#include <bcmstdlib_s.h>
#include <bcmendian.h>
#include <dngl_stats.h>
#include <dhd_bus.h>
#include <dhd_proto.h>
-#include <dhd_config.h>
#include <bcmsdbus.h>
#include <dhd_dbg.h>
#include <802.1d.h>
#include <dhd_dbg_ring.h>
#include <dhd_mschdbg.h>
#include <msgtrace.h>
+#include <dhd_config.h>
+#include <wl_android.h>
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
#endif // endif
-#ifdef PNO_SUPPORT
+#if defined(PNO_SUPPORT)
#include <dhd_pno.h>
-#endif // endif
+#endif /* PNO_SUPPORT */
#ifdef RTT_SUPPORT
#include <dhd_rtt.h>
#endif // endif
#include <dhd_wlfc.h>
#endif // endif
+#if defined(DHD_POST_EAPOL_M1_AFTER_ROAM_EVT)
+#include <dhd_linux.h>
+#endif // endif
+
#ifdef DHD_L2_FILTER
#include <dhd_l2_filter.h>
#endif /* DHD_L2_FILTER */
#include <dhd_dbg.h>
#endif /* DHD_LOG_DUMP */
-int dhd_msg_level = DHD_ERROR_VAL | DHD_FWLOG_VAL;
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+int log_print_threshold = 0;
+#endif /* DHD_LOG_PRINT_RATE_LIMIT */
+int dhd_msg_level = DHD_ERROR_VAL;// | DHD_FWLOG_VAL | DHD_EVENT_VAL
+ /* For CUSTOMER_HW4 do not enable DHD_IOVAR_MEM_VAL by default */
+// | DHD_PKT_MON_VAL;
#if defined(WL_WIRELESS_EXT)
#include <wl_iw.h>
IOV_TPUT_TEST,
IOV_FIS_TRIGGER,
IOV_DEBUG_BUF_DEST_STAT,
+#ifdef DHD_DEBUG
+ IOV_INDUCE_ERROR,
+#endif /* DHD_DEBUG */
+#ifdef WL_IFACE_MGMT_CONF
+#ifdef WL_CFG80211
+#ifdef WL_NANP2P
+ IOV_CONC_DISC,
+#endif /* WL_NANP2P */
+#ifdef WL_IFACE_MGMT
+ IOV_IFACE_POLICY,
+#endif /* WL_IFACE_MGMT */
+#endif /* WL_CFG80211 */
+#endif /* WL_IFACE_MGMT_CONF */
+#ifdef RTT_GEOFENCE_CONT
+#if defined(RTT_SUPPORT) && defined(WL_NAN)
+ IOV_RTT_GEOFENCE_TYPE_OVRD,
+#endif /* RTT_SUPPORT && WL_NAN */
+#endif /* RTT_GEOFENCE_CONT */
IOV_LAST
};
{"log_dump", IOV_LOG_DUMP, 0, 0, IOVT_UINT8, 0},
#endif /* DHD_LOG_DUMP */
{"debug_buf_dest_stat", IOV_DEBUG_BUF_DEST_STAT, 0, 0, IOVT_UINT32, 0 },
+#ifdef DHD_DEBUG
+ {"induce_error", IOV_INDUCE_ERROR, (0), 0, IOVT_UINT16, 0 },
+#endif /* DHD_DEBUG */
+#ifdef WL_IFACE_MGMT_CONF
+#ifdef WL_CFG80211
+#ifdef WL_NANP2P
+ {"conc_disc", IOV_CONC_DISC, (0), 0, IOVT_UINT16, 0 },
+#endif /* WL_NANP2P */
+#ifdef WL_IFACE_MGMT
+ {"if_policy", IOV_IFACE_POLICY, (0), 0, IOVT_BUFFER, sizeof(iface_mgmt_data_t)},
+#endif /* WL_IFACE_MGMT */
+#endif /* WL_CFG80211 */
+#endif /* WL_IFACE_MGMT_CONF */
+#ifdef RTT_GEOFENCE_CONT
+#if defined(RTT_SUPPORT) && defined(WL_NAN)
+ {"rtt_geofence_type_ovrd", IOV_RTT_GEOFENCE_TYPE_OVRD, (0), 0, IOVT_BOOL, 0},
+#endif /* RTT_SUPPORT && WL_NAN */
+#endif /* RTT_GEOFENCE_CONT */
{NULL, 0, 0, 0, 0, 0 }
};
bool ret = FALSE;
if (dhdp->dongle_reset) {
- DHD_ERROR(("%s: Dongle Reset occurred, cannot proceed\n",
+ DHD_ERROR_RLMT(("%s: Dongle Reset occurred, cannot proceed\n",
__FUNCTION__));
ret = TRUE;
}
if (dhdp->dongle_trap_occured) {
- DHD_ERROR(("%s: FW TRAP has occurred, cannot proceed\n",
+ DHD_ERROR_RLMT(("%s: FW TRAP has occurred, cannot proceed\n",
__FUNCTION__));
ret = TRUE;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
dhd_os_send_hang_message(dhdp);
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
}
if (dhdp->iovar_timeout_occured) {
- DHD_ERROR(("%s: Resumed on timeout for previous IOVAR, cannot proceed\n",
+ DHD_ERROR_RLMT(("%s: Resumed on timeout for previous IOVAR, cannot proceed\n",
__FUNCTION__));
ret = TRUE;
}
#ifdef PCIE_FULL_DONGLE
if (dhdp->d3ack_timeout_occured) {
- DHD_ERROR(("%s: Resumed on timeout for previous D3ACK, cannot proceed\n",
+ DHD_ERROR_RLMT(("%s: Resumed on timeout for previous D3ACK, cannot proceed\n",
__FUNCTION__));
ret = TRUE;
}
if (dhdp->livelock_occured) {
- DHD_ERROR(("%s: LIVELOCK occurred for previous msg, cannot proceed\n",
+ DHD_ERROR_RLMT(("%s: LIVELOCK occurred for previous msg, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+
+ if (dhdp->pktid_audit_failed) {
+ DHD_ERROR_RLMT(("%s: pktid_audit_failed, cannot proceed\n",
__FUNCTION__));
ret = TRUE;
}
#endif /* PCIE_FULL_DONGLE */
+ if (dhdp->iface_op_failed) {
+ DHD_ERROR_RLMT(("%s: iface_op_failed, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+
+ if (dhdp->scan_timeout_occurred) {
+ DHD_ERROR_RLMT(("%s: scan_timeout_occurred, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+
+ if (dhdp->scan_busy_occurred) {
+ DHD_ERROR_RLMT(("%s: scan_busy_occurred, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+
+#ifdef DNGL_AXI_ERROR_LOGGING
+ if (dhdp->axi_error) {
+ DHD_ERROR_RLMT(("%s: AXI error occurred, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+ if (dhd_bus_get_linkdown(dhdp)) {
+ DHD_ERROR_RLMT(("%s : PCIE Link down occurred, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+
+ if (dhd_bus_get_cto(dhdp)) {
+ DHD_ERROR_RLMT(("%s : CTO Recovery reported, cannot proceed\n",
+ __FUNCTION__));
+ ret = TRUE;
+ }
+
return ret;
}
+void
+dhd_clear_bus_errors(dhd_pub_t *dhdp)
+{
+ if (!dhdp)
+ return;
+
+ dhdp->dongle_reset = FALSE;
+ dhdp->dongle_trap_occured = FALSE;
+ dhdp->iovar_timeout_occured = FALSE;
+#ifdef PCIE_FULL_DONGLE
+ dhdp->d3ack_timeout_occured = FALSE;
+ dhdp->livelock_occured = FALSE;
+ dhdp->pktid_audit_failed = FALSE;
+#endif // endif
+ dhdp->iface_op_failed = FALSE;
+ dhdp->scan_timeout_occurred = FALSE;
+ dhdp->scan_busy_occurred = FALSE;
+}
+
#ifdef DHD_SSSR_DUMP
/* This can be overwritten by module parameter defined in dhd_linux.c */
return;
}
+void
+dhd_sssr_print_filepath(dhd_pub_t *dhd, char *path)
+{
+ bool print_info = FALSE;
+ int dump_mode;
+
+ if (!dhd || !path) {
+ DHD_ERROR(("%s: dhd or memdump_path is NULL\n",
+ __FUNCTION__));
+ return;
+ }
+
+ if (!dhd->sssr_dump_collected) {
+ /* SSSR dump is not collected */
+ return;
+ }
+
+ dump_mode = dhd->sssr_dump_mode;
+
+ if (bcmstrstr(path, "core_0_before")) {
+ if (dhd->sssr_d11_outofreset[0] &&
+ dump_mode == SSSR_DUMP_MODE_SSSR) {
+ print_info = TRUE;
+ }
+ } else if (bcmstrstr(path, "core_0_after")) {
+ if (dhd->sssr_d11_outofreset[0]) {
+ print_info = TRUE;
+ }
+ } else if (bcmstrstr(path, "core_1_before")) {
+ if (dhd->sssr_d11_outofreset[1] &&
+ dump_mode == SSSR_DUMP_MODE_SSSR) {
+ print_info = TRUE;
+ }
+ } else if (bcmstrstr(path, "core_1_after")) {
+ if (dhd->sssr_d11_outofreset[1]) {
+ print_info = TRUE;
+ }
+ } else {
+ print_info = TRUE;
+ }
+
+ if (print_info) {
+ DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__,
+ path, FILE_NAME_HAL_TAG));
+ }
+}
#endif /* DHD_SSSR_DUMP */
#ifdef DHD_FW_COREDUMP
dhdp->dma_stats.ioctl_rx_sz + dhdp->dma_stats.event_rx_sz +
dhdp->dma_stats.tsbuf_rx_sz));
#endif /* DMAMAP_STATS */
-
+ bcm_bprintf(strbuf, "dhd_induce_error : %u\n", dhdp->dhd_induce_error);
/* Add any prot info */
dhd_prot_dump(dhdp, strbuf);
bcm_bprintf(strbuf, "\n");
#ifdef DUMP_IOCTL_IOV_LIST
dhd_iov_li_t *iov_li;
#endif /* DUMP_IOCTL_IOV_LIST */
+ int hostsleep_set = 0;
+ int hostsleep_val = 0;
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
DHD_OS_WAKE_LOCK(dhd_pub);
DHD_ERROR(("IOCTL Disconnect WiFi: %d\n", ioc->cmd));
}
#endif /* HW_DISCONNECT_TRACE */
+
/* logging of iovars that are send to the dongle, ./dhd msglevel +iovar */
if (ioc->set == TRUE) {
char *pars = (char *)buf; // points at user buffer
}
#endif /* DUMP_IOCTL_IOV_LIST */
+ if (dhd_conf_check_hostsleep(dhd_pub, ioc->cmd, ioc->buf, len,
+ &hostsleep_set, &hostsleep_val, &ret))
+ goto exit;
ret = dhd_prot_ioctl(dhd_pub, ifidx, ioc, buf, len);
+ dhd_conf_get_hostsleep(dhd_pub, hostsleep_set, hostsleep_val, ret);
#ifdef DUMP_IOCTL_IOV_LIST
if (ret == -ETIMEDOUT) {
DHD_ERROR(("%s: 'resumed on timeout' error is "
"occurred before the interface does not"
" bring up\n", __FUNCTION__));
- dhd_pub->busstate = DHD_BUS_DOWN;
}
+exit:
DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub);
dhd_os_busbusy_wake(dhd_pub);
#ifdef SHOW_LOGTRACE
case IOV_GVAL(IOV_DUMP_TRACE_LOG): {
- trace_buf_info_t *trace_buf_info;
+ trace_buf_info_t *trace_buf_info = (trace_buf_info_t *)arg;
+ dhd_dbg_ring_t *dbg_verbose_ring = NULL;
+
+ dbg_verbose_ring = dhd_dbg_get_ring_from_ring_id(dhd_pub, FW_VERBOSE_RING_ID);
+ if (dbg_verbose_ring == NULL) {
+ DHD_ERROR(("dbg_verbose_ring is NULL\n"));
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
- trace_buf_info = (trace_buf_info_t *)MALLOC(dhd_pub->osh,
- sizeof(trace_buf_info_t));
if (trace_buf_info != NULL) {
- dhd_get_read_buf_ptr(dhd_pub, trace_buf_info);
- memcpy((void*)arg, (void*)trace_buf_info, sizeof(trace_buf_info_t));
- MFREE(dhd_pub->osh, trace_buf_info, sizeof(trace_buf_info_t));
+ bzero(trace_buf_info, sizeof(trace_buf_info_t));
+ dhd_dbg_read_ring_into_trace_buf(dbg_verbose_ring, trace_buf_info);
} else {
- DHD_ERROR(("Memory allocation Failed\n"));
+ DHD_ERROR(("%s: arg is NULL\n", __FUNCTION__));
bcmerror = BCME_NOMEM;
}
break;
}
break;
}
+
+#ifdef DHD_DEBUG
+ case IOV_SVAL(IOV_INDUCE_ERROR): {
+ if (int_val >= DHD_INDUCE_ERROR_MAX) {
+ DHD_ERROR(("%s: Invalid command : %u\n", __FUNCTION__, (uint16)int_val));
+ } else {
+ dhd_pub->dhd_induce_error = (uint16)int_val;
+ }
+ break;
+ }
+#endif /* DHD_DEBUG */
+
+#ifdef WL_IFACE_MGMT_CONF
+#ifdef WL_CFG80211
+#ifdef WL_NANP2P
+ case IOV_GVAL(IOV_CONC_DISC): {
+ int_val = wl_cfg80211_get_iface_conc_disc(
+ dhd_linux_get_primary_netdev(dhd_pub));
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+ case IOV_SVAL(IOV_CONC_DISC): {
+ bcmerror = wl_cfg80211_set_iface_conc_disc(
+ dhd_linux_get_primary_netdev(dhd_pub), (uint8)int_val);
+ break;
+ }
+#endif /* WL_NANP2P */
+#ifdef WL_IFACE_MGMT
+ case IOV_GVAL(IOV_IFACE_POLICY): {
+ int_val = wl_cfg80211_get_iface_policy(
+ dhd_linux_get_primary_netdev(dhd_pub));
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+ case IOV_SVAL(IOV_IFACE_POLICY): {
+ bcmerror = wl_cfg80211_set_iface_policy(
+ dhd_linux_get_primary_netdev(dhd_pub),
+ arg, len);
+ break;
+ }
+#endif /* WL_IFACE_MGMT */
+#endif /* WL_CFG80211 */
+#endif /* WL_IFACE_MGMT_CONF */
+#ifdef RTT_GEOFENCE_CONT
+#if defined(RTT_SUPPORT) && defined(WL_NAN)
+ case IOV_GVAL(IOV_RTT_GEOFENCE_TYPE_OVRD): {
+ bool enable = 0;
+ dhd_rtt_get_geofence_cont_ind(dhd_pub, &enable);
+ int_val = enable ? 1 : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_RTT_GEOFENCE_TYPE_OVRD): {
+ bool enable = *(bool *)arg;
+ dhd_rtt_set_geofence_cont_ind(dhd_pub, enable);
+ break;
+ }
+#endif /* RTT_SUPPORT && WL_NAN */
+#endif /* RTT_GEOFENCE_CONT */
default:
bcmerror = BCME_UNSUPPORTED;
break;
* and should not return error for IOCTLs fired before FW
* Download is done
*/
- if (dhd_fw_download_status(dhd_pub)) {
+ if (dhd_fw_download_status(dhd_pub) == FW_DOWNLOAD_DONE) {
DHD_ERROR(("%s: returning as busstate=%d\n",
__FUNCTION__, dhd_pub->busstate));
DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
* to avoid ASSERT, clear the IOCTL busy state. "devreset" ioctl is
* not used in Production platforms but only used in FC19 setups.
*/
- if (!bcmstricmp((char *)buf, "devreset")) {
+ if (!bcmstricmp((char *)buf, "devreset") ||
+#ifdef BCMPCIE
+ (dhd_bus_is_multibp_capable(dhd_pub->bus) &&
+ !bcmstricmp((char *)buf, "dwnldstate")) ||
+#endif /* BCMPCIE */
+ FALSE)
+ {
DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
}
DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
} else if (status == WLC_E_STATUS_TIMEOUT) {
DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf));
} else if (status == WLC_E_STATUS_FAIL) {
- DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, reason %d\n",
- event_name, eabuf, (int)reason));
+ DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, status %d reason %d\n",
+ event_name, eabuf, (int)status, (int)reason));
} else {
DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
event_name, eabuf, (int)status));
auth_str = "Open System";
else if (auth_type == DOT11_SHARED_KEY)
auth_str = "Shared Key";
+ else if (auth_type == DOT11_SAE)
+ auth_str = "SAE";
else {
snprintf(err_msg, sizeof(err_msg), "AUTH unknown: %d", (int)auth_type);
auth_str = err_msg;
DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
event_name, eabuf, auth_str));
} else if (status == WLC_E_STATUS_FAIL) {
- DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n",
- event_name, eabuf, auth_str, (int)reason));
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, status %d reason %d\n",
+ event_name, eabuf, auth_str, (int)status, (int)reason));
+ } else if (status == WLC_E_STATUS_NO_ACK) {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s, NOACK\n",
+ event_name, eabuf, auth_str));
+ } else {
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s, status %d reason %d\n",
+ event_name, eabuf, auth_str, (int)status, (int)reason));
}
BCM_REFERENCE(auth_str);
DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
} else {
if (status == WLC_E_STATUS_FAIL) {
- DHD_EVENT(("MACEVENT: %s, failed\n", event_name));
+ DHD_EVENT(("MACEVENT: %s, failed status %d\n", event_name, status));
} else if (status == WLC_E_STATUS_NO_NETWORKS) {
DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name));
} else {
case WLC_E_CCA_CHAN_QUAL:
if (datalen) {
cca_chan_qual_event_t *cca_event = (cca_chan_qual_event_t *)event_data;
- if (cca_event->id == WL_CHAN_QUAL_FULL_CCA) {
+ if (cca_event->id == WL_CHAN_QUAL_FULLPM_CCA) {
+ cca_only_chan_qual_event_t *cca_only_event =
+ (cca_only_chan_qual_event_t *)cca_event;
+ BCM_REFERENCE(cca_only_event);
+ DHD_EVENT((
+ "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
+ " channel 0x%02x\n",
+ event_name, event_type, eabuf, (int)status,
+ (int)reason, (int)auth_type, cca_event->chanspec));
+ DHD_EVENT((
+ "\tTOTAL (dur %dms me %dms notme %dms interf %dms"
+ " ts 0x%08x)\n",
+ cca_only_event->cca_busy_ext.duration,
+ cca_only_event->cca_busy_ext.congest_ibss,
+ cca_only_event->cca_busy_ext.congest_obss,
+ cca_only_event->cca_busy_ext.interference,
+ cca_only_event->cca_busy_ext.timestamp));
+ DHD_EVENT((
+ "\t !PM (dur %dms me %dms notme %dms interf %dms)\n",
+ cca_only_event->cca_busy_nopm.duration,
+ cca_only_event->cca_busy_nopm.congest_ibss,
+ cca_only_event->cca_busy_nopm.congest_obss,
+ cca_only_event->cca_busy_nopm.interference));
+ DHD_EVENT((
+ "\t PM (dur %dms me %dms notme %dms interf %dms)\n",
+ cca_only_event->cca_busy_pm.duration,
+ cca_only_event->cca_busy_pm.congest_ibss,
+ cca_only_event->cca_busy_pm.congest_obss,
+ cca_only_event->cca_busy_pm.interference));
+ } else if (cca_event->id == WL_CHAN_QUAL_FULL_CCA) {
DHD_EVENT((
"MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
" channel 0x%02x (dur %dms ibss %dms obss %dms interf %dms"
DHD_TRACE(("MACEVENT: %s, type:%d\n", event_name, reason));
break;
}
+ case WLC_E_PROXD:
+ {
+ wl_proxd_event_t *proxd = (wl_proxd_event_t*)event_data;
+ DHD_LOG_MEM(("MACEVENT: %s, event:%d, status:%d\n",
+ event_name, proxd->type, reason));
+ break;
+ }
case WLC_E_RPSNOA:
{
rpsnoa_stats_t *stat = event_data;
DHD_EVENT(("MACEVENT: %s, reason:%d\n", event_name, reason));
break;
}
+ case WLC_E_WA_LQM:
+ {
+ wl_event_wa_lqm_t *event_wa_lqm = (wl_event_wa_lqm_t *)event_data;
+ bcm_xtlv_t *subevent;
+ wl_event_wa_lqm_basic_t *elqm_basic;
+
+ if ((event_wa_lqm->ver != WL_EVENT_WA_LQM_VER) ||
+ (event_wa_lqm->len < sizeof(wl_event_wa_lqm_t) + BCM_XTLV_HDR_SIZE)) {
+ DHD_ERROR(("MACEVENT: %s invalid (ver=%d len=%d)\n",
+ event_name, event_wa_lqm->ver, event_wa_lqm->len));
+ break;
+ }
+
+ subevent = (bcm_xtlv_t *)event_wa_lqm->subevent;
+ if ((subevent->id != WL_EVENT_WA_LQM_BASIC) ||
+ (subevent->len < sizeof(wl_event_wa_lqm_basic_t))) {
+ DHD_ERROR(("MACEVENT: %s invalid sub-type (id=%d len=%d)\n",
+ event_name, subevent->id, subevent->len));
+ break;
+ }
+
+ elqm_basic = (wl_event_wa_lqm_basic_t *)subevent->data;
+ BCM_REFERENCE(elqm_basic);
+ DHD_EVENT(("MACEVENT: %s (RSSI=%d SNR=%d TxRate=%d RxRate=%d)\n",
+ event_name, elqm_basic->rssi, elqm_basic->snr,
+ elqm_basic->tx_rate, elqm_basic->rx_rate));
+ break;
+ }
default:
DHD_INFO(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
event_name, event_type, eabuf, (int)status, (int)reason,
#ifdef DNGL_EVENT_SUPPORT
/* Check whether packet is a BRCM dngl event pkt. If it is, process event data. */
- int
+int
dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
{
bcm_dngl_event_t *pvt_data = (bcm_dngl_event_t *)pktdata;
return BCME_OK;
}
+#ifdef PARSE_DONGLE_HOST_EVENT
+typedef struct hck_id_to_str_s {
+ uint32 id;
+ char *name;
+} hck_id_to_str_t;
+
+hck_id_to_str_t hck_sw_id_to_str[] = {
+ {WL_HC_DD_PCIE, "WL_HC_DD_PCIE"},
+ {WL_HC_DD_RX_DMA_STALL, "WL_HC_DD_RX_DMA_STALL"},
+ {WL_HC_DD_RX_STALL, "WL_HC_DD_RX_STALL"},
+ {WL_HC_DD_TX_STALL, "WL_HC_DD_TX_STALL"},
+ {WL_HC_DD_SCAN_STALL, "WL_HC_DD_SCAN_STALL"},
+ {WL_HC_DD_PHY, "WL_HC_DD_PHY"},
+ {WL_HC_DD_REINIT, "WL_HC_DD_REINIT"},
+ {WL_HC_DD_TXQ_STALL, "WL_HC_DD_TXQ_STALL"},
+ {0, NULL}
+};
+
+hck_id_to_str_t hck_pcie_module_to_str[] = {
+ {HEALTH_CHECK_PCIEDEV_INDUCED_IND, "PCIEDEV_INDUCED_IND"},
+ {HEALTH_CHECK_PCIEDEV_H2D_DMA_IND, "PCIEDEV_H2D_DMA_IND"},
+ {HEALTH_CHECK_PCIEDEV_D2H_DMA_IND, "PCIEDEV_D2H_DMA_IND"},
+ {HEALTH_CHECK_PCIEDEV_IOCTL_STALL_IND, "PCIEDEV_IOCTL_STALL_IND"},
+ {HEALTH_CHECK_PCIEDEV_D3ACK_STALL_IND, "PCIEDEV_D3ACK_STALL_IND"},
+ {HEALTH_CHECK_PCIEDEV_NODS_IND, "PCIEDEV_NODS_IND"},
+ {HEALTH_CHECK_PCIEDEV_LINKSPEED_FALLBACK_IND, "PCIEDEV_LINKSPEED_FALLBACK_IND"},
+ {HEALTH_CHECK_PCIEDEV_DSACK_STALL_IND, "PCIEDEV_DSACK_STALL_IND"},
+ {0, NULL}
+};
+
+hck_id_to_str_t hck_rx_stall_v2_to_str[] = {
+ {BCM_RX_HC_RESERVED, "BCM_RX_HC_RESERVED"},
+ {BCM_RX_HC_UNSPECIFIED, "BCM_RX_HC_UNSPECIFIED"},
+ {BCM_RX_HC_UNICAST_DECRYPT_FAIL, "BCM_RX_HC_UNICAST_DECRYPT_FAIL"},
+ {BCM_RX_HC_BCMC_DECRYPT_FAIL, "BCM_RX_HC_BCMC_DECRYPT_FAIL"},
+ {BCM_RX_HC_UNICAST_REPLAY, "BCM_RX_HC_UNICAST_REPLAY"},
+ {BCM_RX_HC_BCMC_REPLAY, "BCM_RX_HC_BCMC_REPLAY"},
+ {BCM_RX_HC_AMPDU_DUP, "BCM_RX_HC_AMPDU_DUP"},
+ {0, NULL}
+};
+
+static void
+dhd_print_dongle_hck_id(uint32 id, hck_id_to_str_t *hck)
+{
+ while (hck->name != NULL) {
+ if (hck->id == id) {
+ DHD_ERROR(("DONGLE_HCK_EVENT: %s\n", hck->name));
+ return;
+ }
+ hck++;
+ }
+}
+
+void
+dhd_parse_hck_common_sw_event(bcm_xtlv_t *wl_hc)
+{
+
+ wl_rx_hc_info_v2_t *hck_rx_stall_v2;
+ uint16 id;
+
+ id = ltoh16(wl_hc->id);
+
+ if (id == WL_HC_DD_RX_STALL_V2) {
+ /* map the hck_rx_stall_v2 structure to the value of the XTLV */
+ hck_rx_stall_v2 =
+ (wl_rx_hc_info_v2_t*)wl_hc;
+ DHD_ERROR(("type:%d len:%d if_idx:%d ac:%d pkts:%d"
+ " drop:%d alert_th:%d reason:%d peer_ea:"MACF"\n",
+ hck_rx_stall_v2->type,
+ hck_rx_stall_v2->length,
+ hck_rx_stall_v2->if_idx,
+ hck_rx_stall_v2->ac,
+ hck_rx_stall_v2->rx_hc_pkts,
+ hck_rx_stall_v2->rx_hc_dropped_all,
+ hck_rx_stall_v2->rx_hc_alert_th,
+ hck_rx_stall_v2->reason,
+ ETHER_TO_MACF(hck_rx_stall_v2->peer_ea)));
+ dhd_print_dongle_hck_id(
+ ltoh32(hck_rx_stall_v2->reason),
+ hck_rx_stall_v2_to_str);
+ } else {
+ dhd_print_dongle_hck_id(ltoh16(wl_hc->id),
+ hck_sw_id_to_str);
+ }
+
+}
+
+#endif /* PARSE_DONGLE_HOST_EVENT */
+
void
dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
DHD_EVENT(("Line:%d ", *(uint32 *)p));
p += sizeof(uint32);
DHD_EVENT(("Caller Addr:0x%x\n", *(uint32 *)p));
+#ifdef PARSE_DONGLE_HOST_EVENT
+ DHD_ERROR(("DONGLE_HCK_EVENT: SOCRAM_IND_ASSERT_TAG\n"));
+#endif /* PARSE_DONGLE_HOST_EVENT */
break;
}
case SOCRAM_IND_TAG_HEALTH_CHECK:
ltoh32(pcie_hc->pcie_err_ind_type),
ltoh32(pcie_hc->pcie_flag),
ltoh32(pcie_hc->pcie_control_reg)));
+#ifdef PARSE_DONGLE_HOST_EVENT
+ dhd_print_dongle_hck_id(
+ ltoh32(pcie_hc->pcie_err_ind_type),
+ hck_pcie_module_to_str);
+#endif /* PARSE_DONGLE_HOST_EVENT */
break;
}
#ifdef HCHK_COMMON_SW_EVENT
return;
}
BCM_REFERENCE(wl_hc);
- DHD_EVENT(("WL SW HC type %d len %d",
- ltoh16(wl_hc->id), ltoh16(wl_hc->len)));
+ DHD_EVENT(("WL SW HC type %d len %d\n",
+ ltoh16(wl_hc->id), ltoh16(wl_hc->len)));
+
+#ifdef PARSE_DONGLE_HOST_EVENT
+ dhd_parse_hck_common_sw_event(wl_hc);
+#endif /* PARSE_DONGLE_HOST_EVENT */
break;
+
}
#endif /* HCHK_COMMON_SW_EVENT */
default:
break;
}
default:
- DHD_ERROR(("%s:Unknown TAG", __FUNCTION__));
+ DHD_ERROR(("%s:Unknown TAG\n", __FUNCTION__));
if (p && DHD_EVENT_ON()) {
prhex("SOCRAMIND", p, taglen);
}
break;
}
default:
- DHD_ERROR(("%s:Unknown DNGL Event Type:%d", __FUNCTION__, type));
+ DHD_ERROR(("%s:Unknown DNGL Event Type:%d\n", __FUNCTION__, type));
if (p && DHD_EVENT_ON()) {
prhex("SOCRAMIND", p, datalen);
}
uint evlen;
int ret;
uint16 usr_subtype;
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+ dhd_if_t *ifp = NULL;
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
ret = wl_host_event_get_data(pktdata, pktlen, &evu);
if (ret != BCME_OK) {
#endif // endif
#if defined(RTT_SUPPORT)
case WLC_E_PROXD:
+#ifndef WL_CFG80211
dhd_rtt_event_handler(dhd_pub, event, (void *)event_data);
+#endif /* WL_CFG80211 */
break;
#endif /* RTT_SUPPORT */
/* These are what external supplicant/authenticator wants */
}
break;
#endif /* WL_NAN */
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+ case WLC_E_REASSOC:
+ ifp = dhd_get_ifp(dhd_pub, event->ifidx);
+
+ if (!ifp)
+ break;
+
+ /* Consider STA role only since roam is disabled on P2P GC.
+ * Drop EAPOL M1 frame only if roam is done to same BSS.
+ */
+ if ((status == WLC_E_STATUS_SUCCESS) &&
+ IS_STA_IFACE(ndev_to_wdev(ifp->net)) &&
+ wl_cfg80211_is_event_from_connected_bssid(ifp->net, event, event->ifidx)) {
+ ifp->recv_reassoc_evt = TRUE;
+ }
+ break;
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
case WLC_E_LINK:
#ifdef PCIE_FULL_DONGLE
if (dhd_update_interface_link_status(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
}
}
#endif /* PCIE_FULL_DONGLE */
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+	/* reset reassoc/roam state on link event before falling through */
+ ifp = dhd_get_ifp(dhd_pub, event->ifidx);
+ if (ifp) {
+ ifp->recv_reassoc_evt = FALSE;
+ ifp->post_roam_evt = FALSE;
+ }
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
/* fall through */
-
default:
*ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
#ifdef DHD_UPDATE_INTF_MAC
memcpy(pvt_data->event.ifname, dhd_ifname(dhd_pub, *ifidx), IFNAMSIZ);
#endif // endif
+#ifdef DHD_STATUS_LOGGING
+ if (dhd_pub->statlog) {
+ dhd_statlog_process_event(dhd_pub, type, *ifidx,
+ status, reason, flags);
+ }
+#endif /* DHD_STATUS_LOGGING */
+
#ifdef SHOW_EVENTS
if (DHD_FWLOG_ON() || DHD_EVENT_ON()) {
wl_show_host_event(dhd_pub, event,
#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
#endif // endif
-#if defined(PKT_FILTER_SUPPORT)
/* Convert user's input in hex pattern to byte-size mask */
int
wl_pattern_atoh(char *src, char *dst)
return i;
}
+#if defined(PKT_FILTER_SUPPORT)
int
pattern_atoh_len(char *src, char *dst, int len)
{
int rc = -1;
uint32 mask_size;
uint32 pattern_size;
- char *argv[16], * buf = 0;
+ char *argv[MAXPKT_ARG] = {0}, * buf = 0;
int i = 0;
char *arg_save = 0, *arg_org = 0;
}
argv[i] = bcmstrtok(&arg_save, " ", 0);
- while (argv[i++])
+ while (argv[i++]) {
+ if (i >= MAXPKT_ARG) {
+ DHD_ERROR(("Invalid args provided\n"));
+ goto fail;
+ }
argv[i] = bcmstrtok(&arg_save, " ", 0);
+ }
i = 0;
if (argv[i] == NULL) {
rc = rc >= 0 ? 0 : rc;
if (rc)
- DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+ DHD_ERROR(("%s: failed to add pktfilter %s, retcode = %d\n",
__FUNCTION__, arg, rc));
else
DHD_TRACE(("%s: successfully added pktfilter %s\n",
dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
{
int retcode;
-
+#ifdef WL_CFG80211
+ /* Do not enable arp offload in case of non-STA interfaces active */
+ if (arp_enable &&
+ (wl_cfg80211_check_vif_in_use(dhd_linux_get_primary_netdev(dhd)))) {
+ DHD_TRACE(("%s: Virtual interfaces active, ignore arp offload request \n",
+ __FUNCTION__));
+ return;
+ }
+#endif /* WL_CFG80211 */
retcode = dhd_wl_ioctl_set_intiovar(dhd, "arpoe",
arp_enable, WLC_SET_VAR, TRUE, 0);
DHD_ERROR(("%s: failed to enabe ARP offload to %d, retcode = %d\n",
__FUNCTION__, arp_enable, retcode));
else
+#ifdef DHD_LOG_DUMP
+ DHD_LOG_MEM(("%s: successfully enabed ARP offload to %d\n",
+ __FUNCTION__, arp_enable));
+#else
DHD_ARPOE(("%s: successfully enabed ARP offload to %d\n",
__FUNCTION__, arp_enable));
+#endif /* DHD_LOG_DUMP */
if (arp_enable) {
uint32 version;
retcode = dhd_wl_ioctl_get_intiovar(dhd, "arp_version",
ret = dhd_iovar(dhd, idx, "arp_table_clear", NULL, 0, NULL, 0, TRUE);
if (ret < 0)
DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+ else {
+#ifdef DHD_LOG_DUMP
+ DHD_LOG_MEM(("%s: ARP table clear\n", __FUNCTION__));
+#else
+ DHD_TRACE(("%s: ARP table clear\n", __FUNCTION__));
+#endif /* DHD_LOG_DUMP */
+ }
+ /* mac address isn't cleared here but it will be cleared after dongle off */
+ dhd->hmac_updated = 0;
}
void
ret = dhd_iovar(dhd, idx, "arp_hostip_clear", NULL, 0, NULL, 0, TRUE);
if (ret < 0)
DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+ else {
+#ifdef DHD_LOG_DUMP
+ DHD_LOG_MEM(("%s: ARP host ip clear\n", __FUNCTION__));
+#else
+ DHD_TRACE(("%s: ARP host ip clear\n", __FUNCTION__));
+#endif /* DHD_LOG_DUMP */
+ }
}
void
ret = dhd_iovar(dhd, idx, "arp_hostip", (char *)&ipaddr, sizeof(ipaddr),
NULL, 0, TRUE);
- if (ret)
+ if (ret < 0)
DHD_ERROR(("%s: ARP ip addr add failed, ret = %d\n", __FUNCTION__, ret));
- else
- DHD_ARPOE(("%s: sARP H ipaddr entry added \n",
- __FUNCTION__));
+ else {
+ /* mac address is updated in the dongle */
+ dhd->hmac_updated = 1;
+#ifdef DHD_LOG_DUMP
+ DHD_LOG_MEM(("%s: ARP ip addr entry added \n", __FUNCTION__));
+#else
+ DHD_ARPOE(("%s: ARP ip addr entry added \n", __FUNCTION__));
+#endif /* DHD_LOG_DUMP */
+ }
}
int
int ret = -1;
int allowed_skip_dtim_cnt = 0;
+ if (dhd->disable_dtim_in_suspend) {
+ DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
+ bcn_li_dtim = 0;
+ return bcn_li_dtim;
+ }
+
/* Check if associated */
if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
int ap_beacon = 0;
int allowed_skip_dtim_cnt = 0;
+ if (dhd->disable_dtim_in_suspend) {
+ DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
+ bcn_li_dtim = 0;
+ goto exit;
+ }
+
/* Check if associated */
if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
}
#endif /* OEM_ANDROID && BCMPCIE */
+#ifdef CONFIG_SILENT_ROAM
+int
+dhd_sroam_set_mon(dhd_pub_t *dhd, bool set)
+{
+ int ret = BCME_OK;
+ wlc_sroam_t *psroam;
+ wlc_sroam_info_t *sroam;
+ uint sroamlen = sizeof(*sroam) + SROAM_HDRLEN;
+
+ /* Check if associated */
+ if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
+ DHD_TRACE(("%s NOT assoc\n", __FUNCTION__));
+ return ret;
+ }
+
+ if (set && (dhd->op_mode &
+ (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
+ DHD_INFO((" Failed to set sroam %d, op_mode 0x%04x\n", set, dhd->op_mode));
+ return ret;
+ }
+
+ if (!dhd->sroam_turn_on) {
+ DHD_INFO((" Failed to set sroam %d, sroam turn %d\n", set, dhd->sroam_turn_on));
+ return ret;
+ }
+ psroam = (wlc_sroam_t *)MALLOCZ(dhd->osh, sroamlen);
+ if (!psroam) {
+ DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+
+ ret = dhd_iovar(dhd, 0, "sroam", NULL, 0, (char *)psroam, sroamlen, FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Failed to Get sroam %d\n", __FUNCTION__, ret));
+ goto done;
+ }
+
+ if (psroam->ver != WLC_SILENT_ROAM_CUR_VER) {
+ ret = BCME_VERSION;
+ goto done;
+ }
+
+ sroam = (wlc_sroam_info_t *)psroam->data;
+ sroam->sroam_on = set;
+ DHD_INFO((" Silent roam monitor mode %s\n", set ? "On" : "Off"));
+
+ ret = dhd_iovar(dhd, 0, "sroam", (char *)psroam, sroamlen, NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Failed to Set sroam %d\n", __FUNCTION__, ret));
+ }
+
+done:
+ if (psroam) {
+ MFREE(dhd->osh, psroam, sroamlen);
+ }
+ return ret;
+
+}
+#endif /* CONFIG_SILENT_ROAM */
+
/* Check if the mode supports STA MODE */
bool dhd_support_sta_mode(dhd_pub_t *dhd)
{
MFREE(dhd->osh, buffer, length);
}
-#if defined(DHD_8021X_DUMP)
-#define EAP_PRINT(str) \
- DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: " str "\n", \
- ifname, direction ? "TX" : "RX"));
-#else
-#define EAP_PRINT(str)
-#endif /* DHD_8021X_DUMP */
-/* Parse EAPOL 4 way handshake messages */
-void
-dhd_dump_eapol_4way_message(dhd_pub_t *dhd, char *ifname,
- char *dump_data, bool direction)
-{
- unsigned char type;
- int pair, ack, mic, kerr, req, sec, install;
- unsigned short us_tmp;
-
- type = dump_data[15];
- if (type == 0) {
- if ((dump_data[22] == 1) && (dump_data[18] == 1)) {
- dhd->conf->eapol_status = EAPOL_STATUS_WPS_REQID;
- EAP_PRINT("EAP Packet, Request, Identity");
- } else if ((dump_data[22] == 1) && (dump_data[18] == 2)) {
- dhd->conf->eapol_status = EAPOL_STATUS_WPS_RSPID;
- EAP_PRINT("EAP Packet, Response, Identity");
- } else if (dump_data[22] == 254) {
- if (dump_data[30] == 1) {
- dhd->conf->eapol_status = EAPOL_STATUS_WPS_WSC_START;
- EAP_PRINT("EAP Packet, WSC Start");
- } else if (dump_data[30] == 4) {
- if (dump_data[41] == 4) {
- dhd->conf->eapol_status = EAPOL_STATUS_WPS_M1;
- EAP_PRINT("EAP Packet, WPS M1");
- } else if (dump_data[41] == 5) {
- dhd->conf->eapol_status = EAPOL_STATUS_WPS_M2;
- EAP_PRINT("EAP Packet, WPS M2");
- } else if (dump_data[41] == 7) {
- dhd->conf->eapol_status = EAPOL_STATUS_WPS_M3;
- EAP_PRINT("EAP Packet, WPS M3");
- } else if (dump_data[41] == 8) {
- dhd->conf->eapol_status = EAPOL_STATUS_WPS_M4;
- EAP_PRINT("EAP Packet, WPS M4");
- } else if (dump_data[41] == 9) {
- dhd->conf->eapol_status = EAPOL_STATUS_WPS_M5;
- EAP_PRINT("EAP Packet, WPS M5");
- } else if (dump_data[41] == 10) {
- dhd->conf->eapol_status = EAPOL_STATUS_WPS_M6;
- EAP_PRINT("EAP Packet, WPS M6");
- } else if (dump_data[41] == 11) {
- dhd->conf->eapol_status = EAPOL_STATUS_WPS_M7;
- EAP_PRINT("EAP Packet, WPS M7");
- } else if (dump_data[41] == 12) {
- dhd->conf->eapol_status = EAPOL_STATUS_WPS_M8;
- EAP_PRINT("EAP Packet, WPS M8");
- }
- } else if (dump_data[30] == 5) {
- dhd->conf->eapol_status = EAPOL_STATUS_WPS_DONE;
- EAP_PRINT("EAP Packet, WSC Done");
- }
- } else {
- DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n",
- ifname, direction ? "TX" : "RX",
- dump_data[14], dump_data[15], dump_data[30]));
- }
- } else if (type == 3 && dump_data[18] == 2) {
- us_tmp = (dump_data[19] << 8) | dump_data[20];
- pair = 0 != (us_tmp & 0x08);
- ack = 0 != (us_tmp & 0x80);
- mic = 0 != (us_tmp & 0x100);
- kerr = 0 != (us_tmp & 0x400);
- req = 0 != (us_tmp & 0x800);
- sec = 0 != (us_tmp & 0x200);
- install = 0 != (us_tmp & 0x40);
-
- if (!sec && !mic && ack && !install && pair && !kerr && !req) {
- dhd->conf->eapol_status = EAPOL_STATUS_WPA_M1;
- EAP_PRINT("EAPOL Packet, 4-way handshake, M1");
- } else if (pair && !install && !ack && mic && !sec && !kerr && !req) {
- dhd->conf->eapol_status = EAPOL_STATUS_WPA_M2;
- EAP_PRINT("EAPOL Packet, 4-way handshake, M2");
- } else if (pair && ack && mic && sec && !kerr && !req) {
- dhd->conf->eapol_status = EAPOL_STATUS_WPA_M3;
- EAP_PRINT("EAPOL Packet, 4-way handshake, M3");
- } else if (pair && !install && !ack && mic && sec && !req && !kerr) {
- dhd->conf->eapol_status = EAPOL_STATUS_WPA_M4;
- EAP_PRINT("EAPOL Packet, 4-way handshake, M4");
- } else {
- DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n",
- ifname, direction ? "TX" : "RX",
- dump_data[14], dump_data[15], dump_data[30]));
- }
- } else {
- DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n",
- ifname, direction ? "TX" : "RX",
- dump_data[14], dump_data[15], dump_data[30]));
- }
-}
-
#ifdef SHOW_LOGTRACE
int
dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size,
goto error;
}
/* fwid is at the end of fw bin in string format */
- if (dhd_os_seek_file(file, file_len - sizeof(fwid_str) - 1) < 0) {
+ if (dhd_os_seek_file(file, file_len - (sizeof(fwid_str) - 1)) < 0) {
DHD_ERROR(("%s: can't seek file \n", __FUNCTION__));
goto error;
}
/* check if fw id in logstrs.bin matches the fw one */
if (hdr->trailer.fw_id != fwid) {
- DHD_ERROR(("%s: logstr id does not match FW!\n", __FUNCTION__));
+ DHD_ERROR(("%s: logstr id does not match FW!"
+ "logstrs_fwid:0x%x, rtecdc_fwid:0x%x\n",
+ __FUNCTION__, hdr->trailer.fw_id, fwid));
goto error;
}
static event_ecounters_cfg_t event_ecounters_cfg_tbl[] = {
/* Interface specific event ecounters */
- {WLC_E_LINK, ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_EVENT_STATS},
+ {WLC_E_DEAUTH_IND, ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_EVENT_STATS},
};
/* Accepts an argument to -s, -g or -f and creates an XTLV */
memset(&logset_op, 0, sizeof(logset_op));
logset_type.version = htod16(EVENT_LOG_SET_TYPE_CURRENT_VERSION);
logset_type.len = htod16(sizeof(wl_el_set_type_t));
- for (i = 0; i < WL_MAX_PRESERVE_BUFFER; i++) {
+ for (i = 0; i < dhd->event_log_max_sets; i++) {
logset_type.set = i;
err = dhd_iovar(dhd, 0, "event_log_set_type", (char *)&logset_type,
sizeof(logset_type), (char *)&logset_op, sizeof(logset_op), FALSE);
return ret;
}
+static int
+dhd_ecounter_autoconfig(dhd_pub_t *dhd)
+{
+ int rc = BCME_OK;
+ uint32 buf;
+ rc = dhd_iovar(dhd, 0, "ecounters_autoconfig", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
+
+ if (rc != BCME_OK) {
+
+ if (rc != BCME_UNSUPPORTED) {
+			DHD_ERROR(("%s Ecounter autoconfig in fw failed : %d\n", __FUNCTION__, rc));
+			rc = BCME_OK;
+ } else {
+ DHD_ERROR(("%s Ecounter autoconfig in FW not supported\n", __FUNCTION__));
+ }
+ }
+
+ return rc;
+}
+
+int
+dhd_ecounter_configure(dhd_pub_t *dhd, bool enable)
+{
+ int rc = BCME_OK;
+ if (enable) {
+ if (dhd_ecounter_autoconfig(dhd) != BCME_OK) {
+ if ((rc = dhd_start_ecounters(dhd)) != BCME_OK) {
+ DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__));
+ } else if ((rc = dhd_start_event_ecounters(dhd)) != BCME_OK) {
+ DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__));
+ }
+ }
+ } else {
+ if ((rc = dhd_stop_ecounters(dhd)) != BCME_OK) {
+ DHD_ERROR(("%s Ecounters stop failed\n", __FUNCTION__));
+ } else if ((rc = dhd_stop_event_ecounters(dhd)) != BCME_OK) {
+ DHD_ERROR(("%s Event_Ecounters stop failed\n", __FUNCTION__));
+ }
+ }
+ return rc;
+}
+
int
dhd_start_ecounters(dhd_pub_t *dhd)
{
uint8 i = 0;
uint8 *start_ptr;
- uint32 buf;
int rc = BCME_OK;
bcm_xtlv_t *elt;
ecounters_config_request_v2_t *req = NULL;
ecountersv2_processed_xtlv_list_elt *processed_containers_list = NULL;
uint16 total_processed_containers_len = 0;
- rc = dhd_iovar(dhd, 0, "ecounters_autoconfig", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
-
- if (rc != BCME_UNSUPPORTED)
- return rc;
-
- rc = BCME_OK;
-
for (i = 0; i < ARRAYSIZE(ecounters_cfg_tbl); i++) {
ecounters_cfg_t *ecounter_stat = &ecounters_cfg_tbl[i];
}
#ifdef DHD_LOG_DUMP
+int
+dhd_dump_debug_ring(dhd_pub_t *dhdp, void *ring_ptr, const void *user_buf,
+ log_dump_section_hdr_t *sec_hdr,
+ char *text_hdr, int buflen, uint32 sec_type)
+{
+ uint32 rlen = 0;
+ uint32 data_len = 0;
+ void *data = NULL;
+ unsigned long flags = 0;
+ int ret = 0;
+ dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr;
+ int pos = 0;
+ int fpos_sechdr = 0;
+
+ if (!dhdp || !ring || !user_buf || !sec_hdr || !text_hdr) {
+ return BCME_BADARG;
+ }
+ /* do not allow further writes to the ring
+ * till we flush it
+ */
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+ ring->state = RING_SUSPEND;
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+
+ if (dhdp->concise_dbg_buf) {
+ /* re-use concise debug buffer temporarily
+ * to pull ring data, to write
+ * record by record to file
+ */
+ data_len = CONCISE_DUMP_BUFLEN;
+ data = dhdp->concise_dbg_buf;
+ ret = dhd_export_debug_data(text_hdr, NULL, user_buf, strlen(text_hdr), &pos);
+ /* write the section header now with zero length,
+ * once the correct length is found out, update
+ * it later
+ */
+ fpos_sechdr = pos;
+ sec_hdr->type = sec_type;
+ sec_hdr->length = 0;
+ ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf,
+ sizeof(*sec_hdr), &pos);
+ do {
+ rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE);
+ if (rlen > 0) {
+ /* write the log */
+ ret = dhd_export_debug_data(data, NULL, user_buf, rlen, &pos);
+ }
+ DHD_DBGIF(("%s: rlen : %d\n", __FUNCTION__, rlen));
+ } while ((rlen > 0));
+ /* now update the section header length in the file */
+ /* Complete ring size is dumped by HAL, hence updating length to ring size */
+ sec_hdr->length = ring->ring_size;
+ ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf,
+ sizeof(*sec_hdr), &fpos_sechdr);
+ } else {
+ DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__));
+ }
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+ ring->state = RING_ACTIVE;
+ /* Resetting both read and write pointer,
+ * since all items are read.
+ */
+ ring->rp = ring->wp = 0;
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+
+ return ret;
+}
+
int
dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file,
- unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr)
+ unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr,
+ char *text_hdr, uint32 sec_type)
{
uint32 rlen = 0;
uint32 data_len = 0, total_len = 0;
int ret = 0;
dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr;
- if (!dhdp || !ring || !file || !sec_hdr || !file_posn)
+ if (!dhdp || !ring || !file || !sec_hdr ||
+ !file_posn || !text_hdr)
return BCME_BADARG;
/* do not allow further writes to the ring
*/
data_len = CONCISE_DUMP_BUFLEN;
data = dhdp->concise_dbg_buf;
- dhd_os_write_file_posn(file, file_posn, ECNTRS_LOG_HDR,
- strlen(ECNTRS_LOG_HDR));
+ dhd_os_write_file_posn(file, file_posn, text_hdr,
+ strlen(text_hdr));
/* write the section header now with zero length,
* once the correct length is found out, update
* it later
*/
+ dhd_init_sec_hdr(sec_hdr);
fpos_sechdr = *file_posn;
- sec_hdr->type = LOG_DUMP_SECTION_ECNTRS;
+ sec_hdr->type = sec_type;
sec_hdr->length = 0;
dhd_os_write_file_posn(file, file_posn, (char *)sec_hdr,
sizeof(*sec_hdr));
DHD_DBG_RING_LOCK(ring->lock, flags);
ring->state = RING_ACTIVE;
+ /* Resetting both read and write pointer,
+ * since all items are read.
+ */
+ ring->rp = ring->wp = 0;
DHD_DBG_RING_UNLOCK(ring->lock, flags);
-
return BCME_OK;
}
return BCME_ERROR;
}
- dhdp->logdump_cookie = dhd_ring_init(buf, buf_size,
- LOGDUMP_COOKIE_STR_LEN, MAX_LOGUDMP_COOKIE_CNT);
+ dhdp->logdump_cookie = dhd_ring_init(dhdp, buf, buf_size,
+ LOGDUMP_COOKIE_STR_LEN, MAX_LOGUDMP_COOKIE_CNT,
+ DHD_RING_TYPE_FIXED);
if (!dhdp->logdump_cookie) {
DHD_ERROR(("FAIL TO INIT COOKIE RING\n"));
return BCME_ERROR;
return;
}
if (dhdp->logdump_cookie) {
- dhd_ring_deinit(dhdp->logdump_cookie);
+ dhd_ring_deinit(dhdp, dhdp->logdump_cookie);
}
return;
static inline int
__dhd_log_dump_cookie_to_file(
- dhd_pub_t *dhdp, void *fp, unsigned long *f_pos, char *buf, uint32 buf_size)
+ dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos,
+ char *buf, uint32 buf_size)
{
uint32 remain = buf_size;
int ret = BCME_ERROR;
char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
log_dump_section_hdr_t sec_hdr;
+ uint32 read_idx;
+ uint32 write_idx;
+
+ read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
+ write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
while (dhd_logdump_cookie_count(dhdp) > 0) {
memset(tmp_buf, 0, sizeof(tmp_buf));
ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
}
remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf);
}
- ret = dhd_os_write_file_posn(fp, f_pos, COOKIE_LOG_HDR, strlen(COOKIE_LOG_HDR));
+ dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
+ dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
+
+ ret = dhd_export_debug_data(COOKIE_LOG_HDR, fp, user_buf, strlen(COOKIE_LOG_HDR), f_pos);
if (ret < 0) {
DHD_ERROR(("%s : Write file Error for cookie hdr\n", __FUNCTION__));
return ret;
sec_hdr.timestamp = local_clock();
sec_hdr.type = LOG_DUMP_SECTION_COOKIE;
sec_hdr.length = buf_size - remain;
- ret = dhd_os_write_file_posn(fp, f_pos, (char *)&sec_hdr, sizeof(sec_hdr));
+
+ ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), f_pos);
if (ret < 0) {
DHD_ERROR(("%s : Write file Error for section hdr\n", __FUNCTION__));
return ret;
}
- ret = dhd_os_write_file_posn(fp, f_pos, buf, sec_hdr.length);
+ ret = dhd_export_debug_data(buf, fp, user_buf, sec_hdr.length, f_pos);
if (ret < 0) {
DHD_ERROR(("%s : Write file Error for cookie data\n", __FUNCTION__));
}
return ret;
}
+uint32
+dhd_log_dump_cookie_len(dhd_pub_t *dhdp)
+{
+ int len = 0;
+ char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
+ log_dump_section_hdr_t sec_hdr;
+ char *buf = NULL;
+ int ret = BCME_ERROR;
+ uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
+ uint32 read_idx;
+ uint32 write_idx;
+ uint32 remain;
+
+ remain = buf_size;
+
+ if (!dhdp || !dhdp->logdump_cookie) {
+ DHD_ERROR(("%s At least one ptr is NULL "
+ "dhdp = %p cookie %p\n",
+ __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL));
+ goto exit;
+ }
+
+ buf = (char *)MALLOCZ(dhdp->osh, buf_size);
+ if (!buf) {
+ DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
+ goto exit;
+ }
+
+ read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
+ write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
+ while (dhd_logdump_cookie_count(dhdp) > 0) {
+ memset(tmp_buf, 0, sizeof(tmp_buf));
+ ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
+ if (ret != BCME_OK) {
+ goto exit;
+ }
+ remain -= (uint32)strlen(tmp_buf);
+ }
+ dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
+ dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
+ len += strlen(COOKIE_LOG_HDR);
+ len += sizeof(sec_hdr);
+ len += (buf_size - remain);
+exit:
+ if (buf)
+ MFREE(dhdp->osh, buf, buf_size);
+ return len;
+}
+
+int
+dhd_log_dump_cookie(dhd_pub_t *dhdp, const void *user_buf)
+{
+ int ret = BCME_ERROR;
+ char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
+ log_dump_section_hdr_t sec_hdr;
+ char *buf = NULL;
+ uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
+ int pos = 0;
+ uint32 read_idx;
+ uint32 write_idx;
+ uint32 remain;
+
+ remain = buf_size;
+
+ if (!dhdp || !dhdp->logdump_cookie) {
+ DHD_ERROR(("%s At least one ptr is NULL "
+ "dhdp = %p cookie %p\n",
+ __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL));
+ goto exit;
+ }
+
+ buf = (char *)MALLOCZ(dhdp->osh, buf_size);
+ if (!buf) {
+ DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
+ goto exit;
+ }
+
+ read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
+ write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
+ while (dhd_logdump_cookie_count(dhdp) > 0) {
+ memset(tmp_buf, 0, sizeof(tmp_buf));
+ ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
+ if (ret != BCME_OK) {
+ goto exit;
+ }
+ remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf);
+ }
+ dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
+ dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
+ ret = dhd_export_debug_data(COOKIE_LOG_HDR, NULL, user_buf, strlen(COOKIE_LOG_HDR), &pos);
+ sec_hdr.magic = LOG_DUMP_MAGIC;
+ sec_hdr.timestamp = local_clock();
+ sec_hdr.type = LOG_DUMP_SECTION_COOKIE;
+ sec_hdr.length = buf_size - remain;
+ ret = dhd_export_debug_data((char *)&sec_hdr, NULL, user_buf, sizeof(sec_hdr), &pos);
+ ret = dhd_export_debug_data(buf, NULL, user_buf, sec_hdr.length, &pos);
+exit:
+ if (buf)
+ MFREE(dhdp->osh, buf, buf_size);
+ return ret;
+}
+
int
-dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, unsigned long *f_pos)
+dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos)
{
char *buf;
int ret = BCME_ERROR;
uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
- if (!dhdp || !dhdp->logdump_cookie ||!fp || !f_pos) {
+ if (!dhdp || !dhdp->logdump_cookie || (!fp && !user_buf) || !f_pos) {
DHD_ERROR(("%s At least one ptr is NULL "
"dhdp = %p cookie %p fp = %p f_pos = %p\n",
__FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL, fp, f_pos));
DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
return ret;
}
- ret = __dhd_log_dump_cookie_to_file(dhdp, fp, f_pos, buf, buf_size);
+ ret = __dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, f_pos, buf, buf_size);
MFREE(dhdp->osh, buf, buf_size);
return ret;
#endif /* DHD_LOG_DUMP */
#ifdef DHD_LOG_DUMP
+#define DEBUG_DUMP_TRIGGER_INTERVAL_SEC 4
void
dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd)
{
+#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
log_dump_type_t *flush_type;
+#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+ uint64 current_time_sec;
if (!dhdp) {
DHD_ERROR(("dhdp is NULL !\n"));
return;
}
- flush_type = MALLOCZ(dhdp->osh, sizeof(log_dump_type_t));
- if (!flush_type) {
- DHD_ERROR(("%s Fail to malloc flush_type\n", __FUNCTION__));
+ current_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
+
+ DHD_ERROR(("%s: current_time_sec=%lld debug_dump_time_sec=%lld interval=%d\n",
+ __FUNCTION__, current_time_sec, dhdp->debug_dump_time_sec,
+ DEBUG_DUMP_TRIGGER_INTERVAL_SEC));
+
+ if ((current_time_sec - dhdp->debug_dump_time_sec) < DEBUG_DUMP_TRIGGER_INTERVAL_SEC) {
+ DHD_ERROR(("%s : Last debug dump triggered(%lld) within %d seconds, so SKIP\n",
+ __FUNCTION__, dhdp->debug_dump_time_sec, DEBUG_DUMP_TRIGGER_INTERVAL_SEC));
return;
}
+
clear_debug_dump_time(dhdp->debug_dump_time_str);
/* */
dhdp->debug_dump_subcmd = subcmd;
+ dhdp->debug_dump_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
+
+#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
+ /* flush_type is freed at do_dhd_log_dump function */
+ flush_type = MALLOCZ(dhdp->osh, sizeof(log_dump_type_t));
if (flush_type) {
*flush_type = DLD_BUF_TYPE_ALL;
dhd_schedule_log_dump(dhdp, flush_type);
+ } else {
+ DHD_ERROR(("%s Fail to malloc flush_type\n", __FUNCTION__));
+ return;
}
+#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+
+ /* Inside dhd_mem_dump, event notification will be sent to HAL and
+ * from other context DHD pushes memdump, debug_dump and pktlog dump
+ * to HAL and HAL will write into file
+ */
#if (defined(BCMPCIE) || defined(BCMSDIO)) && defined(DHD_FW_COREDUMP)
dhdp->memdump_type = DUMP_TYPE_BY_SYSDUMP;
dhd_bus_mem_dump(dhdp);
#endif /* BCMPCIE && DHD_FW_COREDUMP */
+
}
#endif /* DHD_LOG_DUMP */
}
}
#endif /* DHD_H2D_LOG_TIME_SYNC */
+
+#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
+int
+dhd_control_he_enab(dhd_pub_t * dhd, uint8 he_enab)
+{
+ int ret = BCME_OK;
+ bcm_xtlv_t *pxtlv = NULL;
+ uint8 mybuf[DHD_IOVAR_BUF_SIZE];
+ uint16 mybuf_len = sizeof(mybuf);
+ pxtlv = (bcm_xtlv_t *)mybuf;
+
+ ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &mybuf_len, WL_HE_CMD_ENAB, sizeof(he_enab),
+ &he_enab, BCM_XTLV_OPTION_ALIGN32);
+
+ if (ret != BCME_OK) {
+		DHD_ERROR(("%s failed to pack he enab, err: %s\n", __FUNCTION__, bcmerrorstr(ret)));
+		ret = -EINVAL;
+ return ret;
+ }
+
+ ret = dhd_iovar(dhd, 0, "he", (char *)&mybuf, sizeof(mybuf), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s he_enab (%d) set failed, err: %s\n",
+ __FUNCTION__, he_enab, bcmerrorstr(ret)));
+ } else {
+		DHD_ERROR(("%s he_enab (%d) set successfully\n", __FUNCTION__, he_enab));
+ }
+
+ return ret;
+}
+#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
#include <typedefs.h>\r
#include <osl.h>\r
\r
+#include <bcmendian.h>\r
#include <bcmutils.h>\r
#include <hndsoc.h>\r
#include <bcmsdbus.h>\r
#include <sdio.h>\r
#include <sbchipc.h>\r
#endif\r
+#ifdef WL_CFG80211\r
+#include <wl_cfg80211.h>\r
+#endif\r
\r
#include <dhd_config.h>\r
#include <dhd_dbg.h>\r
+#include <wl_android.h>\r
\r
/* message levels */\r
-#define CONFIG_ERROR_LEVEL 0x0001\r
-#define CONFIG_TRACE_LEVEL 0x0002\r
+#define CONFIG_ERROR_LEVEL (1 << 0)\r
+#define CONFIG_TRACE_LEVEL (1 << 1)\r
+#define CONFIG_MSG_LEVEL (1 << 15)\r
\r
-uint config_msg_level = CONFIG_ERROR_LEVEL;\r
+uint config_msg_level = CONFIG_ERROR_LEVEL | CONFIG_MSG_LEVEL;\r
\r
-#define CONFIG_ERROR(x) \\r
+#define CONFIG_MSG(x, args...) \\r
+ do { \\r
+ if (config_msg_level & CONFIG_MSG_LEVEL) { \\r
+ printk(KERN_ERR "[dhd] %s : " x, __func__, ## args); \\r
+ } \\r
+ } while (0)\r
+#define CONFIG_ERROR(x, args...) \\r
do { \\r
if (config_msg_level & CONFIG_ERROR_LEVEL) { \\r
- printk(KERN_ERR "CONFIG-ERROR) "); \\r
- printk x; \\r
+ printk(KERN_ERR "[dhd] CONFIG-ERROR) %s : " x, __func__, ## args); \\r
} \\r
} while (0)\r
-#define CONFIG_TRACE(x) \\r
+#define CONFIG_TRACE(x, args...) \\r
do { \\r
if (config_msg_level & CONFIG_TRACE_LEVEL) { \\r
- printk(KERN_ERR "CONFIG-TRACE) "); \\r
- printk x; \\r
+ printk(KERN_ERR "[dhd] CONFIG-TRACE) %s : " x, __func__, ## args); \\r
} \\r
} while (0)\r
\r
#define MAXSZ_BUF 1000\r
#define MAXSZ_CONFIG 4096\r
\r
+#ifndef WL_CFG80211\r
#define htod32(i) i\r
#define htod16(i) i\r
#define dtoh32(i) i\r
#define dtoh16(i) i\r
#define htodchanspec(i) i\r
#define dtohchanspec(i) i\r
+#endif\r
+\r
+#if defined(SUSPEND_EVENT) && defined(PROP_TXSTATUS)\r
+#if defined(BCMSDIO) || defined(BCMDBUS)\r
+#include <dhd_wlfc.h>\r
+#endif /* BCMSDIO || BCMDBUS */\r
+#endif /* SUSPEND_EVENT && PROP_TXSTATUS */\r
\r
#define MAX_EVENT_BUF_NUM 16\r
typedef struct eventmsg_buf {\r
uint chip;\r
uint chiprev;\r
uint ag_type;\r
- bool clm;\r
char *chip_name;\r
char *module_name;\r
} cihp_name_map_t;\r
\r
/* Map of WLC_E events to connection failure strings */\r
#define DONT_CARE 9999\r
-const cihp_name_map_t chip_name_map [] = {\r
- /* ChipID Chiprev AG CLM ChipName ModuleName */\r
+const cihp_name_map_t chip_name_map[] = {\r
+ /* ChipID Chiprev AG ChipName ModuleName */\r
#ifdef BCMSDIO\r
- {BCM43362_CHIP_ID, 0, DONT_CARE, FALSE, "bcm40181a0", ""},\r
- {BCM43362_CHIP_ID, 1, DONT_CARE, FALSE, "bcm40181a2", ""},\r
- {BCM4330_CHIP_ID, 4, FW_TYPE_G, FALSE, "bcm40183b2", ""},\r
- {BCM4330_CHIP_ID, 4, FW_TYPE_AG, FALSE, "bcm40183b2_ag", ""},\r
- {BCM43430_CHIP_ID, 0, DONT_CARE, FALSE, "bcm43438a0", "ap6212"},\r
- {BCM43430_CHIP_ID, 1, DONT_CARE, FALSE, "bcm43438a1", "ap6212a"},\r
- {BCM43430_CHIP_ID, 2, DONT_CARE, FALSE, "bcm43436b0", "ap6236"},\r
- {BCM43012_CHIP_ID, 1, DONT_CARE, TRUE, "bcm43013b0", ""},\r
- {BCM4334_CHIP_ID, 3, DONT_CARE, FALSE, "bcm4334b1_ag", ""},\r
- {BCM43340_CHIP_ID, 2, DONT_CARE, FALSE, "bcm43341b0_ag", ""},\r
- {BCM43341_CHIP_ID, 2, DONT_CARE, FALSE, "bcm43341b0_ag", ""},\r
- {BCM4324_CHIP_ID, 5, DONT_CARE, FALSE, "bcm43241b4_ag", ""},\r
- {BCM4335_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4339a0_ag", ""},\r
- {BCM4339_CHIP_ID, 1, DONT_CARE, FALSE, "bcm4339a0_ag", "ap6335"},\r
- {BCM4345_CHIP_ID, 6, DONT_CARE, FALSE, "bcm43455c0_ag", "ap6255"},\r
- {BCM43454_CHIP_ID, 6, DONT_CARE, FALSE, "bcm43455c0_ag", ""},\r
- {BCM4345_CHIP_ID, 9, DONT_CARE, FALSE, "bcm43456c5_ag", "ap6256"},\r
- {BCM43454_CHIP_ID, 9, DONT_CARE, FALSE, "bcm43456c5_ag", ""},\r
- {BCM4354_CHIP_ID, 1, DONT_CARE, FALSE, "bcm4354a1_ag", ""},\r
- {BCM4354_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4356a2_ag", "ap6356"},\r
- {BCM4356_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4356a2_ag", ""},\r
- {BCM4371_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4356a2_ag", ""},\r
- {BCM43569_CHIP_ID, 3, DONT_CARE, FALSE, "bcm4358a3_ag", ""},\r
- {BCM4359_CHIP_ID, 5, DONT_CARE, FALSE, "bcm4359b1_ag", ""},\r
- {BCM4359_CHIP_ID, 9, DONT_CARE, FALSE, "bcm4359c0_ag", "ap6398s"},\r
- {BCM43751_CHIP_ID, 1, DONT_CARE, TRUE, "bcm43751a1_ag", "ap6271s"},\r
+ {BCM43362_CHIP_ID, 0, DONT_CARE, "bcm40181a0", ""},\r
+ {BCM43362_CHIP_ID, 1, DONT_CARE, "bcm40181a2", ""},\r
+ {BCM4330_CHIP_ID, 4, FW_TYPE_G, "bcm40183b2", ""},\r
+ {BCM4330_CHIP_ID, 4, FW_TYPE_AG, "bcm40183b2_ag", ""},\r
+ {BCM43430_CHIP_ID, 0, DONT_CARE, "bcm43438a0", "ap6212"},\r
+ {BCM43430_CHIP_ID, 1, DONT_CARE, "bcm43438a1", "ap6212a"},\r
+ {BCM43430_CHIP_ID, 2, DONT_CARE, "bcm43436b0", "ap6236"},\r
+ {BCM43012_CHIP_ID, 1, FW_TYPE_G, "bcm43013b0", ""},\r
+ {BCM43012_CHIP_ID, 1, FW_TYPE_AG, "bcm43013c0_ag", ""},\r
+ {BCM4334_CHIP_ID, 3, DONT_CARE, "bcm4334b1_ag", ""},\r
+ {BCM43340_CHIP_ID, 2, DONT_CARE, "bcm43341b0_ag", ""},\r
+ {BCM43341_CHIP_ID, 2, DONT_CARE, "bcm43341b0_ag", ""},\r
+ {BCM4324_CHIP_ID, 5, DONT_CARE, "bcm43241b4_ag", ""},\r
+ {BCM4335_CHIP_ID, 2, DONT_CARE, "bcm4339a0_ag", ""},\r
+ {BCM4339_CHIP_ID, 1, DONT_CARE, "bcm4339a0_ag", "ap6335"},\r
+ {BCM4345_CHIP_ID, 6, DONT_CARE, "bcm43455c0_ag", "ap6255"},\r
+ {BCM43454_CHIP_ID, 6, DONT_CARE, "bcm43455c0_ag", ""},\r
+ {BCM4345_CHIP_ID, 9, DONT_CARE, "bcm43456c5_ag", "ap6256"},\r
+ {BCM43454_CHIP_ID, 9, DONT_CARE, "bcm43456c5_ag", ""},\r
+ {BCM4354_CHIP_ID, 1, DONT_CARE, "bcm4354a1_ag", ""},\r
+ {BCM4354_CHIP_ID, 2, DONT_CARE, "bcm4356a2_ag", "ap6356"},\r
+ {BCM4356_CHIP_ID, 2, DONT_CARE, "bcm4356a2_ag", ""},\r
+ {BCM4371_CHIP_ID, 2, DONT_CARE, "bcm4356a2_ag", ""},\r
+ {BCM43569_CHIP_ID, 3, DONT_CARE, "bcm4358a3_ag", ""},\r
+ {BCM4359_CHIP_ID, 5, DONT_CARE, "bcm4359b1_ag", ""},\r
+ {BCM4359_CHIP_ID, 9, DONT_CARE, "bcm4359c0_ag", "ap6398s"},\r
+ {BCM43751_CHIP_ID, 1, DONT_CARE, "bcm43751a1_ag", ""},\r
+ {BCM43751_CHIP_ID, 2, DONT_CARE, "bcm43751a2_ag", ""},\r
+ {BCM43752_CHIP_ID, 1, DONT_CARE, "bcm43752a1_ag", ""},\r
+ {BCM43752_CHIP_ID, 2, DONT_CARE, "bcm43752a2_ag", ""},\r
#endif\r
#ifdef BCMPCIE\r
- {BCM4354_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4356a2_pcie_ag", ""},\r
- {BCM4356_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4356a2_pcie_ag", ""},\r
- {BCM4359_CHIP_ID, 9, DONT_CARE, FALSE, "bcm4359c0_pcie_ag", ""},\r
- {BCM4362_CHIP_ID, 0, DONT_CARE, TRUE, "bcm43752a0_pcie_ag", ""},\r
+ {BCM4354_CHIP_ID, 2, DONT_CARE, "bcm4356a2_pcie_ag", ""},\r
+ {BCM4356_CHIP_ID, 2, DONT_CARE, "bcm4356a2_pcie_ag", ""},\r
+ {BCM4359_CHIP_ID, 9, DONT_CARE, "bcm4359c0_pcie_ag", ""},\r
+ {BCM43751_CHIP_ID, 1, DONT_CARE, "bcm43751a1_pcie_ag", ""},\r
+ {BCM43751_CHIP_ID, 2, DONT_CARE, "bcm43751a2_pcie_ag", ""},\r
+ {BCM43752_CHIP_ID, 1, DONT_CARE, "bcm43752a1_pcie_ag", ""},\r
+ {BCM43752_CHIP_ID, 2, DONT_CARE, "bcm43752a2_pcie_ag", ""},\r
#endif\r
#ifdef BCMDBUS\r
- {BCM43143_CHIP_ID, 2, DONT_CARE, FALSE, "bcm43143b0", ""},\r
- {BCM43242_CHIP_ID, 1, DONT_CARE, FALSE, "bcm43242a1_ag", ""},\r
- {BCM43569_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4358u_ag", "ap62x8"},\r
+ {BCM43143_CHIP_ID, 2, DONT_CARE, "bcm43143b0", ""},\r
+ {BCM43242_CHIP_ID, 1, DONT_CARE, "bcm43242a1_ag", ""},\r
+ {BCM43569_CHIP_ID, 2, DONT_CARE, "bcm4358u_ag", "ap62x8"},\r
#endif\r
};\r
\r
{\r
int i;\r
\r
- CONFIG_TRACE(("%s called\n", __FUNCTION__));\r
+ CONFIG_TRACE("called\n");\r
if (mac_list->m_mac_list_head) {\r
for (i=0; i<mac_list->count; i++) {\r
if (mac_list->m_mac_list_head[i].mac) {\r
- CONFIG_TRACE(("%s Free mac %p\n", __FUNCTION__, mac_list->m_mac_list_head[i].mac));\r
+ CONFIG_TRACE("Free mac %p\n", mac_list->m_mac_list_head[i].mac);\r
kfree(mac_list->m_mac_list_head[i].mac);\r
}\r
}\r
- CONFIG_TRACE(("%s Free m_mac_list_head %p\n", __FUNCTION__, mac_list->m_mac_list_head));\r
+ CONFIG_TRACE("Free m_mac_list_head %p\n", mac_list->m_mac_list_head);\r
kfree(mac_list->m_mac_list_head);\r
}\r
mac_list->count = 0;\r
void\r
dhd_conf_free_chip_nv_path_list(wl_chip_nv_path_list_ctrl_t *chip_nv_list)\r
{\r
- CONFIG_TRACE(("%s called\n", __FUNCTION__));\r
+ CONFIG_TRACE("called\n");\r
\r
if (chip_nv_list->m_chip_nv_path_head) {\r
- CONFIG_TRACE(("%s Free %p\n", __FUNCTION__, chip_nv_list->m_chip_nv_path_head));\r
+ CONFIG_TRACE("Free %p\n", chip_nv_list->m_chip_nv_path_head);\r
kfree(chip_nv_list->m_chip_nv_path_head);\r
}\r
chip_nv_list->count = 0;\r
uint32 gpiocontrol, addr;\r
\r
if (CHIPID(sih->chip) == BCM43362_CHIP_ID) {\r
- printf("%s: Enable HW OOB for 43362\n", __FUNCTION__);\r
+ CONFIG_MSG("Enable HW OOB for 43362\n");\r
addr = SI_ENUM_BASE(sih) + OFFSETOF(chipcregs_t, gpiocontrol);\r
gpiocontrol = bcmsdh_reg_read(sdh, addr, 4);\r
gpiocontrol |= 0x2;\r
bcmsdh_iovar_op(sdh, "sd_blocksize", &fn, sizeof(int32),\r
&cur_blksize, sizeof(int32), FALSE);\r
if (cur_blksize != blksize) {\r
- printf("%s: fn=%d, blksize=%d, cur_blksize=%d\n", __FUNCTION__,\r
+ CONFIG_MSG("fn=%d, blksize=%d, cur_blksize=%d\n",\r
fn, blksize, cur_blksize);\r
blksize |= (fn<<16);\r
if (bcmsdh_iovar_op(sdh, "sd_blocksize", NULL, 0, &blksize,\r
sizeof(blksize), TRUE) != BCME_OK) {\r
- DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));\r
+ CONFIG_ERROR("fail on get sd_blocksize");\r
err = -1;\r
}\r
}\r
uint8 *cis;\r
\r
if (!(cis = MALLOC(dhd->osh, SBSDIO_CIS_SIZE_LIMIT))) {\r
- CONFIG_ERROR(("%s: cis malloc failed\n", __FUNCTION__));\r
+ CONFIG_ERROR("cis malloc failed\n");\r
return err;\r
}\r
bzero(cis, SBSDIO_CIS_SIZE_LIMIT);\r
\r
if ((err = bcmsdh_cis_read(sdh, 0, cis, SBSDIO_CIS_SIZE_LIMIT))) {\r
- CONFIG_ERROR(("%s: cis read err %d\n", __FUNCTION__, err));\r
+ CONFIG_ERROR("cis read err %d\n", err);\r
MFREE(dhd->osh, cis, SBSDIO_CIS_SIZE_LIMIT);\r
return err;\r
}\r
if (tpl_link == 0xff)\r
break;\r
if (config_msg_level & CONFIG_TRACE_LEVEL) {\r
- printf("%s: tpl_code=0x%02x, tpl_link=0x%02x, tag=0x%02x\n",\r
- __FUNCTION__, tpl_code, tpl_link, *ptr);\r
+ CONFIG_MSG("tpl_code=0x%02x, tpl_link=0x%02x, tag=0x%02x\n",\r
+ tpl_code, tpl_link, *ptr);\r
printk("%s: value:", __FUNCTION__);\r
for (i=0; i<tpl_link-1; i++) {\r
printk("%02x ", ptr[i+1]);\r
}\r
\r
void\r
-dhd_conf_set_fw_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih, char *fw_path)\r
+dhd_conf_set_fw_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh,\r
+ si_t *sih, char *fw_path)\r
{\r
int i, j;\r
uint8 mac[6]={0};\r
return;\r
\r
if (dhd_conf_get_mac(dhd, sdh, sih, mac)) {\r
- CONFIG_ERROR(("%s: Can not read MAC address\n", __FUNCTION__));\r
+ CONFIG_ERROR("Can not read MAC address\n");\r
return;\r
}\r
oui = (mac[0] << 16) | (mac[1] << 8) | (mac[2]);\r
else\r
fw_type_new = FW_TYPE_STA;\r
if (fw_type != fw_type_new) {\r
- printf("%s: fw_typ=%d != fw_type_new=%d\n", __FUNCTION__, fw_type, fw_type_new);\r
+ CONFIG_MSG("fw_typ=%d != fw_type_new=%d\n", fw_type, fw_type_new);\r
continue;\r
}\r
for (j=0; j<mac_num; j++) {\r
if (oui == mac_range[j].oui) {\r
if (nic >= mac_range[j].nic_start && nic <= mac_range[j].nic_end) {\r
strcpy(name_ptr, mac_list[i].name);\r
- printf("%s: matched oui=0x%06X, nic=0x%06X\n",\r
- __FUNCTION__, oui, nic);\r
- printf("%s: fw_path=%s\n", __FUNCTION__, fw_path);\r
+ CONFIG_MSG("matched oui=0x%06X, nic=0x%06X\n", oui, nic);\r
+ CONFIG_MSG("fw_path=%s\n", fw_path);\r
return;\r
}\r
}\r
}\r
\r
void\r
-dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih, char *nv_path)\r
+dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh,\r
+ si_t *sih, char *nv_path)\r
{\r
int i, j;\r
uint8 mac[6]={0};\r
return;\r
\r
if (dhd_conf_get_mac(dhd, sdh, sih, mac)) {\r
- CONFIG_ERROR(("%s: Can not read MAC address\n", __FUNCTION__));\r
+ CONFIG_ERROR("Can not read MAC address\n");\r
return;\r
}\r
oui = (mac[0] << 16) | (mac[1] << 8) | (mac[2]);\r
if (oui == mac_range[j].oui) {\r
if (nic >= mac_range[j].nic_start && nic <= mac_range[j].nic_end) {\r
strcpy(pnv_name, mac_list[i].name);\r
- printf("%s: matched oui=0x%06X, nic=0x%06X\n",\r
- __FUNCTION__, oui, nic);\r
- printf("%s: nv_path=%s\n", __FUNCTION__, nv_path);\r
+ CONFIG_MSG("matched oui=0x%06X, nic=0x%06X\n", oui, nic);\r
+ CONFIG_MSG("nv_path=%s\n", nv_path);\r
return;\r
}\r
}\r
{\r
int i;\r
\r
- CONFIG_TRACE(("%s called\n", __FUNCTION__));\r
- for (i=0; i<country_list->count; i++) {\r
+ CONFIG_TRACE("called\n");\r
+ for (i=0; i<CONFIG_COUNTRY_LIST_SIZE; i++) {\r
if (country_list->cspec[i]) {\r
- CONFIG_TRACE(("%s Free cspec %p\n", __FUNCTION__, country_list->cspec[i]));\r
+ CONFIG_TRACE("Free cspec %p\n", country_list->cspec[i]);\r
kfree(country_list->cspec[i]);\r
+ country_list->cspec[i] = NULL;\r
}\r
}\r
- country_list->count = 0;\r
}\r
\r
-void\r
+int\r
dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path)\r
{\r
int fw_type, ag_type;\r
if (fw_path[0] == '\0')\r
#endif\r
{\r
- printf("firmware path is null\n");\r
- return;\r
+ CONFIG_MSG("firmware path is null\n");\r
+ return 0;\r
}\r
}\r
#ifndef FW_PATH_AUTO_SELECT\r
else\r
fw_type = FW_TYPE_STA;\r
\r
- for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) {\r
+ for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) {\r
const cihp_name_map_t* row = &chip_name_map[i];\r
if (row->chip == chip && row->chiprev == chiprev &&\r
- (row->ag_type == ag_type || row->ag_type == DONT_CARE)) {\r
+ (row->ag_type == ag_type || row->ag_type == DONT_CARE)) {\r
strcpy(name_ptr, "fw_");\r
strcat(fw_path, row->chip_name);\r
#ifdef BCMUSBDEV_COMPOSITE\r
\r
dhd->conf->fw_type = fw_type;\r
\r
- CONFIG_TRACE(("%s: firmware_path=%s\n", __FUNCTION__, fw_path));\r
+ CONFIG_TRACE("firmware_path=%s\n", fw_path);\r
+ return ag_type;\r
}\r
\r
void\r
-dhd_conf_set_clm_name_by_chip(dhd_pub_t *dhd, char *clm_path)\r
+dhd_conf_set_clm_name_by_chip(dhd_pub_t *dhd, char *clm_path, int ag_type)\r
{\r
uint chip, chiprev;\r
int i;\r
chiprev = dhd->conf->chiprev;\r
\r
if (clm_path[0] == '\0') {\r
- printf("clm path is null\n");\r
+ CONFIG_MSG("clm path is null\n");\r
return;\r
}\r
\r
}\r
name_ptr = &clm_path[i];\r
\r
- for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) {\r
+ for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) {\r
const cihp_name_map_t* row = &chip_name_map[i];\r
- if (row->chip == chip && row->chiprev == chiprev && row->clm) {\r
+ if (row->chip == chip && row->chiprev == chiprev &&\r
+ (row->ag_type == ag_type || row->ag_type == DONT_CARE)) {\r
strcpy(name_ptr, "clm_");\r
strcat(clm_path, row->chip_name);\r
strcat(clm_path, ".blob");\r
}\r
}\r
\r
- CONFIG_TRACE(("%s: clm_path=%s\n", __FUNCTION__, clm_path));\r
+ CONFIG_TRACE("clm_path=%s\n", clm_path);\r
}\r
\r
void\r
-dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path)\r
+dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path, int ag_type)\r
{\r
uint chip, chiprev;\r
int i;\r
if (nv_path[0] == '\0')\r
#endif\r
{\r
- printf("nvram path is null\n");\r
+ CONFIG_MSG("nvram path is null\n");\r
return;\r
}\r
}\r
}\r
name_ptr = &nv_path[i];\r
\r
- for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) {\r
+ for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) {\r
const cihp_name_map_t* row = &chip_name_map[i];\r
- if (row->chip == chip && row->chiprev == chiprev && strlen(row->module_name)) {\r
+ if (row->chip == chip && row->chiprev == chiprev &&\r
+ (row->ag_type == ag_type || row->ag_type == DONT_CARE) &&\r
+ strlen(row->module_name)) {\r
strcpy(name_ptr, "nvram_");\r
strcat(name_ptr, row->module_name);\r
#ifdef BCMUSBDEV_COMPOSITE\r
}\r
}\r
\r
- CONFIG_TRACE(("%s: nvram_path=%s\n", __FUNCTION__, nv_path));\r
+ CONFIG_TRACE("nvram_path=%s\n", nv_path);\r
}\r
\r
void\r
-dhd_conf_set_path(dhd_pub_t *dhd, char *dst_name, char *dst_path, char *src_path)\r
+dhd_conf_copy_path(dhd_pub_t *dhd, char *dst_name, char *dst_path, char *src_path)\r
{\r
int i;\r
\r
if (src_path[0] == '\0') {\r
- printf("src_path is null\n");\r
+ CONFIG_MSG("src_path is null\n");\r
return;\r
} else\r
strcpy(dst_path, src_path);\r
}\r
strcpy(&dst_path[i], dst_name);\r
\r
- CONFIG_TRACE(("%s: dst_path=%s\n", __FUNCTION__, dst_path));\r
+ CONFIG_TRACE("dst_path=%s\n", dst_path);\r
}\r
\r
#ifdef CONFIG_PATH_AUTO_SELECT\r
chiprev = dhd->conf->chiprev;\r
\r
if (conf_path[0] == '\0') {\r
- printf("config path is null\n");\r
+ CONFIG_MSG("config path is null\n");\r
return;\r
}\r
\r
}\r
}\r
\r
- CONFIG_TRACE(("%s: config_path=%s\n", __FUNCTION__, conf_path));\r
+ CONFIG_TRACE("config_path=%s\n", conf_path);\r
}\r
#endif\r
\r
+void\r
+dhd_conf_set_path_params(dhd_pub_t *dhd, void *sdh, void *sih,\r
+ char *fw_path, char *nv_path)\r
+{\r
+ int ag_type;\r
+\r
+ /* External conf takes precedence if specified */\r
+ dhd_conf_preinit(dhd);\r
+\r
+ if (dhd->conf_path[0] == '\0') {\r
+ dhd_conf_copy_path(dhd, "config.txt", dhd->conf_path, nv_path);\r
+ }\r
+ if (dhd->clm_path[0] == '\0') {\r
+ dhd_conf_copy_path(dhd, "clm.blob", dhd->clm_path, fw_path);\r
+ }\r
+#ifdef CONFIG_PATH_AUTO_SELECT\r
+ dhd_conf_set_conf_name_by_chip(dhd, dhd->conf_path);\r
+#endif\r
+\r
+ dhd_conf_read_config(dhd, dhd->conf_path);\r
+\r
+ ag_type = dhd_conf_set_fw_name_by_chip(dhd, fw_path);\r
+ dhd_conf_set_nv_name_by_chip(dhd, nv_path, ag_type);\r
+ dhd_conf_set_clm_name_by_chip(dhd, dhd->clm_path, ag_type);\r
+#ifdef BCMSDIO\r
+ dhd_conf_set_fw_name_by_mac(dhd, (bcmsdh_info_t *)sdh, (si_t *)sih, fw_path);\r
+ dhd_conf_set_nv_name_by_mac(dhd, (bcmsdh_info_t *)sdh, (si_t *)sih, nv_path);\r
+#endif\r
+\r
+ CONFIG_MSG("Final fw_path=%s\n", fw_path);\r
+ CONFIG_MSG("Final nv_path=%s\n", nv_path);\r
+ CONFIG_MSG("Final clm_path=%s\n", dhd->clm_path);\r
+ CONFIG_MSG("Final conf_path=%s\n", dhd->conf_path);\r
+}\r
+\r
int\r
dhd_conf_set_intiovar(dhd_pub_t *dhd, uint cmd, char *name, int val,\r
int def, bool down)\r
if (val >= def) {\r
if (down) {\r
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0)\r
- CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, ret));\r
+ CONFIG_ERROR("WLC_DOWN setting failed %d\n", ret);\r
}\r
if (cmd == WLC_SET_VAR) {\r
- CONFIG_TRACE(("%s: set %s %d\n", __FUNCTION__, name, val));\r
+ CONFIG_TRACE("set %s %d\n", name, val);\r
bcm_mkiovar(name, (char *)&val, sizeof(val), iovbuf, sizeof(iovbuf));\r
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
- CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret));\r
+ CONFIG_ERROR("%s setting failed %d\n", name, ret);\r
} else {\r
- CONFIG_TRACE(("%s: set %s %d %d\n", __FUNCTION__, name, cmd, val));\r
+ CONFIG_TRACE("set %s %d %d\n", name, cmd, val);\r
if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, &val, sizeof(val), TRUE, 0)) < 0)\r
- CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret));\r
+ CONFIG_ERROR("%s setting failed %d\n", name, ret);\r
}\r
}\r
\r
}\r
\r
int\r
-dhd_conf_set_bufiovar(dhd_pub_t *dhd, uint cmd, char *name, char *buf,\r
- int len, bool down)\r
+dhd_conf_set_bufiovar(dhd_pub_t *dhd, int ifidx, uint cmd, char *name,\r
+ char *buf, int len, bool down)\r
{\r
char iovbuf[WLC_IOCTL_SMLEN];\r
+ s32 iovar_len;\r
int ret = -1;\r
\r
if (down) {\r
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0)\r
- CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, ret));\r
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, ifidx)) < 0)\r
+ CONFIG_ERROR("WLC_DOWN setting failed %d\n", ret);\r
}\r
\r
if (cmd == WLC_SET_VAR) {\r
- bcm_mkiovar(name, buf, len, iovbuf, sizeof(iovbuf));\r
- if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
- CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret));\r
+ iovar_len = bcm_mkiovar(name, buf, len, iovbuf, sizeof(iovbuf));\r
+ if (iovar_len > 0)\r
+ ret = dhd_wl_ioctl_cmd(dhd, cmd, iovbuf, iovar_len, TRUE, ifidx);\r
+ else\r
+ ret = BCME_BUFTOOSHORT;\r
+ if (ret < 0)\r
+ CONFIG_ERROR("%s setting failed %d, len=%d\n", name, ret, len);\r
} else {\r
- if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, buf, len, TRUE, 0)) < 0)\r
- CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret));\r
+ if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, buf, len, TRUE, ifidx)) < 0)\r
+ CONFIG_ERROR("%s setting failed %d\n", name, ret);\r
}\r
\r
return ret;\r
}\r
\r
int\r
-dhd_conf_get_iovar(dhd_pub_t *dhd, int cmd, char *name, char *buf, int len, int ifidx)\r
+dhd_conf_get_iovar(dhd_pub_t *dhd, int ifidx, int cmd, char *name,\r
+ char *buf, int len)\r
{\r
char iovbuf[WLC_IOCTL_SMLEN];\r
int ret = -1;\r
if (!ret) {\r
memcpy(buf, iovbuf, len);\r
} else {\r
- CONFIG_ERROR(("%s: get iovar %s failed %d\n", __FUNCTION__, name, ret));\r
+ CONFIG_ERROR("get iovar %s failed %d\n", name, ret);\r
}\r
} else {\r
- CONFIG_ERROR(("%s: mkiovar %s failed\n", __FUNCTION__, name));\r
+ CONFIG_ERROR("mkiovar %s failed\n", name);\r
}\r
} else {\r
ret = dhd_wl_ioctl_cmd(dhd, cmd, buf, len, FALSE, 0);\r
if (ret < 0)\r
- CONFIG_ERROR(("%s: get iovar %s failed %d\n", __FUNCTION__, name, ret));\r
+ CONFIG_ERROR("get iovar %s failed %d\n", name, ret);\r
}\r
\r
return ret;\r
}\r
\r
-uint\r
+int\r
dhd_conf_get_band(dhd_pub_t *dhd)\r
{\r
int band = -1;\r
if (dhd && dhd->conf)\r
band = dhd->conf->band;\r
else\r
- CONFIG_ERROR(("%s: dhd or conf is NULL\n", __FUNCTION__));\r
+ CONFIG_ERROR("dhd or conf is NULL\n");\r
\r
return band;\r
}\r
\r
memset(cspec, 0, sizeof(wl_country_t));\r
bcm_mkiovar("country", NULL, 0, (char*)cspec, sizeof(wl_country_t));\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, cspec, sizeof(wl_country_t), FALSE, 0)) < 0)\r
- CONFIG_ERROR(("%s: country code getting failed %d\n", __FUNCTION__, bcmerror));\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, cspec, sizeof(wl_country_t),\r
+ FALSE, 0)) < 0)\r
+ CONFIG_ERROR("country code getting failed %d\n", bcmerror);\r
\r
return bcmerror;\r
}\r
struct dhd_conf *conf = dhd->conf;\r
conf_country_list_t *country_list = &conf->country_list;\r
\r
- for (i = 0; i < country_list->count; i++) {\r
- if (!strncmp(cspec->country_abbrev, country_list->cspec[i]->country_abbrev, 2)) {\r
- memcpy(cspec->ccode, country_list->cspec[i]->ccode, WLC_CNTRY_BUF_SZ);\r
- cspec->rev = country_list->cspec[i]->rev;\r
- bcmerror = 0;\r
+ for (i=0; i<CONFIG_COUNTRY_LIST_SIZE; i++) {\r
+ if (country_list->cspec[i] != NULL) {\r
+ if (!strncmp("**", country_list->cspec[i]->country_abbrev, 2)) {\r
+ memcpy(cspec->ccode, country_list->cspec[i]->ccode, WLC_CNTRY_BUF_SZ);\r
+ cspec->rev = country_list->cspec[i]->rev;\r
+ bcmerror = 0;\r
+ break;\r
+ } else if (!strncmp(cspec->country_abbrev,\r
+ country_list->cspec[i]->country_abbrev, 2)) {\r
+ memcpy(cspec->ccode, country_list->cspec[i]->ccode, WLC_CNTRY_BUF_SZ);\r
+ cspec->rev = country_list->cspec[i]->rev;\r
+ bcmerror = 0;\r
+ break;\r
+ }\r
}\r
}\r
\r
if (!bcmerror)\r
- printf("%s: %s/%d\n", __FUNCTION__, cspec->ccode, cspec->rev);\r
+ CONFIG_MSG("%s/%d\n", cspec->ccode, cspec->rev);\r
\r
return bcmerror;\r
}\r
\r
memset(&dhd->dhd_cspec, 0, sizeof(wl_country_t));\r
\r
- printf("%s: set country %s, revision %d\n", __FUNCTION__, cspec->ccode, cspec->rev);\r
- bcmerror = dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "country", (char *)cspec,\r
+ CONFIG_MSG("set country %s, revision %d\n", cspec->ccode, cspec->rev);\r
+ bcmerror = dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "country", (char *)cspec,\r
sizeof(wl_country_t), FALSE);\r
dhd_conf_get_country(dhd, cspec);\r
- printf("Country code: %s (%s/%d)\n", cspec->country_abbrev, cspec->ccode, cspec->rev);\r
+ CONFIG_MSG("Country code: %s (%s/%d)\n",\r
+ cspec->country_abbrev, cspec->ccode, cspec->rev);\r
\r
return bcmerror;\r
}\r
dhd_conf_fix_country(dhd_pub_t *dhd)\r
{\r
int bcmerror = -1;\r
- uint band;\r
+ int band;\r
wl_uint32_list_t *list;\r
u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)];\r
wl_country_t cspec;\r
memset(valid_chan_list, 0, sizeof(valid_chan_list));\r
list = (wl_uint32_list_t *)(void *) valid_chan_list;\r
list->count = htod32(WL_NUMCHANNELS);\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, valid_chan_list, sizeof(valid_chan_list), FALSE, 0)) < 0) {\r
- CONFIG_ERROR(("%s: get channels failed with %d\n", __FUNCTION__, bcmerror));\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, valid_chan_list,\r
+ sizeof(valid_chan_list), FALSE, 0)) < 0) {\r
+ CONFIG_ERROR("get channels failed with %d\n", bcmerror);\r
}\r
\r
band = dhd_conf_get_band(dhd);\r
\r
- if (bcmerror || ((band==WLC_BAND_AUTO || band==WLC_BAND_2G) &&\r
+ if (bcmerror || ((band==WLC_BAND_AUTO || band==WLC_BAND_2G || band==-1) &&\r
dtoh32(list->count)<11)) {\r
- CONFIG_ERROR(("%s: bcmerror=%d, # of channels %d\n",\r
- __FUNCTION__, bcmerror, dtoh32(list->count)));\r
+ CONFIG_ERROR("bcmerror=%d, # of channels %d\n",\r
+ bcmerror, dtoh32(list->count));\r
dhd_conf_map_country_list(dhd, &dhd->conf->cspec);\r
if ((bcmerror = dhd_conf_set_country(dhd, &dhd->conf->cspec)) < 0) {\r
strcpy(cspec.country_abbrev, "US");\r
}\r
} else {\r
match = true;\r
- CONFIG_ERROR(("%s: dhd or conf is NULL\n", __FUNCTION__));\r
+ CONFIG_ERROR("dhd or conf is NULL\n");\r
}\r
\r
return match;\r
dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "roam_off", dhd->conf->roam_off, 0, FALSE);\r
\r
if (!conf->roam_off || !conf->roam_off_suspend) {\r
- printf("%s: set roam_trigger %d\n", __FUNCTION__, conf->roam_trigger[0]);\r
- dhd_conf_set_bufiovar(dhd, WLC_SET_ROAM_TRIGGER, "WLC_SET_ROAM_TRIGGER",\r
- (char *)conf->roam_trigger, sizeof(conf->roam_trigger), FALSE);\r
+ CONFIG_MSG("set roam_trigger %d\n", conf->roam_trigger[0]);\r
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_ROAM_TRIGGER, "WLC_SET_ROAM_TRIGGER",\r
+ (char *)conf->roam_trigger, sizeof(conf->roam_trigger), FALSE);\r
\r
- printf("%s: set roam_scan_period %d\n", __FUNCTION__, conf->roam_scan_period[0]);\r
- dhd_conf_set_bufiovar(dhd, WLC_SET_ROAM_SCAN_PERIOD, "WLC_SET_ROAM_SCAN_PERIOD",\r
- (char *)conf->roam_scan_period, sizeof(conf->roam_scan_period), FALSE);\r
+ CONFIG_MSG("set roam_scan_period %d\n", conf->roam_scan_period[0]);\r
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_ROAM_SCAN_PERIOD, "WLC_SET_ROAM_SCAN_PERIOD",\r
+ (char *)conf->roam_scan_period, sizeof(conf->roam_scan_period), FALSE);\r
\r
- printf("%s: set roam_delta %d\n", __FUNCTION__, conf->roam_delta[0]);\r
- dhd_conf_set_bufiovar(dhd, WLC_SET_ROAM_DELTA, "WLC_SET_ROAM_DELTA",\r
- (char *)conf->roam_delta, sizeof(conf->roam_delta), FALSE);\r
+ CONFIG_MSG("set roam_delta %d\n", conf->roam_delta[0]);\r
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_ROAM_DELTA, "WLC_SET_ROAM_DELTA",\r
+ (char *)conf->roam_delta, sizeof(conf->roam_delta), FALSE);\r
\r
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "fullroamperiod", dhd->conf->fullroamperiod, 1, FALSE);\r
+ dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "fullroamperiod",\r
+ dhd->conf->fullroamperiod, 1, FALSE);\r
}\r
\r
return bcmerror;\r
ev->event[ev->num].set = set;\r
ev->num++;\r
} else {\r
- CONFIG_ERROR(("evenbuffer doesn't support > %u events. Update"\r
- " the define MAX_EVENT_BUF_NUM \n", MAX_EVENT_BUF_NUM));\r
+ CONFIG_ERROR("evenbuffer doesn't support > %u events. Update"\r
+ " the define MAX_EVENT_BUF_NUM \n", MAX_EVENT_BUF_NUM);\r
ASSERT(0);\r
}\r
}\r
return -EINVAL;\r
\r
/* Read event_msgs mask */\r
- ret = dhd_conf_get_iovar(dhd, WLC_GET_VAR, "event_msgs", eventmask, sizeof(eventmask), 0);\r
+ ret = dhd_conf_get_iovar(dhd, 0, WLC_GET_VAR, "event_msgs", eventmask,\r
+ sizeof(eventmask));\r
if (unlikely(ret)) {\r
- CONFIG_ERROR(("Get event_msgs error (%d)\n", ret));\r
+ CONFIG_ERROR("Get event_msgs error (%d)\n", ret);\r
goto exit;\r
}\r
\r
}\r
\r
/* Write updated Event mask */\r
- ret = dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "event_msgs", eventmask,\r
+ ret = dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "event_msgs", eventmask,\r
sizeof(eventmask), FALSE);\r
if (unlikely(ret)) {\r
- CONFIG_ERROR(("Set event_msgs error (%d)\n", ret));\r
+ CONFIG_ERROR("Set event_msgs error (%d)\n", ret);\r
}\r
\r
exit:\r
dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_ROAM, !enable);\r
err = dhd_conf_apply_eventbuffer(dhd, &ev_buf);\r
\r
- CONFIG_TRACE(("%s: roam_offload %d\n", __FUNCTION__, enable));\r
+ CONFIG_TRACE("roam_offload %d\n", enable);\r
\r
return err;\r
}\r
memset(¶m, 0, sizeof(param));\r
param.band = WLC_BAND_2G;\r
param.bw_cap = (uint)dhd->conf->bw_cap[0];\r
- printf("%s: set bw_cap 2g 0x%x\n", __FUNCTION__, param.bw_cap);\r
- dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "bw_cap", (char *)¶m, sizeof(param), TRUE);\r
+ CONFIG_MSG("set bw_cap 2g 0x%x\n", param.bw_cap);\r
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "bw_cap", (char *)¶m,\r
+ sizeof(param), FALSE);\r
}\r
\r
if (dhd->conf->bw_cap[1] >= 0) {\r
memset(¶m, 0, sizeof(param));\r
param.band = WLC_BAND_5G;\r
param.bw_cap = (uint)dhd->conf->bw_cap[1];\r
- printf("%s: set bw_cap 5g 0x%x\n", __FUNCTION__, param.bw_cap);\r
- dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "bw_cap", (char *)¶m, sizeof(param), TRUE);\r
+ CONFIG_MSG("set bw_cap 5g 0x%x\n", param.bw_cap);\r
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "bw_cap", (char *)¶m,\r
+ sizeof(param), FALSE);\r
}\r
}\r
\r
void\r
-dhd_conf_get_wme(dhd_pub_t *dhd, int mode, edcf_acparam_t *acp)\r
+dhd_conf_get_wme(dhd_pub_t *dhd, int ifidx, int mode, edcf_acparam_t *acp)\r
{\r
int bcmerror = -1;\r
char iovbuf[WLC_IOCTL_SMLEN];\r
bcm_mkiovar("wme_ac_sta", NULL, 0, iovbuf, sizeof(iovbuf));\r
else\r
bcm_mkiovar("wme_ac_ap", NULL, 0, iovbuf, sizeof(iovbuf));\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {\r
- CONFIG_ERROR(("%s: wme_ac_sta getting failed %d\n", __FUNCTION__, bcmerror));\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf),\r
+ FALSE, ifidx)) < 0) {\r
+ CONFIG_ERROR("wme_ac_sta getting failed %d\n", bcmerror);\r
return;\r
}\r
memcpy((char*)acp, iovbuf, sizeof(edcf_acparam_t)*AC_COUNT);\r
\r
acparam = &acp[AC_BK];\r
- CONFIG_TRACE(("%s: BK: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",\r
- __FUNCTION__,\r
+ CONFIG_TRACE("BK: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",\r
acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,\r
acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,\r
- acparam->TXOP));\r
+ acparam->TXOP);\r
acparam = &acp[AC_BE];\r
- CONFIG_TRACE(("%s: BE: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",\r
- __FUNCTION__,\r
+ CONFIG_TRACE("BE: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",\r
acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,\r
acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,\r
- acparam->TXOP));\r
+ acparam->TXOP);\r
acparam = &acp[AC_VI];\r
- CONFIG_TRACE(("%s: VI: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",\r
- __FUNCTION__,\r
+ CONFIG_TRACE("VI: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",\r
acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,\r
acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,\r
- acparam->TXOP));\r
+ acparam->TXOP);\r
acparam = &acp[AC_VO];\r
- CONFIG_TRACE(("%s: VO: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",\r
- __FUNCTION__,\r
+ CONFIG_TRACE("VO: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",\r
acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,\r
acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,\r
- acparam->TXOP));\r
+ acparam->TXOP);\r
\r
return;\r
}\r
\r
void\r
-dhd_conf_update_wme(dhd_pub_t *dhd, int mode, edcf_acparam_t *acparam_cur, int aci)\r
+dhd_conf_update_wme(dhd_pub_t *dhd, int ifidx, int mode,\r
+ edcf_acparam_t *acparam_cur, int aci)\r
{\r
int aifsn, ecwmin, ecwmax, txop;\r
edcf_acparam_t *acp;\r
acp->ECW = ((acp->ECW & EDCF_ECWMAX_MASK) | (ecwmin & EDCF_ECWMIN_MASK));\r
acp->TXOP = txop;\r
\r
- printf("%s: wme_ac %s aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",\r
- __FUNCTION__, mode?"ap":"sta",\r
- acp->ACI, acp->ACI&EDCF_AIFSN_MASK,\r
+ CONFIG_MSG("wme_ac %s aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",\r
+ mode?"ap":"sta", acp->ACI, acp->ACI&EDCF_AIFSN_MASK,\r
acp->ECW&EDCF_ECWMIN_MASK, (acp->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,\r
acp->TXOP);\r
\r
* NOTE: only one of the four ACs can be set at a time.\r
*/\r
if (mode == 0)\r
- dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "wme_ac_sta", (char *)acp, sizeof(edcf_acparam_t), FALSE);\r
+ dhd_conf_set_bufiovar(dhd, ifidx, WLC_SET_VAR, "wme_ac_sta", (char *)acp,\r
+ sizeof(edcf_acparam_t), FALSE);\r
else\r
- dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "wme_ac_ap", (char *)acp, sizeof(edcf_acparam_t), FALSE);\r
+ dhd_conf_set_bufiovar(dhd, ifidx, WLC_SET_VAR, "wme_ac_ap", (char *)acp,\r
+ sizeof(edcf_acparam_t), FALSE);\r
\r
}\r
\r
void\r
-dhd_conf_set_wme(dhd_pub_t *dhd, int mode)\r
+dhd_conf_set_wme(dhd_pub_t *dhd, int ifidx, int mode)\r
{\r
edcf_acparam_t acparam_cur[AC_COUNT];\r
\r
if (dhd && dhd->conf) {\r
if (!dhd->conf->force_wme_ac) {\r
- CONFIG_TRACE(("%s: force_wme_ac is not enabled %d\n",\r
- __FUNCTION__, dhd->conf->force_wme_ac));\r
+ CONFIG_TRACE("force_wme_ac is not enabled %d\n",\r
+ dhd->conf->force_wme_ac);\r
return;\r
}\r
\r
- CONFIG_TRACE(("%s: Before change:\n", __FUNCTION__));\r
- dhd_conf_get_wme(dhd, mode, acparam_cur);\r
+ CONFIG_TRACE("Before change:\n");\r
+ dhd_conf_get_wme(dhd, ifidx, mode, acparam_cur);\r
\r
- dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_BK], AC_BK);\r
- dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_BE], AC_BE);\r
- dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_VI], AC_VI);\r
- dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_VO], AC_VO);\r
+ dhd_conf_update_wme(dhd, ifidx, mode, &acparam_cur[AC_BK], AC_BK);\r
+ dhd_conf_update_wme(dhd, ifidx, mode, &acparam_cur[AC_BE], AC_BE);\r
+ dhd_conf_update_wme(dhd, ifidx, mode, &acparam_cur[AC_VI], AC_VI);\r
+ dhd_conf_update_wme(dhd, ifidx, mode, &acparam_cur[AC_VO], AC_VO);\r
\r
- CONFIG_TRACE(("%s: After change:\n", __FUNCTION__));\r
- dhd_conf_get_wme(dhd, mode, acparam_cur);\r
+ CONFIG_TRACE("After change:\n");\r
+ dhd_conf_get_wme(dhd, ifidx, mode, acparam_cur);\r
} else {\r
- CONFIG_ERROR(("%s: dhd or conf is NULL\n", __FUNCTION__));\r
+ CONFIG_ERROR("dhd or conf is NULL\n");\r
}\r
\r
return;\r
/*\r
* Filter in less pkt: ARP(0x0806, ID is 105), BRCM(0x886C), 802.1X(0x888E)\r
* 1) dhd_master_mode=1\r
- * 2) pkt_filter_del=100, 102, 103, 104, 105\r
+ * 2) pkt_filter_delete=100, 102, 103, 104, 105, 106, 107\r
* 3) pkt_filter_add=131 0 0 12 0xFFFF 0x886C, 132 0 0 12 0xFFFF 0x888E\r
* 4) magic_pkt_filter_add=141 0 1 12\r
*/\r
for(i=0; i<dhd->conf->pkt_filter_add.count; i++) {\r
dhd->pktfilter[i+dhd->pktfilter_count] = dhd->conf->pkt_filter_add.filter[i];\r
- printf("%s: %s\n", __FUNCTION__, dhd->pktfilter[i+dhd->pktfilter_count]);\r
+ CONFIG_MSG("%s\n", dhd->pktfilter[i+dhd->pktfilter_count]);\r
}\r
dhd->pktfilter_count += i;\r
\r
if (dhd && dhd->conf) {\r
for (i=0; i<dhd->conf->pkt_filter_del.count; i++) {\r
if (id == dhd->conf->pkt_filter_del.id[i]) {\r
- printf("%s: %d\n", __FUNCTION__, dhd->conf->pkt_filter_del.id[i]);\r
+ CONFIG_MSG("%d\n", dhd->conf->pkt_filter_del.id[i]);\r
return true;\r
}\r
}\r
return -1;\r
}\r
\r
+int\r
+dhd_conf_check_hostsleep(dhd_pub_t *dhd, int cmd, void *buf, int len,\r
+ int *hostsleep_set, int *hostsleep_val, int *ret)\r
+{\r
+ if (dhd->conf->insuspend & (NO_TXCTL_IN_SUSPEND | WOWL_IN_SUSPEND)) {\r
+ if (cmd == WLC_SET_VAR) {\r
+ char *psleep = NULL;\r
+ psleep = strstr(buf, "hostsleep");\r
+ if (psleep) {\r
+ *hostsleep_set = 1;\r
+ memcpy(hostsleep_val, psleep+strlen("hostsleep")+1, sizeof(int));\r
+ }\r
+ }\r
+ if (dhd->hostsleep && (!*hostsleep_set || *hostsleep_val)) {\r
+ CONFIG_TRACE("block all none hostsleep clr cmd\n");\r
+ *ret = BCME_EPERM;\r
+ goto exit;\r
+ } else if (*hostsleep_set && *hostsleep_val) {\r
+ CONFIG_TRACE("hostsleep %d => %d\n", dhd->hostsleep, *hostsleep_val);\r
+ dhd->hostsleep = *hostsleep_val;\r
+ if (dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) {\r
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, ON);\r
+ }\r
+ if (dhd->hostsleep == 2) {\r
+ *ret = 0;\r
+ goto exit;\r
+ }\r
+ } else if (dhd->hostsleep == 2 && !*hostsleep_val) {\r
+ CONFIG_TRACE("hostsleep %d => %d\n", dhd->hostsleep, *hostsleep_val);\r
+ dhd->hostsleep = *hostsleep_val;\r
+ if (dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) {\r
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);\r
+ }\r
+ *ret = 0;\r
+ goto exit;\r
+ }\r
+ }\r
+\r
+ return 0;\r
+exit:\r
+ return -1;\r
+}\r
+\r
+void\r
+dhd_conf_get_hostsleep(dhd_pub_t *dhd,\r
+ int hostsleep_set, int hostsleep_val, int ret)\r
+{\r
+ if (dhd->conf->insuspend & (NO_TXCTL_IN_SUSPEND | WOWL_IN_SUSPEND)) {\r
+ if (hostsleep_set) {\r
+ if (hostsleep_val && ret) {\r
+ CONFIG_TRACE("reset hostsleep %d => 0\n", dhd->hostsleep);\r
+ dhd->hostsleep = 0;\r
+ if (dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) {\r
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);\r
+ }\r
+ } else if (!hostsleep_val && !ret) {\r
+ CONFIG_TRACE("set hostsleep %d => 0\n", dhd->hostsleep);\r
+ dhd->hostsleep = 0;\r
+ if (dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) {\r
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);\r
+ }\r
+ }\r
+ }\r
+ }\r
+}\r
+\r
+#ifdef WL_EXT_WOWL\r
+#define WL_WOWL_TCPFIN (1 << 26)\r
+typedef struct wl_wowl_pattern2 {\r
+ char cmd[4];\r
+ wl_wowl_pattern_t wowl_pattern;\r
+} wl_wowl_pattern2_t;\r
+static int\r
+dhd_conf_wowl_pattern(dhd_pub_t *dhd, bool add, char *data)\r
+{\r
+ uint buf_len = 0;\r
+ int id, type, polarity, offset;\r
+ char cmd[4]="\0", mask[128]="\0", pattern[128]="\0", mask_tmp[128]="\0", *pmask_tmp;\r
+ uint32 masksize, patternsize, pad_len = 0;\r
+ wl_wowl_pattern2_t *wowl_pattern2 = NULL;\r
+ char *mask_and_pattern;\r
+ int ret = 0, i, j, v;\r
+\r
+ if (data) {\r
+ if (add)\r
+ strcpy(cmd, "add");\r
+ else\r
+ strcpy(cmd, "clr");\r
+ if (!strcmp(cmd, "clr")) {\r
+ CONFIG_TRACE("wowl_pattern clr\n");\r
+ ret = dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "wowl_pattern", cmd,\r
+ sizeof(cmd), FALSE);\r
+ goto exit;\r
+ }\r
+ sscanf(data, "%d %d %d %d %s %s", &id, &type, &polarity, &offset,\r
+ mask_tmp, pattern);\r
+ masksize = strlen(mask_tmp) -2;\r
+ CONFIG_TRACE("0 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);\r
+\r
+		// add padding\r
+ if (masksize % 16)\r
+ pad_len = (16 - masksize % 16);\r
+ for (i=0; i<pad_len; i++)\r
+ strcat(mask_tmp, "0");\r
+ masksize += pad_len;\r
+ CONFIG_TRACE("1 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);\r
+\r
+ // translate 0x00 to 0, others to 1\r
+ j = 0;\r
+ pmask_tmp = &mask_tmp[2];\r
+ for (i=0; i<masksize/2; i++) {\r
+ if(strncmp(&pmask_tmp[i*2], "00", 2))\r
+ pmask_tmp[j] = '1';\r
+ else\r
+ pmask_tmp[j] = '0';\r
+ j++;\r
+ }\r
+ pmask_tmp[j] = '\0';\r
+ masksize = masksize / 2;\r
+ CONFIG_TRACE("2 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);\r
+\r
+ // reorder per 8bits\r
+ pmask_tmp = &mask_tmp[2];\r
+ for (i=0; i<masksize/8; i++) {\r
+ char c;\r
+ for (j=0; j<4; j++) {\r
+ c = pmask_tmp[i*8+j];\r
+ pmask_tmp[i*8+j] = pmask_tmp[(i+1)*8-j-1];\r
+ pmask_tmp[(i+1)*8-j-1] = c;\r
+ }\r
+ }\r
+ CONFIG_TRACE("3 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);\r
+\r
+ // translate 8bits to 1byte\r
+ j = 0; v = 0;\r
+ pmask_tmp = &mask_tmp[2];\r
+ strcpy(mask, "0x");\r
+ for (i=0; i<masksize; i++) {\r
+ v = (v<<1) | (pmask_tmp[i]=='1');\r
+ if (((i+1)%4) == 0) {\r
+ if (v < 10)\r
+ mask[j+2] = v + '0';\r
+ else\r
+ mask[j+2] = (v-10) + 'a';\r
+ j++;\r
+ v = 0;\r
+ }\r
+ }\r
+ mask[j+2] = '\0';\r
+ masksize = j/2;\r
+ CONFIG_TRACE("4 mask=%s, masksize=%d\n", mask, masksize);\r
+\r
+ patternsize = (strlen(pattern)-2)/2;\r
+ buf_len = sizeof(wl_wowl_pattern2_t) + patternsize + masksize;\r
+ wowl_pattern2 = kmalloc(buf_len, GFP_KERNEL);\r
+ if (wowl_pattern2 == NULL) {\r
+ CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", buf_len);\r
+ goto exit;\r
+ }\r
+ memset(wowl_pattern2, 0, sizeof(wl_wowl_pattern2_t));\r
+\r
+ strncpy(wowl_pattern2->cmd, cmd, sizeof(cmd));\r
+ wowl_pattern2->wowl_pattern.id = id;\r
+ wowl_pattern2->wowl_pattern.type = 0;\r
+ wowl_pattern2->wowl_pattern.offset = offset;\r
+ mask_and_pattern = (char*)wowl_pattern2 + sizeof(wl_wowl_pattern2_t);\r
+\r
+ wowl_pattern2->wowl_pattern.masksize = masksize;\r
+ ret = wl_pattern_atoh(mask, mask_and_pattern);\r
+ if (ret == -1) {\r
+ CONFIG_ERROR("rejecting mask=%s\n", mask);\r
+ goto exit;\r
+ }\r
+\r
+ mask_and_pattern += wowl_pattern2->wowl_pattern.masksize;\r
+ wowl_pattern2->wowl_pattern.patternoffset = sizeof(wl_wowl_pattern_t) +\r
+ wowl_pattern2->wowl_pattern.masksize;\r
+\r
+ wowl_pattern2->wowl_pattern.patternsize = patternsize;\r
+ ret = wl_pattern_atoh(pattern, mask_and_pattern);\r
+ if (ret == -1) {\r
+ CONFIG_ERROR("rejecting pattern=%s\n", pattern);\r
+ goto exit;\r
+ }\r
+\r
+ CONFIG_TRACE("%s %d %s %s\n", cmd, offset, mask, pattern);\r
+\r
+ ret = dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "wowl_pattern",\r
+ (char *)wowl_pattern2, buf_len, FALSE);\r
+ }\r
+\r
+exit:\r
+ if (wowl_pattern2)\r
+ kfree(wowl_pattern2);\r
+ return ret;\r
+}\r
+\r
+static int\r
+dhd_conf_wowl_wakeind(dhd_pub_t *dhd, bool clear)\r
+{\r
+ s8 iovar_buf[WLC_IOCTL_SMLEN];\r
+ wl_wowl_wakeind_t *wake = NULL;\r
+ int ret = -1;\r
+ char clr[6]="clear", wakeind_str[32]="\0";\r
+\r
+ if (clear) {\r
+ CONFIG_TRACE("wowl_wakeind clear\n");\r
+ ret = dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "wowl_wakeind",\r
+ clr, sizeof(clr), 0);\r
+ } else {\r
+ ret = dhd_conf_get_iovar(dhd, 0, WLC_GET_VAR, "wowl_wakeind",\r
+ iovar_buf, sizeof(iovar_buf));\r
+ if (!ret) {\r
+ wake = (wl_wowl_wakeind_t *) iovar_buf;\r
+ if (wake->ucode_wakeind & WL_WOWL_MAGIC)\r
+ strcpy(wakeind_str, "(MAGIC packet)");\r
+ if (wake->ucode_wakeind & WL_WOWL_NET)\r
+ strcpy(wakeind_str, "(Netpattern)");\r
+ if (wake->ucode_wakeind & WL_WOWL_DIS)\r
+ strcpy(wakeind_str, "(Disassoc/Deauth)");\r
+ if (wake->ucode_wakeind & WL_WOWL_BCN)\r
+ strcpy(wakeind_str, "(Loss of beacon)");\r
+ if (wake->ucode_wakeind & WL_WOWL_TCPKEEP_TIME)\r
+ strcpy(wakeind_str, "(TCPKA timeout)");\r
+ if (wake->ucode_wakeind & WL_WOWL_TCPKEEP_DATA)\r
+ strcpy(wakeind_str, "(TCPKA data)");\r
+ if (wake->ucode_wakeind & WL_WOWL_TCPFIN)\r
+ strcpy(wakeind_str, "(TCP FIN)");\r
+ CONFIG_MSG("wakeind=0x%x %s\n", wake->ucode_wakeind, wakeind_str);\r
+ }\r
+ }\r
+\r
+ return ret;\r
+}\r
+#endif\r
+\r
+int\r
+dhd_conf_mkeep_alive(dhd_pub_t *dhd, int ifidx, int id, int period,\r
+ char *packet, bool bcast)\r
+{\r
+ wl_mkeep_alive_pkt_t *mkeep_alive_pktp;\r
+ int ret = 0, len_bytes=0, buf_len=0;\r
+ char *buf = NULL, *iovar_buf = NULL;\r
+ struct ether_addr bssid;\r
+ uint8 *pdata;\r
+\r
+ CONFIG_TRACE("id=%d, period=%d, packet=%s\n", id, period, packet);\r
+ if (period >= 0) {\r
+ buf = kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);\r
+ if (buf == NULL) {\r
+ CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", WLC_IOCTL_SMLEN);\r
+ goto exit;\r
+ }\r
+ iovar_buf = kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);\r
+ if (iovar_buf == NULL) {\r
+ CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", WLC_IOCTL_SMLEN);\r
+ goto exit;\r
+ }\r
+ mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *)buf;\r
+ mkeep_alive_pktp->version = htod16(WL_MKEEP_ALIVE_VERSION);\r
+ mkeep_alive_pktp->length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);\r
+ mkeep_alive_pktp->keep_alive_id = id;\r
+ buf_len += WL_MKEEP_ALIVE_FIXED_LEN;\r
+ mkeep_alive_pktp->period_msec = period;\r
+ if (strlen(packet)) {\r
+ len_bytes = wl_pattern_atoh(packet, (char *)mkeep_alive_pktp->data);\r
+ buf_len += len_bytes;\r
+ if (bcast) {\r
+ memcpy(mkeep_alive_pktp->data, &ether_bcast, ETHER_ADDR_LEN);\r
+ } else {\r
+ memset(&bssid, 0, ETHER_ADDR_LEN);\r
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN,\r
+ FALSE, ifidx);\r
+ if (ret != BCME_NOTASSOCIATED && memcmp(&ether_null, &bssid, ETHER_ADDR_LEN))\r
+ memcpy(mkeep_alive_pktp->data, &bssid, ETHER_ADDR_LEN);\r
+ }\r
+ ret = dhd_conf_get_iovar(dhd, ifidx, WLC_GET_VAR, "cur_etheraddr",\r
+ iovar_buf, WLC_IOCTL_SMLEN);\r
+ if (!ret) {\r
+ pdata = mkeep_alive_pktp->data;\r
+ memcpy(pdata+6, iovar_buf, ETHER_ADDR_LEN);\r
+ }\r
+ }\r
+ mkeep_alive_pktp->len_bytes = htod16(len_bytes);\r
+ ret = dhd_conf_set_bufiovar(dhd, ifidx, WLC_SET_VAR, "mkeep_alive",\r
+ buf, buf_len, FALSE);\r
+ }\r
+\r
+exit:\r
+ if (buf)\r
+ kfree(buf);\r
+ if (iovar_buf)\r
+ kfree(iovar_buf);\r
+ return ret;\r
+}\r
+\r
+#ifdef ARP_OFFLOAD_SUPPORT\r
+void\r
+dhd_conf_set_garp(dhd_pub_t *dhd, int ifidx, uint32 ipa, bool enable)\r
+{\r
+ int i, len = 0, total_len = WLC_IOCTL_SMLEN;\r
+ char *iovar_buf = NULL, *packet = NULL;\r
+\r
+ if (!dhd->conf->garp || ifidx != 0 || !(dhd->op_mode & DHD_FLAG_STA_MODE))\r
+ return;\r
+\r
+ CONFIG_TRACE("enable=%d\n", enable);\r
+\r
+ if (enable) {\r
+ iovar_buf = kmalloc(total_len, GFP_KERNEL);\r
+ if (iovar_buf == NULL) {\r
+ CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", total_len);\r
+ goto exit;\r
+ }\r
+ packet = kmalloc(total_len, GFP_KERNEL);\r
+ if (packet == NULL) {\r
+ CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", total_len);\r
+ goto exit;\r
+ }\r
+ dhd_conf_get_iovar(dhd, ifidx, WLC_GET_VAR, "cur_etheraddr", iovar_buf, total_len);\r
+\r
+ len += snprintf(packet+len, total_len, "0xffffffffffff");\r
+ for (i=0; i<ETHER_ADDR_LEN; i++)\r
+ len += snprintf(packet+len, total_len, "%02x", iovar_buf[i]);\r
+ len += snprintf(packet+len, total_len, "08060001080006040001");\r
+ for (i=0; i<ETHER_ADDR_LEN; i++)\r
+ len += snprintf(packet+len, total_len, "%02x", iovar_buf[i]);\r
+ len += snprintf(packet+len, total_len, "%02x%02x%02x%02x",\r
+ ipa&0xff, (ipa>>8)&0xff, (ipa>>16)&0xff, (ipa>>24)&0xff);\r
+ len += snprintf(packet+len, total_len, "ffffffffffffc0a80101000000000000000000000000000000000000");\r
+ }\r
+\r
+ dhd_conf_mkeep_alive(dhd, ifidx, 0, dhd->conf->keep_alive_period, packet, TRUE);\r
+\r
+exit:\r
+ if (iovar_buf)\r
+ kfree(iovar_buf);\r
+ if (packet)\r
+ kfree(packet);\r
+ return;\r
+}\r
+#endif\r
+\r
uint\r
-dhd_conf_get_insuspend(dhd_pub_t *dhd)\r
+dhd_conf_get_insuspend(dhd_pub_t *dhd, uint mask)\r
{\r
- uint mode = 0;\r
+ uint insuspend = 0;\r
\r
if (dhd->op_mode & DHD_FLAG_STA_MODE) {\r
- mode = dhd->conf->insuspend &\r
- (NO_EVENT_IN_SUSPEND | NO_TXDATA_IN_SUSPEND | ROAM_OFFLOAD_IN_SUSPEND);\r
+ insuspend = dhd->conf->insuspend &\r
+ (NO_EVENT_IN_SUSPEND | NO_TXDATA_IN_SUSPEND | NO_TXCTL_IN_SUSPEND |\r
+ ROAM_OFFLOAD_IN_SUSPEND | WOWL_IN_SUSPEND);\r
} else if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {\r
- mode = dhd->conf->insuspend &\r
- (NO_EVENT_IN_SUSPEND | NO_TXDATA_IN_SUSPEND | AP_DOWN_IN_SUSPEND);\r
+ insuspend = dhd->conf->insuspend &\r
+ (NO_EVENT_IN_SUSPEND | NO_TXDATA_IN_SUSPEND | NO_TXCTL_IN_SUSPEND |\r
+ AP_DOWN_IN_SUSPEND | AP_FILTER_IN_SUSPEND);\r
}\r
\r
- return mode;\r
+ return (insuspend & mask);\r
}\r
\r
+#ifdef SUSPEND_EVENT\r
+void\r
+dhd_conf_set_suspend_event(dhd_pub_t *dhd, int suspend)\r
+{\r
+ struct dhd_conf *conf = dhd->conf;\r
+ struct ether_addr bssid;\r
+ char suspend_eventmask[WL_EVENTING_MASK_LEN];\r
+ wl_event_msg_t msg;\r
+ int pm;\r
+#ifdef WL_CFG80211\r
+ struct net_device *net;\r
+#endif /* defined(WL_CFG80211) */\r
+\r
+ if (suspend) {\r
+#ifdef PROP_TXSTATUS\r
+#if defined(BCMSDIO) || defined(BCMDBUS)\r
+ if (dhd->wlfc_enabled) {\r
+ dhd_wlfc_deinit(dhd);\r
+ conf->wlfc = TRUE;\r
+ } else {\r
+ conf->wlfc = FALSE;\r
+ }\r
+#endif /* BCMSDIO || BCMDBUS */\r
+#endif /* PROP_TXSTATUS */\r
+ dhd_conf_get_iovar(dhd, 0, WLC_GET_VAR, "event_msgs",\r
+ conf->resume_eventmask, sizeof(conf->resume_eventmask));\r
+ memset(suspend_eventmask, 0, sizeof(suspend_eventmask));\r
+ setbit(suspend_eventmask, WLC_E_ESCAN_RESULT);\r
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "event_msgs",\r
+ suspend_eventmask, sizeof(suspend_eventmask), FALSE);\r
+ if (dhd->op_mode & DHD_FLAG_STA_MODE) {\r
+ memset(&bssid, 0, ETHER_ADDR_LEN);\r
+ dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, FALSE, 0);\r
+ if (memcmp(&ether_null, &bssid, ETHER_ADDR_LEN))\r
+ memcpy(&conf->bssid_insuspend, &bssid, ETHER_ADDR_LEN);\r
+ else\r
+ memset(&conf->bssid_insuspend, 0, ETHER_ADDR_LEN);\r
+ }\r
+ }\r
+ else {\r
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "event_msgs",\r
+ conf->resume_eventmask, sizeof(conf->resume_eventmask), FALSE);\r
+ if (dhd->op_mode & DHD_FLAG_STA_MODE) {\r
+ if (memcmp(&ether_null, &conf->bssid_insuspend, ETHER_ADDR_LEN)) {\r
+ memset(&bssid, 0, ETHER_ADDR_LEN);\r
+ dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN,\r
+ FALSE, 0);\r
+ if (memcmp(&ether_null, &bssid, ETHER_ADDR_LEN)) {\r
+ dhd_conf_set_intiovar(dhd, WLC_SET_PM, "WLC_SET_PM", 0, 0, FALSE);\r
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "send_nulldata",\r
+ (char *)&bssid, ETHER_ADDR_LEN, FALSE);\r
+ OSL_SLEEP(100);\r
+ if (conf->pm >= 0)\r
+ pm = conf->pm;\r
+ else\r
+ pm = PM_FAST;\r
+ dhd_conf_set_intiovar(dhd, WLC_SET_PM, "WLC_SET_PM", pm, 0, FALSE);\r
+ } else {\r
+ CONFIG_TRACE("send WLC_E_DEAUTH_IND event\n");\r
+ bzero(&msg, sizeof(wl_event_msg_t));\r
+ memcpy(&msg.addr, &conf->bssid_insuspend, ETHER_ADDR_LEN);\r
+ msg.event_type = hton32(WLC_E_DEAUTH_IND);\r
+ msg.status = 0;\r
+ msg.reason = hton32(DOT11_RC_DEAUTH_LEAVING);\r
+#if defined(WL_EXT_IAPSTA) || defined(USE_IW)\r
+ wl_ext_event_send(dhd->event_params, &msg, NULL);\r
+#endif\r
+#ifdef WL_CFG80211\r
+ net = dhd_idx2net(dhd, 0);\r
+ if (net) {\r
+ wl_cfg80211_event(net, &msg, NULL);\r
+ }\r
+#endif /* defined(WL_CFG80211) */\r
+ }\r
+ }\r
+#ifdef PROP_TXSTATUS\r
+#if defined(BCMSDIO) || defined(BCMDBUS)\r
+ if (conf->wlfc) {\r
+ dhd_wlfc_init(dhd);\r
+ dhd_conf_set_intiovar(dhd, WLC_UP, "WLC_UP", 0, 0, FALSE);\r
+ }\r
+#endif\r
+#endif /* PROP_TXSTATUS */\r
+ }\r
+ }\r
+\r
+}\r
+#endif\r
+\r
int\r
dhd_conf_set_suspend_resume(dhd_pub_t *dhd, int suspend)\r
{\r
- uint mode = 0, wl_down = 1;\r
+ uint insuspend = 0;\r
struct dhd_conf *conf = dhd->conf;\r
+#ifdef WL_EXT_WOWL\r
+ int i;\r
+#endif\r
+\r
+ insuspend = dhd_conf_get_insuspend(dhd, ALL_IN_SUSPEND);\r
+ if (insuspend)\r
+ CONFIG_MSG("op_mode %d, suspend %d, suspended %d, insuspend 0x%x\n",\r
+ dhd->op_mode, suspend, conf->suspended, insuspend);\r
\r
- mode = dhd_conf_get_insuspend(dhd);\r
- if (mode)\r
- printf("%s: op_mode %d, suspend %d, mode 0x%x\n", __FUNCTION__,\r
- dhd->op_mode, suspend, mode);\r
+ if (conf->suspended == suspend) {\r
+ return 0;\r
+ }\r
\r
if (suspend) {\r
if (dhd->op_mode & DHD_FLAG_STA_MODE) {\r
- if (mode & ROAM_OFFLOAD_IN_SUSPEND)\r
- dhd_conf_enable_roam_offload(dhd, 1);\r
+ if (insuspend & ROAM_OFFLOAD_IN_SUSPEND)\r
+ dhd_conf_enable_roam_offload(dhd, 2);\r
} else if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {\r
- if (mode & AP_DOWN_IN_SUSPEND) {\r
- dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,\r
- sizeof(wl_down), TRUE, 0);\r
+ if (insuspend & AP_DOWN_IN_SUSPEND) {\r
+ dhd_conf_set_intiovar(dhd, WLC_DOWN, "WLC_DOWN", 1, 0, FALSE);\r
}\r
}\r
#ifdef SUSPEND_EVENT\r
- if (mode & NO_EVENT_IN_SUSPEND) {\r
- char suspend_eventmask[WL_EVENTING_MASK_LEN];\r
- if (!conf->suspended) {\r
- dhd_conf_get_iovar(dhd, WLC_GET_VAR, "event_msgs",\r
- conf->resume_eventmask, sizeof(conf->resume_eventmask), 0);\r
- }\r
- memset(suspend_eventmask, 0, sizeof(suspend_eventmask));\r
- dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "event_msgs",\r
- suspend_eventmask, sizeof(suspend_eventmask), FALSE);\r
+ if (insuspend & NO_EVENT_IN_SUSPEND) {\r
+ dhd_conf_set_suspend_event(dhd, suspend);\r
}\r
#endif\r
+ if (insuspend & NO_TXDATA_IN_SUSPEND) {\r
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, ON);\r
+ }\r
+#ifdef WL_EXT_WOWL\r
+ if ((insuspend & WOWL_IN_SUSPEND) && dhd_master_mode) {\r
+ dhd_conf_wowl_pattern(dhd, FALSE, "clr");\r
+ for(i=0; i<conf->pkt_filter_add.count; i++) {\r
+ dhd_conf_wowl_pattern(dhd, TRUE, conf->pkt_filter_add.filter[i]);\r
+ }\r
+ dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "wowl", conf->wowl, 0, FALSE);\r
+ dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "wowl_activate", 1, 0, FALSE);\r
+ dhd_conf_wowl_wakeind(dhd, TRUE);\r
+ dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "hostsleep", 1, 0, FALSE);\r
+ } else\r
+#endif\r
+ if (insuspend & NO_TXCTL_IN_SUSPEND) {\r
+ dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "hostsleep", 2, 0, FALSE);\r
+ }\r
conf->suspended = TRUE;\r
} else {\r
+ if (insuspend & (WOWL_IN_SUSPEND | NO_TXCTL_IN_SUSPEND)) {\r
+ dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "hostsleep", 0, 0, FALSE);\r
+ }\r
+#ifdef WL_EXT_WOWL\r
+ if (insuspend & WOWL_IN_SUSPEND) {\r
+ dhd_conf_wowl_wakeind(dhd, FALSE);\r
+ dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "wowl_activate", 0, 0, FALSE);\r
+ dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "wowl", 0, 0, FALSE);\r
+ dhd_conf_wowl_pattern(dhd, FALSE, "clr");\r
+ }\r
+#endif\r
#ifdef SUSPEND_EVENT\r
- if (mode & NO_EVENT_IN_SUSPEND) {\r
- dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "event_msgs",\r
- conf->resume_eventmask, sizeof(conf->resume_eventmask), FALSE);\r
- if (dhd->op_mode & DHD_FLAG_STA_MODE) {\r
- struct ether_addr bssid;\r
- int ret = 0;\r
- ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, &bssid,\r
- sizeof(struct ether_addr), FALSE, 0);\r
- if (ret != BCME_NOTASSOCIATED && !memcmp(&ether_null, &bssid, ETHER_ADDR_LEN)) {\r
- CONFIG_TRACE(("%s: send disassoc\n", __FUNCTION__));\r
- dhd_conf_set_intiovar(dhd, WLC_DISASSOC, "WLC_DISASSOC", 0, 0, FALSE);\r
- }\r
- }\r
+ if (insuspend & NO_EVENT_IN_SUSPEND) {\r
+ dhd_conf_set_suspend_event(dhd, suspend);\r
}\r
#endif\r
if (dhd->op_mode & DHD_FLAG_STA_MODE) {\r
- if (mode & ROAM_OFFLOAD_IN_SUSPEND)\r
+ if (insuspend & ROAM_OFFLOAD_IN_SUSPEND)\r
dhd_conf_enable_roam_offload(dhd, 0);\r
} else if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {\r
- if (mode & AP_DOWN_IN_SUSPEND) {\r
- wl_down = 0;\r
- dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&wl_down,\r
- sizeof(wl_down), TRUE, 0);\r
+ if (insuspend & AP_DOWN_IN_SUSPEND) {\r
+ dhd_conf_set_intiovar(dhd, WLC_UP, "WLC_UP", 0, 0, FALSE);\r
}\r
}\r
+ if (insuspend & NO_TXDATA_IN_SUSPEND) {\r
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);\r
+ }\r
conf->suspended = FALSE;\r
}\r
\r
disable_proptx = 1;\r
}\r
\r
- printf("%s: fw_proptx=%d, disable_proptx=%d\n", __FUNCTION__, fw_proptx, disable_proptx);\r
+ CONFIG_MSG("fw_proptx=%d, disable_proptx=%d\n", fw_proptx, disable_proptx);\r
\r
return disable_proptx;\r
}\r
column = 0;\r
\r
if (start_pos >= len) {\r
- CONFIG_ERROR(("%s: wrong start pos\n", __FUNCTION__));\r
+ CONFIG_ERROR("wrong start pos\n");\r
return 0;\r
}\r
\r
return n; // return current position\r
}\r
\r
+bool\r
+dhd_conf_read_chiprev(dhd_pub_t *dhd, int *chip_match,\r
+ char *full_param, uint len_param)\r
+{\r
+ char *data = full_param+len_param, *pick_tmp, *pch;\r
+ uint chip = 0, rev = 0;\r
+\r
+ /* Process chip, regrev:\r
+ * chip=[chipid], rev=[rev]\r
+ * Ex: chip=0x4359, rev=9\r
+ */\r
+ if (!strncmp("chip=", full_param, len_param)) {\r
+ chip = (int)simple_strtol(data, NULL, 0);\r
+ pick_tmp = data;\r
+ pch = bcmstrstr(pick_tmp, "rev=");\r
+ if (pch) {\r
+ rev = (int)simple_strtol(pch+strlen("rev="), NULL, 0);\r
+ }\r
+ if (chip == dhd->conf->chip && rev == dhd->conf->chiprev)\r
+ *chip_match = 1;\r
+ else\r
+ *chip_match = 0;\r
+ CONFIG_MSG("chip=0x%x, rev=%d, chip_match=%d\n", chip, rev, *chip_match);\r
+ }\r
+\r
+ return TRUE;\r
+}\r
+\r
bool\r
dhd_conf_read_log_level(dhd_pub_t *dhd, char *full_param, uint len_param)\r
{\r
\r
if (!strncmp("dhd_msg_level=", full_param, len_param)) {\r
dhd_msg_level = (int)simple_strtol(data, NULL, 0);\r
- printf("%s: dhd_msg_level = 0x%X\n", __FUNCTION__, dhd_msg_level);\r
+ CONFIG_MSG("dhd_msg_level = 0x%X\n", dhd_msg_level);\r
}\r
#ifdef BCMSDIO\r
else if (!strncmp("sd_msglevel=", full_param, len_param)) {\r
sd_msglevel = (int)simple_strtol(data, NULL, 0);\r
- printf("%s: sd_msglevel = 0x%X\n", __FUNCTION__, sd_msglevel);\r
+ CONFIG_MSG("sd_msglevel = 0x%X\n", sd_msglevel);\r
}\r
#endif\r
#ifdef BCMDBUS\r
else if (!strncmp("dbus_msglevel=", full_param, len_param)) {\r
dbus_msglevel = (int)simple_strtol(data, NULL, 0);\r
- printf("%s: dbus_msglevel = 0x%X\n", __FUNCTION__, dbus_msglevel);\r
+ CONFIG_MSG("dbus_msglevel = 0x%X\n", dbus_msglevel);\r
}\r
#endif\r
else if (!strncmp("android_msg_level=", full_param, len_param)) {\r
android_msg_level = (int)simple_strtol(data, NULL, 0);\r
- printf("%s: android_msg_level = 0x%X\n", __FUNCTION__, android_msg_level);\r
+ CONFIG_MSG("android_msg_level = 0x%X\n", android_msg_level);\r
}\r
else if (!strncmp("config_msg_level=", full_param, len_param)) {\r
config_msg_level = (int)simple_strtol(data, NULL, 0);\r
- printf("%s: config_msg_level = 0x%X\n", __FUNCTION__, config_msg_level);\r
+ CONFIG_MSG("config_msg_level = 0x%X\n", config_msg_level);\r
}\r
#ifdef WL_CFG80211\r
else if (!strncmp("wl_dbg_level=", full_param, len_param)) {\r
wl_dbg_level = (int)simple_strtol(data, NULL, 0);\r
- printf("%s: wl_dbg_level = 0x%X\n", __FUNCTION__, wl_dbg_level);\r
+ CONFIG_MSG("wl_dbg_level = 0x%X\n", wl_dbg_level);\r
}\r
#endif\r
#if defined(WL_WIRELESS_EXT)\r
else if (!strncmp("iw_msg_level=", full_param, len_param)) {\r
iw_msg_level = (int)simple_strtol(data, NULL, 0);\r
- printf("%s: iw_msg_level = 0x%X\n", __FUNCTION__, iw_msg_level);\r
+ CONFIG_MSG("iw_msg_level = 0x%X\n", iw_msg_level);\r
}\r
#endif\r
#if defined(DHD_DEBUG)\r
else if (!strncmp("dhd_console_ms=", full_param, len_param)) {\r
dhd->dhd_console_ms = (int)simple_strtol(data, NULL, 0);\r
- printf("%s: dhd_console_ms = 0x%X\n", __FUNCTION__, dhd->dhd_console_ms);\r
+ CONFIG_MSG("dhd_console_ms = 0x%X\n", dhd->dhd_console_ms);\r
}\r
#endif\r
else\r
pch = bcmstrstr(pick_tmp, "aifsn ");\r
if (pch) {\r
wme->aifsn[ac_val] = (int)simple_strtol(pch+strlen("aifsn "), NULL, 0);\r
- printf("%s: ac_val=%d, aifsn=%d\n", __FUNCTION__, ac_val, wme->aifsn[ac_val]);\r
+ CONFIG_MSG("ac_val=%d, aifsn=%d\n", ac_val, wme->aifsn[ac_val]);\r
}\r
pick_tmp = pick;\r
pch = bcmstrstr(pick_tmp, "ecwmin ");\r
if (pch) {\r
wme->ecwmin[ac_val] = (int)simple_strtol(pch+strlen("ecwmin "), NULL, 0);\r
- printf("%s: ac_val=%d, ecwmin=%d\n", __FUNCTION__, ac_val, wme->ecwmin[ac_val]);\r
+ CONFIG_MSG("ac_val=%d, ecwmin=%d\n", ac_val, wme->ecwmin[ac_val]);\r
}\r
pick_tmp = pick;\r
pch = bcmstrstr(pick_tmp, "ecwmax ");\r
if (pch) {\r
wme->ecwmax[ac_val] = (int)simple_strtol(pch+strlen("ecwmax "), NULL, 0);\r
- printf("%s: ac_val=%d, ecwmax=%d\n", __FUNCTION__, ac_val, wme->ecwmax[ac_val]);\r
+ CONFIG_MSG("ac_val=%d, ecwmax=%d\n", ac_val, wme->ecwmax[ac_val]);\r
}\r
pick_tmp = pick;\r
pch = bcmstrstr(pick_tmp, "txop ");\r
if (pch) {\r
wme->txop[ac_val] = (int)simple_strtol(pch+strlen("txop "), NULL, 0);\r
- printf("%s: ac_val=%d, txop=0x%x\n", __FUNCTION__, ac_val, wme->txop[ac_val]);\r
+ CONFIG_MSG("ac_val=%d, txop=0x%x\n", ac_val, wme->txop[ac_val]);\r
}\r
\r
}\r
\r
if (!strncmp("force_wme_ac=", full_param, len_param)) {\r
conf->force_wme_ac = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: force_wme_ac = %d\n", __FUNCTION__, conf->force_wme_ac);\r
+ CONFIG_MSG("force_wme_ac = %d\n", conf->force_wme_ac);\r
}\r
else if (!strncmp("wme_ac_sta_be=", full_param, len_param)) {\r
dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_BE);\r
return true;\r
}\r
\r
+#ifdef BCMSDIO\r
bool\r
dhd_conf_read_fw_by_mac(dhd_pub_t *dhd, char *full_param, uint len_param)\r
{\r
pick_tmp = data;\r
pch = bcmstrtok(&pick_tmp, " ", 0);\r
conf->fw_by_mac.count = (uint32)simple_strtol(pch, NULL, 0);\r
- if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->fw_by_mac.count, GFP_KERNEL))) {\r
+ if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->fw_by_mac.count,\r
+ GFP_KERNEL))) {\r
conf->fw_by_mac.count = 0;\r
- CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));\r
+ CONFIG_ERROR("kmalloc failed\n");\r
}\r
- printf("%s: fw_count=%d\n", __FUNCTION__, conf->fw_by_mac.count);\r
+ CONFIG_MSG("fw_count=%d\n", conf->fw_by_mac.count);\r
conf->fw_by_mac.m_mac_list_head = mac_list;\r
for (i=0; i<conf->fw_by_mac.count; i++) {\r
pch = bcmstrtok(&pick_tmp, " ", 0);\r
strcpy(mac_list[i].name, pch);\r
pch = bcmstrtok(&pick_tmp, " ", 0);\r
mac_list[i].count = (uint32)simple_strtol(pch, NULL, 0);\r
- printf("%s: name=%s, mac_count=%d\n", __FUNCTION__,\r
+ CONFIG_MSG("name=%s, mac_count=%d\n",\r
mac_list[i].name, mac_list[i].count);\r
- if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count, GFP_KERNEL))) {\r
+ if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count,\r
+ GFP_KERNEL))) {\r
mac_list[i].count = 0;\r
- CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));\r
+ CONFIG_ERROR("kmalloc failed\n");\r
break;\r
}\r
mac_list[i].mac = mac_range;\r
mac_range[j].nic_start = (uint32)simple_strtol(pch, NULL, 0);\r
pch = bcmstrtok(&pick_tmp, " ", 0);\r
mac_range[j].nic_end = (uint32)simple_strtol(pch, NULL, 0);\r
- printf("%s: oui=0x%06X, nic_start=0x%06X, nic_end=0x%06X\n",\r
- __FUNCTION__, mac_range[j].oui,\r
- mac_range[j].nic_start, mac_range[j].nic_end);\r
+ CONFIG_MSG("oui=0x%06X, nic_start=0x%06X, nic_end=0x%06X\n",\r
+ mac_range[j].oui, mac_range[j].nic_start, mac_range[j].nic_end);\r
}\r
}\r
}\r
pick_tmp = data;\r
pch = bcmstrtok(&pick_tmp, " ", 0);\r
conf->nv_by_mac.count = (uint32)simple_strtol(pch, NULL, 0);\r
- if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->nv_by_mac.count, GFP_KERNEL))) {\r
+ if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->nv_by_mac.count,\r
+ GFP_KERNEL))) {\r
conf->nv_by_mac.count = 0;\r
- CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));\r
+ CONFIG_ERROR("kmalloc failed\n");\r
}\r
- printf("%s: nv_count=%d\n", __FUNCTION__, conf->nv_by_mac.count);\r
+ CONFIG_MSG("nv_count=%d\n", conf->nv_by_mac.count);\r
conf->nv_by_mac.m_mac_list_head = mac_list;\r
for (i=0; i<conf->nv_by_mac.count; i++) {\r
pch = bcmstrtok(&pick_tmp, " ", 0);\r
strcpy(mac_list[i].name, pch);\r
pch = bcmstrtok(&pick_tmp, " ", 0);\r
mac_list[i].count = (uint32)simple_strtol(pch, NULL, 0);\r
- printf("%s: name=%s, mac_count=%d\n", __FUNCTION__,\r
+ CONFIG_MSG("name=%s, mac_count=%d\n",\r
mac_list[i].name, mac_list[i].count);\r
- if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count, GFP_KERNEL))) {\r
+ if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count,\r
+ GFP_KERNEL))) {\r
mac_list[i].count = 0;\r
- CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));\r
+ CONFIG_ERROR("kmalloc failed\n");\r
break;\r
}\r
mac_list[i].mac = mac_range;\r
mac_range[j].nic_start = (uint32)simple_strtol(pch, NULL, 0);\r
pch = bcmstrtok(&pick_tmp, " ", 0);\r
mac_range[j].nic_end = (uint32)simple_strtol(pch, NULL, 0);\r
- printf("%s: oui=0x%06X, nic_start=0x%06X, nic_end=0x%06X\n",\r
- __FUNCTION__, mac_range[j].oui,\r
- mac_range[j].nic_start, mac_range[j].nic_end);\r
+ CONFIG_MSG("oui=0x%06X, nic_start=0x%06X, nic_end=0x%06X\n",\r
+ mac_range[j].oui, mac_range[j].nic_start, mac_range[j].nic_end);\r
}\r
}\r
}\r
\r
return true;\r
}\r
+#endif\r
\r
bool\r
dhd_conf_read_nv_by_chip(dhd_pub_t *dhd, char *full_param, uint len_param)\r
pick_tmp = data;\r
pch = bcmstrtok(&pick_tmp, " ", 0);\r
conf->nv_by_chip.count = (uint32)simple_strtol(pch, NULL, 0);\r
- if (!(chip_nv_path = kmalloc(sizeof(wl_mac_list_t)*conf->nv_by_chip.count, GFP_KERNEL))) {\r
+ if (!(chip_nv_path = kmalloc(sizeof(wl_mac_list_t)*conf->nv_by_chip.count,\r
+ GFP_KERNEL))) {\r
conf->nv_by_chip.count = 0;\r
- CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));\r
+ CONFIG_ERROR("kmalloc failed\n");\r
}\r
- printf("%s: nv_by_chip_count=%d\n", __FUNCTION__, conf->nv_by_chip.count);\r
+ CONFIG_MSG("nv_by_chip_count=%d\n", conf->nv_by_chip.count);\r
conf->nv_by_chip.m_chip_nv_path_head = chip_nv_path;\r
for (i=0; i<conf->nv_by_chip.count; i++) {\r
pch = bcmstrtok(&pick_tmp, " ", 0);\r
chip_nv_path[i].chiprev = (uint32)simple_strtol(pch, NULL, 0);\r
pch = bcmstrtok(&pick_tmp, " ", 0);\r
strcpy(chip_nv_path[i].name, pch);\r
- printf("%s: chip=0x%x, chiprev=%d, name=%s\n", __FUNCTION__,\r
+ CONFIG_MSG("chip=0x%x, chiprev=%d, name=%s\n",\r
chip_nv_path[i].chip, chip_nv_path[i].chiprev, chip_nv_path[i].name);\r
}\r
}\r
conf->roam_off = 0;\r
else\r
conf->roam_off = 1;\r
- printf("%s: roam_off = %d\n", __FUNCTION__, conf->roam_off);\r
+ CONFIG_MSG("roam_off = %d\n", conf->roam_off);\r
}\r
else if (!strncmp("roam_off_suspend=", full_param, len_param)) {\r
if (!strncmp(data, "0", 1))\r
conf->roam_off_suspend = 0;\r
else\r
conf->roam_off_suspend = 1;\r
- printf("%s: roam_off_suspend = %d\n", __FUNCTION__, conf->roam_off_suspend);\r
+ CONFIG_MSG("roam_off_suspend = %d\n", conf->roam_off_suspend);\r
}\r
else if (!strncmp("roam_trigger=", full_param, len_param)) {\r
conf->roam_trigger[0] = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: roam_trigger = %d\n", __FUNCTION__,\r
- conf->roam_trigger[0]);\r
+ CONFIG_MSG("roam_trigger = %d\n", conf->roam_trigger[0]);\r
}\r
else if (!strncmp("roam_scan_period=", full_param, len_param)) {\r
conf->roam_scan_period[0] = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: roam_scan_period = %d\n", __FUNCTION__,\r
- conf->roam_scan_period[0]);\r
+ CONFIG_MSG("roam_scan_period = %d\n", conf->roam_scan_period[0]);\r
}\r
else if (!strncmp("roam_delta=", full_param, len_param)) {\r
conf->roam_delta[0] = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: roam_delta = %d\n", __FUNCTION__, conf->roam_delta[0]);\r
+ CONFIG_MSG("roam_delta = %d\n", conf->roam_delta[0]);\r
}\r
else if (!strncmp("fullroamperiod=", full_param, len_param)) {\r
conf->fullroamperiod = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: fullroamperiod = %d\n", __FUNCTION__,\r
- conf->fullroamperiod);\r
+ CONFIG_MSG("fullroamperiod = %d\n", conf->fullroamperiod);\r
} else\r
return false;\r
\r
bool\r
dhd_conf_read_country_list(dhd_pub_t *dhd, char *full_param, uint len_param)\r
{\r
- int i;\r
- char *pch, *pick_tmp, *pick_tmp2;\r
struct dhd_conf *conf = dhd->conf;\r
+ conf_country_list_t *country_list = &conf->country_list;\r
+ int i, count = 0;\r
+ char *pch, *pick_tmp, *pick_tmp2;\r
char *data = full_param+len_param;\r
wl_country_t *cspec;\r
- conf_country_list_t *country_list = NULL;\r
+ uint len_data = strlen(data);\r
\r
/* Process country_list:\r
* country_list=[country1]:[ccode1]/[regrev1],\r
* [country2]:[ccode2]/[regrev2] \\r
* Ex: country_list=US:US/0, TW:TW/1\r
*/\r
- if (!strncmp("country_list=", full_param, len_param)) {\r
- country_list = &dhd->conf->country_list;\r
+ if (!strncmp("ccode=", full_param, len_param)) {\r
+ len_data = min((uint)WLC_CNTRY_BUF_SZ, len_data);\r
+ memset(&conf->cspec, 0, sizeof(wl_country_t));\r
+ memcpy(conf->cspec.country_abbrev, data, len_data);\r
+ memcpy(conf->cspec.ccode, data, len_data);\r
+ CONFIG_MSG("ccode = %s\n", conf->cspec.ccode);\r
}\r
- if (country_list) {\r
+ else if (!strncmp("regrev=", full_param, len_param)) {\r
+ conf->cspec.rev = (int32)simple_strtol(data, NULL, 10);\r
+ CONFIG_MSG("regrev = %d\n", conf->cspec.rev);\r
+ }\r
+ else if (!strncmp("country_list=", full_param, len_param)) {\r
pick_tmp = data;\r
for (i=0; i<CONFIG_COUNTRY_LIST_SIZE; i++) {\r
pick_tmp2 = bcmstrtok(&pick_tmp, ", ", 0);\r
break;\r
cspec = NULL;\r
if (!(cspec = kmalloc(sizeof(wl_country_t), GFP_KERNEL))) {\r
- CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));\r
+ CONFIG_ERROR("kmalloc failed\n");\r
break;\r
}\r
memset(cspec, 0, sizeof(wl_country_t));\r
break;\r
}\r
cspec->rev = (int32)simple_strtol(pch, NULL, 10);\r
- country_list->count++;\r
+ count++;\r
country_list->cspec[i] = cspec;\r
- CONFIG_TRACE(("%s: country_list abbrev=%s, ccode=%s, regrev=%d\n", __FUNCTION__,\r
- cspec->country_abbrev, cspec->ccode, cspec->rev));\r
- }\r
- if (!strncmp("country_list=", full_param, len_param)) {\r
- printf("%s: %d country in list\n", __FUNCTION__, conf->country_list.count);\r
+ CONFIG_TRACE("country_list abbrev=%s, ccode=%s, regrev=%d\n",\r
+ cspec->country_abbrev, cspec->ccode, cspec->rev);\r
}\r
+ CONFIG_MSG("%d country in list\n", count);\r
}\r
else\r
return false;\r
} else {\r
conf->mchan[i].bw = (int)simple_strtol(pch, NULL, 0);\r
if (conf->mchan[i].bw < 0 || conf->mchan[i].bw > 100) {\r
- CONFIG_ERROR(("%s: wrong bw %d\n", __FUNCTION__, conf->mchan[i].bw));\r
+ CONFIG_ERROR("wrong bw %d\n", conf->mchan[i].bw);\r
conf->mchan[i].bw = 0;\r
break;\r
}\r
}\r
for (i=0; i<MCHAN_MAX_NUM; i++) {\r
if (conf->mchan[i].bw >= 0)\r
- printf("%s: mchan_bw=%d/%d/%d\n", __FUNCTION__,\r
- conf->mchan[i].bw, conf->mchan[i].p2p_mode, conf->mchan[i].miracast_mode);\r
+ CONFIG_MSG("mchan_bw=%d/%d/%d\n", conf->mchan[i].bw,\r
+ conf->mchan[i].p2p_mode, conf->mchan[i].miracast_mode);\r
}\r
}\r
else\r
\r
/* Process pkt filter:\r
* 1) pkt_filter_add=99 0 0 0 0x000000000000 0x000000000000\r
- * 2) pkt_filter_del=100, 102, 103, 104, 105\r
+ * 2) pkt_filter_delete=100, 102, 103, 104, 105\r
* 3) magic_pkt_filter_add=141 0 1 12\r
*/\r
if (!strncmp("dhd_master_mode=", full_param, len_param)) {\r
dhd_master_mode = FALSE;\r
else\r
dhd_master_mode = TRUE;\r
- printf("%s: dhd_master_mode = %d\n", __FUNCTION__, dhd_master_mode);\r
+ CONFIG_MSG("dhd_master_mode = %d\n", dhd_master_mode);\r
}\r
else if (!strncmp("pkt_filter_add=", full_param, len_param)) {\r
pick_tmp = data;\r
i=0;\r
while (pch != NULL && i<DHD_CONF_FILTER_MAX) {\r
strcpy(&conf->pkt_filter_add.filter[i][0], pch);\r
- printf("%s: pkt_filter_add[%d][] = %s\n", __FUNCTION__, i, &conf->pkt_filter_add.filter[i][0]);\r
+ CONFIG_MSG("pkt_filter_add[%d][] = %s\n",\r
+ i, &conf->pkt_filter_add.filter[i][0]);\r
pch = bcmstrtok(&pick_tmp, ",.-", 0);\r
i++;\r
}\r
conf->pkt_filter_add.count = i;\r
}\r
- else if (!strncmp("pkt_filter_del=", full_param, len_param)) {\r
+ else if (!strncmp("pkt_filter_delete=", full_param, len_param) ||\r
+ !strncmp("pkt_filter_del=", full_param, len_param)) {\r
pick_tmp = data;\r
pch = bcmstrtok(&pick_tmp, " ,.-", 0);\r
i=0;\r
i++;\r
}\r
conf->pkt_filter_del.count = i;\r
- printf("%s: pkt_filter_del id = ", __FUNCTION__);\r
+ CONFIG_MSG("pkt_filter_del id = ");\r
for (i=0; i<conf->pkt_filter_del.count; i++)\r
printf("%d ", conf->pkt_filter_del.id[i]);\r
printf("\n");\r
}\r
else if (!strncmp("magic_pkt_filter_add=", full_param, len_param)) {\r
if (!(conf->magic_pkt_filter_add = kmalloc(MAGIC_PKT_FILTER_LEN, GFP_KERNEL))) {\r
- CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));\r
+ CONFIG_ERROR("kmalloc failed\n");\r
} else {\r
memset(conf->magic_pkt_filter_add, 0, MAGIC_PKT_FILTER_LEN);\r
strcpy(conf->magic_pkt_filter_add, data);\r
- printf("%s: magic_pkt_filter_add = %s\n", __FUNCTION__, conf->magic_pkt_filter_add);\r
+ CONFIG_MSG("magic_pkt_filter_add = %s\n", conf->magic_pkt_filter_add);\r
}\r
}\r
else\r
#endif\r
\r
#ifdef ISAM_PREINIT\r
+#if !defined(WL_EXT_IAPSTA)\r
+#error "WL_EXT_IAPSTA should be defined to enable ISAM_PREINIT"\r
+#endif /* !WL_EXT_IAPSTA */\r
/*\r
* isam_init=mode [sta|ap|apsta|dualap] vifname [wlan1]\r
* isam_config=ifname [wlan0|wlan1] ssid [xxx] chan [x]\r
\r
if (!strncmp("isam_init=", full_param, len_param)) {\r
sprintf(conf->isam_init, "isam_init %s", data);\r
- printf("%s: isam_init=%s\n", __FUNCTION__, conf->isam_init);\r
+ CONFIG_MSG("isam_init=%s\n", conf->isam_init);\r
}\r
else if (!strncmp("isam_config=", full_param, len_param)) {\r
sprintf(conf->isam_config, "isam_config %s", data);\r
- printf("%s: isam_config=%s\n", __FUNCTION__, conf->isam_config);\r
+ CONFIG_MSG("isam_config=%s\n", conf->isam_config);\r
}\r
else if (!strncmp("isam_enable=", full_param, len_param)) {\r
sprintf(conf->isam_enable, "isam_enable %s", data);\r
- printf("%s: isam_enable=%s\n", __FUNCTION__, conf->isam_enable);\r
+ CONFIG_MSG("isam_enable=%s\n", conf->isam_enable);\r
}\r
else\r
return false;\r
\r
if (!strncmp("dhcpc_enable=", full_param, len_param)) {\r
conf->dhcpc_enable = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: dhcpc_enable = %d\n", __FUNCTION__, conf->dhcpc_enable);\r
+ CONFIG_MSG("dhcpc_enable = %d\n", conf->dhcpc_enable);\r
}\r
else if (!strncmp("dhcpd_enable=", full_param, len_param)) {\r
conf->dhcpd_enable = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: dhcpd_enable = %d\n", __FUNCTION__, conf->dhcpd_enable);\r
+ CONFIG_MSG("dhcpd_enable = %d\n", conf->dhcpd_enable);\r
}\r
else if (!strncmp("dhcpd_ip_addr=", full_param, len_param)) {\r
- if (!bcm_atoipv4(data, &ipa_set))\r
- printf("%s : dhcpd_ip_addr adress setting failed.\n", __FUNCTION__);\r
- conf->dhcpd_ip_addr = ipa_set;\r
- printf("%s: dhcpd_ip_addr = %s\n",__FUNCTION__, data);\r
+ if (!bcm_atoipv4(data, &ipa_set)) {\r
+			CONFIG_ERROR("dhcpd_ip_addr address setting failed\n");\r
+ return false;\r
+ }\r
+ memcpy(&conf->dhcpd_ip_addr, &ipa_set, sizeof(struct ipv4_addr));\r
+ CONFIG_MSG("dhcpd_ip_addr = %s\n", data);\r
}\r
else if (!strncmp("dhcpd_ip_mask=", full_param, len_param)) {\r
- if (!bcm_atoipv4(data, &ipa_set))\r
- printf("%s : dhcpd_ip_mask adress setting failed.\n", __FUNCTION__);\r
- conf->dhcpd_ip_mask = ipa_set;\r
- printf("%s: dhcpd_ip_mask = %s\n",__FUNCTION__, data);\r
+ if (!bcm_atoipv4(data, &ipa_set)) {\r
+			CONFIG_ERROR("dhcpd_ip_mask address setting failed\n");\r
+ return false;\r
+ }\r
+ memcpy(&conf->dhcpd_ip_mask, &ipa_set, sizeof(struct ipv4_addr));\r
+ CONFIG_MSG("dhcpd_ip_mask = %s\n", data);\r
}\r
else if (!strncmp("dhcpd_ip_start=", full_param, len_param)) {\r
- if (!bcm_atoipv4(data, &ipa_set))\r
- printf("%s : dhcpd_ip_start adress setting failed.\n", __FUNCTION__);\r
- conf->dhcpd_ip_start = ipa_set;\r
- printf("%s: dhcpd_ip_start = %s\n",__FUNCTION__, data);\r
+ if (!bcm_atoipv4(data, &ipa_set)) {\r
+			CONFIG_ERROR("dhcpd_ip_start address setting failed\n");\r
+ return false;\r
+ }\r
+ memcpy(&conf->dhcpd_ip_start, &ipa_set, sizeof(struct ipv4_addr));\r
+ CONFIG_MSG("dhcpd_ip_start = %s\n", data);\r
}\r
else if (!strncmp("dhcpd_ip_end=", full_param, len_param)) {\r
- if (!bcm_atoipv4(data, &ipa_set))\r
- printf("%s : dhcpd_ip_end adress setting failed.\n", __FUNCTION__);\r
- conf->dhcpd_ip_end = ipa_set;\r
- printf("%s: dhcpd_ip_end = %s\n",__FUNCTION__, data);\r
+ if (!bcm_atoipv4(data, &ipa_set)) {\r
+			CONFIG_ERROR("dhcpd_ip_end address setting failed\n");\r
+ return false;\r
+ }\r
+ memcpy(&conf->dhcpd_ip_end, &ipa_set, sizeof(struct ipv4_addr));\r
+ CONFIG_MSG("dhcpd_ip_end = %s\n", data);\r
}\r
else\r
return false;\r
dhd_doflow = FALSE;\r
else\r
dhd_doflow = TRUE;\r
- printf("%s: dhd_doflow = %d\n", __FUNCTION__, dhd_doflow);\r
+ CONFIG_MSG("dhd_doflow = %d\n", dhd_doflow);\r
}\r
else if (!strncmp("dhd_slpauto=", full_param, len_param) ||\r
!strncmp("kso_enable=", full_param, len_param)) {\r
dhd_slpauto = FALSE;\r
else\r
dhd_slpauto = TRUE;\r
- printf("%s: dhd_slpauto = %d\n", __FUNCTION__, dhd_slpauto);\r
+ CONFIG_MSG("dhd_slpauto = %d\n", dhd_slpauto);\r
}\r
else if (!strncmp("use_rxchain=", full_param, len_param)) {\r
conf->use_rxchain = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: use_rxchain = %d\n", __FUNCTION__, conf->use_rxchain);\r
+ CONFIG_MSG("use_rxchain = %d\n", conf->use_rxchain);\r
}\r
else if (!strncmp("dhd_txminmax=", full_param, len_param)) {\r
conf->dhd_txminmax = (uint)simple_strtol(data, NULL, 10);\r
- printf("%s: dhd_txminmax = %d\n", __FUNCTION__, conf->dhd_txminmax);\r
+ CONFIG_MSG("dhd_txminmax = %d\n", conf->dhd_txminmax);\r
}\r
else if (!strncmp("txinrx_thres=", full_param, len_param)) {\r
conf->txinrx_thres = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: txinrx_thres = %d\n", __FUNCTION__, conf->txinrx_thres);\r
- }\r
- else if (!strncmp("sd_f2_blocksize=", full_param, len_param)) {\r
- conf->sd_f2_blocksize = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: sd_f2_blocksize = %d\n", __FUNCTION__, conf->sd_f2_blocksize);\r
+ CONFIG_MSG("txinrx_thres = %d\n", conf->txinrx_thres);\r
}\r
#if defined(HW_OOB)\r
else if (!strncmp("oob_enabled_later=", full_param, len_param)) {\r
conf->oob_enabled_later = FALSE;\r
else\r
conf->oob_enabled_later = TRUE;\r
- printf("%s: oob_enabled_later = %d\n", __FUNCTION__, conf->oob_enabled_later);\r
+ CONFIG_MSG("oob_enabled_later = %d\n", conf->oob_enabled_later);\r
}\r
#endif\r
else if (!strncmp("dpc_cpucore=", full_param, len_param)) {\r
conf->dpc_cpucore = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: dpc_cpucore = %d\n", __FUNCTION__, conf->dpc_cpucore);\r
+ CONFIG_MSG("dpc_cpucore = %d\n", conf->dpc_cpucore);\r
}\r
else if (!strncmp("rxf_cpucore=", full_param, len_param)) {\r
conf->rxf_cpucore = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: rxf_cpucore = %d\n", __FUNCTION__, conf->rxf_cpucore);\r
- }\r
- else if (!strncmp("orphan_move=", full_param, len_param)) {\r
- conf->orphan_move = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: orphan_move = %d\n", __FUNCTION__, conf->orphan_move);\r
+ CONFIG_MSG("rxf_cpucore = %d\n", conf->rxf_cpucore);\r
}\r
#if defined(BCMSDIOH_TXGLOM)\r
else if (!strncmp("txglomsize=", full_param, len_param)) {\r
conf->txglomsize = (uint)simple_strtol(data, NULL, 10);\r
if (conf->txglomsize > SDPCM_MAXGLOM_SIZE)\r
conf->txglomsize = SDPCM_MAXGLOM_SIZE;\r
- printf("%s: txglomsize = %d\n", __FUNCTION__, conf->txglomsize);\r
+ CONFIG_MSG("txglomsize = %d\n", conf->txglomsize);\r
}\r
else if (!strncmp("txglom_ext=", full_param, len_param)) {\r
if (!strncmp(data, "0", 1))\r
conf->txglom_ext = FALSE;\r
else\r
conf->txglom_ext = TRUE;\r
- printf("%s: txglom_ext = %d\n", __FUNCTION__, conf->txglom_ext);\r
+ CONFIG_MSG("txglom_ext = %d\n", conf->txglom_ext);\r
if (conf->txglom_ext) {\r
if ((conf->chip == BCM43362_CHIP_ID) || (conf->chip == BCM4330_CHIP_ID))\r
conf->txglom_bucket_size = 1680;\r
conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID)\r
conf->txglom_bucket_size = 1684;\r
}\r
- printf("%s: txglom_bucket_size = %d\n", __FUNCTION__, conf->txglom_bucket_size);\r
+ CONFIG_MSG("txglom_bucket_size = %d\n", conf->txglom_bucket_size);\r
}\r
else if (!strncmp("bus:rxglom=", full_param, len_param)) {\r
if (!strncmp(data, "0", 1))\r
conf->bus_rxglom = FALSE;\r
else\r
conf->bus_rxglom = TRUE;\r
- printf("%s: bus:rxglom = %d\n", __FUNCTION__, conf->bus_rxglom);\r
+ CONFIG_MSG("bus:rxglom = %d\n", conf->bus_rxglom);\r
}\r
else if (!strncmp("deferred_tx_len=", full_param, len_param)) {\r
conf->deferred_tx_len = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: deferred_tx_len = %d\n", __FUNCTION__, conf->deferred_tx_len);\r
+ CONFIG_MSG("deferred_tx_len = %d\n", conf->deferred_tx_len);\r
}\r
else if (!strncmp("txctl_tmo_fix=", full_param, len_param)) {\r
conf->txctl_tmo_fix = (int)simple_strtol(data, NULL, 0);\r
- printf("%s: txctl_tmo_fix = %d\n", __FUNCTION__, conf->txctl_tmo_fix);\r
+ CONFIG_MSG("txctl_tmo_fix = %d\n", conf->txctl_tmo_fix);\r
}\r
else if (!strncmp("tx_max_offset=", full_param, len_param)) {\r
conf->tx_max_offset = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: tx_max_offset = %d\n", __FUNCTION__, conf->tx_max_offset);\r
+ CONFIG_MSG("tx_max_offset = %d\n", conf->tx_max_offset);\r
}\r
else if (!strncmp("txglom_mode=", full_param, len_param)) {\r
if (!strncmp(data, "0", 1))\r
conf->txglom_mode = FALSE;\r
else\r
conf->txglom_mode = TRUE;\r
- printf("%s: txglom_mode = %d\n", __FUNCTION__, conf->txglom_mode);\r
+ CONFIG_MSG("txglom_mode = %d\n", conf->txglom_mode);\r
+ }\r
+#if defined(SDIO_ISR_THREAD)\r
+ else if (!strncmp("intr_extn=", full_param, len_param)) {\r
+ if (!strncmp(data, "0", 1))\r
+ conf->intr_extn = FALSE;\r
+ else\r
+ conf->intr_extn = TRUE;\r
+ CONFIG_MSG("intr_extn = %d\n", conf->intr_extn);\r
+ }\r
+#endif\r
+#ifdef BCMSDIO_RXLIM_POST\r
+ else if (!strncmp("rxlim_en=", full_param, len_param)) {\r
+ if (!strncmp(data, "0", 1))\r
+ conf->rxlim_en = FALSE;\r
+ else\r
+ conf->rxlim_en = TRUE;\r
+ CONFIG_MSG("rxlim_en = %d\n", conf->rxlim_en);\r
}\r
+#endif\r
#endif\r
else\r
return false;\r
conf->bus_deepsleep_disable = 0;\r
else\r
conf->bus_deepsleep_disable = 1;\r
- printf("%s: bus:deepsleep_disable = %d\n", __FUNCTION__, conf->bus_deepsleep_disable);\r
+ CONFIG_MSG("bus:deepsleep_disable = %d\n", conf->bus_deepsleep_disable);\r
}\r
else\r
return false;\r
conf->deepsleep = TRUE;\r
else\r
conf->deepsleep = FALSE;\r
- printf("%s: deepsleep = %d\n", __FUNCTION__, conf->deepsleep);\r
+ CONFIG_MSG("deepsleep = %d\n", conf->deepsleep);\r
}\r
else if (!strncmp("PM=", full_param, len_param)) {\r
conf->pm = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: PM = %d\n", __FUNCTION__, conf->pm);\r
+ CONFIG_MSG("PM = %d\n", conf->pm);\r
}\r
else if (!strncmp("pm_in_suspend=", full_param, len_param)) {\r
conf->pm_in_suspend = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: pm_in_suspend = %d\n", __FUNCTION__, conf->pm_in_suspend);\r
+ CONFIG_MSG("pm_in_suspend = %d\n", conf->pm_in_suspend);\r
}\r
else if (!strncmp("suspend_bcn_li_dtim=", full_param, len_param)) {\r
conf->suspend_bcn_li_dtim = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: suspend_bcn_li_dtim = %d\n", __FUNCTION__, conf->suspend_bcn_li_dtim);\r
+ CONFIG_MSG("suspend_bcn_li_dtim = %d\n", conf->suspend_bcn_li_dtim);\r
}\r
else if (!strncmp("xmit_in_suspend=", full_param, len_param)) {\r
if (!strncmp(data, "1", 1))\r
conf->insuspend &= ~NO_TXDATA_IN_SUSPEND;\r
else\r
conf->insuspend |= NO_TXDATA_IN_SUSPEND;\r
- printf("%s: insuspend = 0x%x\n", __FUNCTION__, conf->insuspend);\r
+ CONFIG_MSG("insuspend = 0x%x\n", conf->insuspend);\r
}\r
else if (!strncmp("insuspend=", full_param, len_param)) {\r
conf->insuspend = (int)simple_strtol(data, NULL, 0);\r
- printf("%s: insuspend = 0x%x\n", __FUNCTION__, conf->insuspend);\r
+ CONFIG_MSG("insuspend = 0x%x\n", conf->insuspend);\r
+ }\r
+#ifdef WL_EXT_WOWL\r
+ else if (!strncmp("wowl=", full_param, len_param)) {\r
+ conf->wowl = (int)simple_strtol(data, NULL, 0);\r
+ CONFIG_MSG("wowl = 0x%x\n", conf->wowl);\r
}\r
+#endif\r
else\r
return false;\r
\r
return true;\r
}\r
\r
+#ifdef GET_CUSTOM_MAC_FROM_CONFIG\r
+int\r
+bcm_str2hex(const char *p, char *ea, int size)\r
+{\r
+ int i = 0;\r
+ char *ep;\r
+\r
+ for (;;) {\r
+ ea[i++] = (char) bcm_strtoul(p, &ep, 16);\r
+ p = ep;\r
+ if (!*p++ || i == size)\r
+ break;\r
+ }\r
+\r
+ return (i == size);\r
+}\r
+#endif\r
+\r
bool\r
dhd_conf_read_others(dhd_pub_t *dhd, char *full_param, uint len_param)\r
{\r
struct dhd_conf *conf = dhd->conf;\r
char *data = full_param+len_param;\r
- uint len_data = strlen(data);\r
char *pch, *pick_tmp;\r
int i;\r
+#ifdef GET_CUSTOM_MAC_FROM_CONFIG\r
+ struct ether_addr ea_addr;\r
+ char macpad[56];\r
+#endif\r
\r
if (!strncmp("dhd_poll=", full_param, len_param)) {\r
if (!strncmp(data, "0", 1))\r
conf->dhd_poll = 0;\r
else\r
conf->dhd_poll = 1;\r
- printf("%s: dhd_poll = %d\n", __FUNCTION__, conf->dhd_poll);\r
+ CONFIG_MSG("dhd_poll = %d\n", conf->dhd_poll);\r
}\r
else if (!strncmp("dhd_watchdog_ms=", full_param, len_param)) {\r
dhd_watchdog_ms = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: dhd_watchdog_ms = %d\n", __FUNCTION__, dhd_watchdog_ms);\r
+ CONFIG_MSG("dhd_watchdog_ms = %d\n", dhd_watchdog_ms);\r
}\r
else if (!strncmp("band=", full_param, len_param)) {\r
/* Process band:\r
conf->band = WLC_BAND_5G;\r
else\r
conf->band = WLC_BAND_AUTO;\r
- printf("%s: band = %d\n", __FUNCTION__, conf->band);\r
+ CONFIG_MSG("band = %d\n", conf->band);\r
}\r
else if (!strncmp("bw_cap_2g=", full_param, len_param)) {\r
conf->bw_cap[0] = (uint)simple_strtol(data, NULL, 0);\r
- printf("%s: bw_cap_2g = %d\n", __FUNCTION__, conf->bw_cap[0]);\r
+ CONFIG_MSG("bw_cap_2g = %d\n", conf->bw_cap[0]);\r
}\r
else if (!strncmp("bw_cap_5g=", full_param, len_param)) {\r
conf->bw_cap[1] = (uint)simple_strtol(data, NULL, 0);\r
- printf("%s: bw_cap_5g = %d\n", __FUNCTION__, conf->bw_cap[1]);\r
+ CONFIG_MSG("bw_cap_5g = %d\n", conf->bw_cap[1]);\r
}\r
else if (!strncmp("bw_cap=", full_param, len_param)) {\r
pick_tmp = data;\r
pch = bcmstrtok(&pick_tmp, " ,.-", 0);\r
if (pch != NULL) {\r
conf->bw_cap[0] = (uint32)simple_strtol(pch, NULL, 0);\r
- printf("%s: bw_cap 2g = %d\n", __FUNCTION__, conf->bw_cap[0]);\r
+ CONFIG_MSG("bw_cap 2g = %d\n", conf->bw_cap[0]);\r
}\r
pch = bcmstrtok(&pick_tmp, " ,.-", 0);\r
if (pch != NULL) {\r
conf->bw_cap[1] = (uint32)simple_strtol(pch, NULL, 0);\r
- printf("%s: bw_cap 5g = %d\n", __FUNCTION__, conf->bw_cap[1]);\r
+ CONFIG_MSG("bw_cap 5g = %d\n", conf->bw_cap[1]);\r
}\r
}\r
- else if (!strncmp("ccode=", full_param, len_param)) {\r
- memset(&conf->cspec, 0, sizeof(wl_country_t));\r
- memcpy(conf->cspec.country_abbrev, data, len_data);\r
- memcpy(conf->cspec.ccode, data, len_data);\r
- printf("%s: ccode = %s\n", __FUNCTION__, conf->cspec.ccode);\r
- }\r
- else if (!strncmp("regrev=", full_param, len_param)) {\r
- conf->cspec.rev = (int32)simple_strtol(data, NULL, 10);\r
- printf("%s: regrev = %d\n", __FUNCTION__, conf->cspec.rev);\r
- }\r
else if (!strncmp("channels=", full_param, len_param)) {\r
pick_tmp = data;\r
pch = bcmstrtok(&pick_tmp, " ,.-", 0);\r
i++;\r
}\r
conf->channels.count = i;\r
- printf("%s: channels = ", __FUNCTION__);\r
+ CONFIG_MSG("channels = ");\r
for (i=0; i<conf->channels.count; i++)\r
printf("%d ", conf->channels.channel[i]);\r
printf("\n");\r
}\r
else if (!strncmp("keep_alive_period=", full_param, len_param)) {\r
conf->keep_alive_period = (uint)simple_strtol(data, NULL, 10);\r
- printf("%s: keep_alive_period = %d\n", __FUNCTION__,\r
- conf->keep_alive_period);\r
+ CONFIG_MSG("keep_alive_period = %d\n", conf->keep_alive_period);\r
}\r
- else if (!strncmp("phy_oclscdenable=", full_param, len_param)) {\r
- conf->phy_oclscdenable = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: phy_oclscdenable = %d\n", __FUNCTION__, conf->phy_oclscdenable);\r
+#ifdef ARP_OFFLOAD_SUPPORT\r
+ else if (!strncmp("garp=", full_param, len_param)) {\r
+ if (!strncmp(data, "0", 1))\r
+ conf->garp = FALSE;\r
+ else\r
+ conf->garp = TRUE;\r
+ CONFIG_MSG("garp = %d\n", conf->garp);\r
}\r
+#endif\r
else if (!strncmp("srl=", full_param, len_param)) {\r
conf->srl = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: srl = %d\n", __FUNCTION__, conf->srl);\r
+ CONFIG_MSG("srl = %d\n", conf->srl);\r
}\r
else if (!strncmp("lrl=", full_param, len_param)) {\r
conf->lrl = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: lrl = %d\n", __FUNCTION__, conf->lrl);\r
+ CONFIG_MSG("lrl = %d\n", conf->lrl);\r
}\r
else if (!strncmp("bcn_timeout=", full_param, len_param)) {\r
conf->bcn_timeout= (uint)simple_strtol(data, NULL, 10);\r
- printf("%s: bcn_timeout = %d\n", __FUNCTION__, conf->bcn_timeout);\r
- }\r
- else if (!strncmp("txbf=", full_param, len_param)) {\r
- conf->txbf = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: txbf = %d\n", __FUNCTION__, conf->txbf);\r
+ CONFIG_MSG("bcn_timeout = %d\n", conf->bcn_timeout);\r
}\r
else if (!strncmp("frameburst=", full_param, len_param)) {\r
conf->frameburst = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: frameburst = %d\n", __FUNCTION__, conf->frameburst);\r
+ CONFIG_MSG("frameburst = %d\n", conf->frameburst);\r
}\r
else if (!strncmp("disable_proptx=", full_param, len_param)) {\r
conf->disable_proptx = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: disable_proptx = %d\n", __FUNCTION__, conf->disable_proptx);\r
+ CONFIG_MSG("disable_proptx = %d\n", conf->disable_proptx);\r
}\r
#ifdef DHDTCPACK_SUPPRESS\r
else if (!strncmp("tcpack_sup_mode=", full_param, len_param)) {\r
conf->tcpack_sup_mode = (uint)simple_strtol(data, NULL, 10);\r
- printf("%s: tcpack_sup_mode = %d\n", __FUNCTION__, conf->tcpack_sup_mode);\r
+ CONFIG_MSG("tcpack_sup_mode = %d\n", conf->tcpack_sup_mode);\r
}\r
#endif\r
else if (!strncmp("pktprio8021x=", full_param, len_param)) {\r
conf->pktprio8021x = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: pktprio8021x = %d\n", __FUNCTION__, conf->pktprio8021x);\r
+ CONFIG_MSG("pktprio8021x = %d\n", conf->pktprio8021x);\r
}\r
#if defined(BCMSDIO) || defined(BCMPCIE)\r
else if (!strncmp("dhd_txbound=", full_param, len_param)) {\r
dhd_txbound = (uint)simple_strtol(data, NULL, 10);\r
- printf("%s: dhd_txbound = %d\n", __FUNCTION__, dhd_txbound);\r
+ CONFIG_MSG("dhd_txbound = %d\n", dhd_txbound);\r
}\r
else if (!strncmp("dhd_rxbound=", full_param, len_param)) {\r
dhd_rxbound = (uint)simple_strtol(data, NULL, 10);\r
- printf("%s: dhd_rxbound = %d\n", __FUNCTION__, dhd_rxbound);\r
+ CONFIG_MSG("dhd_rxbound = %d\n", dhd_rxbound);\r
}\r
#endif\r
+ else if (!strncmp("orphan_move=", full_param, len_param)) {\r
+ conf->orphan_move = (int)simple_strtol(data, NULL, 10);\r
+ CONFIG_MSG("orphan_move = %d\n", conf->orphan_move);\r
+ }\r
else if (!strncmp("tsq=", full_param, len_param)) {\r
conf->tsq = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: tsq = %d\n", __FUNCTION__, conf->tsq);\r
+ CONFIG_MSG("tsq = %d\n", conf->tsq);\r
}\r
else if (!strncmp("ctrl_resched=", full_param, len_param)) {\r
conf->ctrl_resched = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: ctrl_resched = %d\n", __FUNCTION__, conf->ctrl_resched);\r
- }\r
- else if (!strncmp("dhd_ioctl_timeout_msec=", full_param, len_param)) {\r
- conf->dhd_ioctl_timeout_msec = (int)simple_strtol(data, NULL, 10);\r
- printf("%s: dhd_ioctl_timeout_msec = %d\n", __FUNCTION__, conf->dhd_ioctl_timeout_msec);\r
+ CONFIG_MSG("ctrl_resched = %d\n", conf->ctrl_resched);\r
}\r
else if (!strncmp("in4way=", full_param, len_param)) {\r
conf->in4way = (int)simple_strtol(data, NULL, 0);\r
- printf("%s: in4way = 0x%x\n", __FUNCTION__, conf->in4way);\r
- }\r
- else if (!strncmp("max_wait_gc_time=", full_param, len_param)) {\r
- conf->max_wait_gc_time = (int)simple_strtol(data, NULL, 0);\r
- printf("%s: max_wait_gc_time = %d\n", __FUNCTION__, conf->max_wait_gc_time);\r
+ CONFIG_MSG("in4way = 0x%x\n", conf->in4way);\r
}\r
else if (!strncmp("wl_preinit=", full_param, len_param)) {\r
if (!(conf->wl_preinit = kmalloc(len_param+1, GFP_KERNEL))) {\r
- CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));\r
+ CONFIG_ERROR("kmalloc failed\n");\r
} else {\r
memset(conf->wl_preinit, 0, len_param+1);\r
strcpy(conf->wl_preinit, data);\r
- printf("%s: wl_preinit = %s\n", __FUNCTION__, conf->wl_preinit);\r
+ CONFIG_MSG("wl_preinit = %s\n", conf->wl_preinit);\r
+ }\r
+ }\r
+#ifdef GET_CUSTOM_MAC_FROM_CONFIG\r
+ else if (!strncmp("mac=", full_param, len_param)) {\r
+ if (!bcm_ether_atoe(data, &ea_addr)) {\r
+			CONFIG_ERROR("mac address read error\n");\r
+ return false;\r
+ }\r
+ memcpy(&conf->hw_ether, &ea_addr, ETHER_ADDR_LEN);\r
+ CONFIG_MSG("mac = %s\n", data);\r
+ }\r
+ else if (!strncmp("macpad=", full_param, len_param)) {\r
+ if (!bcm_str2hex(data, macpad, sizeof(macpad))) {\r
+			CONFIG_ERROR("macpad address read error\n");\r
+ return false;\r
+ }\r
+ memcpy(&conf->hw_ether[ETHER_ADDR_LEN], macpad, sizeof(macpad));\r
+ if (config_msg_level & CONFIG_TRACE_LEVEL) {\r
+ printf("macpad =\n");\r
+ for (i=0; i<sizeof(macpad); i++) {\r
+ printf("0x%02x, ", conf->hw_ether[ETHER_ADDR_LEN+i]);\r
+ if ((i+1)%8 == 0)\r
+ printf("\n");\r
+ }\r
}\r
}\r
+#endif\r
else\r
return false;\r
\r
int\r
dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path)\r
{\r
- int bcmerror = -1;\r
+ int bcmerror = -1, chip_match = -1;\r
uint len = 0, start_pos=0;\r
void * image = NULL;\r
char * memblock = NULL;\r
if (conf_file_exists) {\r
image = dhd_os_open_image1(dhd, conf_path);\r
if (image == NULL) {\r
- printf("%s: Ignore config file %s\n", __FUNCTION__, conf_path);\r
+ CONFIG_MSG("Ignore config file %s\n", conf_path);\r
goto err;\r
}\r
}\r
\r
memblock = MALLOC(dhd->osh, MAXSZ_CONFIG);\r
if (memblock == NULL) {\r
- CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n",\r
- __FUNCTION__, MAXSZ_CONFIG));\r
+ CONFIG_ERROR("Failed to allocate memory %d bytes\n", MAXSZ_CONFIG);\r
goto err;\r
}\r
\r
pick = MALLOC(dhd->osh, MAXSZ_BUF);\r
if (!pick) {\r
- CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n",\r
- __FUNCTION__, MAXSZ_BUF));\r
+ CONFIG_ERROR("Failed to allocate memory %d bytes\n", MAXSZ_BUF);\r
goto err;\r
}\r
\r
if (pch != NULL) {\r
len_param = pch-pick+1;\r
if (len_param == strlen(pick)) {\r
- CONFIG_ERROR(("%s: not a right parameter %s\n", __FUNCTION__, pick));\r
+ CONFIG_ERROR("not a right parameter %s\n", pick);\r
continue;\r
}\r
} else {\r
- CONFIG_ERROR(("%s: not a right parameter %s\n", __FUNCTION__, pick));\r
+ CONFIG_ERROR("not a right parameter %s\n", pick);\r
continue;\r
}\r
\r
+ dhd_conf_read_chiprev(dhd, &chip_match, pick, len_param);\r
+ if (!chip_match)\r
+ continue;\r
+\r
if (dhd_conf_read_log_level(dhd, pick, len_param))\r
continue;\r
else if (dhd_conf_read_roam_params(dhd, pick, len_param))\r
continue;\r
else if (dhd_conf_read_wme_ac_params(dhd, pick, len_param))\r
continue;\r
+#ifdef BCMSDIO\r
else if (dhd_conf_read_fw_by_mac(dhd, pick, len_param))\r
continue;\r
else if (dhd_conf_read_nv_by_mac(dhd, pick, len_param))\r
continue;\r
+#endif\r
else if (dhd_conf_read_nv_by_chip(dhd, pick, len_param))\r
continue;\r
else if (dhd_conf_read_country_list(dhd, pick, len_param))\r
\r
bcmerror = 0;\r
} else {\r
- CONFIG_ERROR(("%s: error reading config file: %d\n", __FUNCTION__, len));\r
+ CONFIG_ERROR("error reading config file: %d\n", len);\r
bcmerror = BCME_SDIO_ERROR;\r
}\r
\r
int\r
dhd_conf_set_chiprev(dhd_pub_t *dhd, uint chip, uint chiprev)\r
{\r
- printf("%s: chip=0x%x, chiprev=%d\n", __FUNCTION__, chip, chiprev);\r
+ CONFIG_MSG("chip=0x%x, chiprev=%d\n", chip, chiprev);\r
dhd->conf->chip = chip;\r
dhd->conf->chiprev = chiprev;\r
return 0;\r
}\r
#endif\r
// other parameters set in preinit or config.txt\r
+ if (conf->txglom_ext)\r
+ CONFIG_MSG("txglom_ext=%d, txglom_bucket_size=%d\n",\r
+ conf->txglom_ext, conf->txglom_bucket_size);\r
+ CONFIG_MSG("txglom_mode=%s\n",\r
+ conf->txglom_mode==SDPCM_TXGLOM_MDESC?"multi-desc":"copy");\r
+ CONFIG_MSG("txglomsize=%d, deferred_tx_len=%d\n",\r
+ conf->txglomsize, conf->deferred_tx_len);\r
+ CONFIG_MSG("txinrx_thres=%d, dhd_txminmax=%d\n",\r
+ conf->txinrx_thres, conf->dhd_txminmax);\r
+ CONFIG_MSG("tx_max_offset=%d, txctl_tmo_fix=%d\n",\r
+ conf->tx_max_offset, conf->txctl_tmo_fix);\r
} else {\r
// clear txglom parameters\r
conf->txglom_ext = FALSE;\r
conf->txglomsize = 0;\r
conf->deferred_tx_len = 0;\r
}\r
- if (conf->txglom_ext)\r
- printf("%s: txglom_ext=%d, txglom_bucket_size=%d\n", __FUNCTION__,\r
- conf->txglom_ext, conf->txglom_bucket_size);\r
- printf("%s: txglom_mode=%s\n", __FUNCTION__,\r
- conf->txglom_mode==SDPCM_TXGLOM_MDESC?"multi-desc":"copy");\r
- printf("%s: txglomsize=%d, deferred_tx_len=%d\n", __FUNCTION__,\r
- conf->txglomsize, conf->deferred_tx_len);\r
- printf("%s: txinrx_thres=%d, dhd_txminmax=%d\n", __FUNCTION__,\r
- conf->txinrx_thres, conf->dhd_txminmax);\r
- printf("%s: tx_max_offset=%d, txctl_tmo_fix=%d\n", __FUNCTION__,\r
- conf->tx_max_offset, conf->txctl_tmo_fix);\r
\r
}\r
#endif\r
rsdb_mode_cfg.config = (int)simple_strtol(pch, NULL, 0);\r
\r
if (pch) {\r
- dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "rsdb_mode", (char *)&rsdb_mode_cfg,\r
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "rsdb_mode", (char *)&rsdb_mode_cfg,\r
sizeof(rsdb_mode_cfg), TRUE);\r
- printf("%s: rsdb_mode %d\n", __FUNCTION__, rsdb_mode_cfg.config);\r
+ CONFIG_MSG("rsdb_mode %d\n", rsdb_mode_cfg.config);\r
}\r
\r
return 0;\r
struct dhd_conf *conf = dhd->conf;\r
char wl_preinit[] = "assoc_retry_max=30";\r
\r
- dhd_conf_set_intiovar(dhd, WLC_UP, "up", 0, 0, FALSE);\r
+ dhd_conf_set_intiovar(dhd, WLC_UP, "WLC_UP", 0, 0, FALSE);\r
dhd_conf_map_country_list(dhd, &conf->cspec);\r
dhd_conf_set_country(dhd, &conf->cspec);\r
dhd_conf_fix_country(dhd);\r
\r
dhd_conf_set_intiovar(dhd, WLC_SET_BAND, "WLC_SET_BAND", conf->band, 0, FALSE);\r
dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "bcn_timeout", conf->bcn_timeout, 0, FALSE);\r
- dhd_conf_set_intiovar(dhd, WLC_SET_PM, "PM", conf->pm, 0, FALSE);\r
+ dhd_conf_set_intiovar(dhd, WLC_SET_PM, "WLC_SET_PM", conf->pm, 0, FALSE);\r
dhd_conf_set_intiovar(dhd, WLC_SET_SRL, "WLC_SET_SRL", conf->srl, 0, FALSE);\r
dhd_conf_set_intiovar(dhd, WLC_SET_LRL, "WLC_SET_LRL", conf->lrl, 0, FALSE);\r
dhd_conf_set_bw_cap(dhd);\r
#endif /* defined(BCMPCIE) */\r
\r
#ifdef IDHCP\r
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "dhcpc_enable", conf->dhcpc_enable, 0, FALSE);\r
- if (dhd->conf->dhcpd_enable >= 0) {\r
- dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_addr",\r
+ dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "dhcpc_enable", conf->dhcpc_enable,\r
+ 0, FALSE);\r
+ if (conf->dhcpd_enable >= 0) {\r
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "dhcpd_ip_addr",\r
(char *)&conf->dhcpd_ip_addr, sizeof(conf->dhcpd_ip_addr), FALSE);\r
- dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_mask",\r
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "dhcpd_ip_mask",\r
(char *)&conf->dhcpd_ip_mask, sizeof(conf->dhcpd_ip_mask), FALSE);\r
- dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_start",\r
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "dhcpd_ip_start",\r
(char *)&conf->dhcpd_ip_start, sizeof(conf->dhcpd_ip_start), FALSE);\r
- dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_end",\r
+ dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "dhcpd_ip_end",\r
(char *)&conf->dhcpd_ip_end, sizeof(conf->dhcpd_ip_end), FALSE);\r
dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "dhcpd_enable",\r
conf->dhcpd_enable, 0, FALSE);\r
}\r
#endif\r
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "txbf", conf->txbf, 0, FALSE);\r
- dhd_conf_set_intiovar(dhd, WLC_SET_FAKEFRAG, "WLC_SET_FAKEFRAG", conf->frameburst, 0, FALSE);\r
+ dhd_conf_set_intiovar(dhd, WLC_SET_FAKEFRAG, "WLC_SET_FAKEFRAG",\r
+ conf->frameburst, 0, FALSE);\r
+\r
dhd_conf_set_wl_preinit(dhd, wl_preinit);\r
+#if defined(BCMSDIO)\r
+ {\r
+ char ampdu_mpdu[] = "ampdu_mpdu=16";\r
+ dhd_conf_set_wl_preinit(dhd, ampdu_mpdu);\r
+ }\r
+#endif\r
+ if (conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID ||\r
+ conf->chip == BCM4371_CHIP_ID || conf->chip == BCM4359_CHIP_ID ||\r
+ conf->chip == BCM43569_CHIP_ID) {\r
+ dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "txbf", 1, 0, FALSE);\r
+ }\r
+#if defined(WLEASYMESH)\r
+ {\r
+ char ezmesh[] = "mbss=1, rsdb_mode=0";\r
+ dhd_conf_set_wl_preinit(dhd, ezmesh);\r
+ }\r
+#endif\r
dhd_conf_set_wl_preinit(dhd, conf->wl_preinit);\r
\r
#ifndef WL_CFG80211\r
- dhd_conf_set_intiovar(dhd, WLC_UP, "up", 0, 0, FALSE);\r
+ dhd_conf_set_intiovar(dhd, WLC_UP, "WLC_UP", 0, 0, FALSE);\r
#endif\r
\r
}\r
struct dhd_conf *conf = dhd->conf;\r
int i;\r
\r
- CONFIG_TRACE(("%s: Enter\n", __FUNCTION__));\r
+ CONFIG_TRACE("Enter\n");\r
\r
#ifdef BCMSDIO\r
dhd_conf_free_mac_list(&conf->fw_by_mac);\r
memset(&conf->channels, 0, sizeof(wl_channel_list_t));\r
conf->roam_off = 1;\r
conf->roam_off_suspend = 1;\r
-#ifdef CUSTOM_ROAM_TRIGGER_SETTING\r
- conf->roam_trigger[0] = CUSTOM_ROAM_TRIGGER_SETTING;\r
-#else\r
conf->roam_trigger[0] = -65;\r
-#endif\r
conf->roam_trigger[1] = WLC_BAND_ALL;\r
conf->roam_scan_period[0] = 10;\r
conf->roam_scan_period[1] = WLC_BAND_ALL;\r
-#ifdef CUSTOM_ROAM_DELTA_SETTING\r
- conf->roam_delta[0] = CUSTOM_ROAM_DELTA_SETTING;\r
-#else\r
- conf->roam_delta[0] = 15;\r
-#endif\r
+ conf->roam_delta[0] = 10;\r
conf->roam_delta[1] = WLC_BAND_ALL;\r
-#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC\r
- conf->fullroamperiod = 60;\r
-#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */\r
- conf->fullroamperiod = 120;\r
-#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */\r
-#ifdef CUSTOM_KEEP_ALIVE_SETTING\r
- conf->keep_alive_period = CUSTOM_KEEP_ALIVE_SETTING;\r
-#else\r
- conf->keep_alive_period = 28000;\r
+ conf->fullroamperiod = 20;\r
+ conf->keep_alive_period = 30000;\r
+#ifdef ARP_OFFLOAD_SUPPORT\r
+ conf->garp = FALSE;\r
#endif\r
conf->force_wme_ac = 0;\r
memset(&conf->wme_sta, 0, sizeof(wme_param_t));\r
memset(&conf->wme_ap, 0, sizeof(wme_param_t));\r
- conf->phy_oclscdenable = -1;\r
#ifdef PKT_FILTER_SUPPORT\r
memset(&conf->pkt_filter_add, 0, sizeof(conf_pkt_filter_add_t));\r
memset(&conf->pkt_filter_del, 0, sizeof(conf_pkt_filter_del_t));\r
conf->srl = -1;\r
conf->lrl = -1;\r
conf->bcn_timeout = 16;\r
- conf->txbf = -1;\r
conf->disable_proptx = -1;\r
conf->dhd_poll = -1;\r
#ifdef BCMSDIO\r
conf->tx_max_offset = 0;\r
conf->txglomsize = SDPCM_DEFGLOM_SIZE;\r
conf->txctl_tmo_fix = 300;\r
- conf->txglom_mode = SDPCM_TXGLOM_CPY;\r
+ conf->txglom_mode = SDPCM_TXGLOM_MDESC;\r
conf->deferred_tx_len = 0;\r
conf->dhd_txminmax = 1;\r
conf->txinrx_thres = -1;\r
- conf->sd_f2_blocksize = 0;\r
+#if defined(SDIO_ISR_THREAD)\r
+ conf->intr_extn = FALSE;\r
+#endif\r
+#ifdef BCMSDIO_RXLIM_POST\r
+ conf->rxlim_en = TRUE;\r
+#endif\r
#if defined(HW_OOB)\r
conf->oob_enabled_later = FALSE;\r
#endif\r
- conf->orphan_move = 0;\r
#endif\r
#ifdef BCMPCIE\r
conf->bus_deepsleep_disable = 1;\r
conf->pm = -1;\r
conf->pm_in_suspend = -1;\r
conf->suspend_bcn_li_dtim = -1;\r
+#ifdef WL_EXT_WOWL\r
+ dhd_master_mode = TRUE;\r
+ conf->wowl = WL_WOWL_NET|WL_WOWL_DIS|WL_WOWL_BCN;\r
+ conf->insuspend = WOWL_IN_SUSPEND | NO_TXDATA_IN_SUSPEND;\r
+#else\r
conf->insuspend = 0;\r
+#endif\r
+ conf->suspended = FALSE;\r
#ifdef SUSPEND_EVENT\r
memset(&conf->resume_eventmask, 0, sizeof(conf->resume_eventmask));\r
+ memset(&conf->bssid_insuspend, 0, ETHER_ADDR_LEN);\r
+ conf->wlfc = FALSE;\r
+#endif\r
+#ifdef GET_CUSTOM_MAC_FROM_CONFIG\r
+ memset(&conf->hw_ether, 0, sizeof(conf->hw_ether));\r
#endif\r
#ifdef IDHCP\r
conf->dhcpc_enable = -1;\r
conf->dhcpd_enable = -1;\r
#endif\r
+ conf->orphan_move = 0;\r
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))\r
conf->tsq = 10;\r
#else\r
#endif\r
conf->pktprio8021x = -1;\r
conf->ctrl_resched = 2;\r
- conf->dhd_ioctl_timeout_msec = 0;\r
conf->in4way = NO_SCAN_IN4WAY | WAIT_DISCONNECTED;\r
- conf->max_wait_gc_time = 300;\r
#ifdef ISAM_PREINIT\r
memset(conf->isam_init, 0, sizeof(conf->isam_init));\r
memset(conf->isam_config, 0, sizeof(conf->isam_config));\r
}\r
#ifdef CUSTOMER_HW_AMLOGIC\r
dhd_slpauto = FALSE;\r
+#ifdef BCMSDIO\r
+ conf->txglom_mode = SDPCM_TXGLOM_CPY;\r
#endif\r
- if (conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID ||\r
+#endif\r
+#if defined(SDIO_ISR_THREAD)\r
+ if (conf->chip == BCM43012_CHIP_ID ||\r
+ conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID ||\r
+ conf->chip == BCM43454_CHIP_ID || conf->chip == BCM4345_CHIP_ID ||\r
+ conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID ||\r
+ conf->chip == BCM4345_CHIP_ID || conf->chip == BCM4371_CHIP_ID ||\r
+		conf->chip == BCM4359_CHIP_ID || conf->chip == BCM43751_CHIP_ID ||\r
+ conf->chip == BCM43752_CHIP_ID) {\r
+ conf->intr_extn = TRUE;\r
+ }\r
+#endif\r
+ if ((conf->chip == BCM43430_CHIP_ID && conf->chiprev == 2) ||\r
+ conf->chip == BCM43012_CHIP_ID ||\r
+ conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID ||\r
+ conf->chip == BCM43454_CHIP_ID || conf->chip == BCM4345_CHIP_ID ||\r
conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID ||\r
conf->chip == BCM4345_CHIP_ID || conf->chip == BCM4371_CHIP_ID ||\r
conf->chip == BCM43569_CHIP_ID || conf->chip == BCM4359_CHIP_ID ||\r
- conf->chip == BCM4362_CHIP_ID || conf->chip == BCM43751_CHIP_ID) {\r
+ conf->chip == BCM43751_CHIP_ID || conf->chip == BCM43752_CHIP_ID) {\r
#ifdef DHDTCPACK_SUPPRESS\r
#ifdef BCMSDIO\r
conf->tcpack_sup_mode = TCPACK_SUP_REPLACE;\r
dhd_rxbound = 128;\r
dhd_txbound = 64;\r
#endif\r
- conf->txbf = 1;\r
conf->frameburst = 1;\r
#ifdef BCMSDIO\r
conf->dhd_txminmax = -1;\r
conf->txinrx_thres = 128;\r
- conf->sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE;\r
+#endif\r
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))\r
conf->orphan_move = 1;\r
#else\r
conf->orphan_move = 0;\r
-#endif\r
#endif\r
}\r
\r
{\r
dhd_conf_t *conf;\r
\r
- CONFIG_TRACE(("%s: Enter\n", __FUNCTION__));\r
+ CONFIG_TRACE("Enter\n");\r
\r
if (dhd->conf != NULL) {\r
- printf("%s: config is attached before!\n", __FUNCTION__);\r
+ CONFIG_MSG("config is attached before!\n");\r
return 0;\r
}\r
/* Allocate private bus interface state */\r
if (!(conf = MALLOC(dhd->osh, sizeof(dhd_conf_t)))) {\r
- CONFIG_ERROR(("%s: MALLOC failed\n", __FUNCTION__));\r
+ CONFIG_ERROR("MALLOC failed\n");\r
goto fail;\r
}\r
memset(conf, 0, sizeof(dhd_conf_t));\r
void\r
dhd_conf_detach(dhd_pub_t *dhd)\r
{\r
- CONFIG_TRACE(("%s: Enter\n", __FUNCTION__));\r
+ CONFIG_TRACE("Enter\n");\r
\r
if (dhd->conf) {\r
#ifdef BCMSDIO\r
#define CONFIG_COUNTRY_LIST_SIZE 100
typedef struct conf_country_list {
- uint32 count;
wl_country_t *cspec[CONFIG_COUNTRY_LIST_SIZE];
} conf_country_list_t;
};
enum in_suspend_flags {
- NO_EVENT_IN_SUSPEND = (1 << (0)),
+ NO_EVENT_IN_SUSPEND = (1 << (0)),
NO_TXDATA_IN_SUSPEND = (1 << (1)),
- AP_DOWN_IN_SUSPEND = (1 << (2)),
- ROAM_OFFLOAD_IN_SUSPEND = (1 << (3)),
+ NO_TXCTL_IN_SUSPEND = (1 << (2)),
+ AP_DOWN_IN_SUSPEND = (1 << (3)),
+ ROAM_OFFLOAD_IN_SUSPEND = (1 << (4)),
+ AP_FILTER_IN_SUSPEND = (1 << (5)),
+ WOWL_IN_SUSPEND = (1 << (6)),
+ ALL_IN_SUSPEND = 0xFFFFFFFF,
};
enum eapol_status {
EAPOL_STATUS_NONE = 0,
- EAPOL_STATUS_WPS_REQID,
- EAPOL_STATUS_WPS_RSPID,
- EAPOL_STATUS_WPS_WSC_START,
- EAPOL_STATUS_WPS_M1,
- EAPOL_STATUS_WPS_M2,
- EAPOL_STATUS_WPS_M3,
- EAPOL_STATUS_WPS_M4,
- EAPOL_STATUS_WPS_M5,
- EAPOL_STATUS_WPS_M6,
- EAPOL_STATUS_WPS_M7,
- EAPOL_STATUS_WPS_M8,
- EAPOL_STATUS_WPS_DONE,
- EAPOL_STATUS_WPA_START,
- EAPOL_STATUS_WPA_M1,
- EAPOL_STATUS_WPA_M2,
- EAPOL_STATUS_WPA_M3,
- EAPOL_STATUS_WPA_M4,
- EAPOL_STATUS_WPA_END
+ EAPOL_STATUS_REQID = 1,
+ EAPOL_STATUS_RSPID = 2,
+ EAPOL_STATUS_WSC_START = 3,
+ EAPOL_STATUS_WPS_M1 = 4,
+ EAPOL_STATUS_WPS_M2 = 5,
+ EAPOL_STATUS_WPS_M3 = 6,
+ EAPOL_STATUS_WPS_M4 = 7,
+ EAPOL_STATUS_WPS_M5 = 8,
+ EAPOL_STATUS_WPS_M6 = 9,
+ EAPOL_STATUS_WPS_M7 = 10,
+ EAPOL_STATUS_WPS_M8 = 11,
+ EAPOL_STATUS_WSC_DONE = 12,
+ EAPOL_STATUS_4WAY_START = 13,
+ EAPOL_STATUS_4WAY_M1 = 14,
+ EAPOL_STATUS_4WAY_M2 = 15,
+ EAPOL_STATUS_4WAY_M3 = 16,
+ EAPOL_STATUS_4WAY_M4 = 17,
+ EAPOL_STATUS_GROUPKEY_M1 = 18,
+ EAPOL_STATUS_GROUPKEY_M2 = 19,
+ EAPOL_STATUS_4WAY_DONE = 20
};
typedef struct dhd_conf {
uint chip;
uint chiprev;
int fw_type;
+#ifdef BCMSDIO
wl_mac_list_ctrl_t fw_by_mac;
wl_mac_list_ctrl_t nv_by_mac;
+#endif
wl_chip_nv_path_list_ctrl_t nv_by_chip;
conf_country_list_t country_list;
int band;
int roam_delta[2];
int fullroamperiod;
uint keep_alive_period;
+#ifdef ARP_OFFLOAD_SUPPORT
+ bool garp;
+#endif
int force_wme_ac;
wme_param_t wme_sta;
wme_param_t wme_ap;
- int phy_oclscdenable;
#ifdef PKT_FILTER_SUPPORT
conf_pkt_filter_add_t pkt_filter_add;
conf_pkt_filter_del_t pkt_filter_del;
int srl;
int lrl;
uint bcn_timeout;
- int txbf;
int disable_proptx;
int dhd_poll;
#ifdef BCMSDIO
int txglom_bucket_size;
int txinrx_thres;
int dhd_txminmax; // -1=DATABUFCNT(bus)
- uint sd_f2_blocksize;
bool oob_enabled_later;
- int orphan_move;
+#if defined(SDIO_ISR_THREAD)
+ bool intr_extn;
+#endif
+#ifdef BCMSDIO_RXLIM_POST
+ bool rxlim_en;
+#endif
#endif
#ifdef BCMPCIE
int bus_deepsleep_disable;
bool suspended;
#ifdef SUSPEND_EVENT
char resume_eventmask[WL_EVENTING_MASK_LEN];
+ struct ether_addr bssid_insuspend;
+ bool wlfc;
#endif
#ifdef IDHCP
int dhcpc_enable;
char isam_enable[50];
#endif
int ctrl_resched;
- int dhd_ioctl_timeout_msec;
struct mchan_params mchan[MCHAN_MAX_NUM];
char *wl_preinit;
int tsq;
+ int orphan_move;
uint eapol_status;
uint in4way;
- uint max_wait_gc_time;
+#ifdef WL_EXT_WOWL
+ uint wowl;
+#endif
+#ifdef GET_CUSTOM_MAC_FROM_CONFIG
+ char hw_ether[62];
+#endif
} dhd_conf_t;
#ifdef BCMSDIO
int dhd_conf_get_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih, uint8 *mac);
-void dhd_conf_set_fw_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih, char *fw_path);
-void dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih, char *nv_path);
#if defined(HW_OOB) || defined(FORCE_WOWLAN)
void dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, struct si_pub *sih);
#endif
void dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable);
int dhd_conf_set_blksize(bcmsdh_info_t *sdh);
#endif
-void dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path);
-void dhd_conf_set_clm_name_by_chip(dhd_pub_t *dhd, char *clm_path);
-void dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path);
-void dhd_conf_set_path(dhd_pub_t *dhd, char *dst_name, char *dst_path, char *src_path);
-#ifdef CONFIG_PATH_AUTO_SELECT
-void dhd_conf_set_conf_name_by_chip(dhd_pub_t *dhd, char *conf_path);
-#endif
-int dhd_conf_set_intiovar(dhd_pub_t *dhd, uint cmd, char *name, int val, int def, bool down);
-int dhd_conf_set_bufiovar(dhd_pub_t *dhd, uint cmd, char *name, char *buf, int len, bool down);
-uint dhd_conf_get_band(dhd_pub_t *dhd);
+void dhd_conf_set_path_params(dhd_pub_t *dhd, void *sdh, void *sih,
+ char *fw_path, char *nv_path);
+int dhd_conf_set_intiovar(dhd_pub_t *dhd, uint cmd, char *name, int val,
+ int def, bool down);
+int dhd_conf_get_band(dhd_pub_t *dhd);
int dhd_conf_set_country(dhd_pub_t *dhd, wl_country_t *cspec);
int dhd_conf_get_country(dhd_pub_t *dhd, wl_country_t *cspec);
int dhd_conf_map_country_list(dhd_pub_t *dhd, wl_country_t *cspec);
int dhd_conf_fix_country(dhd_pub_t *dhd);
bool dhd_conf_match_channel(dhd_pub_t *dhd, uint32 channel);
-void dhd_conf_set_wme(dhd_pub_t *dhd, int mode);
+void dhd_conf_set_wme(dhd_pub_t *dhd, int ifidx, int mode);
void dhd_conf_set_mchan_bw(dhd_pub_t *dhd, int go, int source);
void dhd_conf_add_pkt_filter(dhd_pub_t *dhd);
bool dhd_conf_del_pkt_filter(dhd_pub_t *dhd, uint32 id);
uint dhd_conf_get_chip(void *context);
uint dhd_conf_get_chiprev(void *context);
int dhd_conf_get_pm(dhd_pub_t *dhd);
-
+int dhd_conf_check_hostsleep(dhd_pub_t *dhd, int cmd, void *buf, int len,
+ int *hostsleep_set, int *hostsleep_val, int *ret);
+void dhd_conf_get_hostsleep(dhd_pub_t *dhd,
+ int hostsleep_set, int hostsleep_val, int ret);
+int dhd_conf_mkeep_alive(dhd_pub_t *dhd, int ifidx, int id, int period,
+ char *packet, bool bcast);
+#ifdef ARP_OFFLOAD_SUPPORT
+void dhd_conf_set_garp(dhd_pub_t *dhd, int ifidx, uint32 ipa, bool enable);
+#endif
#ifdef PROP_TXSTATUS
int dhd_conf_get_disable_proptx(dhd_pub_t *dhd);
#endif
-uint dhd_conf_get_insuspend(dhd_pub_t *dhd);
+uint dhd_conf_get_insuspend(dhd_pub_t *dhd, uint mask);
int dhd_conf_set_suspend_resume(dhd_pub_t *dhd, int suspend);
void dhd_conf_postinit_ioctls(dhd_pub_t *dhd);
int dhd_conf_preinit(dhd_pub_t *dhd);
int dhd_conf_attach(dhd_pub_t *dhd);
void dhd_conf_detach(dhd_pub_t *dhd);
void *dhd_get_pub(struct net_device *dev);
-void *dhd_get_conf(struct net_device *dev);
+int wl_pattern_atoh(char *src, char *dst);
#endif /* _dhd_config_ */
/*
* Customer code to add GPIO control during WLAN start/stop
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Platform Dependent file for Hikey
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
}
#endif /* BCMSDIO */
+#if defined(CONFIG_BCMDHD_OOB_HOST_WAKE) && defined(CONFIG_BCMDHD_GET_OOB_STATE)
+/*
+ * dhd_get_wlan_oob_gpio: read the current level of the WLAN host-wake
+ * (out-of-band interrupt) GPIO.
+ * Returns the GPIO value when wlan_host_wake_up is a valid GPIO number,
+ * -1 otherwise. Exported so other modules can query the OOB line state.
+ */
+int
+dhd_get_wlan_oob_gpio(void)
+{
+	return gpio_is_valid(wlan_host_wake_up) ?
+		gpio_get_value(wlan_host_wake_up) : -1;
+}
+EXPORT_SYMBOL(dhd_get_wlan_oob_gpio);
+#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE && CONFIG_BCMDHD_GET_OOB_STATE */
+
struct resource dhd_wlan_resources = {
.name = "bcmdhd_wlan_irq",
.start = 0, /* Dummy */
/*
* Platform Dependent file for usage of Preallocted Memory
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_custom_memprealloc.c 744015 2018-01-31 05:51:10Z $
+ * $Id: dhd_custom_memprealloc.c 805764 2019-02-20 08:46:57Z $
*/
#include <linux/device.h>
int i;
int j;
+#if !defined(CONFIG_BCMDHD_PCIE)
for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) {
wlan_static_skb[i] = __dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE, GFP_KERNEL);
if (!wlan_static_skb[i]) {
goto err_skb_alloc;
}
}
+#endif /* !CONFIG_BCMDHD_PCIE */
for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) {
wlan_static_skb[i] = __dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE, GFP_KERNEL);
/*
* Debug/trace/assert driver definitions for Dongle Host Driver.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_dbg.h 759128 2018-04-24 03:48:17Z $
+ * $Id: dhd_dbg.h 798329 2019-01-08 05:40:39Z $
*/
#ifndef _dhd_dbg_
#define _dhd_dbg_
#ifdef DHD_LOG_DUMP
-typedef enum {
- DLD_BUF_TYPE_GENERAL = 0,
- DLD_BUF_TYPE_PRESERVE,
- DLD_BUF_TYPE_SPECIAL,
- DLD_BUF_TYPE_ECNTRS,
- DLD_BUF_TYPE_FILTER,
- DLD_BUF_TYPE_ALL
-} log_dump_type_t;
extern char *dhd_log_dump_get_timestamp(void);
extern void dhd_log_dump_write(int type, char *binary_data,
int binary_len, const char *fmt, ...);
#endif /* !_DHD_LOG_DUMP_DEFINITIONS_ */
#define CONCISE_DUMP_BUFLEN 16 * 1024
#define ECNTRS_LOG_HDR "\n-------------------- Ecounters log --------------------------\n"
+#ifdef DHD_STATUS_LOGGING
+#define STATUS_LOG_HDR "\n-------------------- Status log -----------------------\n"
+#endif /* DHD_STATUS_LOGGING */
+#define RTT_LOG_HDR "\n-------------------- RTT log --------------------------\n"
#define COOKIE_LOG_HDR "\n-------------------- Cookie List ----------------------------\n"
#endif /* DHD_LOG_DUMP */
do { \
if (dhd_msg_level & DHD_ERROR_VAL) { \
printf args; \
- DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
+ DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \
DHD_LOG_DUMP_WRITE args; \
} \
} while (0)
#define DHD_PKT_MON(args) do {if (dhd_msg_level & DHD_PKT_MON_VAL) printf args;} while (0)
#if defined(DHD_LOG_DUMP)
+#if defined(DHD_LOG_PRINT_RATE_LIMIT)
+#define DHD_FWLOG(args) \
+ do { \
+ if (dhd_msg_level & DHD_FWLOG_VAL) { \
+ if (!log_print_threshold) \
+ printf args; \
+ DHD_LOG_DUMP_WRITE args; \
+ } \
+ } while (0)
+#else
#define DHD_FWLOG(args) \
do { \
if (dhd_msg_level & DHD_FWLOG_VAL) { \
DHD_LOG_DUMP_WRITE args; \
} \
} while (0)
+#endif // endif
#else /* DHD_LOG_DUMP */
#define DHD_FWLOG(args) do {if (dhd_msg_level & DHD_FWLOG_VAL) printf args;} while (0)
#endif /* DHD_LOG_DUMP */
#define DHD_NONE(args)
extern int dhd_msg_level;
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+extern int log_print_threshold;
+#endif /* DHD_LOG_PRINT_RATE_LIMIT */
+
+#define DHD_RTT_MEM(args) DHD_LOG_MEM(args)
+#define DHD_RTT_ERR(args) DHD_ERROR(args)
/* Defines msg bits */
#include <dhdioctl.h>
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_dbg_ring.c 769272 2018-06-25 09:23:27Z $
+ * $Id: dhd_dbg_ring.c 792099 2018-12-03 15:45:56Z $
*/
#include <typedefs.h>
#include <osl.h>
int
dhd_dbg_ring_init(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring, uint16 id, uint8 *name,
- uint32 ring_sz, void *allocd_buf)
+ uint32 ring_sz, void *allocd_buf, bool pull_inactive)
{
void *buf;
unsigned long flags = 0;
ring->state = RING_SUSPEND;
ring->rem_len = 0;
ring->sched_pull = TRUE;
+ ring->pull_inactive = pull_inactive;
DHD_DBG_RING_UNLOCK(ring->lock, flags);
return BCME_OK;
ring->rp);
/* check bounds before incrementing read ptr */
if (ring->rp + ENTRY_LENGTH(r_entry) >= ring->ring_size) {
- DHD_ERROR(("%s: RING%d[%s] rp points out of boundary,"
+ DHD_ERROR(("%s: RING%d[%s] rp points out of boundary, "
"ring->wp=%u, ring->rp=%u, ring->ring_size=%d\n",
__FUNCTION__, ring->id, ring->name, ring->wp,
ring->rp, ring->ring_size));
return BCME_OK;
}
+/*
+ * This function holds ring->lock, so callers of this function
+ * should not hold ring->lock.
+ */
int
-dhd_dbg_ring_pull_single(dhd_dbg_ring_t *ring, void *data, uint32 buf_len,
- bool strip_header)
+dhd_dbg_ring_pull_single(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, bool strip_header)
{
dhd_dbg_ring_entry_t *r_entry = NULL;
uint32 rlen = 0;
char *buf = NULL;
+ unsigned long flags;
if (!ring || !data || buf_len <= 0) {
return 0;
}
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+
/* pull from ring is allowed for inactive (suspended) ring
* in case of ecounters only, this is because, for ecounters
* when a trap occurs the ring is suspended and data is then
* pulled to dump it to a file. For other rings if ring is
* not in active state return without processing (as before)
*/
-#ifndef EWP_ECNTRS_LOGGING
- if (ring->state != RING_ACTIVE) {
- return 0;
+ if (!ring->pull_inactive && (ring->state != RING_ACTIVE)) {
+ goto exit;
}
-#endif /* EWP_ECNTRS_LOGGING */
if (ring->rp == ring->wp) {
- return 0;
+ goto exit;
}
DHD_DBGIF(("%s: RING%d[%s] buf_len=%u, wp=%d, rp=%d, ring_start=0x%p; ring_size=%u\n",
DHD_ERROR(("%s: entry len %d is out of boundary of ring size %d,"
" current ring %d[%s] - rp=%d\n", __FUNCTION__, rlen,
ring->ring_size, ring->id, ring->name, ring->rp));
- return 0;
+ rlen = 0;
+ goto exit;
}
if (strip_header) {
__FUNCTION__, ring->id, ring->name, ring->ring_size,
ring->wp, ring->rp));
ASSERT(0);
- return 0;
+ rlen = 0;
+ goto exit;
}
memcpy(data, buf, rlen);
ring->rp += ENTRY_LENGTH(r_entry);
/* don't pass wp but skip padding if there is one */
if (ring->rp != ring->wp &&
- ring->tail_padded && ((ring->rp + ring->rem_len) == ring->ring_size)) {
+ ring->tail_padded && ((ring->rp + ring->rem_len) >= ring->ring_size)) {
DHD_DBGIF(("%s: RING%d[%s] Found padding, rp=%d, wp=%d\n",
__FUNCTION__, ring->id, ring->name, ring->rp, ring->wp));
ring->rp = 0;
" rp=%d, ring_size=%d\n", __FUNCTION__, ring->id,
ring->name, ring->rp, ring->ring_size));
ASSERT(0);
- return 0;
+ rlen = 0;
+ goto exit;
}
ring->stat.read_bytes += ENTRY_LENGTH(r_entry);
DHD_DBGIF(("%s RING%d[%s]read_bytes %d, wp=%d, rp=%d\n", __FUNCTION__,
ring->id, ring->name, ring->stat.read_bytes, ring->wp, ring->rp));
+exit:
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+
return rlen;
}
dhd_dbg_ring_pull(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, bool strip_hdr)
{
int32 r_len, total_r_len = 0;
+ unsigned long flags;
if (!ring || !data)
return 0;
- if (ring->state != RING_ACTIVE)
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+ if (!ring->pull_inactive && (ring->state != RING_ACTIVE)) {
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
return 0;
+ }
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
while (buf_len > 0) {
r_len = dhd_dbg_ring_pull_single(ring, data, buf_len, strip_hdr);
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_dbg_ring.h 754183 2018-03-26 11:15:16Z $
+ * $Id: dhd_dbg_ring.h 795094 2018-12-17 08:56:58Z $
*/
#ifndef __DHD_DBG_RING_H__
uint32 ring_size; /* numbers of item in ring */
uint32 wp; /* write pointer */
uint32 rp; /* read pointer */
+ uint32 rp_tmp; /* tmp read pointer */
uint32 log_level; /* log_level */
uint32 threshold; /* threshold bytes */
void * ring_buf; /* pointer of actually ring buffer */
bool tail_padded; /* writer does not have enough space */
uint32 rem_len; /* number of bytes from wp_pad to end */
bool sched_pull; /* schedule reader immediately */
+ bool pull_inactive; /* pull contents from ring even if it is inactive */
} dhd_dbg_ring_t;
#define DBGRING_FLUSH_THRESHOLD(ring) (ring->ring_size / 3)
#define ENTRY_LENGTH(hdr) ((hdr)->len + DBG_RING_ENTRY_SIZE)
#define PAYLOAD_MAX_LEN 65535
#define PAYLOAD_ECNTR_MAX_LEN 1648u
+#define PAYLOAD_RTT_MAX_LEN 1648u
#define PENDING_LEN_MAX 0xFFFFFFFF
#define DBG_RING_STATUS_SIZE (sizeof(dhd_dbg_ring_status_t))
typedef void (*os_pullreq_t)(void *os_priv, const int ring_id);
int dhd_dbg_ring_init(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring, uint16 id, uint8 *name,
- uint32 ring_sz, void *allocd_buf);
+ uint32 ring_sz, void *allocd_buf, bool pull_inactive);
void dhd_dbg_ring_deinit(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring);
int dhd_dbg_ring_push(dhd_dbg_ring_t *ring, dhd_dbg_ring_entry_t *hdr, void *data);
int dhd_dbg_ring_pull(dhd_dbg_ring_t *ring, void *data, uint32 buf_len,
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_debug.c 771435 2018-07-10 05:35:24Z $
+ * $Id: dhd_debug.c 823976 2019-06-06 11:39:07Z $
*/
#include <typedefs.h>
};
#define TRACE_LOG_MAGIC_NUMBER 0xEAE47C06
+void print_roam_enhanced_log(prcd_event_log_hdr_t *plog_hdr);
int
dhd_dbg_push_to_ring(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_entry_t *hdr, void *data)
{
return ret;
}
+/*
+ * dhd_dbg_get_ring_from_ring_id: look up the dhd_dbg_ring_t object for a
+ * debug ring id.
+ * Returns NULL when debugability state is not attached (dhdp/dhdp->dbg
+ * missing) or when ring_id fails the VALID_RING() range check.
+ */
+dhd_dbg_ring_t *
+dhd_dbg_get_ring_from_ring_id(dhd_pub_t *dhdp, int ring_id)
+{
+	if (!dhdp || !dhdp->dbg) {
+		return NULL;
+	}
+
+	if (!VALID_RING(ring_id)) {
+		DHD_ERROR(("%s : invalid ring_id : %d\n", __FUNCTION__, ring_id));
+		return NULL;
+	}
+
+	return &dhdp->dbg->dbg_rings[ring_id];
+}
+
int
dhd_dbg_pull_single_from_ring(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len,
bool strip_header)
}
}
+/*
+ * check_valid_non_string_format: given a pointer positioned just past a
+ * '%' in a printf-style format string, report whether the conversion that
+ * follows is one of the supported non-string format types
+ * (d i x X c p u f F e E g G o a A n) and belongs to the current '%'
+ * rather than a later one. Length/width modifier fields are deliberately
+ * not validated. Returns TRUE for a recognized non-string conversion.
+ */
+bool
+check_valid_non_string_format(char *curr_ptr)
+{
+	char *next_ptr;
+	char *next_fmt_stptr;
+	/* whitelist of accepted conversion characters */
+	char valid_fmt_types[17] = {'d', 'i', 'x', 'X', 'c', 'p', 'u',
+		'f', 'F', 'e', 'E', 'g', 'G', 'o',
+		'a', 'A', 'n'};
+	int i;
+	bool valid = FALSE;
+
+	/* Check for next % in the fmt str */
+	next_fmt_stptr = bcmstrstr(curr_ptr, "%");
+
+	for (next_ptr = curr_ptr; *next_ptr != '\0'; next_ptr++) {
+		for (i = 0; i < (int)((sizeof(valid_fmt_types))/sizeof(valid_fmt_types[0])); i++) {
+			if (*next_ptr == valid_fmt_types[i]) {
+				/* Check whether format type found corresponds to current %
+				 * and not the next one, if exists.
+				 */
+				if ((next_fmt_stptr == NULL) ||
+					(next_fmt_stptr && (next_ptr < next_fmt_stptr))) {
+					/* Not validating for length/width fields in
+					 * format specifier.
+					 */
+					valid = TRUE;
+				}
+				goto done;
+			}
+		}
+	}
+
+done:
+	return valid;
+}
+
#define MAX_NO_OF_ARG 16
-#define FMTSTR_SIZE 132
-#define ROMSTR_SIZE 200
+#define FMTSTR_SIZE 200
+#define ROMSTR_SIZE 268
#define SIZE_LOC_STR 50
#define LOG_PRINT_CNT_MAX 16u
#define EL_PARSE_VER "V02"
#define EL_MSEC_PER_SEC 1000
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+#define MAX_LOG_PRINT_COUNT 100u
+#define LOG_PRINT_THRESH (1u * USEC_PER_SEC)
+#endif // endif
bool
dhd_dbg_process_event_log_hdr(event_log_hdr_t *log_hdr, prcd_event_log_hdr_t *prcd_log_hdr)
*
*/
prcd_log_hdr->armcycle = prcd_log_hdr->ext_event_log_hdr ?
- *(uint32 *)log_hdr - EVENT_TAG_TIMESTAMP_EXT_OFFSET :
- *(uint32 *)log_hdr - EVENT_TAG_TIMESTAMP_OFFSET;
+ *(uint32 *)(log_hdr - EVENT_TAG_TIMESTAMP_EXT_OFFSET) :
+ *(uint32 *)(log_hdr - EVENT_TAG_TIMESTAMP_OFFSET);
/* update event log data pointer address */
prcd_log_hdr->log_ptr =
u_arg arg[MAX_NO_OF_ARG] = {{0}};
char *c_ptr = NULL;
struct bcmstrbuf b;
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+ static int log_print_count = 0;
+ static uint64 ts0 = 0;
+ uint64 ts1 = 0;
+#endif /* DHD_LOG_PRINT_RATE_LIMIT */
BCM_REFERENCE(arg);
- /* print the message out in a logprint */
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+ if (!ts0)
+ ts0 = OSL_SYSUPTIME_US();
+
+ ts1 = OSL_SYSUPTIME_US();
+
+ if (((ts1 - ts0) <= LOG_PRINT_THRESH) && (log_print_count >= MAX_LOG_PRINT_COUNT)) {
+ log_print_threshold = 1;
+ ts0 = 0;
+ log_print_count = 0;
+ DHD_ERROR(("%s: Log print water mark is reached,"
+ " console logs are dumped only to debug_dump file\n", __FUNCTION__));
+ } else if ((ts1 - ts0) > LOG_PRINT_THRESH) {
+ log_print_threshold = 0;
+ ts0 = 0;
+ log_print_count = 0;
+ }
+
+#endif /* DHD_LOG_PRINT_RATE_LIMIT */
+ /* print the message out in a logprint. Logprint expects raw format number */
if (!(raw_event->fmts)) {
if (dhdp->dbg) {
log_level = dhdp->dbg->dbg_rings[FW_VERBOSE_RING_ID].log_level;
(uint32)(log_ptr[plog_hdr->count - 1] % EL_MSEC_PER_SEC),
plog_hdr->tag,
plog_hdr->count,
- plog_hdr->fmt_num));
+ plog_hdr->fmt_num_raw));
for (count = 0; count < (plog_hdr->count - 1); count++) {
if (count && (count % LOG_PRINT_CNT_MAX == 0)) {
EL_PARSE_VER, logset, block,
plog_hdr->tag,
plog_hdr->count,
- plog_hdr->fmt_num);
+ plog_hdr->fmt_num_raw);
for (count = 0; count < (plog_hdr->count - 1); count++) {
bcm_bprintf(&b, " %x", log_ptr[count]);
}
/* ensure preserve fw logs go to debug_dump only in case of customer4 */
- if (logset < WL_MAX_PRESERVE_BUFFER &&
+ if (logset < dhdp->event_log_max_sets &&
((0x01u << logset) & dhdp->logset_prsrv_mask)) {
DHD_PRSRV_MEM(("%s\n", b.origbuf));
} else {
- DHD_EVENT(("%s\n", b.origbuf));
+ DHD_FWLOG(("%s\n", b.origbuf));
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+ log_print_count++;
+#endif /* DHD_LOG_PRINT_RATE_LIMIT */
}
}
return;
"(s)0x%x", log_ptr[count]);
arg[count].addr = str_buf[count];
}
- } else {
- /* Other than string */
+ } else if (check_valid_non_string_format(c_ptr)) {
+ /* Other than string format */
arg[count].val = log_ptr[count];
+ } else {
+ *(c_ptr - 1) = '\0';
+ break;
}
}
}
/* ensure preserve fw logs go to debug_dump only in case of customer4 */
- if (logset < WL_MAX_PRESERVE_BUFFER &&
+ if (logset < dhdp->event_log_max_sets &&
((0x01u << logset) & dhdp->logset_prsrv_mask)) {
DHD_PRSRV_MEM((fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3],
arg[4], arg[5], arg[6], arg[7], arg[8], arg[9], arg[10],
arg[11], arg[12], arg[13], arg[14], arg[15]));
} else {
- DHD_EVENT((fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3],
+ DHD_FWLOG((fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3],
arg[4], arg[5], arg[6], arg[7], arg[8], arg[9], arg[10],
arg[11], arg[12], arg[13], arg[14], arg[15]));
+#ifdef DHD_LOG_PRINT_RATE_LIMIT
+ log_print_count++;
+#endif /* DHD_LOG_PRINT_RATE_LIMIT */
}
exit:
struct tracelog_header *logentry_header;
uint ring_data_len = 0;
bool ecntr_pushed = FALSE;
+ bool rtt_pushed = FALSE;
+ bool dll_inited = FALSE;
uint32 logset = 0;
uint16 block = 0;
+ bool event_log_max_sets_queried;
+ uint32 event_log_max_sets;
uint min_expected_len = 0;
-#if defined(EWP_ECNTRS_LOGGING) && defined(DHD_LOG_DUMP)
uint16 len_chk = 0;
-#endif /* EWP_ECNTRS_LOGGING && DHD_LOG_DUMP */
BCM_REFERENCE(ecntr_pushed);
+ BCM_REFERENCE(rtt_pushed);
+ BCM_REFERENCE(len_chk);
+
+ /* store event_logset_queried and event_log_max_sets in local variables
+ * to avoid race conditions as they were set from different contexts(preinit)
+ */
+ event_log_max_sets_queried = dhdp->event_log_max_sets_queried;
+ /* Make sure queried is read first with wmb and then max_sets,
+ * as it is done in reverse order during preinit ioctls.
+ */
+ OSL_SMP_WMB();
+ event_log_max_sets = dhdp->event_log_max_sets;
if (msgtrace_hdr_present)
min_expected_len = (MSGTRACE_HDRLEN + EVENT_LOG_BLOCK_LEN);
hdr = (msgtrace_hdr_t *)event_data;
data = (char *)event_data + MSGTRACE_HDRLEN;
datalen -= MSGTRACE_HDRLEN;
- msgtrace_seqnum = hdr->seqnum;
+ msgtrace_seqnum = ntoh32(hdr->seqnum);
} else {
data = (char *)event_data;
}
- if (dhd_dbg_msgtrace_seqchk(&seqnum_prev, ntoh32(msgtrace_seqnum)))
+ if (dhd_dbg_msgtrace_seqchk(&seqnum_prev, msgtrace_seqnum))
return;
/* Save the whole message to event log ring */
logset = ltoh32(*((uint32 *)(data + 4)));
- if (logset >= NUM_EVENT_LOG_SETS) {
- DHD_ERROR(("%s logset: %d max: %d out of range, collect socram\n",
- __FUNCTION__, logset, NUM_EVENT_LOG_SETS));
+ if (logset >= event_log_max_sets) {
+ DHD_ERROR(("%s logset: %d max: %d out of range queried: %d\n",
+ __FUNCTION__, logset, event_log_max_sets, event_log_max_sets_queried));
#ifdef DHD_FW_COREDUMP
- dhdp->memdump_type = DUMP_TYPE_LOGSET_BEYOND_RANGE;
- dhd_bus_mem_dump(dhdp);
+ if (event_log_max_sets_queried) {
+ DHD_ERROR(("%s: collect socram for DUMP_TYPE_LOGSET_BEYOND_RANGE\n",
+ __FUNCTION__));
+ dhdp->memdump_type = DUMP_TYPE_LOGSET_BEYOND_RANGE;
+ dhd_bus_mem_dump(dhdp);
+ }
#endif /* DHD_FW_COREDUMP */
}
* data log_hdr
*/
dll_init(&list_head);
+ dll_inited = TRUE;
while (datalen > log_hdr_len) {
log_hdr = (event_log_hdr_t *)(data + datalen - log_hdr_len);
if (prcd_log_hdr.count == 0) {
break;
}
+ /* Both tag_stats and proxd are binary payloads so skip
+ * argument count check for these.
+ */
if ((prcd_log_hdr.tag != EVENT_LOG_TAG_STATS) &&
+ (prcd_log_hdr.tag != EVENT_LOG_TAG_PROXD_SAMPLE_COLLECT) &&
+ (prcd_log_hdr.tag != EVENT_LOG_TAG_ROAM_ENHANCED_LOG) &&
(prcd_log_hdr.count > MAX_NO_OF_ARG)) {
break;
}
log_item->prcd_log_hdr.tag = prcd_log_hdr.tag;
log_item->prcd_log_hdr.count = prcd_log_hdr.count;
log_item->prcd_log_hdr.fmt_num = prcd_log_hdr.fmt_num;
+ log_item->prcd_log_hdr.fmt_num_raw = prcd_log_hdr.fmt_num_raw;
log_item->prcd_log_hdr.armcycle = prcd_log_hdr.armcycle;
log_item->prcd_log_hdr.log_ptr = prcd_log_hdr.log_ptr;
log_item->prcd_log_hdr.payload_len = prcd_log_hdr.payload_len;
plog_hdr = &log_item->prcd_log_hdr;
#if defined(EWP_ECNTRS_LOGGING) && defined(DHD_LOG_DUMP)
+	/* Ecounter tag can be time_data or log_stats+binary payload */
if ((plog_hdr->tag == EVENT_LOG_TAG_ECOUNTERS_TIME_DATA) ||
((plog_hdr->tag == EVENT_LOG_TAG_STATS) &&
(plog_hdr->binary_payload))) {
}
#endif /* EWP_ECNTRS_LOGGING && DHD_LOG_DUMP */
+ if (plog_hdr->tag == EVENT_LOG_TAG_ROAM_ENHANCED_LOG) {
+ print_roam_enhanced_log(plog_hdr);
+ msg_processed = TRUE;
+ }
+#if defined(EWP_RTT_LOGGING) && defined(DHD_LOG_DUMP)
+ if ((plog_hdr->tag == EVENT_LOG_TAG_PROXD_SAMPLE_COLLECT) &&
+ plog_hdr->binary_payload) {
+ if (!rtt_pushed && dhd_log_dump_rtt_enabled()) {
+ /*
+ * check msg hdr len before pushing.
+ * FW msg_hdr.len includes length of event log hdr,
+ * logentry header and payload.
+ */
+ len_chk = (sizeof(*logentry_header) + sizeof(*log_hdr) +
+ PAYLOAD_RTT_MAX_LEN);
+ /* account extended event log header(extended_event_log_hdr) */
+ if (plog_hdr->ext_event_log_hdr) {
+ len_chk += sizeof(*log_hdr);
+ }
+ if (msg_hdr.len > len_chk) {
+ DHD_ERROR(("%s: EVENT_LOG_VALIDATION_FAILS: "
+ "msg_hdr.len=%u, max allowed for ecntrs=%u\n",
+ __FUNCTION__, msg_hdr.len, len_chk));
+ goto exit;
+ }
+ dhd_dbg_ring_push(dhdp->rtt_dbg_ring, &msg_hdr, logbuf);
+ rtt_pushed = TRUE;
+ }
+ }
+#endif /* EWP_RTT_LOGGING && DHD_LOG_DUMP */
+
if (!msg_processed) {
dhd_dbg_verboselog_handler(dhdp, plog_hdr, raw_event_ptr,
logset, block, (uint32 *)data);
BCM_REFERENCE(log_hdr);
exit:
- while (!dll_empty(&list_head)) {
+ while (dll_inited && (!dll_empty(&list_head))) {
cur = dll_head_p(&list_head);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
return BCME_OK;
}
+/*
+ * __dhd_dbg_get_ring_status: snapshot the status (read/written byte
+ * counters etc., via RING_STAT_TO_STATUS) of a single debug ring into
+ * *get_ring_status.
+ * Returns BCME_BADADDR when ring is NULL, BCME_OK otherwise.
+ * NOTE(review): takes no ring lock — presumably the caller serializes
+ * against writers; confirm at call sites.
+ */
+int
+__dhd_dbg_get_ring_status(dhd_dbg_ring_t *ring, dhd_dbg_ring_status_t *get_ring_status)
+{
+	dhd_dbg_ring_status_t ring_status;
+	int ret = BCME_OK;
+
+	if (ring == NULL) {
+		return BCME_BADADDR;
+	}
+
+	bzero(&ring_status, sizeof(dhd_dbg_ring_status_t));
+	RING_STAT_TO_STATUS(ring, ring_status);
+	*get_ring_status = ring_status;
+
+	return ret;
+}
+
/*
* dhd_dbg_get_ring_status : get the ring status from the coresponding ring buffer
* Return: An error code or 0 on success.
int id = 0;
dhd_dbg_t *dbg;
dhd_dbg_ring_t *dbg_ring;
- dhd_dbg_ring_status_t ring_status;
if (!dhdp || !dhdp->dbg)
return BCME_BADADDR;
dbg = dhdp->dbg;
- memset(&ring_status, 0, sizeof(dhd_dbg_ring_status_t));
for (id = DEBUG_RING_ID_INVALID + 1; id < DEBUG_RING_ID_MAX; id++) {
dbg_ring = &dbg->dbg_rings[id];
if (VALID_RING(dbg_ring->id) && (dbg_ring->id == ring_id)) {
- RING_STAT_TO_STATUS(dbg_ring, ring_status);
- *dbg_ring_status = ring_status;
+ __dhd_dbg_get_ring_status(dbg_ring, dbg_ring_status);
break;
}
}
return ret;
}
+#ifdef SHOW_LOGTRACE
+/*
+ * dhd_dbg_read_ring_into_trace_buf: pull one entry (up to
+ * TRACE_LOG_BUF_MAX_SIZE bytes, header stripped) from a debug ring into
+ * trace_buf_info->buf and fill in its size/availability fields:
+ *  - BUF_NOT_AVAILABLE  when nothing could be pulled (rlen == 0),
+ *  - NEXT_BUF_AVAIL     when the ring still holds unread data
+ *                       (written_bytes != read_bytes),
+ *  - NEXT_BUF_NOT_AVAIL otherwise.
+ */
+void
+dhd_dbg_read_ring_into_trace_buf(dhd_dbg_ring_t *ring, trace_buf_info_t *trace_buf_info)
+{
+	dhd_dbg_ring_status_t ring_status;
+	uint32 rlen = 0;
+
+	rlen = dhd_dbg_ring_pull_single(ring, trace_buf_info->buf, TRACE_LOG_BUF_MAX_SIZE, TRUE);
+
+	trace_buf_info->size = rlen;
+	trace_buf_info->availability = NEXT_BUF_NOT_AVAIL;
+	if (rlen == 0) {
+		trace_buf_info->availability = BUF_NOT_AVAILABLE;
+		return;
+	}
+
+	__dhd_dbg_get_ring_status(ring, &ring_status);
+
+	if (ring_status.written_bytes != ring_status.read_bytes) {
+		trace_buf_info->availability = NEXT_BUF_AVAIL;
+	}
+}
+#endif /* SHOW_LOGTRACE */
+
/*
* dhd_dbg_find_ring_id : return ring_id based on ring_name
* Return: An invalid ring id for failure or valid ring id on success.
uint32
__dhd_dbg_driver_ts_usec(void)
{
- struct timespec ts;
+ struct osl_timespec ts;
- get_monotonic_boottime(&ts);
+ osl_get_monotonic_boottime(&ts);
return ((uint32)(__TIMESPEC_TO_US(ts)));
}
DHD_PKT_MON(("%s(): packet monitor detach succeeded\n", __FUNCTION__));
return BCME_OK;
}
+#endif /* DBG_PKT_MON */
+
+#if defined(DBG_PKT_MON)
bool
dhd_dbg_process_tx_status(dhd_pub_t *dhdp, void *pkt, uint32 pktid,
uint16 status)
}
return pkt_fate;
}
-
-#else /* DBG_PKT_MON */
-
+#else /* DBG_PKT_MON || DHD_PKT_LOGGING */
bool
dhd_dbg_process_tx_status(dhd_pub_t *dhdp, void *pkt,
uint32 pktid, uint16 status)
{
return TRUE;
}
+#endif // endif
-#endif /* DBG_PKT_MON */
+#define EL_LOG_STR_LEN 512
+
+/*
+ * print_roam_enhanced_log: decode and print one ROAM_LOG_* enhanced event
+ * log record (scan start/complete, roam complete, neighbor report
+ * request/report, beacon report request/report). A version other than
+ * ROAM_LOG_VER_1, or an unknown record id, falls back to a raw hex dump
+ * of the payload (default_print).
+ * Fixes vs. previous revision: removed the unreachable "return;" that
+ * followed "goto default_print;", and corrected "veresion"/"verseion"
+ * typos in two log messages.
+ */
+void
+print_roam_enhanced_log(prcd_event_log_hdr_t *plog_hdr)
+{
+	prsv_periodic_log_hdr_t *hdr = (prsv_periodic_log_hdr_t *)plog_hdr->log_ptr;
+	char chanspec_buf[CHANSPEC_STR_LEN];
+
+	if (hdr->version != ROAM_LOG_VER_1) {
+		DHD_ERROR(("ROAM_LOG ENHANCE: version is not matched\n"));
+		goto default_print;
+	}
+
+	switch (hdr->id) {
+	case ROAM_LOG_SCANSTART:
+	{
+		roam_log_trig_v1_t *log = (roam_log_trig_v1_t *)plog_hdr->log_ptr;
+		DHD_ERROR(("ROAM_LOG_SCANSTART time: %d,"
+			" version:%d reason: %d rssi:%d cu:%d result:%d\n",
+			plog_hdr->armcycle, log->hdr.version, log->reason,
+			log->rssi, log->current_cu, log->result));
+		if (log->reason == WLC_E_REASON_DEAUTH ||
+			log->reason == WLC_E_REASON_DISASSOC) {
+			DHD_ERROR((" ROAM_LOG_PRT_ROAM: RCVD reason:%d\n",
+				log->prt_roam.rcvd_reason));
+		} else if (log->reason == WLC_E_REASON_BSSTRANS_REQ) {
+			DHD_ERROR((" ROAM_LOG_BSS_REQ: mode:%d candidate:%d token:%d "
+				"duration disassoc:%d valid:%d term:%d\n",
+				log->bss_trans.req_mode, log->bss_trans.nbrlist_size,
+				log->bss_trans.token, log->bss_trans.disassoc_dur,
+				log->bss_trans.validity_dur, log->bss_trans.bss_term_dur));
+		}
+		break;
+	}
+	case ROAM_LOG_SCAN_CMPLT:
+	{
+		int i;
+		roam_log_scan_cmplt_v1_t *log =
+			(roam_log_scan_cmplt_v1_t *)plog_hdr->log_ptr;
+
+		DHD_ERROR(("ROAM_LOG_SCAN_CMPL: time:%d version:%d"
+			"is_full:%d scan_count:%d score_delta:%d",
+			plog_hdr->armcycle, log->hdr.version, log->full_scan,
+			log->scan_count, log->score_delta));
+		DHD_ERROR((" ROAM_LOG_CUR_AP: " MACDBG "rssi:%d score:%d channel:%s\n",
+			MAC2STRDBG((uint8 *)&log->cur_info.addr),
+			log->cur_info.rssi,
+			log->cur_info.score,
+			wf_chspec_ntoa_ex(log->cur_info.chanspec, chanspec_buf)));
+		for (i = 0; i < log->scan_list_size; i++) {
+			DHD_ERROR((" ROAM_LOG_CANDIDATE %d: " MACDBG
+				"rssi:%d score:%d channel:%s TPUT:%dkbps\n",
+				i, MAC2STRDBG((uint8 *)&log->scan_list[i].addr),
+				log->scan_list[i].rssi, log->scan_list[i].score,
+				wf_chspec_ntoa_ex(log->scan_list[i].chanspec,
+				chanspec_buf),
+				log->scan_list[i].estm_tput != ROAM_LOG_INVALID_TPUT?
+				log->scan_list[i].estm_tput:0));
+		}
+		break;
+	}
+	case ROAM_LOG_ROAM_CMPLT:
+	{
+		roam_log_cmplt_v1_t *log = (roam_log_cmplt_v1_t *)plog_hdr->log_ptr;
+		DHD_ERROR(("ROAM_LOG_ROAM_CMPL: time: %d, version:%d"
+			"status: %d reason: %d channel:%s retry:%d " MACDBG "\n",
+			plog_hdr->armcycle, log->hdr.version, log->status, log->reason,
+			wf_chspec_ntoa_ex(log->chanspec, chanspec_buf),
+			log->retry, MAC2STRDBG((uint8 *)&log->addr)));
+		break;
+	}
+	case ROAM_LOG_NBR_REQ:
+	{
+		roam_log_nbrreq_v1_t *log = (roam_log_nbrreq_v1_t *)plog_hdr->log_ptr;
+		DHD_ERROR(("ROAM_LOG_NBR_REQ: time: %d, version:%d token:%d\n",
+			plog_hdr->armcycle, log->hdr.version, log->token));
+		break;
+	}
+	case ROAM_LOG_NBR_REP:
+	{
+		roam_log_nbrrep_v1_t *log = (roam_log_nbrrep_v1_t *)plog_hdr->log_ptr;
+		DHD_ERROR(("ROAM_LOG_NBR_REP: time:%d, version:%d chan_num:%d\n",
+			plog_hdr->armcycle, log->hdr.version, log->channel_num));
+		break;
+	}
+	case ROAM_LOG_BCN_REQ:
+	{
+		roam_log_bcnrpt_req_v1_t *log =
+			(roam_log_bcnrpt_req_v1_t *)plog_hdr->log_ptr;
+		DHD_ERROR(("ROAM_LOG_BCN_REQ: time:%d, version:%d ret:%d"
+			"class:%d num_chan:%d ",
+			plog_hdr->armcycle, log->hdr.version,
+			log->result, log->reg, log->channel));
+		DHD_ERROR(("ROAM_LOG_BCN_REQ: mode:%d is_wild:%d duration:%d"
+			"ssid_len:%d\n", log->mode, log->bssid_wild,
+			log->duration, log->ssid_len));
+		break;
+	}
+	case ROAM_LOG_BCN_REP:
+	{
+		roam_log_bcnrpt_rep_v1_t *log =
+			(roam_log_bcnrpt_rep_v1_t *)plog_hdr->log_ptr;
+		DHD_ERROR(("ROAM_LOG_BCN_REP: time:%d, version:%d count:%d\n",
+			plog_hdr->armcycle, log->hdr.version,
+			log->count));
+		break;
+	}
+	default:
+		goto default_print;
+	}
+
+	return;
+
+default_print:
+	/* unknown id / version mismatch: dump the payload as raw hex words */
+	{
+		uint32 *ptr = (uint32 *)plog_hdr->log_ptr;
+		int i;
+		int loop_cnt = hdr->length / sizeof(uint32);
+		struct bcmstrbuf b;
+		char pr_buf[EL_LOG_STR_LEN] = { 0 };
+
+		bcm_binit(&b, pr_buf, EL_LOG_STR_LEN);
+		bcm_bprintf(&b, "ROAM_LOG_UNKNOWN ID:%d ver:%d armcycle:%d",
+			hdr->id, hdr->version, plog_hdr->armcycle);
+		for (i = 0; i < loop_cnt && b.size > 0; i++) {
+			bcm_bprintf(&b, " %x", *ptr);
+			ptr++;
+		}
+		DHD_ERROR(("%s\n", b.origbuf));
+	}
+}
/*
* dhd_dbg_attach: initialziation of dhd dbugability module
if (!buf)
goto error;
ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[FW_VERBOSE_RING_ID], FW_VERBOSE_RING_ID,
- (uint8 *)FW_VERBOSE_RING_NAME, FW_VERBOSE_RING_SIZE, buf);
+ (uint8 *)FW_VERBOSE_RING_NAME, FW_VERBOSE_RING_SIZE, buf, FALSE);
if (ret)
goto error;
if (!buf)
goto error;
ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[DHD_EVENT_RING_ID], DHD_EVENT_RING_ID,
- (uint8 *)DHD_EVENT_RING_NAME, DHD_EVENT_RING_SIZE, buf);
+ (uint8 *)DHD_EVENT_RING_NAME, DHD_EVENT_RING_SIZE, buf, FALSE);
if (ret)
goto error;
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_debug.h 771435 2018-07-10 05:35:24Z $
+ * $Id: dhd_debug.h 783721 2018-10-08 13:05:26Z $
*/
#ifndef _dhd_debug_h_
} dhd_dbg_mwli_t;
#endif /* DHD_DEBUG */
+#define DHD_OW_BI_RAW_EVENT_LOG_FMT 0xFFFF
+
/* LSB 2 bits of format number to identify the type of event log */
#define DHD_EVENT_LOG_HDR_MASK 0x3
extern int dhd_dbg_set_configuration(dhd_pub_t *dhdp, int ring_id,
int log_level, int flags, uint32 threshold);
extern int dhd_dbg_find_ring_id(dhd_pub_t *dhdp, char *ring_name);
+extern dhd_dbg_ring_t *dhd_dbg_get_ring_from_ring_id(dhd_pub_t *dhdp, int ring_id);
extern void *dhd_dbg_get_priv(dhd_pub_t *dhdp);
extern int dhd_dbg_send_urgent_evt(dhd_pub_t *dhdp, const void *data, const uint32 len);
extern void dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr,
bool strip_header);
int dhd_dbg_push_to_ring(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_entry_t *hdr,
void *data);
+int __dhd_dbg_get_ring_status(dhd_dbg_ring_t *ring, dhd_dbg_ring_status_t *ring_status);
int dhd_dbg_get_ring_status(dhd_pub_t *dhdp, int ring_id,
dhd_dbg_ring_status_t *dbg_ring_status);
+#ifdef SHOW_LOGTRACE
+void dhd_dbg_read_ring_into_trace_buf(dhd_dbg_ring_t *ring, trace_buf_info_t *trace_buf_info);
+#endif /* SHOW_LOGTRACE */
#ifdef DBG_PKT_MON
extern int dhd_dbg_attach_pkt_monitor(dhd_pub_t *dhdp,
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
goto exit;
}
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
rlen = dhd_dbg_pull_from_ring(dhdp, ringid, buf, buflen);
+ DHD_DBG_RING_LOCK(ring->lock, flags);
if (!ring->sched_pull) {
ring->sched_pull = TRUE;
* Flow rings are transmit traffic (=propagating towards antenna) related entities
*
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_flowring.c 765578 2018-06-04 17:10:24Z $
+ * $Id: dhd_flowring.c 808473 2019-03-07 07:35:30Z $
*/
#include <typedefs.h>
#define FLOW_QUEUE_PKT_NEXT(p) PKTLINK(p)
#define FLOW_QUEUE_PKT_SETNEXT(p, x) PKTSETLINK((p), (x))
-#if defined(EAPOL_PKT_PRIO) || defined(DHD_LOSSLESS_ROAMING)
-const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 7 };
-#else
const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 };
-#endif /* EAPOL_PKT_PRIO || DHD_LOSSLESS_ROAMING */
const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
/** Queue overflow throttle. Return value: TRUE if throttle needs to be applied */
}
}
+/* Count the number of distinct priority levels in dhdp->flow_prio_map.
+ * Each distinct mapped value needs its own flowring per infra-STA peer,
+ * so this is the per-peer flowring reservation count.
+ */
+uint8
+dhd_num_prio_supported_per_flow_ring(dhd_pub_t *dhdp)
+{
+	uint8 prio_count = 0;
+	int i;
+	// Pick all elements one by one
+	for (i = 0; i < NUMPRIO; i++)
+	{
+		// Check if the picked element is already counted
+		int j;
+		for (j = 0; j < i; j++) {
+			if (dhdp->flow_prio_map[i] == dhdp->flow_prio_map[j]) {
+				break;
+			}
+		}
+		// If not counted earlier, then count it (inner loop ran to completion)
+		if (i == j) {
+			prio_count++;
+		}
+	}
+
+#ifdef DHD_LOSSLESS_ROAMING
+	/* For LLR, we are using a flowring with prio 7 which is not considered
+	 * in the prio2ac array. But __dhd_sendpkt hardcodes the
+	 * prio to PRIO_8021D_NC and sends it to dhd_flowid_update.
+	 * So add 1 to prio_count.
+	 */
+	prio_count++;
+#endif /* DHD_LOSSLESS_ROAMING */
+
+	return prio_count;
+}
+
+/* Upper bound on flowrings available to multi-client roles (AP/GO/NAN):
+ * total TX flowrings minus the common rings and minus the rings reserved
+ * for the infra-STA role (one per supported priority, computed above).
+ */
+uint8
+dhd_get_max_multi_client_flow_rings(dhd_pub_t *dhdp)
+{
+	uint8 reserved_infra_sta_flow_rings = dhd_num_prio_supported_per_flow_ring(dhdp);
+	uint8 total_tx_flow_rings = dhdp->num_flow_rings - dhdp->bus->max_cmn_rings;
+	uint8 max_multi_client_flow_rings = total_tx_flow_rings - reserved_infra_sta_flow_rings;
+	return max_multi_client_flow_rings;
+}
+
/** Initializes data structures of multiple flow rings */
int
dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings)
dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP;
bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+
+ dhdp->max_multi_client_flow_rings = dhd_get_max_multi_client_flow_rings(dhdp);
+ dhdp->multi_client_flow_rings = 0U;
+
#ifdef DHD_LOSSLESS_ROAMING
dhdp->dequeue_prec_map = ALLPRIO;
#endif // endif
dhdp->num_flow_rings = 0U;
bzero(dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+ dhdp->max_multi_client_flow_rings = 0U;
+ dhdp->multi_client_flow_rings = 0U;
+
lock = dhdp->flowid_lock;
dhdp->flowid_lock = NULL;
ASSERT(if_flow_lkup);
- if ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) ||
- (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_WDS)) {
+ if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
#ifdef WLTDLS
if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da)) &&
is_tdls_destination(dhdp, da)) {
if (flowid == FLOWID_INVALID) {
MFREE(dhdp->osh, fl_hash_node, sizeof(flow_hash_info_t));
- DHD_ERROR(("%s: cannot get free flowid \n", __FUNCTION__));
+ DHD_ERROR_RLMT(("%s: cannot get free flowid \n", __FUNCTION__));
return FLOWID_INVALID;
}
DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
- if ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) ||
- (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_WDS)) {
- /* For STA non TDLS dest and WDS dest we allocate entry based on prio only */
+ if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
+ /* For STA/GC non TDLS dest and WDS dest we allocate entry based on prio only */
#ifdef WLTDLS
if (dhdp->peer_tbl.tdls_peer_count &&
(is_tdls_destination(dhdp, da))) {
flow_ring_table_t *flow_ring_table;
unsigned long flags;
int ret;
- bool is_sta_assoc;
DHD_TRACE(("%s\n", __FUNCTION__));
id = dhd_flowid_find(dhdp, ifindex, prio, sa, da);
if (id == FLOWID_INVALID) {
-
+ bool if_role_multi_client;
if_flow_lkup_t *if_flow_lkup;
if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
if (!if_flow_lkup[ifindex].status)
return BCME_ERROR;
- BCM_REFERENCE(is_sta_assoc);
+
+ /* check role for multi client case */
+ if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex);
+
+ /* Abort Flowring creation if multi client flowrings crossed the threshold */
+#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
+ if (if_role_multi_client &&
+ (dhdp->multi_client_flow_rings >= dhdp->max_multi_client_flow_rings)) {
+ DHD_ERROR_RLMT(("%s: Max multi client flow rings reached: %d:%d\n",
+ __FUNCTION__, dhdp->multi_client_flow_rings,
+ dhdp->max_multi_client_flow_rings));
+ return BCME_ERROR;
+ }
+#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
+
+ /* Do not create Flowring if peer is not associated */
#if defined(PCIE_FULL_DONGLE)
- is_sta_assoc = dhd_sta_associated(dhdp, ifindex, (uint8 *)da);
- DHD_ERROR_RLMT(("%s: multi %x ifindex %d role %x assoc %d\n", __FUNCTION__,
- ETHER_ISMULTI(da), ifindex, if_flow_lkup[ifindex].role,
- is_sta_assoc));
- if (!ETHER_ISMULTI(da) &&
- ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_AP) ||
- (FALSE) ||
-#ifdef WL_NAN
- (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_NAN) ||
-#endif /* WL_NAN */
- (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_P2P_GO)) &&
- (!is_sta_assoc)) {
- DHD_ERROR_RLMT(("Attempt to send pkt with out peer/scb addition\n"));
+ if (if_role_multi_client && !ETHER_ISMULTI(da) &&
+ !dhd_sta_associated(dhdp, ifindex, (uint8 *)da)) {
+ DHD_ERROR_RLMT(("%s: Skip send pkt without peer addition\n", __FUNCTION__));
return BCME_ERROR;
}
#endif /* (linux || LINUX) && PCIE_FULL_DONGLE */
id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da);
if (id == FLOWID_INVALID) {
- DHD_ERROR(("%s: alloc flowid ifindex %u status %u\n",
+ DHD_ERROR_RLMT(("%s: alloc flowid ifindex %u status %u\n",
__FUNCTION__, ifindex, if_flow_lkup[ifindex].status));
return BCME_ERROR;
}
ASSERT(id < dhdp->num_flow_rings);
+ /* Only after flowid alloc, increment multi_client_flow_rings */
+ if (if_role_multi_client) {
+ dhdp->multi_client_flow_rings++;
+ }
+
/* register this flowid in dhd_pub */
dhd_add_flowid(dhdp, ifindex, prio, da, id);
return BCME_OK;
} else {
/* if the Flow id was found in the hash */
- ASSERT(id < dhdp->num_flow_rings);
+
+ if (id >= dhdp->num_flow_rings) {
+ DHD_ERROR(("%s: Invalid flow id : %u, num_flow_rings : %u\n",
+ __FUNCTION__, id, dhdp->num_flow_rings));
+ *flowid = FLOWID_INVALID;
+ ASSERT(0);
+ return BCME_ERROR;
+ }
flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
flow_hash_info_t *cur, *prev;
if_flow_lkup_t *if_flow_lkup;
unsigned long flags;
+ bool if_role_multi_client;
ASSERT(ifindex < DHD_MAX_IFS);
if (ifindex >= DHD_MAX_IFS)
DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+ if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex);
+
for (hashix = 0; hashix < DHD_FLOWRING_HASH_SIZE; hashix++) {
cur = if_flow_lkup[ifindex].fl_hash[hashix];
prev->next = cur->next;
}
+ /* Decrement multi_client_flow_rings */
+ if (if_role_multi_client) {
+ dhdp->multi_client_flow_rings--;
+ }
+
/* deregister flowid from dhd_pub. */
dhd_del_flowid(dhdp, ifindex, flowid);
flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
for (id = 0; id < dhdp->num_flow_rings; id++) {
+ /*
+ * Send flowring delete request even if flowring status is
+ * FLOW_RING_STATUS_CREATE_PENDING, to handle cases where DISASSOC_IND
+ * event comes ahead of flowring create response.
+ * Otherwise the flowring will not be deleted later as there will not be any
+ * DISASSOC_IND event. With this change, when create response event comes to DHD,
+ * it will change the status to FLOW_RING_STATUS_OPEN and soon delete response
+ * event will come, upon which DHD will delete the flowring.
+ */
if (flow_ring_table[id].active &&
(flow_ring_table[id].flow_info.ifindex == ifindex) &&
(!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) &&
- (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
+ ((flow_ring_table[id].status == FLOW_RING_STATUS_OPEN) ||
+ (flow_ring_table[id].status == FLOW_RING_STATUS_CREATE_PENDING))) {
DHD_ERROR(("%s: deleting flowid %d\n",
__FUNCTION__, flow_ring_table[id].flowid));
dhd_bus_flow_ring_delete_request(dhdp->bus,
else
bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+ dhdp->max_multi_client_flow_rings = dhd_get_max_multi_client_flow_rings(dhdp);
+
return BCME_OK;
}
* Provides type definitions and function prototypes used to create, delete and manage flow rings at
* high level.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_flowring.h 761412 2018-05-08 05:34:36Z $
+ * $Id: dhd_flowring.h 786596 2018-10-26 22:54:51Z $
*/
/****************
#define FLOW_RING_STATUS_STA_FREEING 7
#define DHD_FLOWRING_RX_BUFPOST_PKTSZ 2048
-/* Maximum Mu MIMO frame size */
-#ifdef WL_MONITOR
-#define DHD_MAX_MON_FLOWRING_RX_BUFPOST_PKTSZ 4096
-#endif /* WL_MONITOR */
+#define DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX 4096
#define DHD_FLOW_PRIO_AC_MAP 0
#define DHD_FLOW_PRIO_TID_MAP 1
#define DHD_IF_ROLE(pub, idx) (((if_flow_lkup_t *)(pub)->if_flow_lkup)[idx].role)
#define DHD_IF_ROLE_AP(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_AP)
#define DHD_IF_ROLE_STA(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_STA)
+#define DHD_IF_ROLE_P2PGC(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_CLIENT)
#define DHD_IF_ROLE_P2PGO(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_GO)
#define DHD_IF_ROLE_WDS(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_WDS)
+#define DHD_IF_ROLE_IBSS(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_IBSS)
+#ifdef WL_NAN
+#define DHD_IF_ROLE_NAN(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_NAN)
+#else
+#define DHD_IF_ROLE_NAN(pub, idx) (FALSE)
+#endif /* WL_NAN */
+#define DHD_IF_ROLE_AWDL(pub, idx) (FALSE)
+
+#define DHD_IF_ROLE_GENERIC_STA(pub, idx) \
+ (DHD_IF_ROLE_STA(pub, idx) || DHD_IF_ROLE_P2PGC(pub, idx) || DHD_IF_ROLE_WDS(pub, idx))
+
+#define DHD_IF_ROLE_MULTI_CLIENT(pub, idx) \
+ (DHD_IF_ROLE_AP(pub, idx) || DHD_IF_ROLE_P2PGO(pub, idx) || DHD_IF_ROLE_AWDL(pub, idx) ||\
+ DHD_IF_ROLE_NAN(pub, idx))
+
#define DHD_FLOW_RING(dhdp, flowid) \
(flow_ring_node_t *)&(((flow_ring_node_t *)((dhdp)->flow_ring_table))[flowid])
#ifdef IDLE_TX_FLOW_MGMT
uint64 last_active_ts; /* contains last active timestamp */
#endif /* IDLE_TX_FLOW_MGMT */
+#ifdef DHD_HP2P
+ bool hp2p_ring;
+#endif /* DHD_HP2P */
} flow_ring_node_t;
typedef flow_ring_node_t flow_ring_table_t;
#include <dhd_linux.h>
#include <linux/gpio.h>
-#ifdef CUSTOMER_HW_PLATFORM
-#include <plat/sdhci.h>
-#define sdmmc_channel sdmmc_device_mmc0
-#endif /* CUSTOMER_HW_PLATFORM */
-
#if defined(BUS_POWER_RESTORE) && defined(BCMSDIO)
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#endif
#endif
#if defined(BUS_POWER_RESTORE)
-#if defined(BCMSDIO)
+#if defined(BCMSDIO) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
if (adapter->sdio_func && adapter->sdio_func->card && adapter->sdio_func->card->host) {
+ mdelay(100);
printf("======== mmc_power_restore_host! ========\n");
mmc_power_restore_host(adapter->sdio_func->card->host);
}
#elif defined(BCMPCIE)
- OSL_SLEEP(50); /* delay needed to be able to restore PCIe configuration registers */
if (adapter->pci_dev) {
+ mdelay(100);
printf("======== pci_set_power_state PCI_D0! ========\n");
pci_set_power_state(adapter->pci_dev, PCI_D0);
if (adapter->pci_saved_state)
mdelay(100);
} else {
#if defined(BUS_POWER_RESTORE)
-#if defined(BCMSDIO)
+#if defined(BCMSDIO) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
if (adapter->sdio_func && adapter->sdio_func->card && adapter->sdio_func->card->host) {
printf("======== mmc_power_save_host! ========\n");
mmc_power_save_host(adapter->sdio_func->card->host);
#ifdef CUSTOMER_HW_AMLOGIC
sdio_reinit();
#endif
+#elif defined(BCMPCIE)
+ printf("======== Card detection to detect PCIE card! ========\n");
#endif
} else {
#if defined(BCMSDIO)
#endif
#elif defined(BCMPCIE)
printf("======== Card detection to remove PCIE card! ========\n");
+#ifdef CUSTOMER_HW_AMLOGIC
extern_wifi_set_enable(0);
mdelay(200);
+#endif
#endif
}
#endif /* BUS_POWER_RESTORE */
/*
* IP Packet Parser Module.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_ip.c 729083 2017-10-30 10:33:47Z $
+ * $Id: dhd_ip.c 813282 2019-04-04 09:42:28Z $
*/
#include <typedefs.h>
#include <osl.h>
#include <dhd_ip.h>
-#ifdef DHDTCPACK_SUPPRESS
+#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK)
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <bcmtcp.h>
-#endif /* DHDTCPACK_SUPPRESS */
+#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */
/* special values */
/* 802.3 llc/snap header */
uint8 supp_cnt;
dhd_pub_t *dhdp;
#ifndef TCPACK_SUPPRESS_HOLD_HRT
- struct timer_list timer;
+ timer_list_compat_t timer;
#else
struct tasklet_hrtimer timer;
#endif /* TCPACK_SUPPRESS_HOLD_HRT */
#ifdef BCMPCIE
#ifndef TCPACK_SUPPRESS_HOLD_HRT
-static void dhd_tcpack_send(
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- struct timer_list *t
-#else
- ulong data
-#endif
-)
+static void dhd_tcpack_send(ulong data)
#else
static enum hrtimer_restart dhd_tcpack_send(struct hrtimer *timer)
#endif /* TCPACK_SUPPRESS_HOLD_HRT */
unsigned long flags;
#ifndef TCPACK_SUPPRESS_HOLD_HRT
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- cur_tbl = from_timer(cur_tbl, t, timer);
-#else
cur_tbl = (tcpack_info_t *)data;
-#endif
#else
cur_tbl = container_of(timer, tcpack_info_t, timer.timer);
#endif /* TCPACK_SUPPRESS_HOLD_HRT */
&tcpack_sup_module->tcpack_info_tbl[i];
tcpack_info_tbl->dhdp = dhdp;
#ifndef TCPACK_SUPPRESS_HOLD_HRT
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- timer_setup(&tcpack_info_tbl->timer, dhd_tcpack_send, 0);
-#else
- init_timer(&tcpack_info_tbl->timer);
- tcpack_info_tbl->timer.data = (ulong)tcpack_info_tbl;
- tcpack_info_tbl->timer.function = dhd_tcpack_send;
-#endif
+ init_timer_compat(&tcpack_info_tbl->timer,
+ dhd_tcpack_send, tcpack_info_tbl);
#else
tasklet_hrtimer_init(&tcpack_info_tbl->timer,
dhd_tcpack_send, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
return hold;
}
#endif /* DHDTCPACK_SUPPRESS */
+
+#ifdef DHDTCPSYNC_FLOOD_BLK
+/* Classify an ethernet frame by its TCP flags for SYN-flood blocking.
+ *
+ * Returns FLAG_SYNC for a bare SYN, FLAG_SYNCACK for SYN+ACK, and
+ * FLAG_OTHERS for anything else, including non-IPv4/non-TCP traffic
+ * and frames too short to contain the headers being parsed.
+ */
+tcp_hdr_flag_t
+dhd_tcpdata_get_flag(dhd_pub_t *dhdp, void *pkt)
+{
+	uint8 *ether_hdr;	/* Ethernet header of the new packet */
+	uint16 ether_type;	/* Ethernet type of the new packet */
+	uint8 *ip_hdr;		/* IP header of the new packet */
+	uint8 *tcp_hdr;		/* TCP header of the new packet */
+	uint32 ip_hdr_len;	/* IP header length of the new packet */
+	uint32 cur_framelen;
+	uint8 flags;
+
+	ether_hdr = PKTDATA(dhdp->osh, pkt);
+	cur_framelen = PKTLEN(dhdp->osh, pkt);
+
+	/* Runt frame: not even a full ethernet header, so the ethertype
+	 * bytes read below would be out of bounds.
+	 */
+	if (cur_framelen < ETHER_HDR_LEN) {
+		return FLAG_OTHERS;
+	}
+
+	ether_type = ether_hdr[12] << 8 | ether_hdr[13];
+
+	if (ether_type != ETHER_TYPE_IP) {
+		DHD_TRACE(("%s %d: Not a IP packet 0x%x\n",
+			__FUNCTION__, __LINE__, ether_type));
+		return FLAG_OTHERS;
+	}
+
+	ip_hdr = ether_hdr + ETHER_HDR_LEN;
+	cur_framelen -= ETHER_HDR_LEN;
+
+	if (cur_framelen < IPV4_MIN_HEADER_LEN) {
+		return FLAG_OTHERS;
+	}
+
+	ip_hdr_len = IPV4_HLEN(ip_hdr);
+	if (IP_VER(ip_hdr) != IP_VER_4 || IPV4_PROT(ip_hdr) != IP_PROT_TCP) {
+		DHD_TRACE(("%s %d: Not IPv4 nor TCP! ip ver %d, prot %d\n",
+			__FUNCTION__, __LINE__, IP_VER(ip_hdr), IPV4_PROT(ip_hdr)));
+		return FLAG_OTHERS;
+	}
+
+	/* Reject frames with a bogus IHL or too short to hold a complete
+	 * TCP header after IP options; otherwise the flags read below
+	 * would run past the end of the packet buffer.
+	 */
+	if ((ip_hdr_len < IPV4_MIN_HEADER_LEN) ||
+		(cur_framelen < ip_hdr_len + TCP_MIN_HEADER_LEN)) {
+		return FLAG_OTHERS;
+	}
+
+	tcp_hdr = ip_hdr + ip_hdr_len;
+
+	flags = (uint8)tcp_hdr[TCP_FLAGS_OFFSET];
+
+	if (flags & TCP_FLAG_SYN) {
+		if (flags & TCP_FLAG_ACK) {
+			return FLAG_SYNCACK;
+		}
+		return FLAG_SYNC;
+	}
+	return FLAG_OTHERS;
+}
+#endif /* DHDTCPSYNC_FLOOD_BLK */
*
* Provides type definitions and function prototypes used to parse ip packet.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_ip.h 536854 2015-02-24 13:17:29Z $
+ * $Id: dhd_ip.h 790572 2018-11-26 11:03:46Z $
*/
#ifndef _dhd_ip_h_
#define _dhd_ip_h_
-#ifdef DHDTCPACK_SUPPRESS
+#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK)
#include <dngl_stats.h>
#include <bcmutils.h>
#include <dhd.h>
-#endif /* DHDTCPACK_SUPPRESS */
+#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */
typedef enum pkt_frag
{
extern pkt_frag_t pkt_frag_info(osl_t *osh, void *p);
+#ifdef DHDTCPSYNC_FLOOD_BLK
+/* TCP-flag classification of a frame, as reported by dhd_tcpdata_get_flag().
+ * Used by the TCP SYN flood blocking logic.
+ */
+typedef enum tcp_hdr_flags {
+	FLAG_SYNC,	/* bare SYN */
+	FLAG_SYNCACK,	/* SYN + ACK */
+	FLAG_RST,	/* reset */
+	FLAG_OTHERS	/* anything else, incl. non-TCP/non-IPv4 frames */
+} tcp_hdr_flag_t;
+
+/* Parse pkt's ethernet/IPv4/TCP headers and return its tcp_hdr_flag_t. */
+extern tcp_hdr_flag_t dhd_tcpdata_get_flag(dhd_pub_t *dhdp, void *pkt);
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
#ifdef DHDTCPACK_SUPPRESS
#define TCPACKSZMIN (ETHER_HDR_LEN + IPV4_MIN_HEADER_LEN + TCP_MIN_HEADER_LEN)
/* Size of MAX possible TCP ACK packet. Extra bytes for IP/TCP option fields */
* Broadcom Dongle Host Driver (DHD), Linux-specific network interface
* Basically selected code segments from usb-cdc.c and usb-rndis.c
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_linux.c 771880 2018-07-12 07:25:59Z $
+ * $Id: dhd_linux.c 822756 2019-05-30 13:20:26Z $
*/
#include <typedefs.h>
#include <linux/ethtool.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
-#include <linux/proc_fs.h>
#include <linux/ip.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#endif /* ENABLE_ADAPTIVE_SCHED */
#include <linux/rtc.h>
-#ifdef DHD_DUMP_MNGR
#include <linux/namei.h>
-#endif /* DHD_DUMP_MNGR */
#include <asm/uaccess.h>
#include <asm/unaligned.h>
+#include <dhd_linux_priv.h>
#include <epivers.h>
#include <bcmutils.h>
#include <vlan.h>
#include <802.3.h>
-#include <dngl_stats.h>
#include <dhd_linux_wq.h>
#include <dhd.h>
#include <dhd_linux.h>
+#include <dhd_linux_pktdump.h>
#ifdef DHD_WET
#include <dhd_wet.h>
#endif /* DHD_WET */
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_config.h>
+#ifdef WL_ESCAN
+#include <wl_escan.h>
+#endif
#include <dhd_dbg.h>
#include <dhd_dbg_ring.h>
#include <dhd_debug.h>
#include <802.1d.h>
#endif /* AMPDU_VO_ENABLE */
-#ifdef DHDTCPACK_SUPPRESS
+#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK)
#include <dhd_ip.h>
-#endif /* DHDTCPACK_SUPPRESS */
+#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */
#include <dhd_daemon.h>
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+#include <eapol.h>
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
#ifdef DHD_DEBUG_PAGEALLOC
typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
#define IP_PROT_RESERVED 0xFF
-#if defined(DHD_LB)
-#if !defined(PCIE_FULL_DONGLE)
-#error "DHD Loadbalancing only supported on PCIE_FULL_DONGLE"
-#endif /* !PCIE_FULL_DONGLE */
-#endif /* DHD_LB */
-
-#if defined(DHD_LB_RXP) || defined(DHD_LB_RXC) || defined(DHD_LB_TXC) || \
- defined(DHD_LB_STATS)
-#if !defined(DHD_LB)
-#error "DHD loadbalance derivatives are supported only if DHD_LB is defined"
-#endif /* !DHD_LB */
-#endif /* DHD_LB_RXP || DHD_LB_RXC || DHD_LB_TXC || DHD_LB_STATS */
-
-#if defined(DHD_LB)
-/* Dynamic CPU selection for load balancing */
-#include <linux/cpu.h>
-#include <linux/cpumask.h>
-#include <linux/notifier.h>
-#include <linux/workqueue.h>
-#include <asm/atomic.h>
-
-#if !defined(DHD_LB_PRIMARY_CPUS)
-#define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */
-#endif // endif
-#if !defined(DHD_LB_SECONDARY_CPUS)
-#define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */
-#endif // endif
-
-#define HIST_BIN_SIZE 9
-
-static void dhd_rx_napi_dispatcher_fn(struct work_struct * work);
-
-#if defined(DHD_LB_TXP)
-static void dhd_lb_tx_handler(unsigned long data);
-static void dhd_tx_dispatcher_work(struct work_struct * work);
-static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp);
-static void dhd_lb_tx_dispatch(dhd_pub_t *dhdp);
-
-/* Pkttag not compatible with PROP_TXSTATUS or WLFC */
-typedef struct dhd_tx_lb_pkttag_fr {
- struct net_device *net;
- int ifidx;
-} dhd_tx_lb_pkttag_fr_t;
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+static void dhd_m4_state_handler(struct work_struct * work);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
-#define DHD_LB_TX_PKTTAG_SET_NETDEV(tag, netdevp) ((tag)->net = netdevp)
-#define DHD_LB_TX_PKTTAG_NETDEV(tag) ((tag)->net)
-
-#define DHD_LB_TX_PKTTAG_SET_IFIDX(tag, ifidx) ((tag)->ifidx = ifidx)
-#define DHD_LB_TX_PKTTAG_IFIDX(tag) ((tag)->ifidx)
-#endif /* DHD_LB_TXP */
-#endif /* DHD_LB */
+#ifdef DHDTCPSYNC_FLOOD_BLK
+static void dhd_blk_tsfl_handler(struct work_struct * work);
+#endif /* DHDTCPSYNC_FLOOD_BLK */
#ifdef WL_NATOE
#include <dhd_linux_nfct.h>
#endif /* WL_NATOE */
-#ifdef WL_MONITOR
-#include <bcmmsgbuf.h>
-#define MAX_RADIOTAP_SIZE 256 /* Maximum size to hold HE Radiotap header format */
-#define MAX_MON_PKT_SIZE (4096 + MAX_RADIOTAP_SIZE)
-#endif /* WL_MONITOR */
-
-#define htod32(i) (i)
-#define htod16(i) (i)
-#define dtoh32(i) (i)
-#define dtoh16(i) (i)
-#define htodchanspec(i) (i)
-#define dtohchanspec(i) (i)
-
-#ifdef BLOCK_IPV6_PACKET
-#define HEX_PREF_STR "0x"
-#define UNI_FILTER_STR "010000000000"
-#define ZERO_ADDR_STR "000000000000"
-#define ETHER_TYPE_STR "0000"
-#define IPV6_FILTER_STR "20"
-#define ZERO_TYPE_STR "00"
-#endif /* BLOCK_IPV6_PACKET */
-
#if defined(SOFTAP)
extern bool ap_cfg_running;
extern bool ap_fw_loaded;
#endif // endif
-extern void dhd_dump_eapol_4way_message(dhd_pub_t *dhd, char *ifname,
- char *dump_data, bool direction);
-
#ifdef FIX_CPU_MIN_CLOCK
#include <linux/pm_qos.h>
#endif /* FIX_CPU_MIN_CLOCK */
#endif // endif
#include <wl_android.h>
-#ifdef WL_ESCAN
-#include <wl_escan.h>
-#endif
/* Maximum STA per radio */
#define DHD_MAX_STA 32
static bool dhd_inet6addr_notifier_registered = FALSE;
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+#if defined(CONFIG_PM_SLEEP)
#include <linux/suspend.h>
volatile bool dhd_mmc_suspend = FALSE;
DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+#endif /* defined(CONFIG_PM_SLEEP) */
#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(FORCE_WOWLAN)
extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
-static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
-#endif // endif
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+static void dhd_hang_process(struct work_struct *work_data);
MODULE_LICENSE("GPL and additional rights");
-#endif /* LinuxVer */
#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
DEFINE_MUTEX(_dhd_mutex_lock_);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
-#endif
+#endif
#ifdef CONFIG_BCM_DETECT_CONSECUTIVE_HANG
#define MAX_CONSECUTIVE_HANG_COUNTS 5
extern void dhd_wlfc_plat_init(void *dhd);
extern void dhd_wlfc_plat_deinit(void *dhd);
#endif /* PROP_TXSTATUS */
+#ifdef USE_DYNAMIC_F2_BLKSIZE
extern uint sd_f2_blocksize;
extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);
-
-#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
-const char *
-print_tainted()
-{
- return "";
-}
-#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
/* Linux wireless extension support */
#if defined(WL_WIRELESS_EXT)
#include <linux/nl80211.h>
#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
-#if defined(BCMPCIE)
-extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval);
-#else
-extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
-#endif /* OEM_ANDROID && BCMPCIE */
-
-#ifdef PKT_FILTER_SUPPORT
-extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
-extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
-extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
-#endif // endif
-
#if defined(PKT_FILTER_SUPPORT) && defined(APF)
static int __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
u8* program, uint32 program_len);
static int __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id);
#endif /* PKT_FILTER_SUPPORT && APF */
-#if defined(ARGOS_NOTIFY_CB)
-int argos_register_notifier_init(struct net_device *net);
-int argos_register_notifier_deinit(void);
-
-extern int sec_argos_register_notifier(struct notifier_block *n, char *label);
-extern int sec_argos_unregister_notifier(struct notifier_block *n, char *label);
-
-static int argos_status_notifier_wifi_cb(struct notifier_block *notifier,
- unsigned long speed, void *v);
-static int argos_status_notifier_p2p_cb(struct notifier_block *notifier,
- unsigned long speed, void *v);
-
-/* PCIe interrupt affinity threshold (Mbps) */
-#define PCIE_IRQ_AFFINITY_THRESHOLD 300
+#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) && defined(DHD_FW_COREDUMP)
+static int dhd_wait_for_file_dump(dhd_pub_t *dhdp);
+#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT && DHD_FW_COREDUMP */
+#if defined(ARGOS_NOTIFY_CB)
/* ARGOS notifer data */
static struct notifier_block argos_wifi; /* STA */
static struct notifier_block argos_p2p; /* P2P */
-
-typedef struct {
- struct net_device *wlan_primary_netdev;
- int argos_rps_cpus_enabled;
-} argos_rps_ctrl;
-
argos_rps_ctrl argos_rps_ctrl_data;
-#define RPS_TPUT_THRESHOLD 300
-#define DELAY_TO_CLEAR_RPS_CPUS 300
#endif // endif
-#if defined(BT_OVER_SDIO)
-extern void wl_android_set_wifi_on_flag(bool enable);
-#endif /* BT_OVER_SDIO */
-
#ifdef DHD_FW_COREDUMP
-static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
+static int dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
#endif /* DHD_FW_COREDUMP */
#ifdef DHD_LOG_DUMP
-/* 0: DLD_BUF_TYPE_GENERAL, 1: DLD_BUF_TYPE_PRESERVE
-* 2: DLD_BUF_TYPE_SPECIAL
-*/
-#define DLD_BUFFER_NUM 3
-
-#ifndef CUSTOM_LOG_DUMP_BUFSIZE_MB
-#define CUSTOM_LOG_DUMP_BUFSIZE_MB 4 /* DHD_LOG_DUMP_BUF_SIZE 4 MB static memory in kernel */
-#endif /* CUSTOM_LOG_DUMP_BUFSIZE_MB */
-
-#define LOG_DUMP_TOTAL_BUFSIZE (1024 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
-#define LOG_DUMP_GENERAL_MAX_BUFSIZE (384 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
-#define LOG_DUMP_PRESERVE_MAX_BUFSIZE (128 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
-#define LOG_DUMP_SPECIAL_MAX_BUFSIZE (8 * 1024)
-#define LOG_DUMP_ECNTRS_MAX_BUFSIZE (384 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
-#define LOG_DUMP_FILTER_MAX_BUFSIZE (128 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
-#define LOG_DUMP_MAX_FILESIZE (8 *1024 * 1024) /* 8 MB default */
-#ifdef CONFIG_LOG_BUF_SHIFT
-/* 15% of kernel log buf size, if for example klog buf size is 512KB
-* 15% of 512KB ~= 80KB
-*/
-#define LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE \
- (15 * ((1 << CONFIG_LOG_BUF_SHIFT)/100))
-#endif /* CONFIG_LOG_BUF_SHIFT */
-#define LOG_DUMP_COOKIE_BUFSIZE 1024u
struct dhd_log_dump_buf g_dld_buf[DLD_BUFFER_NUM];
-static int dld_buf_size[DLD_BUFFER_NUM] = {
- LOG_DUMP_GENERAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_GENERAL */
- LOG_DUMP_PRESERVE_MAX_BUFSIZE, /* DLD_BUF_TYPE_PRESERVE */
- LOG_DUMP_SPECIAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_SPECIAL */
-};
-
-typedef struct {
- char *hdr_str;
- log_dump_section_type_t sec_type;
-} dld_hdr_t;
/* Only header for log dump buffers is stored in array
* header for sections like 'dhd dump', 'ext trap'
{SPECIAL_LOG_HDR, LOG_DUMP_SECTION_SPECIAL}
};
+static int dld_buf_size[DLD_BUFFER_NUM] = {
+ LOG_DUMP_GENERAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_GENERAL */
+ LOG_DUMP_PRESERVE_MAX_BUFSIZE, /* DLD_BUF_TYPE_PRESERVE */
+ LOG_DUMP_SPECIAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_SPECIAL */
+};
+
static void dhd_log_dump_init(dhd_pub_t *dhd);
static void dhd_log_dump_deinit(dhd_pub_t *dhd);
static void dhd_log_dump(void *handle, void *event_info, u8 event);
static int do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type);
-
-#define DHD_PRINT_BUF_NAME_LEN 30
+static int dhd_log_flush(dhd_pub_t *dhdp, log_dump_type_t *type);
+static void dhd_get_time_str(dhd_pub_t *dhdp, char *time_str, int size);
+void dhd_get_debug_dump_len(void *handle, struct sk_buff *skb, void *event_info, u8 event);
+void cfgvendor_log_dump_len(dhd_pub_t *dhdp, log_dump_type_t *type, struct sk_buff *skb);
static void dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size);
+static void dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type);
#endif /* DHD_LOG_DUMP */
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
#include "dhd_bt_interface.h"
#endif /* defined (BT_OVER_SDIO) */
-#ifdef SHOW_LOGTRACE
-static int dhd_trace_open_proc(struct inode *inode, struct file *file);
-ssize_t dhd_trace_read_proc(struct file *file, char *buffer, size_t tt, loff_t *loff);
-
-static const struct file_operations proc_file_fops = {
- .read = dhd_trace_read_proc,
- .open = dhd_trace_open_proc,
- .release = seq_release,
-};
-#endif // endif
-
#ifdef WL_STATIC_IF
bool dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev);
#endif /* WL_STATIC_IF */
atomic_t exit_in_progress = ATOMIC_INIT(0);
-typedef struct dhd_if_event {
- struct list_head list;
- wl_event_data_if_t event;
- char name[IFNAMSIZ+1];
- uint8 mac[ETHER_ADDR_LEN];
-} dhd_if_event_t;
-
-/* Interface control information */
-typedef struct dhd_if {
- struct dhd_info *info; /* back pointer to dhd_info */
- /* OS/stack specifics */
- struct net_device *net;
- int idx; /* iface idx in dongle */
- uint subunit; /* subunit */
- uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
- bool set_macaddress;
- bool set_multicast;
- uint8 bssidx; /* bsscfg index for the interface */
- bool attached; /* Delayed attachment when unset */
- bool txflowcontrol; /* Per interface flow control indicator */
- char name[IFNAMSIZ+1]; /* linux interface name */
- char dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */
- struct net_device_stats stats;
-#ifdef PCIE_FULL_DONGLE
- struct list_head sta_list; /* sll of associated stations */
- spinlock_t sta_list_lock; /* lock for manipulating sll */
-#endif /* PCIE_FULL_DONGLE */
- uint32 ap_isolate; /* ap-isolation settings */
-#ifdef DHD_L2_FILTER
- bool parp_enable;
- bool parp_discard;
- bool parp_allnode;
- arp_table_t *phnd_arp_table;
- /* for Per BSS modification */
- bool dhcp_unicast;
- bool block_ping;
- bool grat_arp;
- bool block_tdls;
-#endif /* DHD_L2_FILTER */
-#ifdef DHD_MCAST_REGEN
- bool mcast_regen_bss_enable;
-#endif // endif
- bool rx_pkt_chainable; /* set all rx packet to chainable config by default */
- cumm_ctr_t cumm_ctr; /* cummulative queue length of child flowrings */
- uint8 tx_paths_active;
- bool del_in_progress;
- bool static_if; /* used to avoid some operations on static_if */
-} dhd_if_t;
-
-struct ipv6_work_info_t {
- uint8 if_idx;
- char ipv6_addr[IPV6_ADDR_LEN];
- unsigned long event;
-};
static void dhd_process_daemon_msg(struct sk_buff *skb);
static void dhd_destroy_to_notifier_skt(void);
static int dhd_create_to_notifier_skt(void);
.groups = 1,
.input = dhd_process_daemon_msg,
};
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */
-
-typedef struct dhd_dump {
- uint8 *buf;
- int bufsize;
- uint8 *hscb_buf;
- int hscb_bufsize;
-} dhd_dump_t;
-
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-struct dhd_rx_tx_work {
- struct work_struct work;
- struct sk_buff *skb;
- struct net_device *net;
- struct dhd_pub *pub;
-};
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
-/* When Perimeter locks are deployed, any blocking calls must be preceeded
- * with a PERIM UNLOCK and followed by a PERIM LOCK.
- * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
- * wait_event_timeout().
- */
-
-/* Local private structure (extension of pub) */
-typedef struct dhd_info {
-#if defined(WL_WIRELESS_EXT)
- wl_iw_t iw; /* wireless extensions state (must be first) */
-#endif /* defined(WL_WIRELESS_EXT) */
- dhd_pub_t pub;
- /* for supporting multiple interfaces.
- * static_ifs hold the net ifaces without valid FW IF
- */
- dhd_if_t *iflist[DHD_MAX_IFS + DHD_MAX_STATIC_IFS];
-
- wifi_adapter_info_t *adapter; /* adapter information, interrupt, fw path etc. */
- char fw_path[PATH_MAX]; /* path to firmware image */
- char nv_path[PATH_MAX]; /* path to nvram vars file */
- char clm_path[PATH_MAX]; /* path to clm vars file */
- char conf_path[PATH_MAX]; /* path to config vars file */
-#ifdef DHD_UCODE_DOWNLOAD
- char uc_path[PATH_MAX]; /* path to ucode image */
-#endif /* DHD_UCODE_DOWNLOAD */
-
- /* serialize dhd iovars */
- struct mutex dhd_iovar_mutex;
-
- struct semaphore proto_sem;
-#ifdef PROP_TXSTATUS
- spinlock_t wlfc_spinlock;
-
-#ifdef BCMDBUS
- ulong wlfc_lock_flags;
- ulong wlfc_pub_lock_flags;
-#endif /* BCMDBUS */
-#endif /* PROP_TXSTATUS */
- wait_queue_head_t ioctl_resp_wait;
- wait_queue_head_t d3ack_wait;
- wait_queue_head_t dhd_bus_busy_state_wait;
- wait_queue_head_t dmaxfer_wait;
- uint32 default_wd_interval;
-
- struct timer_list timer;
- bool wd_timer_valid;
- struct tasklet_struct tasklet;
- spinlock_t sdlock;
- spinlock_t txqlock;
- spinlock_t dhd_lock;
-#ifdef BCMDBUS
- ulong txqlock_flags;
-#else
-
- struct semaphore sdsem;
- tsk_ctl_t thr_dpc_ctl;
- tsk_ctl_t thr_wdt_ctl;
-#endif /* BCMDBUS */
-
- tsk_ctl_t thr_rxf_ctl;
- spinlock_t rxf_lock;
- bool rxthread_enabled;
-
- /* Wakelocks */
-#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
- struct wake_lock wl_wifi; /* Wifi wakelock */
- struct wake_lock wl_rxwake; /* Wifi rx wakelock */
- struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
- struct wake_lock wl_wdwake; /* Wifi wd wakelock */
- struct wake_lock wl_evtwake; /* Wifi event wakelock */
- struct wake_lock wl_pmwake; /* Wifi pm handler wakelock */
- struct wake_lock wl_txflwake; /* Wifi tx flow wakelock */
-#ifdef BCMPCIE_OOB_HOST_WAKE
- struct wake_lock wl_intrwake; /* Host wakeup wakelock */
-#endif /* BCMPCIE_OOB_HOST_WAKE */
-#ifdef DHD_USE_SCAN_WAKELOCK
- struct wake_lock wl_scanwake; /* Wifi scan wakelock */
-#endif /* DHD_USE_SCAN_WAKELOCK */
-#endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
- /* net_device interface lock, prevent race conditions among net_dev interface
- * calls and wifi_on or wifi_off
- */
- struct mutex dhd_net_if_mutex;
- struct mutex dhd_suspend_mutex;
-#if defined(PKT_FILTER_SUPPORT) && defined(APF)
- struct mutex dhd_apf_mutex;
-#endif /* PKT_FILTER_SUPPORT && APF */
-#endif // endif
- spinlock_t wakelock_spinlock;
- spinlock_t wakelock_evt_spinlock;
- uint32 wakelock_counter;
- int wakelock_wd_counter;
- int wakelock_rx_timeout_enable;
- int wakelock_ctrl_timeout_enable;
- bool waive_wakelock;
- uint32 wakelock_before_waive;
-
- /* Thread to issue ioctl for multicast */
- wait_queue_head_t ctrl_wait;
- atomic_t pend_8021x_cnt;
- dhd_attach_states_t dhd_state;
-#ifdef SHOW_LOGTRACE
- dhd_event_log_t event_data;
-#endif /* SHOW_LOGTRACE */
-
-#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
- struct early_suspend early_suspend;
-#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
-
-#ifdef ARP_OFFLOAD_SUPPORT
- u32 pend_ipaddr;
-#endif /* ARP_OFFLOAD_SUPPORT */
-#ifdef DHDTCPACK_SUPPRESS
- spinlock_t tcpack_lock;
-#endif /* DHDTCPACK_SUPPRESS */
-#ifdef FIX_CPU_MIN_CLOCK
- bool cpufreq_fix_status;
- struct mutex cpufreq_fix;
- struct pm_qos_request dhd_cpu_qos;
-#ifdef FIX_BUS_MIN_CLOCK
- struct pm_qos_request dhd_bus_qos;
-#endif /* FIX_BUS_MIN_CLOCK */
-#endif /* FIX_CPU_MIN_CLOCK */
- void *dhd_deferred_wq;
-#ifdef DEBUG_CPU_FREQ
- struct notifier_block freq_trans;
- int __percpu *new_freq;
-#endif // endif
- unsigned int unit;
- struct notifier_block pm_notifier;
-#ifdef DHD_PSTA
- uint32 psta_mode; /* PSTA or PSR */
-#endif /* DHD_PSTA */
-#ifdef DHD_WET
- uint32 wet_mode;
-#endif /* DHD_WET */
-#ifdef DHD_DEBUG
- dhd_dump_t *dump;
- struct timer_list join_timer;
- u32 join_timeout_val;
- bool join_timer_active;
- uint scan_time_count;
- struct timer_list scan_timer;
- bool scan_timer_active;
-#endif // endif
-#if defined(DHD_LB)
- /* CPU Load Balance dynamic CPU selection */
-
- /* Variable that tracks the currect CPUs available for candidacy */
- cpumask_var_t cpumask_curr_avail;
-
- /* Primary and secondary CPU mask */
- cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
- cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */
-
- struct notifier_block cpu_notifier;
-
- /* Tasklet to handle Tx Completion packet freeing */
- struct tasklet_struct tx_compl_tasklet;
- atomic_t tx_compl_cpu;
-
- /* Tasklet to handle RxBuf Post during Rx completion */
- struct tasklet_struct rx_compl_tasklet;
- atomic_t rx_compl_cpu;
-
- /* Napi struct for handling rx packet sendup. Packets are removed from
- * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
- * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
- * to run to rx_napi_cpu.
- */
- struct sk_buff_head rx_pend_queue ____cacheline_aligned;
- struct sk_buff_head rx_napi_queue ____cacheline_aligned;
- struct napi_struct rx_napi_struct ____cacheline_aligned;
- atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */
- struct net_device *rx_napi_netdev; /* netdev of primary interface */
-
- struct work_struct rx_napi_dispatcher_work;
- struct work_struct tx_compl_dispatcher_work;
- struct work_struct tx_dispatcher_work;
- struct work_struct rx_compl_dispatcher_work;
-
- /* Number of times DPC Tasklet ran */
- uint32 dhd_dpc_cnt;
- /* Number of times NAPI processing got scheduled */
- uint32 napi_sched_cnt;
- /* Number of times NAPI processing ran on each available core */
- uint32 *napi_percpu_run_cnt;
- /* Number of times RX Completions got scheduled */
- uint32 rxc_sched_cnt;
- /* Number of times RX Completion ran on each available core */
- uint32 *rxc_percpu_run_cnt;
- /* Number of times TX Completions got scheduled */
- uint32 txc_sched_cnt;
- /* Number of times TX Completions ran on each available core */
- uint32 *txc_percpu_run_cnt;
- /* CPU status */
- /* Number of times each CPU came online */
- uint32 *cpu_online_cnt;
- /* Number of times each CPU went offline */
- uint32 *cpu_offline_cnt;
-
- /* Number of times TX processing run on each core */
- uint32 *txp_percpu_run_cnt;
- /* Number of times TX start run on each core */
- uint32 *tx_start_percpu_run_cnt;
-
- /* Tx load balancing */
-
- /* TODO: Need to see if batch processing is really required in case of TX
- * processing. In case of RX the Dongle can send a bunch of rx completions,
- * hence we took a 3 queue approach
- * enque - adds the skbs to rx_pend_queue
- * dispatch - uses a lock and adds the list of skbs from pend queue to
- * napi queue
- * napi processing - copies the pend_queue into a local queue and works
- * on it.
- * But for TX its going to be 1 skb at a time, so we are just thinking
- * of using only one queue and use the lock supported skb queue functions
- * to add and process it. If its in-efficient we'll re-visit the queue
- * design.
- */
-
- /* When the NET_TX tries to send a TX packet put it into tx_pend_queue */
- /* struct sk_buff_head tx_pend_queue ____cacheline_aligned; */
- /*
- * From the Tasklet that actually sends out data
- * copy the list tx_pend_queue into tx_active_queue. There by we need
- * to spinlock to only perform the copy the rest of the code ie to
- * construct the tx_pend_queue and the code to process tx_active_queue
- * can be lockless. The concept is borrowed as is from RX processing
- */
- /* struct sk_buff_head tx_active_queue ____cacheline_aligned; */
-
- /* Control TXP in runtime, enable by default */
- atomic_t lb_txp_active;
-
- /*
- * When the NET_TX tries to send a TX packet put it into tx_pend_queue
- * For now, the processing tasklet will also direcly operate on this
- * queue
- */
- struct sk_buff_head tx_pend_queue ____cacheline_aligned;
-
- /* cpu on which the DHD Tx is happenning */
- atomic_t tx_cpu;
-
- /* CPU on which the Network stack is calling the DHD's xmit function */
- atomic_t net_tx_cpu;
-
- /* Tasklet context from which the DHD's TX processing happens */
- struct tasklet_struct tx_tasklet;
-
- /*
- * Consumer Histogram - NAPI RX Packet processing
- * -----------------------------------------------
- * On Each CPU, when the NAPI RX Packet processing call back was invoked
- * how many packets were processed is captured in this data structure.
- * Now its difficult to capture the "exact" number of packets processed.
- * So considering the packet counter to be a 32 bit one, we have a
- * bucket with 8 bins (2^1, 2^2 ... 2^8). The "number" of packets
- * processed is rounded off to the next power of 2 and put in the
- * approriate "bin" the value in the bin gets incremented.
- * For example, assume that in CPU 1 if NAPI Rx runs 3 times
- * and the packet count processed is as follows (assume the bin counters are 0)
- * iteration 1 - 10 (the bin counter 2^4 increments to 1)
- * iteration 2 - 30 (the bin counter 2^5 increments to 1)
- * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
- */
- uint32 *napi_rx_hist[HIST_BIN_SIZE];
- uint32 *txc_hist[HIST_BIN_SIZE];
- uint32 *rxc_hist[HIST_BIN_SIZE];
-#endif /* DHD_LB */
-
-#ifdef SHOW_LOGTRACE
-#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
- tsk_ctl_t thr_logtrace_ctl;
-#else
- struct delayed_work event_log_dispatcher_work;
-#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
-#endif /* SHOW_LOGTRACE */
-
-#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
-#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
- struct kobject dhd_kobj;
- struct timer_list timesync_timer;
-#if defined(BT_OVER_SDIO)
- char btfw_path[PATH_MAX];
-#endif /* defined (BT_OVER_SDIO) */
-#ifdef WL_MONITOR
- struct net_device *monitor_dev; /* monitor pseudo device */
- struct sk_buff *monitor_skb;
- uint monitor_len;
- uint monitor_type; /* monitor pseudo device */
-#endif /* WL_MONITOR */
-#if defined(BT_OVER_SDIO)
- struct mutex bus_user_lock; /* lock for sdio bus apis shared between WLAN & BT */
- int bus_user_count; /* User counts of sdio bus shared between WLAN & BT */
-#endif /* BT_OVER_SDIO */
-#ifdef SHOW_LOGTRACE
- struct sk_buff_head evt_trace_queue ____cacheline_aligned;
-#endif // endif
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- struct workqueue_struct *tx_wq;
- struct workqueue_struct *rx_wq;
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-#ifdef DHD_DEBUG_UART
- bool duart_execute;
-#endif /* DHD_DEBUG_UART */
- struct mutex logdump_lock;
- /* indicates mem_dump was scheduled as work queue or called directly */
- bool scheduled_memdump;
- /* indicates sssrdump is called directly instead of scheduling work queue */
- bool no_wq_sssrdump;
-#if defined(PCIE_FULL_DONGLE)
- /* Spinlock used in Linux implementation of dhd_pcie_backplane_access_[un]lock() */
- spinlock_t backplane_access_lock;
-#endif /* defined(PCIE_FULL_DONGLE) */
-} dhd_info_t;
-
-#ifdef WL_MONITOR
-#define MONPKT_EXTRA_LEN 48u
-#endif /* WL_MONITOR */
-
-#define DHDIF_FWDER(dhdif) FALSE
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */
#if defined(BT_OVER_SDIO)
/* Flag to indicate if driver is initialized */
int disable_proptx = 0;
module_param(op_mode, int, 0644);
extern int wl_control_wl_start(struct net_device *dev);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (defined(BCMLXSDMMC) || defined(BCMDBUS))
+#if defined(BCMLXSDMMC) || defined(BCMDBUS)
struct semaphore dhd_registration_sem;
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+#endif /* BCMLXSDMMC || BCMDBUS */
#ifdef DHD_LOG_DUMP
int logdump_max_filesize = LOG_DUMP_MAX_FILESIZE;
int logdump_ecntr_enable = FALSE;
#endif /* EWP_ECNTRS_LOGGING */
module_param(logdump_ecntr_enable, int, 0644);
+#ifdef EWP_RTT_LOGGING
+int logdump_rtt_enable = TRUE;
+#else
+int logdump_rtt_enable = FALSE;
+#endif /* EWP_RTT_LOGGING */
+module_param(logdump_rtt_enable, int, 0644);
#endif /* DHD_LOG_DUMP */
#ifdef EWP_EDL
int host_edl_support = TRUE;
#endif /* WL_CFG80211 */
static dhd_if_t * dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev);
+#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
+static void dhd_bridge_dev_set(dhd_info_t * dhd, int ifidx, struct net_device * dev);
+#endif /* defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) */
+
#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
/* update rx_pkt_chainable state of dhd interface */
static void dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx);
/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
#ifdef ENABLE_ARP_SNOOP_MODE
-uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP | ARP_OL_HOST_AUTO_REPLY;
+uint dhd_arp_mode = (ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP | ARP_OL_HOST_AUTO_REPLY |
+ ARP_OL_UPDATE_HOST_CACHE);
#else
-uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
+uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_UPDATE_HOST_CACHE;
#endif /* ENABLE_ARP_SNOOP_MODE */
module_param(dhd_arp_mode, uint, 0);
#endif /* PCIE_FULL_DONGLE */
#ifdef DHD_SSSR_DUMP
+int dhdpcie_sssr_dump_get_before_after_len(dhd_pub_t *dhd, uint32 *arr_len);
extern uint support_sssr_dump;
module_param(support_sssr_dump, uint, 0);
#endif /* DHD_SSSR_DUMP */
static int instance_base = 0; /* Starting instance number */
module_param(instance_base, int, 0644);
-#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
+#if defined(DHD_LB_RXP)
static int dhd_napi_weight = 32;
module_param(dhd_napi_weight, int, 0644);
-#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
+#endif /* DHD_LB_RXP */
#ifdef PCIE_FULL_DONGLE
extern int h2d_max_txpost;
module_param(tpoweron_scale, uint, 0644);
#endif /* FORCE_TPOWERON */
-#ifdef DHD_DHCP_DUMP
-struct bootp_fmt {
- struct iphdr ip_header;
- struct udphdr udp_header;
- uint8 op;
- uint8 htype;
- uint8 hlen;
- uint8 hops;
- uint32 transaction_id;
- uint16 secs;
- uint16 flags;
- uint32 client_ip;
- uint32 assigned_ip;
- uint32 server_ip;
- uint32 relay_ip;
- uint8 hw_address[16];
- uint8 server_name[64];
- uint8 file_name[128];
- uint8 options[312];
-};
-
-static const uint8 bootp_magic_cookie[4] = { 99, 130, 83, 99 };
-static const char dhcp_ops[][10] = {
- "NA", "REQUEST", "REPLY"
-};
-static const char dhcp_types[][10] = {
- "NA", "DISCOVER", "OFFER", "REQUEST", "DECLINE", "ACK", "NAK", "RELEASE", "INFORM"
-};
-static void dhd_dhcp_dump(char *ifname, uint8 *pktdata, bool tx);
-#endif /* DHD_DHCP_DUMP */
-
-#ifdef FILTER_IE
-#define FILTER_IE_PATH "/etc/wifi/filter_ie"
-#define FILTER_IE_BUFSZ 1024 /* ioc buffsize for FILTER_IE */
-#define FILE_BLOCK_READ_SIZE 256
-#define WL_FILTER_IE_IOV_HDR_SIZE OFFSETOF(wl_filter_ie_iov_v1_t, tlvs)
-#endif /* FILTER_IE */
-
-#define NULL_CHECK(p, s, err) \
- do { \
- if (!(p)) { \
- printk("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \
- err = BCME_ERROR; \
- return err; \
- } \
- } while (0)
-
-#ifdef DHD_ICMP_DUMP
-#include <net/icmp.h>
-static void dhd_icmp_dump(char *ifname, uint8 *pktdata, bool tx);
-#endif /* DHD_ICMP_DUMP */
-
-/* Functions to manage sysfs interface for dhd */
-static int dhd_sysfs_init(dhd_info_t *dhd);
-static void dhd_sysfs_exit(dhd_info_t *dhd);
-
#ifdef SHOW_LOGTRACE
static char *logstrs_path = "/data/misc/wifi/logstrs.bin";
char *st_str_file_path = "/data/misc/wifi/rtecdc.bin";
char *map_file);
#endif /* SHOW_LOGTRACE */
-#if defined(DHD_LB)
-
-static void
-dhd_lb_set_default_cpus(dhd_info_t *dhd)
-{
- /* Default CPU allocation for the jobs */
- atomic_set(&dhd->rx_napi_cpu, 1);
- atomic_set(&dhd->rx_compl_cpu, 2);
- atomic_set(&dhd->tx_compl_cpu, 2);
- atomic_set(&dhd->tx_cpu, 2);
- atomic_set(&dhd->net_tx_cpu, 0);
-}
+#ifdef USE_WFA_CERT_CONF
+int g_frameburst = 1;
+#endif /* USE_WFA_CERT_CONF */
-static void
-dhd_cpumasks_deinit(dhd_info_t *dhd)
-{
- free_cpumask_var(dhd->cpumask_curr_avail);
- free_cpumask_var(dhd->cpumask_primary);
- free_cpumask_var(dhd->cpumask_primary_new);
- free_cpumask_var(dhd->cpumask_secondary);
- free_cpumask_var(dhd->cpumask_secondary_new);
-}
+static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);
-static int
-dhd_cpumasks_init(dhd_info_t *dhd)
-{
- int id;
- uint32 cpus, num_cpus = num_possible_cpus();
- int ret = 0;
+/* DHD Perimeter lock only used in router with bypass forwarding. */
+#define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
+#define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
+#define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
- DHD_ERROR(("%s CPU masks primary(big)=0x%x secondary(little)=0x%x\n", __FUNCTION__,
- DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS));
+#ifdef PCIE_FULL_DONGLE
+#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
+#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
+ spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
+#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
+ spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
- if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) ||
- !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) ||
- !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) ||
- !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) ||
- !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) {
- DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__));
- ret = -ENOMEM;
- goto fail;
- }
-
- cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask);
- cpumask_clear(dhd->cpumask_primary);
- cpumask_clear(dhd->cpumask_secondary);
+#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
+static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
+ struct list_head *snapshot_list);
+static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
+#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
+#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
+#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
+#endif /* PCIE_FULL_DONGLE */
- if (num_cpus > 32) {
- DHD_ERROR(("%s max cpus must be 32, %d too big\n", __FUNCTION__, num_cpus));
- ASSERT(0);
- }
+/* Control fw roaming */
+#ifdef BCMCCX
+uint dhd_roam_disable = 0;
+#else
+uint dhd_roam_disable = 0;
+#endif /* BCMCCX */
- cpus = DHD_LB_PRIMARY_CPUS;
- for (id = 0; id < num_cpus; id++) {
- if (isset(&cpus, id))
- cpumask_set_cpu(id, dhd->cpumask_primary);
- }
+#ifdef BCMDBGFS
+extern void dhd_dbgfs_init(dhd_pub_t *dhdp);
+extern void dhd_dbgfs_remove(void);
+#endif // endif
- cpus = DHD_LB_SECONDARY_CPUS;
- for (id = 0; id < num_cpus; id++) {
- if (isset(&cpus, id))
- cpumask_set_cpu(id, dhd->cpumask_secondary);
- }
+static uint pcie_txs_metadata_enable = 0; /* Enable TX status metadata report */
+module_param(pcie_txs_metadata_enable, int, 0);
- return ret;
-fail:
- dhd_cpumasks_deinit(dhd);
- return ret;
-}
+/* Control radio state */
+uint dhd_radio_up = 1;
-/*
- * The CPU Candidacy Algorithm
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * The available CPUs for selection are divided into two groups
- * Primary Set - A CPU mask that carries the First Choice CPUs
- * Secondary Set - A CPU mask that carries the Second Choice CPUs.
- *
- * There are two types of Job, that needs to be assigned to
- * the CPUs, from one of the above mentioned CPU group. The Jobs are
- * 1) Rx Packet Processing - napi_cpu
- * 2) Completion Processiong (Tx, RX) - compl_cpu
- *
- * To begin with both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
- * on-line/off-line the CPU candidacy algorithm is triggerd. The candidacy
- * algo tries to pickup the first available non boot CPU (CPU0) for napi_cpu.
- * If there are more processors free, it assigns one to compl_cpu.
- * It also tries to ensure that both napi_cpu and compl_cpu are not on the same
- * CPU, as much as possible.
- *
- * By design, both Tx and Rx completion jobs are run on the same CPU core, as it
- * would allow Tx completion skb's to be released into a local free pool from
- * which the rx buffer posts could have been serviced. it is important to note
- * that a Tx packet may not have a large enough buffer for rx posting.
- */
-void dhd_select_cpu_candidacy(dhd_info_t *dhd)
-{
- uint32 primary_available_cpus; /* count of primary available cpus */
- uint32 secondary_available_cpus; /* count of secondary available cpus */
- uint32 napi_cpu = 0; /* cpu selected for napi rx processing */
- uint32 compl_cpu = 0; /* cpu selected for completion jobs */
- uint32 tx_cpu = 0; /* cpu selected for tx processing job */
+/* Network interface name */
+char iface_name[IFNAMSIZ] = {'\0'};
+module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
- cpumask_clear(dhd->cpumask_primary_new);
- cpumask_clear(dhd->cpumask_secondary_new);
+/* The following are specific to the SDIO dongle */
- /*
- * Now select from the primary mask. Even if a Job is
- * already running on a CPU in secondary group, we still move
- * to primary CPU. So no conditional checks.
- */
- cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
- dhd->cpumask_curr_avail);
+/* IOCTL response timeout */
+int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
- cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
- dhd->cpumask_curr_avail);
+/* DS Exit response timeout */
+int ds_exit_timeout_msec = DS_EXIT_TIMEOUT;
- primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);
+/* Idle timeout for backplane clock */
+int dhd_idletime = DHD_IDLETIME_TICKS;
+module_param(dhd_idletime, int, 0);
- if (primary_available_cpus > 0) {
- napi_cpu = cpumask_first(dhd->cpumask_primary_new);
+/* Use polling */
+uint dhd_poll = FALSE;
+module_param(dhd_poll, uint, 0);
- /* If no further CPU is available,
- * cpumask_next returns >= nr_cpu_ids
- */
- tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
- if (tx_cpu >= nr_cpu_ids)
- tx_cpu = 0;
+/* Use interrupts */
+uint dhd_intr = TRUE;
+module_param(dhd_intr, uint, 0);
- /* In case there are no more CPUs, do completions & Tx in same CPU */
- compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_primary_new);
- if (compl_cpu >= nr_cpu_ids)
- compl_cpu = tx_cpu;
- }
+/* SDIO Drive Strength (in milliamps) */
+uint dhd_sdiod_drive_strength = 6;
+module_param(dhd_sdiod_drive_strength, uint, 0);
- DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
- __FUNCTION__, napi_cpu, compl_cpu, tx_cpu));
+#ifdef BCMSDIO
+/* Tx/Rx bounds */
+extern uint dhd_txbound;
+extern uint dhd_rxbound;
+module_param(dhd_txbound, uint, 0);
+module_param(dhd_rxbound, uint, 0);
- /* -- Now check for the CPUs from the secondary mask -- */
- secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);
+/* Deferred transmits */
+extern uint dhd_deferred_tx;
+module_param(dhd_deferred_tx, uint, 0);
- DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
- __FUNCTION__, secondary_available_cpus, nr_cpu_ids));
+#endif /* BCMSDIO */
- if (secondary_available_cpus > 0) {
- /* At this point if napi_cpu is unassigned it means no CPU
- * is online from Primary Group
- */
- if (napi_cpu == 0) {
- napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
- tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
- compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
- } else if (tx_cpu == 0) {
- tx_cpu = cpumask_first(dhd->cpumask_secondary_new);
- compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
- } else if (compl_cpu == 0) {
- compl_cpu = cpumask_first(dhd->cpumask_secondary_new);
- }
+#ifdef SDTEST
+/* Echo packet generator (pkts/s) */
+uint dhd_pktgen = 0;
+module_param(dhd_pktgen, uint, 0);
- /* If no CPU was available for tx processing, choose CPU 0 */
- if (tx_cpu >= nr_cpu_ids)
- tx_cpu = 0;
+/* Echo packet len (0 => sawtooth, max 2040) */
+uint dhd_pktgen_len = 0;
+module_param(dhd_pktgen_len, uint, 0);
+#endif /* SDTEST */
- /* If no CPU was available for completion, choose CPU 0 */
- if (compl_cpu >= nr_cpu_ids)
- compl_cpu = 0;
- }
- if ((primary_available_cpus == 0) &&
- (secondary_available_cpus == 0)) {
- /* No CPUs available from primary or secondary mask */
- napi_cpu = 1;
- compl_cpu = 0;
- tx_cpu = 2;
- }
+#if defined(BCMSUP_4WAY_HANDSHAKE)
+/* Use in dongle supplicant for 4-way handshake */
+#if defined(WLFBT) || defined(WL_ENABLE_IDSUP)
+/* Enable idsup by default (if supported in fw) */
+uint dhd_use_idsup = 1;
+#else
+uint dhd_use_idsup = 0;
+#endif /* WLFBT || WL_ENABLE_IDSUP */
+module_param(dhd_use_idsup, uint, 0);
+#endif /* BCMSUP_4WAY_HANDSHAKE */
- DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
- __FUNCTION__, napi_cpu, compl_cpu, tx_cpu));
+#ifndef BCMDBUS
+/* Allow delayed firmware download for debug purpose */
+int allow_delay_fwdl = FALSE;
+module_param(allow_delay_fwdl, int, 0);
+#endif /* !BCMDBUS */
- ASSERT(napi_cpu < nr_cpu_ids);
- ASSERT(compl_cpu < nr_cpu_ids);
- ASSERT(tx_cpu < nr_cpu_ids);
+#ifdef ECOUNTER_PERIODIC_DISABLE
+uint enable_ecounter = FALSE;
+#else
+uint enable_ecounter = TRUE;
+#endif // endif
+module_param(enable_ecounter, uint, 0);
- atomic_set(&dhd->rx_napi_cpu, napi_cpu);
- atomic_set(&dhd->tx_compl_cpu, compl_cpu);
- atomic_set(&dhd->rx_compl_cpu, compl_cpu);
- atomic_set(&dhd->tx_cpu, tx_cpu);
+/* TCM verification flag */
+uint dhd_tcm_test_enable = FALSE;
+module_param(dhd_tcm_test_enable, uint, 0644);
- return;
-}
+extern char dhd_version[];
+extern char fw_version[];
+extern char clm_version[];
-/*
- * Function to handle CPU Hotplug notifications.
- * One of the task it does is to trigger the CPU Candidacy algorithm
- * for load balancing.
- */
+int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
+static void dhd_net_if_lock_local(dhd_info_t *dhd);
+static void dhd_net_if_unlock_local(dhd_info_t *dhd);
+static void dhd_suspend_lock(dhd_pub_t *dhdp);
+static void dhd_suspend_unlock(dhd_pub_t *dhdp);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+/* Monitor interface */
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
-int dhd_cpu_startup_callback(unsigned int cpu)
-{
- dhd_info_t *dhd = g_dhd_pub->info;
+#ifdef DHD_PM_CONTROL_FROM_FILE
+bool g_pm_control;
+#ifdef DHD_EXPORT_CNTL_FILE
+int pmmode_val;
+#endif /* DHD_EXPORT_CNTL_FILE */
+void sec_control_pm(dhd_pub_t *dhd, uint *);
+#endif /* DHD_PM_CONTROL_FROM_FILE */
- DHD_INFO(("%s(): \r\n cpu:%d", __FUNCTION__, cpu));
- DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
- cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
- dhd_select_cpu_candidacy(dhd);
+#if defined(WL_WIRELESS_EXT)
+struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+#endif /* defined(WL_WIRELESS_EXT) */
- return 0;
-}
+#ifndef BCMDBUS
+static void dhd_dpc(ulong data);
+#endif /* !BCMDBUS */
+/* forward decl */
+extern int dhd_wait_pend8021x(struct net_device *dev);
+void dhd_os_wd_timer_extend(void *bus, bool extend);
-int dhd_cpu_teardown_callback(unsigned int cpu)
-{
- dhd_info_t *dhd = g_dhd_pub->info;
+#ifdef TOE
+#ifndef BDC
+#error TOE requires BDC
+#endif /* !BDC */
+static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
+static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
+#endif /* TOE */
- DHD_INFO(("%s(): \r\n cpu:%d", __FUNCTION__, cpu));
- DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
- cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
- dhd_select_cpu_candidacy(dhd);
+static int dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
+ wl_event_msg_t *event_ptr, void **data_ptr);
- return 0;
-}
-#else
-int
-dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+#if defined(CONFIG_PM_SLEEP)
+static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
{
- unsigned long int cpu = (unsigned long int)hcpu;
+ int ret = NOTIFY_DONE;
+ bool suspend = FALSE;
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
- dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier);
+ dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
- if (!dhd || !(dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE)) {
- DHD_INFO(("%s(): LB data is not initialized yet.\n",
- __FUNCTION__));
- return NOTIFY_BAD;
- }
+ BCM_REFERENCE(dhdinfo);
+ BCM_REFERENCE(suspend);
- switch (action)
- {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
- cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
- dhd_select_cpu_candidacy(dhd);
- break;
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ suspend = TRUE;
+ break;
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
- cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
- dhd_select_cpu_candidacy(dhd);
- break;
- default:
- break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ suspend = FALSE;
+ break;
}
- return NOTIFY_OK;
-}
-#endif /* LINUX_VERSION_CODE < 4.10.0 */
-
-static int dhd_register_cpuhp_callback(dhd_info_t *dhd)
-{
- int cpuhp_ret = 0;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
- cpuhp_ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "dhd",
- dhd_cpu_startup_callback, dhd_cpu_teardown_callback);
-
- if (cpuhp_ret < 0) {
- DHD_ERROR(("%s(): cpuhp_setup_state failed %d RX LB won't happen \r\n",
- __FUNCTION__, cpuhp_ret));
+#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS)
+ if (suspend) {
+ DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
+ dhd_wlfc_suspend(&dhdinfo->pub);
+ DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
+ } else {
+ dhd_wlfc_resume(&dhdinfo->pub);
}
-#else
- /*
- * If we are able to initialize CPU masks, lets register to the
- * CPU Hotplug framework to change the CPU for each job dynamically
- * using candidacy algorithm.
- */
- dhd->cpu_notifier.notifier_call = dhd_cpu_callback;
- register_hotcpu_notifier(&dhd->cpu_notifier); /* Register a callback */
-#endif /* LINUX_VERSION_CODE < 4.10.0 */
- return cpuhp_ret;
-}
+#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */
+
+ dhd_mmc_suspend = suspend;
+ smp_mb();
-static int dhd_unregister_cpuhp_callback(dhd_info_t *dhd)
-{
- int ret = 0;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
- /* Don't want to call tear down while unregistering */
- cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
-#else
- if (dhd->cpu_notifier.notifier_call != NULL) {
- unregister_cpu_notifier(&dhd->cpu_notifier);
- }
-#endif // endif
return ret;
}
-#if defined(DHD_LB_STATS)
-void dhd_lb_stats_init(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd;
- int i, j, num_cpus = num_possible_cpus();
- int alloc_size = sizeof(uint32) * num_cpus;
-
- if (dhdp == NULL) {
- DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n",
- __FUNCTION__));
- return;
- }
+/* To make sure we won't register the same notifier twice, otherwise a loop is likely to
+ * be created in the kernel notifier linked list (with 'next' pointing to itself).
+ */
+static bool dhd_pm_notifier_registered = FALSE;
- dhd = dhdp->info;
- if (dhd == NULL) {
- DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
- return;
- }
+extern int register_pm_notifier(struct notifier_block *nb);
+extern int unregister_pm_notifier(struct notifier_block *nb);
+#endif /* CONFIG_PM_SLEEP */
- DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
- DHD_LB_STATS_CLR(dhd->napi_sched_cnt);
+/* Request scheduling of the bus rx frame */
+static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
+static void dhd_os_rxflock(dhd_pub_t *pub);
+static void dhd_os_rxfunlock(dhd_pub_t *pub);
- dhd->napi_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->napi_percpu_run_cnt) {
- DHD_ERROR(("%s(): napi_percpu_run_cnt malloc failed \n",
- __FUNCTION__));
- return;
- }
- for (i = 0; i < num_cpus; i++)
- DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
+#if defined(DHD_H2D_LOG_TIME_SYNC)
+static void
+dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event);
+#endif /* DHD_H2D_LOG_TIME_SYNC */
- DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);
+/** priv_link is the link between netdev and the dhdif and dhd_info structs. */
+typedef struct dhd_dev_priv {
+ dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
+ dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */
+ int ifidx; /* interface index */
+ void * lkup;
+} dhd_dev_priv_t;
- dhd->rxc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->rxc_percpu_run_cnt) {
- DHD_ERROR(("%s(): rxc_percpu_run_cnt malloc failed \n",
- __FUNCTION__));
- return;
- }
- for (i = 0; i < num_cpus; i++)
- DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);
+#define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
+#define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
+#define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
+#define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
+#define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
+#define DHD_DEV_LKUP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup)
- DHD_LB_STATS_CLR(dhd->txc_sched_cnt);
+/** Clear the dhd net_device's private structure. */
+static inline void
+dhd_dev_priv_clear(struct net_device * dev)
+{
+ dhd_dev_priv_t * dev_priv;
+ ASSERT(dev != (struct net_device *)NULL);
+ dev_priv = DHD_DEV_PRIV(dev);
+ dev_priv->dhd = (dhd_info_t *)NULL;
+ dev_priv->ifp = (dhd_if_t *)NULL;
+ dev_priv->ifidx = DHD_BAD_IF;
+ dev_priv->lkup = (void *)NULL;
+}
- dhd->txc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->txc_percpu_run_cnt) {
- DHD_ERROR(("%s(): txc_percpu_run_cnt malloc failed \n",
- __FUNCTION__));
- return;
- }
- for (i = 0; i < num_cpus; i++)
- DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);
-
- dhd->cpu_online_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->cpu_online_cnt) {
- DHD_ERROR(("%s(): cpu_online_cnt malloc failed \n",
- __FUNCTION__));
- return;
- }
- for (i = 0; i < num_cpus; i++)
- DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]);
-
- dhd->cpu_offline_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->cpu_offline_cnt) {
- DHD_ERROR(("%s(): cpu_offline_cnt malloc failed \n",
- __FUNCTION__));
- return;
- }
- for (i = 0; i < num_cpus; i++)
- DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]);
-
- dhd->txp_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->txp_percpu_run_cnt) {
- DHD_ERROR(("%s(): txp_percpu_run_cnt malloc failed \n",
- __FUNCTION__));
- return;
- }
- for (i = 0; i < num_cpus; i++)
- DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);
-
- dhd->tx_start_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->tx_start_percpu_run_cnt) {
- DHD_ERROR(("%s(): tx_start_percpu_run_cnt malloc failed \n",
- __FUNCTION__));
- return;
- }
- for (i = 0; i < num_cpus; i++)
- DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);
-
- for (j = 0; j < HIST_BIN_SIZE; j++) {
- dhd->napi_rx_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->napi_rx_hist[j]) {
- DHD_ERROR(("%s(): dhd->napi_rx_hist[%d] malloc failed \n",
- __FUNCTION__, j));
- return;
- }
- for (i = 0; i < num_cpus; i++) {
- DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]);
- }
- }
-#ifdef DHD_LB_TXC
- for (j = 0; j < HIST_BIN_SIZE; j++) {
- dhd->txc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->txc_hist[j]) {
- DHD_ERROR(("%s(): dhd->txc_hist[%d] malloc failed \n",
- __FUNCTION__, j));
- return;
- }
- for (i = 0; i < num_cpus; i++) {
- DHD_LB_STATS_CLR(dhd->txc_hist[j][i]);
- }
- }
-#endif /* DHD_LB_TXC */
-#ifdef DHD_LB_RXC
- for (j = 0; j < HIST_BIN_SIZE; j++) {
- dhd->rxc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->rxc_hist[j]) {
- DHD_ERROR(("%s(): dhd->rxc_hist[%d] malloc failed \n",
- __FUNCTION__, j));
- return;
- }
- for (i = 0; i < num_cpus; i++) {
- DHD_LB_STATS_CLR(dhd->rxc_hist[j][i]);
- }
- }
-#endif /* DHD_LB_RXC */
- return;
+/** Setup the dhd net_device's private structure. */
+static inline void
+dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
+ int ifidx)
+{
+ dhd_dev_priv_t * dev_priv;
+ ASSERT(dev != (struct net_device *)NULL);
+ dev_priv = DHD_DEV_PRIV(dev);
+ dev_priv->dhd = dhd;
+ dev_priv->ifp = ifp;
+ dev_priv->ifidx = ifidx;
}
-void dhd_lb_stats_deinit(dhd_pub_t *dhdp)
+/* Return interface pointer */
+struct dhd_if * dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
{
- dhd_info_t *dhd;
- int j, num_cpus = num_possible_cpus();
- int alloc_size = sizeof(uint32) * num_cpus;
+ ASSERT(ifidx < DHD_MAX_IFS);
- if (dhdp == NULL) {
- DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n",
- __FUNCTION__));
- return;
- }
+ if (!dhdp || !dhdp->info || ifidx >= DHD_MAX_IFS)
+ return NULL;
- dhd = dhdp->info;
- if (dhd == NULL) {
- DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
- return;
- }
+ return dhdp->info->iflist[ifidx];
+}
- if (dhd->napi_percpu_run_cnt) {
- MFREE(dhdp->osh, dhd->napi_percpu_run_cnt, alloc_size);
- dhd->napi_percpu_run_cnt = NULL;
- }
- if (dhd->rxc_percpu_run_cnt) {
- MFREE(dhdp->osh, dhd->rxc_percpu_run_cnt, alloc_size);
- dhd->rxc_percpu_run_cnt = NULL;
- }
- if (dhd->txc_percpu_run_cnt) {
- MFREE(dhdp->osh, dhd->txc_percpu_run_cnt, alloc_size);
- dhd->txc_percpu_run_cnt = NULL;
- }
- if (dhd->cpu_online_cnt) {
- MFREE(dhdp->osh, dhd->cpu_online_cnt, alloc_size);
- dhd->cpu_online_cnt = NULL;
- }
- if (dhd->cpu_offline_cnt) {
- MFREE(dhdp->osh, dhd->cpu_offline_cnt, alloc_size);
- dhd->cpu_offline_cnt = NULL;
- }
+#ifdef PCIE_FULL_DONGLE
- if (dhd->txp_percpu_run_cnt) {
- MFREE(dhdp->osh, dhd->txp_percpu_run_cnt, alloc_size);
- dhd->txp_percpu_run_cnt = NULL;
- }
- if (dhd->tx_start_percpu_run_cnt) {
- MFREE(dhdp->osh, dhd->tx_start_percpu_run_cnt, alloc_size);
- dhd->tx_start_percpu_run_cnt = NULL;
- }
+/** Dummy objects are defined with state representing bad|down.
+ * Performance gains come from reducing branch conditionals, better instruction
+ * parallelism, dual issue, fewer load shadows, and availability of larger pipelines.
+ * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
+ * is accessed via the dhd_sta_t.
+ */
- for (j = 0; j < HIST_BIN_SIZE; j++) {
- if (dhd->napi_rx_hist[j]) {
- MFREE(dhdp->osh, dhd->napi_rx_hist[j], alloc_size);
- dhd->napi_rx_hist[j] = NULL;
- }
-#ifdef DHD_LB_TXC
- if (dhd->txc_hist[j]) {
- MFREE(dhdp->osh, dhd->txc_hist[j], alloc_size);
- dhd->txc_hist[j] = NULL;
- }
-#endif /* DHD_LB_TXC */
-#ifdef DHD_LB_RXC
- if (dhd->rxc_hist[j]) {
- MFREE(dhdp->osh, dhd->rxc_hist[j], alloc_size);
- dhd->rxc_hist[j] = NULL;
- }
-#endif /* DHD_LB_RXC */
+/* Dummy dhd_info object */
+dhd_info_t dhd_info_null = {
+ .pub = {
+ .info = &dhd_info_null,
+#ifdef DHDTCPACK_SUPPRESS
+ .tcpack_sup_mode = TCPACK_SUP_REPLACE,
+#endif /* DHDTCPACK_SUPPRESS */
+ .up = FALSE,
+ .busstate = DHD_BUS_DOWN
}
+};
+#define DHD_INFO_NULL (&dhd_info_null)
+#define DHD_PUB_NULL (&dhd_info_null.pub)
- return;
-}
-
-static void dhd_lb_stats_dump_histo(dhd_pub_t *dhdp,
- struct bcmstrbuf *strbuf, uint32 **hist)
-{
- int i, j;
- uint32 *per_cpu_total;
- uint32 total = 0;
- uint32 num_cpus = num_possible_cpus();
-
- per_cpu_total = (uint32 *)MALLOC(dhdp->osh, sizeof(uint32) * num_cpus);
- if (!per_cpu_total) {
- DHD_ERROR(("%s(): dhd->per_cpu_total malloc failed \n", __FUNCTION__));
- return;
- }
- bzero(per_cpu_total, sizeof(uint32) * num_cpus);
+/* Dummy netdevice object */
+struct net_device dhd_net_dev_null = {
+ .reg_state = NETREG_UNREGISTERED
+};
+#define DHD_NET_DEV_NULL (&dhd_net_dev_null)
- bcm_bprintf(strbuf, "CPU: \t\t");
- for (i = 0; i < num_cpus; i++)
- bcm_bprintf(strbuf, "%d\t", i);
- bcm_bprintf(strbuf, "\nBin\n");
+/* Dummy dhd_if object */
+dhd_if_t dhd_if_null = {
+#ifdef WMF
+ .wmf = { .wmf_enable = TRUE },
+#endif // endif
+ .info = DHD_INFO_NULL,
+ .net = DHD_NET_DEV_NULL,
+ .idx = DHD_BAD_IF
+};
+#define DHD_IF_NULL (&dhd_if_null)
- for (i = 0; i < HIST_BIN_SIZE; i++) {
- bcm_bprintf(strbuf, "%d:\t\t", 1<<i);
- for (j = 0; j < num_cpus; j++) {
- bcm_bprintf(strbuf, "%d\t", hist[i][j]);
- }
- bcm_bprintf(strbuf, "\n");
- }
- bcm_bprintf(strbuf, "Per CPU Total \t");
- total = 0;
- for (i = 0; i < num_cpus; i++) {
- for (j = 0; j < HIST_BIN_SIZE; j++) {
- per_cpu_total[i] += (hist[j][i] * (1<<j));
- }
- bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]);
- total += per_cpu_total[i];
- }
- bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total);
+#define DHD_STA_NULL ((dhd_sta_t *)NULL)
- if (per_cpu_total) {
- MFREE(dhdp->osh, per_cpu_total, sizeof(uint32) * num_cpus);
- per_cpu_total = NULL;
- }
- return;
-}
+/** Interface STA list management. */
-static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
-{
- int i, num_cpus = num_possible_cpus();
+/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
+static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
+static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
- bcm_bprintf(strbuf, "CPU: \t");
- for (i = 0; i < num_cpus; i++)
- bcm_bprintf(strbuf, "%d\t", i);
- bcm_bprintf(strbuf, "\n");
+/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
+static void dhd_if_del_sta_list(dhd_if_t * ifp);
+static void dhd_if_flush_sta(dhd_if_t * ifp);
- bcm_bprintf(strbuf, "Val: \t");
- for (i = 0; i < num_cpus; i++)
- bcm_bprintf(strbuf, "%u\t", *(p+i));
- bcm_bprintf(strbuf, "\n");
- return;
-}
+/* Construct/Destruct a sta pool. */
+static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
+static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
+/* Clear the pool of dhd_sta_t objects for built-in type driver */
+static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
-void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+/** Reset a dhd_sta object and free into the dhd pool. */
+static void
+dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
{
- dhd_info_t *dhd;
-
- if (dhdp == NULL || strbuf == NULL) {
- DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
- __FUNCTION__, dhdp, strbuf));
- return;
- }
+ int prio;
- dhd = dhdp->info;
- if (dhd == NULL) {
- DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
- return;
- }
+ ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
- bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
- dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);
+ ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
- bcm_bprintf(strbuf, "\ncpu_offline_cnt:\n");
- dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);
+	/*
+	 * Flush and free all packets in every flow ring's queue belonging to the sta.
+	 * Packets in the flow ring will be flushed later.
+	 */
+ for (prio = 0; prio < (int)NUMPRIO; prio++) {
+ uint16 flowid = sta->flowid[prio];
- bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
- dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
- dhd->txc_sched_cnt);
+ if (flowid != FLOWID_INVALID) {
+ unsigned long flags;
+ flow_ring_node_t * flow_ring_node;
-#ifdef DHD_LB_RXP
- bcm_bprintf(strbuf, "\nnapi_percpu_run_cnt:\n");
- dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
- bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
- dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->napi_rx_hist);
-#endif /* DHD_LB_RXP */
+#ifdef DHDTCPACK_SUPPRESS
+ /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+ * when there is a newly coming packet from network stack.
+ */
+ dhd_tcpack_info_tbl_clean(dhdp);
+#endif /* DHDTCPACK_SUPPRESS */
-#ifdef DHD_LB_RXC
- bcm_bprintf(strbuf, "\nrxc_percpu_run_cnt:\n");
- dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt);
- bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n");
- dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->rxc_hist);
-#endif /* DHD_LB_RXC */
+ flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
+ if (flow_ring_node) {
+ flow_queue_t *queue = &flow_ring_node->queue;
-#ifdef DHD_LB_TXC
- bcm_bprintf(strbuf, "\ntxc_percpu_run_cnt:\n");
- dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt);
- bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n");
- dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->txc_hist);
-#endif /* DHD_LB_TXC */
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+ flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;
-#ifdef DHD_LB_TXP
- bcm_bprintf(strbuf, "\ntxp_percpu_run_cnt:\n");
- dhd_lb_stats_dump_cpu_array(strbuf, dhd->txp_percpu_run_cnt);
+ if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
+ void * pkt;
+ while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) !=
+ NULL) {
+ PKTFREE(dhdp->osh, pkt, TRUE);
+ }
+ }
- bcm_bprintf(strbuf, "\ntx_start_percpu_run_cnt:\n");
- dhd_lb_stats_dump_cpu_array(strbuf, dhd->tx_start_percpu_run_cnt);
-#endif /* DHD_LB_TXP */
-}
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
+ }
+ }
-/* Given a number 'n' returns 'm' that is next larger power of 2 after n */
-static inline uint32 next_larger_power2(uint32 num)
-{
- num--;
- num |= (num >> 1);
- num |= (num >> 2);
- num |= (num >> 4);
- num |= (num >> 8);
- num |= (num >> 16);
+ sta->flowid[prio] = FLOWID_INVALID;
+ }
- return (num + 1);
+ id16_map_free(dhdp->staid_allocator, sta->idx);
+ DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
+ sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
+ sta->ifidx = DHD_BAD_IF;
+ bzero(sta->ea.octet, ETHER_ADDR_LEN);
+ INIT_LIST_HEAD(&sta->list);
+ sta->idx = ID16_INVALID; /* implying free */
}
-static void dhd_lb_stats_update_histo(uint32 **bin, uint32 count, uint32 cpu)
+/** Allocate a dhd_sta object from the dhd pool. */
+static dhd_sta_t *
+dhd_sta_alloc(dhd_pub_t * dhdp)
{
- uint32 bin_power;
- uint32 *p;
- bin_power = next_larger_power2(count);
-
- switch (bin_power) {
- case 1: p = bin[0] + cpu; break;
- case 2: p = bin[1] + cpu; break;
- case 4: p = bin[2] + cpu; break;
- case 8: p = bin[3] + cpu; break;
- case 16: p = bin[4] + cpu; break;
- case 32: p = bin[5] + cpu; break;
- case 64: p = bin[6] + cpu; break;
- case 128: p = bin[7] + cpu; break;
- default : p = bin[8] + cpu; break;
- }
+ uint16 idx;
+ dhd_sta_t * sta;
+ dhd_sta_pool_t * sta_pool;
- *p = *p + 1;
- return;
-}
+ ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
-extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count)
-{
- int cpu;
- dhd_info_t *dhd = dhdp->info;
+ idx = id16_map_alloc(dhdp->staid_allocator);
+ if (idx == ID16_INVALID) {
+ DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
+ return DHD_STA_NULL;
+ }
- cpu = get_cpu();
- put_cpu();
- dhd_lb_stats_update_histo(dhd->napi_rx_hist, count, cpu);
+ sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
+ sta = &sta_pool[idx];
- return;
-}
+ ASSERT((sta->idx == ID16_INVALID) &&
+ (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
-extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count)
-{
- int cpu;
- dhd_info_t *dhd = dhdp->info;
+ DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
- cpu = get_cpu();
- put_cpu();
- dhd_lb_stats_update_histo(dhd->txc_hist, count, cpu);
+ sta->idx = idx; /* implying allocated */
- return;
+ return sta;
}
-extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count)
+/** Delete all STAs in an interface's STA list. */
+static void
+dhd_if_del_sta_list(dhd_if_t *ifp)
{
- int cpu;
- dhd_info_t *dhd = dhdp->info;
+ dhd_sta_t *sta, *next;
+ unsigned long flags;
- cpu = get_cpu();
- put_cpu();
- dhd_lb_stats_update_histo(dhd->rxc_hist, count, cpu);
+ DHD_IF_STA_LIST_LOCK(ifp, flags);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+ list_del(&sta->list);
+ dhd_sta_free(&ifp->info->pub, sta);
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
return;
}
-extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp)
+/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
+static void
+dhd_if_flush_sta(dhd_if_t * ifp)
{
- dhd_info_t *dhd = dhdp->info;
- DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt);
}
-extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd = dhdp->info;
- DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt);
-}
-#endif /* DHD_LB_STATS */
+/** Construct a pool of dhd_sta_t objects to be used by interfaces. */
+static int
+dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
+{
+ int idx, prio, sta_pool_memsz;
+ dhd_sta_t * sta;
+ dhd_sta_pool_t * sta_pool;
+ void * staid_allocator;
-#endif /* DHD_LB */
+ ASSERT(dhdp != (dhd_pub_t *)NULL);
+ ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
-#ifdef USE_WFA_CERT_CONF
-int g_frameburst = 1;
-#endif /* USE_WFA_CERT_CONF */
+ /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
+ staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
+ if (staid_allocator == NULL) {
+ DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
-static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);
+ /* Pre allocate a pool of dhd_sta objects (one extra). */
+ sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
+ sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
+ if (sta_pool == NULL) {
+ DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
+ id16_map_fini(dhdp->osh, staid_allocator);
+ return BCME_ERROR;
+ }
-/* DHD Perimiter lock only used in router with bypass forwarding. */
-#define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
-#define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
-#define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
+ dhdp->sta_pool = sta_pool;
+ dhdp->staid_allocator = staid_allocator;
-#ifdef PCIE_FULL_DONGLE
-#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
-#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
- spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
-#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
- spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
+ /* Initialize all sta(s) for the pre-allocated free pool. */
+ bzero((uchar *)sta_pool, sta_pool_memsz);
+ for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
+ sta = &sta_pool[idx];
+ sta->idx = id16_map_alloc(staid_allocator);
+ ASSERT(sta->idx <= max_sta);
+ }
-#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
-static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
- struct list_head *snapshot_list);
-static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
-#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
-#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
-#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
-#endif /* PCIE_FULL_DONGLE */
+ /* Now place them into the pre-allocated free pool. */
+ for (idx = 1; idx <= max_sta; idx++) {
+ sta = &sta_pool[idx];
+ for (prio = 0; prio < (int)NUMPRIO; prio++) {
+ sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
+ }
+ dhd_sta_free(dhdp, sta);
+ }
-/* Control fw roaming */
-#ifdef BCMCCX
-uint dhd_roam_disable = 0;
-#else
-uint dhd_roam_disable = 0;
-#endif /* BCMCCX */
+ return BCME_OK;
+}
-#ifdef BCMDBGFS
-extern void dhd_dbgfs_init(dhd_pub_t *dhdp);
-extern void dhd_dbgfs_remove(void);
-#endif // endif
+/** Destruct the pool of dhd_sta_t objects.
+ * Caller must ensure that no STA objects are currently associated with an if.
+ */
+static void
+dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
+{
+ dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
-static uint pcie_txs_metadata_enable = 0; /* Enable TX status metadta report */
-module_param(pcie_txs_metadata_enable, int, 0);
+ if (sta_pool) {
+ int idx;
+ int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
+ for (idx = 1; idx <= max_sta; idx++) {
+ ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
+ ASSERT(sta_pool[idx].idx == ID16_INVALID);
+ }
+ MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
+ dhdp->sta_pool = NULL;
+ }
-/* Control radio state */
-uint dhd_radio_up = 1;
+ id16_map_fini(dhdp->osh, dhdp->staid_allocator);
+ dhdp->staid_allocator = NULL;
+}
-/* Network inteface name */
-char iface_name[IFNAMSIZ] = {'\0'};
-module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
+/* Clear the pool of dhd_sta_t objects for built-in type driver */
+static void
+dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
+{
+ int idx, prio, sta_pool_memsz;
+ dhd_sta_t * sta;
+ dhd_sta_pool_t * sta_pool;
+ void *staid_allocator;
-/* The following are specific to the SDIO dongle */
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return;
+ }
-/* IOCTL response timeout */
-int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
+ sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
+ staid_allocator = dhdp->staid_allocator;
-/* DS Exit response timeout */
-int ds_exit_timeout_msec = DS_EXIT_TIMEOUT;
+ if (!sta_pool) {
+ DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
+ return;
+ }
-/* Idle timeout for backplane clock */
-int dhd_idletime = DHD_IDLETIME_TICKS;
-module_param(dhd_idletime, int, 0);
+ if (!staid_allocator) {
+ DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
+ return;
+ }
-/* Use polling */
-uint dhd_poll = FALSE;
-module_param(dhd_poll, uint, 0);
+ /* clear free pool */
+ sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
+ bzero((uchar *)sta_pool, sta_pool_memsz);
-/* Use interrupts */
-uint dhd_intr = TRUE;
-module_param(dhd_intr, uint, 0);
+ /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
+ id16_map_clear(staid_allocator, max_sta, 1);
-/* SDIO Drive Strength (in milliamps) */
-uint dhd_sdiod_drive_strength = 6;
-module_param(dhd_sdiod_drive_strength, uint, 0);
+ /* Initialize all sta(s) for the pre-allocated free pool. */
+ for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
+ sta = &sta_pool[idx];
+ sta->idx = id16_map_alloc(staid_allocator);
+ ASSERT(sta->idx <= max_sta);
+ }
+ /* Now place them into the pre-allocated free pool. */
+ for (idx = 1; idx <= max_sta; idx++) {
+ sta = &sta_pool[idx];
+ for (prio = 0; prio < (int)NUMPRIO; prio++) {
+ sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
+ }
+ dhd_sta_free(dhdp, sta);
+ }
+}
-#ifdef BCMSDIO
-/* Tx/Rx bounds */
-extern uint dhd_txbound;
-extern uint dhd_rxbound;
-module_param(dhd_txbound, uint, 0);
-module_param(dhd_rxbound, uint, 0);
+/** Find STA with MAC address ea in an interface's STA list. */
+dhd_sta_t *
+dhd_find_sta(void *pub, int ifidx, void *ea)
+{
+ dhd_sta_t *sta;
+ dhd_if_t *ifp;
+ unsigned long flags;
-/* Deferred transmits */
-extern uint dhd_deferred_tx;
-module_param(dhd_deferred_tx, uint, 0);
+ ASSERT(ea != NULL);
+ ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+ if (ifp == NULL)
+ return DHD_STA_NULL;
-#endif /* BCMSDIO */
+ DHD_IF_STA_LIST_LOCK(ifp, flags);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ list_for_each_entry(sta, &ifp->sta_list, list) {
+ if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
+ DHD_INFO(("%s: Found STA " MACDBG "\n",
+ __FUNCTION__, MAC2STRDBG((char *)ea)));
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+ return sta;
+ }
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
-#ifdef SDTEST
-/* Echo packet generator (pkts/s) */
-uint dhd_pktgen = 0;
-module_param(dhd_pktgen, uint, 0);
+ return DHD_STA_NULL;
+}
-/* Echo packet len (0 => sawtooth, max 2040) */
-uint dhd_pktgen_len = 0;
-module_param(dhd_pktgen_len, uint, 0);
-#endif /* SDTEST */
+/** Add STA into the interface's STA list. */
+dhd_sta_t *
+dhd_add_sta(void *pub, int ifidx, void *ea)
+{
+ dhd_sta_t *sta;
+ dhd_if_t *ifp;
+ unsigned long flags;
-#if defined(BCMSUP_4WAY_HANDSHAKE)
-/* Use in dongle supplicant for 4-way handshake */
-#if defined(WLFBT) || defined(WL_ENABLE_IDSUP)
-/* Enable idsup by default (if supported in fw) */
-uint dhd_use_idsup = 1;
-#else
-uint dhd_use_idsup = 0;
-#endif /* WLFBT || WL_ENABLE_IDSUP */
-module_param(dhd_use_idsup, uint, 0);
-#endif /* BCMSUP_4WAY_HANDSHAKE */
+ ASSERT(ea != NULL);
+ ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+ if (ifp == NULL)
+ return DHD_STA_NULL;
-#ifndef BCMDBUS
-/* Allow delayed firmware download for debug purpose */
-int allow_delay_fwdl = FALSE;
-module_param(allow_delay_fwdl, int, 0);
-#endif /* !BCMDBUS */
+ if (!memcmp(ifp->net->dev_addr, ea, ETHER_ADDR_LEN)) {
+ DHD_ERROR(("%s: Serious FAILURE, receive own MAC %pM !!\n", __FUNCTION__, ea));
+ return DHD_STA_NULL;
+ }
-#ifdef ECOUNTER_PERIODIC_DISABLE
-uint enable_ecounter = FALSE;
-#else
-uint enable_ecounter = TRUE;
-#endif // endif
-module_param(enable_ecounter, uint, 0);
+ sta = dhd_sta_alloc((dhd_pub_t *)pub);
+ if (sta == DHD_STA_NULL) {
+ DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
+ return DHD_STA_NULL;
+ }
-extern char dhd_version[];
-extern char fw_version[];
-extern char clm_version[];
-
-int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
-static void dhd_net_if_lock_local(dhd_info_t *dhd);
-static void dhd_net_if_unlock_local(dhd_info_t *dhd);
-static void dhd_suspend_lock(dhd_pub_t *dhdp);
-static void dhd_suspend_unlock(dhd_pub_t *dhdp);
+ memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
-/* Monitor interface */
-int dhd_monitor_init(void *dhd_pub);
-int dhd_monitor_uninit(void);
+ /* link the sta and the dhd interface */
+ sta->ifp = ifp;
+ sta->ifidx = ifidx;
+ INIT_LIST_HEAD(&sta->list);
-#ifdef DHD_PM_CONTROL_FROM_FILE
-bool g_pm_control;
-void sec_control_pm(dhd_pub_t *dhd, uint *);
-#endif /* DHD_PM_CONTROL_FROM_FILE */
+ DHD_IF_STA_LIST_LOCK(ifp, flags);
-#if defined(WL_WIRELESS_EXT)
-struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
-#endif /* defined(WL_WIRELESS_EXT) */
+ list_add_tail(&sta->list, &ifp->sta_list);
-#ifndef BCMDBUS
-static void dhd_dpc(ulong data);
-#endif /* !BCMDBUS */
-/* forward decl */
-extern int dhd_wait_pend8021x(struct net_device *dev);
-void dhd_os_wd_timer_extend(void *bus, bool extend);
+ DHD_ERROR(("%s: Adding STA " MACDBG "\n",
+ __FUNCTION__, MAC2STRDBG((char *)ea)));
-#ifdef TOE
-#ifndef BDC
-#error TOE requires BDC
-#endif /* !BDC */
-static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
-static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
-#endif /* TOE */
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
-static int dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
- wl_event_msg_t *event_ptr, void **data_ptr);
+ return sta;
+}
-#if defined(CONFIG_PM_SLEEP)
-static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
+/** Delete all STAs from the interface's STA list. */
+void
+dhd_del_all_sta(void *pub, int ifidx)
{
- int ret = NOTIFY_DONE;
- bool suspend = FALSE;
+ dhd_sta_t *sta, *next;
+ dhd_if_t *ifp;
+ unsigned long flags;
+
+ ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+ if (ifp == NULL)
+ return;
+ DHD_IF_STA_LIST_LOCK(ifp, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
- dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
+ list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+
+ list_del(&sta->list);
+ dhd_sta_free(&ifp->info->pub, sta);
+#ifdef DHD_L2_FILTER
+ if (ifp->parp_enable) {
+ /* clear Proxy ARP cache of specific Ethernet Address */
+ bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh,
+ ifp->phnd_arp_table, FALSE,
+ sta->ea.octet, FALSE, ((dhd_pub_t*)pub)->tickcnt);
+ }
+#endif /* DHD_L2_FILTER */
+ }
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
- BCM_REFERENCE(dhdinfo);
- BCM_REFERENCE(suspend);
+ return;
+}
- switch (action) {
- case PM_HIBERNATION_PREPARE:
- case PM_SUSPEND_PREPARE:
- suspend = TRUE;
- break;
+/** Delete STA from the interface's STA list. */
+void
+dhd_del_sta(void *pub, int ifidx, void *ea)
+{
+ dhd_sta_t *sta, *next;
+ dhd_if_t *ifp;
+ unsigned long flags;
- case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
- suspend = FALSE;
- break;
- }
+ ASSERT(ea != NULL);
+ ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+ if (ifp == NULL)
+ return;
-#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS)
- if (suspend) {
- DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
- dhd_wlfc_suspend(&dhdinfo->pub);
- DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
- } else {
- dhd_wlfc_resume(&dhdinfo->pub);
+ DHD_IF_STA_LIST_LOCK(ifp, flags);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+ if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
+ DHD_ERROR(("%s: Deleting STA " MACDBG "\n",
+ __FUNCTION__, MAC2STRDBG(sta->ea.octet)));
+ list_del(&sta->list);
+ dhd_sta_free(&ifp->info->pub, sta);
+ }
}
-#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
- KERNEL_VERSION(2, 6, 39))
- dhd_mmc_suspend = suspend;
- smp_mb();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
#endif // endif
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+#ifdef DHD_L2_FILTER
+ if (ifp->parp_enable) {
+ /* clear Proxy ARP cache of specific Ethernet Address */
+ bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
+ ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
+ }
+#endif /* DHD_L2_FILTER */
+ return;
+}
- return ret;
+/** Add STA if it doesn't exist. Not reentrant. */
+dhd_sta_t*
+dhd_findadd_sta(void *pub, int ifidx, void *ea)
+{
+ dhd_sta_t *sta;
+
+ sta = dhd_find_sta(pub, ifidx, ea);
+
+ if (!sta) {
+ /* Add entry */
+ sta = dhd_add_sta(pub, ifidx, ea);
+ }
+
+ return sta;
}
-/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
- * created in kernel notifier link list (with 'next' pointing to itself)
- */
-static bool dhd_pm_notifier_registered = FALSE;
+#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
+static struct list_head *
+dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
+{
+ unsigned long flags;
+ dhd_sta_t *sta, *snapshot;
-extern int register_pm_notifier(struct notifier_block *nb);
-extern int unregister_pm_notifier(struct notifier_block *nb);
-#endif /* CONFIG_PM_SLEEP */
+ INIT_LIST_HEAD(snapshot_list);
-/* Request scheduling of the bus rx frame */
-static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
-static void dhd_os_rxflock(dhd_pub_t *pub);
-static void dhd_os_rxfunlock(dhd_pub_t *pub);
+ DHD_IF_STA_LIST_LOCK(ifp, flags);
-#if defined(DHD_H2D_LOG_TIME_SYNC)
-static void
-dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event);
-#endif /* DHD_H2D_LOG_TIME_SYNC */
+ list_for_each_entry(sta, &ifp->sta_list, list) {
+ /* allocate one and add to snapshot */
+ snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
+ if (snapshot == NULL) {
+ DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
+ continue;
+ }
-/** priv_link is the link between netdev and the dhdif and dhd_info structs. */
-typedef struct dhd_dev_priv {
- dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
- dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */
- int ifidx; /* interface index */
- void * lkup;
-} dhd_dev_priv_t;
+ memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);
-#define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
-#define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
-#define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
-#define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
-#define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
-#define DHD_DEV_LKUP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup)
+ INIT_LIST_HEAD(&snapshot->list);
+ list_add_tail(&snapshot->list, snapshot_list);
+ }
-#if defined(DHD_OF_SUPPORT)
-extern int dhd_wlan_init(void);
-#endif /* defined(DHD_OF_SUPPORT) */
-/** Clear the dhd net_device's private structure. */
-static inline void
-dhd_dev_priv_clear(struct net_device * dev)
-{
- dhd_dev_priv_t * dev_priv;
- ASSERT(dev != (struct net_device *)NULL);
- dev_priv = DHD_DEV_PRIV(dev);
- dev_priv->dhd = (dhd_info_t *)NULL;
- dev_priv->ifp = (dhd_if_t *)NULL;
- dev_priv->ifidx = DHD_BAD_IF;
- dev_priv->lkup = (void *)NULL;
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+
+ return snapshot_list;
}
-/** Setup the dhd net_device's private structure. */
-static inline void
-dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
- int ifidx)
+static void
+dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
{
- dhd_dev_priv_t * dev_priv;
- ASSERT(dev != (struct net_device *)NULL);
- dev_priv = DHD_DEV_PRIV(dev);
- dev_priv->dhd = dhd;
- dev_priv->ifp = ifp;
- dev_priv->ifidx = ifidx;
+ dhd_sta_t *sta, *next;
+
+ list_for_each_entry_safe(sta, next, snapshot_list, list) {
+ list_del(&sta->list);
+ MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
+ }
}
+#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
-/* Return interface pointer */
-static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
-{
- ASSERT(ifidx < DHD_MAX_IFS);
+#else
+static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
+static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
+static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
+static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
+static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
+dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
+dhd_sta_t *dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL; }
+void dhd_del_sta(void *pub, int ifidx, void *ea) {}
+#endif /* PCIE_FULL_DONGLE */
- if (ifidx >= DHD_MAX_IFS)
- return NULL;
+#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
+void
+dhd_axi_error_dispatch(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ schedule_work(&dhd->axi_error_dispatcher_work);
+}
- return dhdp->info->iflist[ifidx];
+static void dhd_axi_error_dispatcher_fn(struct work_struct * work)
+{
+ struct dhd_info *dhd =
+ container_of(work, struct dhd_info, axi_error_dispatcher_work);
+ dhd_axi_error(&dhd->pub);
}
+#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
-#ifdef PCIE_FULL_DONGLE
+/** Returns dhd iflist index corresponding the the bssidx provided by apps */
+int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
+{
+ dhd_if_t *ifp;
+ dhd_info_t *dhd = dhdp->info;
+ int i;
-/** Dummy objects are defined with state representing bad|down.
- * Performance gains from reducing branch conditionals, instruction parallelism,
- * dual issue, reducing load shadows, avail of larger pipelines.
- * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
- * is accessed via the dhd_sta_t.
- */
+ ASSERT(bssidx < DHD_MAX_IFS);
+ ASSERT(dhdp);
-/* Dummy dhd_info object */
-dhd_info_t dhd_info_null = {
- .pub = {
- .info = &dhd_info_null,
-#ifdef DHDTCPACK_SUPPRESS
- .tcpack_sup_mode = TCPACK_SUP_REPLACE,
-#endif /* DHDTCPACK_SUPPRESS */
- .up = FALSE,
- .busstate = DHD_BUS_DOWN
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ ifp = dhd->iflist[i];
+ if (ifp && (ifp->bssidx == bssidx)) {
+ DHD_TRACE(("Index manipulated for %s from %d to %d\n",
+ ifp->name, bssidx, i));
+ break;
+ }
}
-};
-#define DHD_INFO_NULL (&dhd_info_null)
-#define DHD_PUB_NULL (&dhd_info_null.pub)
-
-/* Dummy netdevice object */
-struct net_device dhd_net_dev_null = {
- .reg_state = NETREG_UNREGISTERED
-};
-#define DHD_NET_DEV_NULL (&dhd_net_dev_null)
-
-/* Dummy dhd_if object */
-dhd_if_t dhd_if_null = {
-#ifdef WMF
- .wmf = { .wmf_enable = TRUE },
-#endif // endif
- .info = DHD_INFO_NULL,
- .net = DHD_NET_DEV_NULL,
- .idx = DHD_BAD_IF
-};
-#define DHD_IF_NULL (&dhd_if_null)
-
-#define DHD_STA_NULL ((dhd_sta_t *)NULL)
+ return i;
+}
-/** Interface STA list management. */
+static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
+{
+ uint32 store_idx;
+ uint32 sent_idx;
-/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
-static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
-static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
+ if (!skb) {
+ DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
+ return BCME_ERROR;
+ }
-/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
-static void dhd_if_del_sta_list(dhd_if_t * ifp);
-static void dhd_if_flush_sta(dhd_if_t * ifp);
+ dhd_os_rxflock(dhdp);
+ store_idx = dhdp->store_idx;
+ sent_idx = dhdp->sent_idx;
+ if (dhdp->skbbuf[store_idx] != NULL) {
+ /* Make sure the previous packets are processed */
+ dhd_os_rxfunlock(dhdp);
+ DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
+ skb, store_idx, sent_idx));
+ /* removed msleep here, should use wait_event_timeout if we
+ * want to give rx frame thread a chance to run
+ */
+#if defined(WAIT_DEQUEUE)
+ OSL_SLEEP(1);
+#endif // endif
+ return BCME_ERROR;
+ }
+ DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
+ skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
+ dhdp->skbbuf[store_idx] = skb;
+ dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
+ dhd_os_rxfunlock(dhdp);
-/* Construct/Destruct a sta pool. */
-static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
-static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
-/* Clear the pool of dhd_sta_t objects for built-in type driver */
-static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
+ return BCME_OK;
+}
-/** Reset a dhd_sta object and free into the dhd pool. */
-static void
-dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
+static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
{
- int prio;
-
- ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
-
- ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
+ uint32 store_idx;
+ uint32 sent_idx;
+ void *skb;
- /*
- * Flush and free all packets in all flowring's queues belonging to sta.
- * Packets in flow ring will be flushed later.
- */
- for (prio = 0; prio < (int)NUMPRIO; prio++) {
- uint16 flowid = sta->flowid[prio];
+ dhd_os_rxflock(dhdp);
- if (flowid != FLOWID_INVALID) {
- unsigned long flags;
- flow_ring_node_t * flow_ring_node;
+ store_idx = dhdp->store_idx;
+ sent_idx = dhdp->sent_idx;
+ skb = dhdp->skbbuf[sent_idx];
-#ifdef DHDTCPACK_SUPPRESS
- /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
- * when there is a newly coming packet from network stack.
- */
- dhd_tcpack_info_tbl_clean(dhdp);
-#endif /* DHDTCPACK_SUPPRESS */
+ if (skb == NULL) {
+ dhd_os_rxfunlock(dhdp);
+ DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
+ store_idx, sent_idx));
+ return NULL;
+ }
- flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
- if (flow_ring_node) {
- flow_queue_t *queue = &flow_ring_node->queue;
+ dhdp->skbbuf[sent_idx] = NULL;
+ dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
- DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
- flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;
+ DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
+ skb, sent_idx));
- if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
- void * pkt;
- while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) !=
- NULL) {
- PKTFREE(dhdp->osh, pkt, TRUE);
- }
- }
+ dhd_os_rxfunlock(dhdp);
- DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
- ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
- }
- }
+ return skb;
+}
- sta->flowid[prio] = FLOWID_INVALID;
+int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
+{
+ if (prepost) { /* pre process */
+ dhd_read_cis(dhdp);
+ dhd_check_module_cid(dhdp);
+ dhd_check_module_mac(dhdp);
+ dhd_set_macaddr_from_file(dhdp);
+ } else { /* post process */
+ dhd_write_macaddr(&dhdp->mac);
+ dhd_clear_cis(dhdp);
}
- id16_map_free(dhdp->staid_allocator, sta->idx);
- DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
- sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
- sta->ifidx = DHD_BAD_IF;
- bzero(sta->ea.octet, ETHER_ADDR_LEN);
- INIT_LIST_HEAD(&sta->list);
- sta->idx = ID16_INVALID; /* implying free */
+ return 0;
}
-/** Allocate a dhd_sta object from the dhd pool. */
-static dhd_sta_t *
-dhd_sta_alloc(dhd_pub_t * dhdp)
+#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) && defined(DHD_FW_COREDUMP)
+static int dhd_wait_for_file_dump(dhd_pub_t *dhdp)
{
- uint16 idx;
- dhd_sta_t * sta;
- dhd_sta_pool_t * sta_pool;
+ struct net_device *primary_ndev;
+ struct bcm_cfg80211 *cfg;
+ unsigned long flags = 0;
+ primary_ndev = dhd_linux_get_primary_netdev(dhdp);
- ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
+ if (!primary_ndev) {
+ DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ cfg = wl_get_cfg(primary_ndev);
- idx = id16_map_alloc(dhdp->staid_allocator);
- if (idx == ID16_INVALID) {
- DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
- return DHD_STA_NULL;
+ if (!cfg) {
+ DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
+ return BCME_ERROR;
}
- sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
- sta = &sta_pool[idx];
+ DHD_GENERAL_LOCK(dhdp, flags);
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
+ DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp);
+ dhd_os_busbusy_wake(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ DHD_BUS_BUSY_SET_IN_HALDUMP(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
- ASSERT((sta->idx == ID16_INVALID) &&
- (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
+ DHD_OS_WAKE_LOCK(dhdp);
+ /* check for hal started and only then send event if not clear dump state here */
+ if (wl_cfg80211_is_hal_started(cfg)) {
+ int timeleft = 0;
- DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
+ DHD_ERROR(("[DUMP] %s: HAL started. send urgent event\n", __FUNCTION__));
+ dhd_dbg_send_urgent_evt(dhdp, NULL, 0);
- sta->idx = idx; /* implying allocated */
+ DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n",
+ __FUNCTION__, dhdp->dhd_bus_busy_state));
+ timeleft = dhd_os_busbusy_wait_bitmask(dhdp,
+ &dhdp->dhd_bus_busy_state, DHD_BUS_BUSY_IN_HALDUMP, 0);
+ if ((dhdp->dhd_bus_busy_state & DHD_BUS_BUSY_IN_HALDUMP) != 0) {
+ DHD_ERROR(("%s: Timed out dhd_bus_busy_state=0x%x\n",
+ __FUNCTION__, dhdp->dhd_bus_busy_state));
+ }
+ } else {
+ DHD_ERROR(("[DUMP] %s: HAL Not started. skip urgent event\n", __FUNCTION__));
+ }
+ DHD_OS_WAKE_UNLOCK(dhdp);
+ /* In case of dhd_os_busbusy_wait_bitmask() timeout,
+ * hal dump bit will not be cleared. Hence clearing it here.
+ */
+ DHD_GENERAL_LOCK(dhdp, flags);
+ DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp);
+ dhd_os_busbusy_wake(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
- return sta;
+ return BCME_OK;
}
+#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT && DHD_FW_CORE_DUMP */
-/** Delete all STAs in an interface's STA list. */
-static void
-dhd_if_del_sta_list(dhd_if_t *ifp)
+// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
+#if defined(PKT_FILTER_SUPPORT)
+#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
+static bool
+_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode_param)
{
- dhd_sta_t *sta, *next;
- unsigned long flags;
-
- DHD_IF_STA_LIST_LOCK(ifp, flags);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
- list_del(&sta->list);
- dhd_sta_free(&ifp->info->pub, sta);
+ bool _apply = FALSE;
+ /* In case of IBSS mode, apply arp pkt filter */
+ if (op_mode_param & DHD_FLAG_IBSS_MODE) {
+ _apply = TRUE;
+ goto exit;
+ }
+ /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
+ if (op_mode_param & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE)) {
+ _apply = TRUE;
+ goto exit;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- DHD_IF_STA_LIST_UNLOCK(ifp, flags);
- return;
+exit:
+ return _apply;
}
+#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
-/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
-static void
-dhd_if_flush_sta(dhd_if_t * ifp)
+void
+dhd_set_packet_filter(dhd_pub_t *dhd)
{
-}
-
-/** Construct a pool of dhd_sta_t objects to be used by interfaces. */
-static int
-dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
-{
- int idx, prio, sta_pool_memsz;
- dhd_sta_t * sta;
- dhd_sta_pool_t * sta_pool;
- void * staid_allocator;
-
- ASSERT(dhdp != (dhd_pub_t *)NULL);
- ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
-
- /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
- staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
- if (staid_allocator == NULL) {
- DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
- return BCME_ERROR;
- }
-
- /* Pre allocate a pool of dhd_sta objects (one extra). */
- sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
- sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
- if (sta_pool == NULL) {
- DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
- id16_map_fini(dhdp->osh, staid_allocator);
- return BCME_ERROR;
- }
-
- dhdp->sta_pool = sta_pool;
- dhdp->staid_allocator = staid_allocator;
-
- /* Initialize all sta(s) for the pre-allocated free pool. */
- bzero((uchar *)sta_pool, sta_pool_memsz);
- for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
- sta = &sta_pool[idx];
- sta->idx = id16_map_alloc(staid_allocator);
- ASSERT(sta->idx <= max_sta);
- }
-
- /* Now place them into the pre-allocated free pool. */
- for (idx = 1; idx <= max_sta; idx++) {
- sta = &sta_pool[idx];
- for (prio = 0; prio < (int)NUMPRIO; prio++) {
- sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
- }
- dhd_sta_free(dhdp, sta);
- }
-
- return BCME_OK;
-}
-
-/** Destruct the pool of dhd_sta_t objects.
- * Caller must ensure that no STA objects are currently associated with an if.
- */
-static void
-dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
-{
- dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
-
- if (sta_pool) {
- int idx;
- int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
- for (idx = 1; idx <= max_sta; idx++) {
- ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
- ASSERT(sta_pool[idx].idx == ID16_INVALID);
- }
- MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
- dhdp->sta_pool = NULL;
- }
-
- id16_map_fini(dhdp->osh, dhdp->staid_allocator);
- dhdp->staid_allocator = NULL;
-}
-
-/* Clear the pool of dhd_sta_t objects for built-in type driver */
-static void
-dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
-{
- int idx, prio, sta_pool_memsz;
- dhd_sta_t * sta;
- dhd_sta_pool_t * sta_pool;
- void *staid_allocator;
-
- if (!dhdp) {
- DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
- return;
- }
-
- sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
- staid_allocator = dhdp->staid_allocator;
-
- if (!sta_pool) {
- DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
- return;
- }
-
- if (!staid_allocator) {
- DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
- return;
- }
-
- /* clear free pool */
- sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
- bzero((uchar *)sta_pool, sta_pool_memsz);
-
- /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
- id16_map_clear(staid_allocator, max_sta, 1);
-
- /* Initialize all sta(s) for the pre-allocated free pool. */
- for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
- sta = &sta_pool[idx];
- sta->idx = id16_map_alloc(staid_allocator);
- ASSERT(sta->idx <= max_sta);
- }
- /* Now place them into the pre-allocated free pool. */
- for (idx = 1; idx <= max_sta; idx++) {
- sta = &sta_pool[idx];
- for (prio = 0; prio < (int)NUMPRIO; prio++) {
- sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
- }
- dhd_sta_free(dhdp, sta);
- }
-}
-
-/** Find STA with MAC address ea in an interface's STA list. */
-dhd_sta_t *
-dhd_find_sta(void *pub, int ifidx, void *ea)
-{
- dhd_sta_t *sta;
- dhd_if_t *ifp;
- unsigned long flags;
-
- ASSERT(ea != NULL);
- ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
- if (ifp == NULL)
- return DHD_STA_NULL;
-
- DHD_IF_STA_LIST_LOCK(ifp, flags);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- list_for_each_entry(sta, &ifp->sta_list, list) {
- if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
- DHD_INFO(("%s: Found STA " MACDBG "\n",
- __FUNCTION__, MAC2STRDBG((char *)ea)));
- DHD_IF_STA_LIST_UNLOCK(ifp, flags);
- return sta;
- }
- }
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- DHD_IF_STA_LIST_UNLOCK(ifp, flags);
-
- return DHD_STA_NULL;
-}
-
-/** Add STA into the interface's STA list. */
-dhd_sta_t *
-dhd_add_sta(void *pub, int ifidx, void *ea)
-{
- dhd_sta_t *sta;
- dhd_if_t *ifp;
- unsigned long flags;
-
- ASSERT(ea != NULL);
- ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
- if (ifp == NULL)
- return DHD_STA_NULL;
-
- if (!memcmp(ifp->net->dev_addr, ea, ETHER_ADDR_LEN)) {
- DHD_ERROR(("%s: Serious FAILURE, receive own MAC %pM !!\n", __FUNCTION__, ea));
- return DHD_STA_NULL;
- }
-
- sta = dhd_sta_alloc((dhd_pub_t *)pub);
- if (sta == DHD_STA_NULL) {
- DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
- return DHD_STA_NULL;
- }
-
- memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
-
- /* link the sta and the dhd interface */
- sta->ifp = ifp;
- sta->ifidx = ifidx;
- INIT_LIST_HEAD(&sta->list);
-
- DHD_IF_STA_LIST_LOCK(ifp, flags);
-
- list_add_tail(&sta->list, &ifp->sta_list);
-
- DHD_ERROR(("%s: Adding STA " MACDBG "\n",
- __FUNCTION__, MAC2STRDBG((char *)ea)));
-
- DHD_IF_STA_LIST_UNLOCK(ifp, flags);
-
- return sta;
-}
-
-/** Delete all STAs from the interface's STA list. */
-void
-dhd_del_all_sta(void *pub, int ifidx)
-{
- dhd_sta_t *sta, *next;
- dhd_if_t *ifp;
- unsigned long flags;
-
- ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
- if (ifp == NULL)
- return;
-
- DHD_IF_STA_LIST_LOCK(ifp, flags);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
-
- list_del(&sta->list);
- dhd_sta_free(&ifp->info->pub, sta);
-#ifdef DHD_L2_FILTER
- if (ifp->parp_enable) {
- /* clear Proxy ARP cache of specific Ethernet Address */
- bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh,
- ifp->phnd_arp_table, FALSE,
- sta->ea.octet, FALSE, ((dhd_pub_t*)pub)->tickcnt);
- }
-#endif /* DHD_L2_FILTER */
- }
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- DHD_IF_STA_LIST_UNLOCK(ifp, flags);
-
- return;
-}
-
-/** Delete STA from the interface's STA list. */
-void
-dhd_del_sta(void *pub, int ifidx, void *ea)
-{
- dhd_sta_t *sta, *next;
- dhd_if_t *ifp;
- unsigned long flags;
-
- ASSERT(ea != NULL);
- ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
- if (ifp == NULL)
- return;
-
- DHD_IF_STA_LIST_LOCK(ifp, flags);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
- if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
- DHD_ERROR(("%s: Deleting STA " MACDBG "\n",
- __FUNCTION__, MAC2STRDBG(sta->ea.octet)));
- list_del(&sta->list);
- dhd_sta_free(&ifp->info->pub, sta);
- }
- }
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- DHD_IF_STA_LIST_UNLOCK(ifp, flags);
-#ifdef DHD_L2_FILTER
- if (ifp->parp_enable) {
- /* clear Proxy ARP cache of specific Ethernet Address */
- bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
- ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
- }
-#endif /* DHD_L2_FILTER */
- return;
-}
-
-/** Add STA if it doesn't exist. Not reentrant. */
-dhd_sta_t*
-dhd_findadd_sta(void *pub, int ifidx, void *ea)
-{
- dhd_sta_t *sta;
-
- sta = dhd_find_sta(pub, ifidx, ea);
-
- if (!sta) {
- /* Add entry */
- sta = dhd_add_sta(pub, ifidx, ea);
- }
-
- return sta;
-}
-
-#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
-static struct list_head *
-dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
-{
- unsigned long flags;
- dhd_sta_t *sta, *snapshot;
-
- INIT_LIST_HEAD(snapshot_list);
-
- DHD_IF_STA_LIST_LOCK(ifp, flags);
-
- list_for_each_entry(sta, &ifp->sta_list, list) {
- /* allocate one and add to snapshot */
- snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
- if (snapshot == NULL) {
- DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
- continue;
- }
-
- memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);
-
- INIT_LIST_HEAD(&snapshot->list);
- list_add_tail(&snapshot->list, snapshot_list);
- }
-
- DHD_IF_STA_LIST_UNLOCK(ifp, flags);
-
- return snapshot_list;
-}
-
-static void
-dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
-{
- dhd_sta_t *sta, *next;
-
- list_for_each_entry_safe(sta, next, snapshot_list, list) {
- list_del(&sta->list);
- MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
- }
-}
-#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
-
-#else
-static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
-static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
-static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
-static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
-static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
-dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
-dhd_sta_t *dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL; }
-void dhd_del_sta(void *pub, int ifidx, void *ea) {}
-#endif /* PCIE_FULL_DONGLE */
-
-#if defined(DHD_LB)
-
-#if defined(DHD_LB_TXC) || defined(DHD_LB_RXC) || defined(DHD_LB_TXP) || \
- defined(DHD_LB_RXP)
-/**
- * dhd_tasklet_schedule - Function that runs in IPI context of the destination
- * CPU and schedules a tasklet.
- * @tasklet: opaque pointer to the tasklet
- */
-INLINE void
-dhd_tasklet_schedule(void *tasklet)
-{
- tasklet_schedule((struct tasklet_struct *)tasklet);
-}
-/**
- * dhd_tasklet_schedule_on - Executes the passed takslet in a given CPU
- * @tasklet: tasklet to be scheduled
- * @on_cpu: cpu core id
- *
- * If the requested cpu is online, then an IPI is sent to this cpu via the
- * smp_call_function_single with no wait and the tasklet_schedule function
- * will be invoked to schedule the specified tasklet on the requested CPU.
- */
-INLINE void
-dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu)
-{
- const int wait = 0;
- smp_call_function_single(on_cpu,
- dhd_tasklet_schedule, (void *)tasklet, wait);
-}
-
-/**
- * dhd_work_schedule_on - Executes the passed work in a given CPU
- * @work: work to be scheduled
- * @on_cpu: cpu core id
- *
- * If the requested cpu is online, then an IPI is sent to this cpu via the
- * schedule_work_on and the work function
- * will be invoked to schedule the specified work on the requested CPU.
- */
-
-INLINE void
-dhd_work_schedule_on(struct work_struct *work, int on_cpu)
-{
- schedule_work_on(on_cpu, work);
-}
-#endif /* DHD_LB_TXC || DHD_LB_RXC || DHD_LB_TXP || DHD_LB_RXP */
-
-#if defined(DHD_LB_TXC)
-/**
- * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet
- * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and
- * freeing the packets placed in the tx_compl workq
- */
-void
-dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd = dhdp->info;
- int curr_cpu, on_cpu;
-
- if (dhd->rx_napi_netdev == NULL) {
- DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
- return;
- }
-
- DHD_LB_STATS_INCR(dhd->txc_sched_cnt);
- /*
- * If the destination CPU is NOT online or is same as current CPU
- * no need to schedule the work
- */
- curr_cpu = get_cpu();
- put_cpu();
-
- on_cpu = atomic_read(&dhd->tx_compl_cpu);
-
- if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
- dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
- } else {
- schedule_work(&dhd->tx_compl_dispatcher_work);
- }
-}
-
-static void dhd_tx_compl_dispatcher_fn(struct work_struct * work)
-{
- struct dhd_info *dhd =
- container_of(work, struct dhd_info, tx_compl_dispatcher_work);
- int cpu;
-
- get_online_cpus();
- cpu = atomic_read(&dhd->tx_compl_cpu);
- if (!cpu_online(cpu))
- dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
- else
- dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
- put_online_cpus();
-}
-#endif /* DHD_LB_TXC */
-
-#if defined(DHD_LB_RXC)
-/**
- * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
- * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
- * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
- * placed in the rx_compl workq.
- *
- * @dhdp: pointer to dhd_pub object
- */
-void
-dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd = dhdp->info;
- int curr_cpu, on_cpu;
-
- if (dhd->rx_napi_netdev == NULL) {
- DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
- return;
- }
-
- DHD_LB_STATS_INCR(dhd->rxc_sched_cnt);
- /*
- * If the destination CPU is NOT online or is same as current CPU
- * no need to schedule the work
- */
- curr_cpu = get_cpu();
- put_cpu();
- on_cpu = atomic_read(&dhd->rx_compl_cpu);
-
- if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
- dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
- } else {
- schedule_work(&dhd->rx_compl_dispatcher_work);
- }
-}
-
-static void dhd_rx_compl_dispatcher_fn(struct work_struct * work)
-{
- struct dhd_info *dhd =
- container_of(work, struct dhd_info, rx_compl_dispatcher_work);
- int cpu;
-
- get_online_cpus();
- cpu = atomic_read(&dhd->rx_compl_cpu);
- if (!cpu_online(cpu))
- dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
- else {
- dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu);
- }
- put_online_cpus();
-}
-#endif /* DHD_LB_RXC */
-
-#if defined(DHD_LB_TXP)
-static void dhd_tx_dispatcher_work(struct work_struct * work)
-{
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- struct dhd_info *dhd =
- container_of(work, struct dhd_info, tx_dispatcher_work);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- dhd_tasklet_schedule(&dhd->tx_tasklet);
-}
-
-static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp)
-{
- int cpu;
- int net_tx_cpu;
- dhd_info_t *dhd = dhdp->info;
-
- preempt_disable();
- cpu = atomic_read(&dhd->tx_cpu);
- net_tx_cpu = atomic_read(&dhd->net_tx_cpu);
-
- /*
- * Now if the NET_TX has pushed the packet in the same
- * CPU that is chosen for Tx processing, seperate it out
- * i.e run the TX processing tasklet in compl_cpu
- */
- if (net_tx_cpu == cpu)
- cpu = atomic_read(&dhd->tx_compl_cpu);
-
- if (!cpu_online(cpu)) {
- /*
- * Ooohh... but the Chosen CPU is not online,
- * Do the job in the current CPU itself.
- */
- dhd_tasklet_schedule(&dhd->tx_tasklet);
- } else {
- /*
- * Schedule tx_dispatcher_work to on the cpu which
- * in turn will schedule tx_tasklet.
- */
- dhd_work_schedule_on(&dhd->tx_dispatcher_work, cpu);
- }
- preempt_enable();
-}
-
-/**
- * dhd_lb_tx_dispatch - load balance by dispatching the tx_tasklet
- * on another cpu. The tx_tasklet will take care of actually putting
- * the skbs into appropriate flow ring and ringing H2D interrupt
- *
- * @dhdp: pointer to dhd_pub object
- */
-static void
-dhd_lb_tx_dispatch(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd = dhdp->info;
- int curr_cpu;
-
- curr_cpu = get_cpu();
- put_cpu();
-
- /* Record the CPU in which the TX request from Network stack came */
- atomic_set(&dhd->net_tx_cpu, curr_cpu);
-
- /* Schedule the work to dispatch ... */
- dhd_tx_dispatcher_fn(dhdp);
-}
-#endif /* DHD_LB_TXP */
-
-#if defined(DHD_LB_RXP)
-/**
- * dhd_napi_poll - Load balance napi poll function to process received
- * packets and send up the network stack using netif_receive_skb()
- *
- * @napi: napi object in which context this poll function is invoked
- * @budget: number of packets to be processed.
- *
- * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
- * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock).
- * Dequeue each packet from head of rx_process_queue, fetch the ifid from the
- * packet tag and sendup.
- */
-static int
-dhd_napi_poll(struct napi_struct *napi, int budget)
-{
- int ifid;
- const int pkt_count = 1;
- const int chan = 0;
- struct sk_buff * skb;
- unsigned long flags;
- struct dhd_info *dhd;
- int processed = 0;
- struct sk_buff_head rx_process_queue;
-
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- dhd = container_of(napi, struct dhd_info, rx_napi_struct);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
-
- DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
- __FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));
- __skb_queue_head_init(&rx_process_queue);
-
- /* extract the entire rx_napi_queue into local rx_process_queue */
- spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
- skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue);
- spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
-
- while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) {
- OSL_PREFETCH(skb->data);
-
- ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
-
- DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
- __FUNCTION__, skb, ifid));
-
- dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
- processed++;
- }
-
- DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);
-
- DHD_INFO(("%s processed %d\n", __FUNCTION__, processed));
- napi_complete(napi);
-
- return budget - 1;
-}
-
-/**
- * dhd_napi_schedule - Place the napi struct into the current cpus softnet napi
- * poll list. This function may be invoked via the smp_call_function_single
- * from a remote CPU.
- *
- * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
- * after the napi_struct is added to the softnet data's poll_list
- *
- * @info: pointer to a dhd_info struct
- */
-static void
-dhd_napi_schedule(void *info)
-{
- dhd_info_t *dhd = (dhd_info_t *)info;
-
- DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
- __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));
-
- /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
- if (napi_schedule_prep(&dhd->rx_napi_struct)) {
- __napi_schedule(&dhd->rx_napi_struct);
- DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt);
- }
-
- /*
- * If the rx_napi_struct was already running, then we let it complete
- * processing all its packets. The rx_napi_struct may only run on one
- * core at a time, to avoid out-of-order handling.
- */
-}
-
-/**
- * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ
- * action after placing the dhd's rx_process napi object in the the remote CPU's
- * softnet data's poll_list.
- *
- * @dhd: dhd_info which has the rx_process napi object
- * @on_cpu: desired remote CPU id
- */
-static INLINE int
-dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
-{
- int wait = 0; /* asynchronous IPI */
- DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
- __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));
-
- if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
- DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
- __FUNCTION__, on_cpu));
- }
-
- DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
-
- return 0;
-}
-
-/*
- * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on
- * Why should we do this?
- * The candidacy algorithm is run from the call back function
- * registered to CPU hotplug notifier. This call back happens from Worker
- * context. The dhd_napi_schedule_on is also from worker context.
- * Note that both of this can run on two different CPUs at the same time.
- * So we can possibly have a window where a given CPUn is being brought
- * down from CPUm while we try to run a function on CPUn.
- * To prevent this its better have the whole code to execute an SMP
- * function under get_online_cpus.
- * This function call ensures that hotplug mechanism does not kick-in
- * until we are done dealing with online CPUs
- * If the hotplug worker is already running, no worries because the
- * candidacy algo would then reflect the same in dhd->rx_napi_cpu.
- *
- * The below mentioned code structure is proposed in
- * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
- * for the question
- * Q: I need to ensure that a particular cpu is not removed when there is some
- * work specific to this cpu is in progress
- *
- * According to the documentation calling get_online_cpus is NOT required, if
- * we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn can
- * run from Work Queue context we have to call these functions
- */
-static void dhd_rx_napi_dispatcher_fn(struct work_struct * work)
-{
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- struct dhd_info *dhd =
- container_of(work, struct dhd_info, rx_napi_dispatcher_work);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
-
- dhd_napi_schedule(dhd);
-}
-
-/**
- * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
- * to run on another CPU. The rx_napi_struct's poll function will retrieve all
- * the packets enqueued into the rx_napi_queue and sendup.
- * The producer's rx packet queue is appended to the rx_napi_queue before
- * dispatching the rx_napi_struct.
- */
-void
-dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
-{
- unsigned long flags;
- dhd_info_t *dhd = dhdp->info;
- int curr_cpu;
- int on_cpu;
-#ifdef DHD_LB_IRQSET
- cpumask_t cpus;
-#endif /* DHD_LB_IRQSET */
-
- if (dhd->rx_napi_netdev == NULL) {
- DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
- return;
- }
-
- DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
- skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));
-
- /* append the producer's queue of packets to the napi's rx process queue */
- spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
- skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
- spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
-
- /*
- * If the destination CPU is NOT online or is same as current CPU
- * no need to schedule the work
- */
- curr_cpu = get_cpu();
- put_cpu();
-
- preempt_disable();
- on_cpu = atomic_read(&dhd->rx_napi_cpu);
-#ifdef DHD_LB_IRQSET
- if (cpumask_and(&cpus, cpumask_of(curr_cpu), dhd->cpumask_primary) ||
- (!cpu_online(on_cpu)))
-#else
- if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu)))
-#endif /* DHD_LB_IRQSET */
- {
- DHD_INFO(("%s : curr_cpu : %d, cpumask : 0x%lx\n", __FUNCTION__,
- curr_cpu, *cpumask_bits(dhd->cpumask_primary)));
- dhd_napi_schedule(dhd);
- } else {
- DHD_INFO(("%s : schedule to curr_cpu : %d, cpumask : 0x%lx\n",
- __FUNCTION__, curr_cpu, *cpumask_bits(dhd->cpumask_primary)));
- dhd_work_schedule_on(&dhd->rx_napi_dispatcher_work, on_cpu);
- }
- preempt_enable();
-}
-
-/**
- * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
- */
-void
-dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
-{
- dhd_info_t *dhd = dhdp->info;
-
- DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
- pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
- DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
- __skb_queue_tail(&dhd->rx_pend_queue, pkt);
-}
-#endif /* DHD_LB_RXP */
-
-#ifdef DHD_LB_IRQSET
-void
-dhd_irq_set_affinity(dhd_pub_t *dhdp)
-{
- unsigned int irq = (unsigned int)-1;
- int err = BCME_OK;
-
- if (!dhdp) {
- DHD_ERROR(("%s : dhdp is NULL\n", __FUNCTION__));
- return;
- }
-
- if (!dhdp->bus) {
- DHD_ERROR(("%s : bus is NULL\n", __FUNCTION__));
- return;
- }
-
- dhdpcie_get_pcieirq(dhdp->bus, &irq);
- err = irq_set_affinity(irq, dhdp->info->cpumask_primary);
- if (err)
- DHD_ERROR(("%s : irq set affinity is failed cpu:0x%lx\n",
- __FUNCTION__, *cpumask_bits(dhdp->info->cpumask_primary)));
-}
-#endif /* DHD_LB_IRQSET */
-#endif /* DHD_LB */
-
-/** Returns dhd iflist index corresponding the the bssidx provided by apps */
-int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
-{
- dhd_if_t *ifp;
- dhd_info_t *dhd = dhdp->info;
- int i;
-
- ASSERT(bssidx < DHD_MAX_IFS);
- ASSERT(dhdp);
-
- for (i = 0; i < DHD_MAX_IFS; i++) {
- ifp = dhd->iflist[i];
- if (ifp && (ifp->bssidx == bssidx)) {
- DHD_TRACE(("Index manipulated for %s from %d to %d\n",
- ifp->name, bssidx, i));
- break;
- }
- }
- return i;
-}
-
-static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
-{
- uint32 store_idx;
- uint32 sent_idx;
-
- if (!skb) {
- DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
- return BCME_ERROR;
- }
-
- dhd_os_rxflock(dhdp);
- store_idx = dhdp->store_idx;
- sent_idx = dhdp->sent_idx;
- if (dhdp->skbbuf[store_idx] != NULL) {
- /* Make sure the previous packets are processed */
- dhd_os_rxfunlock(dhdp);
- DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
- skb, store_idx, sent_idx));
- /* removed msleep here, should use wait_event_timeout if we
- * want to give rx frame thread a chance to run
- */
-#if defined(WAIT_DEQUEUE)
- OSL_SLEEP(1);
-#endif // endif
- return BCME_ERROR;
- }
- DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
- skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
- dhdp->skbbuf[store_idx] = skb;
- dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
- dhd_os_rxfunlock(dhdp);
-
- return BCME_OK;
-}
-
-static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
-{
- uint32 store_idx;
- uint32 sent_idx;
- void *skb;
-
- dhd_os_rxflock(dhdp);
-
- store_idx = dhdp->store_idx;
- sent_idx = dhdp->sent_idx;
- skb = dhdp->skbbuf[sent_idx];
-
- if (skb == NULL) {
- dhd_os_rxfunlock(dhdp);
- DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
- store_idx, sent_idx));
- return NULL;
- }
-
- dhdp->skbbuf[sent_idx] = NULL;
- dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
-
- DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
- skb, sent_idx));
-
- dhd_os_rxfunlock(dhdp);
-
- return skb;
-}
-
-int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
-{
- if (prepost) { /* pre process */
- dhd_read_cis(dhdp);
- dhd_check_module_cid(dhdp);
- dhd_check_module_mac(dhdp);
- dhd_set_macaddr_from_file(dhdp);
- } else { /* post process */
- dhd_write_macaddr(&dhdp->mac);
- dhd_clear_cis(dhdp);
- }
-
- return 0;
-}
-
-// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
-#if defined(PKT_FILTER_SUPPORT)
-#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
-static bool
-_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode_param)
-{
- bool _apply = FALSE;
- /* In case of IBSS mode, apply arp pkt filter */
- if (op_mode_param & DHD_FLAG_IBSS_MODE) {
- _apply = TRUE;
- goto exit;
- }
- /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
- if (op_mode_param & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE)) {
- _apply = TRUE;
- goto exit;
- }
-
-exit:
- return _apply;
-}
-#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
-
-void
-dhd_set_packet_filter(dhd_pub_t *dhd)
-{
- int i;
+ int i;
DHD_TRACE(("%s: enter\n", __FUNCTION__));
if (dhd_pkt_filter_enable) {
int i;
DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));
- if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) {
+ if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value &&
+ !dhd_conf_get_insuspend(dhd, AP_FILTER_IN_SUSPEND)) {
DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
return;
}
/* 1 - Enable packet filter, only allow unicast packet to send up */
/* 0 - Disable packet filter */
if (dhd_pkt_filter_enable && (!value ||
- (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
+ (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress) ||
+ dhd_conf_get_insuspend(dhd, AP_FILTER_IN_SUSPEND)))
{
for (i = 0; i < dhd->pktfilter_count; i++) {
// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
uint roamvar = dhd->conf->roam_off_suspend;
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
- int bcn_li_bcn;
+ int bcn_li_bcn = 1;
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
uint nd_ra_filter = 0;
#ifdef ENABLE_IPMCAST_FILTER
dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd));
#endif /* APF */
#endif /* PKT_FILTER_SUPPORT */
+#ifdef ARP_OFFLOAD_SUPPORT
+ dhd_arp_offload_enable(dhd, TRUE);
+#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef PASS_ALL_MCAST_PKTS
allmulti = 0;
sizeof(bcn_li_dtim), NULL, 0, TRUE) < 0)
DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
#endif /* OEM_ANDROID && BCMPCIE */
-
+#ifdef WL_CFG80211
+ /* Disable cfg80211 feature events during suspend */
+ ret = wl_cfg80211_config_suspend_events(
+ dhd_linux_get_primary_netdev(dhd), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("failed to disable events (%d)\n", ret));
+ }
+#endif /* WL_CFG80211 */
#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
}
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
- bcn_li_bcn = 0;
+ if (bcn_li_dtim) {
+ bcn_li_bcn = 0;
+ }
ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
sizeof(bcn_li_bcn), NULL, 0, TRUE);
if (ret < 0) {
__FUNCTION__, ret));
}
#endif /* CUSTOM_EVENT_PM_WAKE */
+#ifdef CONFIG_SILENT_ROAM
+ if (!dhd->sroamed) {
+ ret = dhd_sroam_set_mon(dhd, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s set sroam failed %d\n",
+ __FUNCTION__, ret));
+ }
+ }
+ dhd->sroamed = FALSE;
+#endif /* CONFIG_SILENT_ROAM */
#endif /* DHD_USE_EARLYSUSPEND */
- dhd_conf_set_suspend_resume(dhd, value);
} else {
- dhd_conf_set_suspend_resume(dhd, value);
#ifdef PKT_FILTER_SUPPORT
dhd->early_suspended = 0;
#endif // endif
ret));
}
#endif /* WL_CF80211 && WL_BCNRECV */
+#ifdef ARP_OFFLOAD_SUPPORT
+ dhd_arp_offload_enable(dhd, FALSE);
+#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef PKT_FILTER_SUPPORT
/* disable pkt filter */
dhd_enable_packet_filter(0, dhd);
}
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
- bcn_li_bcn = 1;
ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
sizeof(bcn_li_bcn), NULL, 0, TRUE);
if (ret < 0) {
__FUNCTION__, ret));
}
#endif /* CUSTOM_EVENT_PM_WAKE */
+#ifdef CONFIG_SILENT_ROAM
+ ret = dhd_sroam_set_mon(dhd, FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s set sroam failed %d\n", __FUNCTION__, ret));
+ }
+#endif /* CONFIG_SILENT_ROAM */
#endif /* DHD_USE_EARLYSUSPEND */
+#ifdef WL_CFG80211
+ /* Enable cfg80211 feature events during resume */
+ ret = wl_cfg80211_config_suspend_events(
+ dhd_linux_get_primary_netdev(dhd), TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("failed to enable events (%d)\n", ret));
+ }
+#endif /* WL_CFG80211 */
#ifdef DHD_LB_IRQSET
- dhd_irq_set_affinity(dhd);
+ dhd_irq_set_affinity(dhd, dhd->info->cpumask_primary);
#endif /* DHD_LB_IRQSET */
-
- /* terence 2017029: Reject in early suspend */
- if (dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) {
- dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
- }
}
}
dhd_suspend_unlock(dhd);
/* Set flag when early suspend was called */
dhdp->in_suspend = val;
if ((force || !dhdp->suspend_disable_flag) &&
- (dhd_support_sta_mode(dhdp) || dhd_conf_get_insuspend(dhdp)))
+ (dhd_support_sta_mode(dhdp) || dhd_conf_get_insuspend(dhdp, ALL_IN_SUSPEND)))
{
ret = dhd_set_suspend(val, dhdp);
}
if (tmo->increment > tmo->tick)
tmo->increment = tmo->tick;
} else {
- wait_queue_head_t delay_wait;
- DECLARE_WAITQUEUE(wait, current);
- init_waitqueue_head(&delay_wait);
- add_wait_queue(&delay_wait, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
- (void)schedule_timeout(1);
- remove_wait_queue(&delay_wait, &wait);
- set_current_state(TASK_RUNNING);
+ /*
+ * OSL_SLEEP() is corresponding to usleep_range(). In non-atomic
+ * context where the exact wakeup time is flexible, it would be good
+ * to use usleep_range() instead of udelay(). It takes a few advantages
+ * such as improving responsiveness and reducing power.
+ */
+ OSL_SLEEP(jiffies_to_msecs(1));
}
return 0;
_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
{
struct net_device *dev;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
struct netdev_hw_addr *ha;
-#else
- struct dev_mc_list *mclist;
-#endif // endif
uint32 allmulti, cnt;
wl_ioctl_t ioc;
dev = dhd->iflist[i]->net;
if (!dev)
continue;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_lock_bh(dev);
-#endif /* LINUX >= 2.6.27 */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
cnt_iface[i] = netdev_mc_count(dev);
cnt += cnt_iface[i];
-#else
- cnt += dev->mc_count;
-#endif /* LINUX >= 2.6.35 */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_unlock_bh(dev);
-#endif /* LINUX >= 2.6.27 */
/* Determine initial value of allmulti flag */
allmulti |= (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
dev = dhd->iflist[ifidx]->net;
if (!dev)
return;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_lock_bh(dev);
-#endif /* LINUX >= 2.6.27 */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
cnt = netdev_mc_count(dev);
-#else
- cnt = dev->mc_count;
-#endif /* LINUX >= 2.6.35 */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_unlock_bh(dev);
-#endif /* LINUX >= 2.6.27 */
/* Determine initial value of allmulti flag */
allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
DHD_TRACE(("_dhd_set_multicast_list: ifidx %d\n", i));
dev = dhd->iflist[i]->net;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_lock_bh(dev);
-#endif /* LINUX >= 2.6.27 */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
cnt_iface[i], MAC2STRDBG(ha->addr)));
cnt_iface[i]--;
}
-#else /* LINUX < 2.6.35 */
- for (mclist = dev->mc_list; (mclist && (cnt_iface[i] > 0));
- cnt_iface[i]--, mclist = mclist->next) {
- memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
- bufp += ETHER_ADDR_LEN;
- }
-#endif /* LINUX >= 2.6.35 */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_unlock_bh(dev);
-#endif /* LINUX >= 2.6.27 */
}
}
#else /* !MCAST_LIST_ACCUMULATION */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_lock_bh(dev);
-#endif /* LINUX >= 2.6.27 */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
-#else /* LINUX < 2.6.35 */
- for (mclist = dev->mc_list; (mclist && (cnt > 0));
- cnt--, mclist = mclist->next) {
- memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
- bufp += ETHER_ADDR_LEN;
- }
-#endif /* LINUX >= 2.6.35 */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_unlock_bh(dev);
-#endif /* LINUX >= 2.6.27 */
#endif /* MCAST_LIST_ACCUMULATION */
memset(&ioc, 0, sizeof(ioc));
memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
if (ifidx == 0)
memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
+ DHD_ERROR(("%s: MACID is overwritten ifidx=%d, mac=%pM\n", __FUNCTION__, ifidx, addr));
}
return ret;
int ret;
#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
struct wl_if_event_info info;
+#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
+ struct net_device *ndev = NULL;
+#endif
#else
struct net_device *ndev;
#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
mac_addr = NULL;
}
+#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
+ if ((ndev = wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net,
+ &info, mac_addr, NULL, true)) == NULL)
+#else
if (wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net,
- &info, mac_addr, NULL, true) == NULL) {
+ &info, mac_addr, NULL, true) == NULL)
+#endif
+ {
/* Do the post interface create ops */
DHD_ERROR(("Post ifcreate ops failed. Returning \n"));
goto done;
done:
MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
+#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
+ dhd_bridge_dev_set(dhd, ifidx, ndev);
+#endif /* defiend(WLDWDS) && defined(FOURADDR_AUTO_BRG) */
DHD_PERIM_UNLOCK(&dhd->pub);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
ifidx = if_event->event.ifidx;
DHD_TRACE(("Removing interface with idx %d\n", ifidx));
+#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
+ dhd_bridge_dev_set(dhd, ifidx, NULL);
+#endif /* defiend(WLDWDS) && defined(FOURADDR_AUTO_BRG) */
DHD_PERIM_UNLOCK(&dhd->pub);
if (!dhd->pub.info->iflist[ifidx]) {
return 1;
#ifdef BCMDBUS
spin_unlock_irqrestore(&di->wlfc_spinlock, di->wlfc_lock_flags);
-#else
- spin_unlock_bh(&di->wlfc_spinlock);
-#endif /* BCMDBUS */
- return 1;
-}
-
-#endif /* PROP_TXSTATUS */
-
-#if defined(DHD_RX_DUMP) || defined(DHD_TX_DUMP)
-typedef struct {
- uint16 type;
- const char *str;
-} PKTTYPE_INFO;
-
-static const PKTTYPE_INFO packet_type_info[] =
-{
- { ETHER_TYPE_IP, "IP" },
- { ETHER_TYPE_ARP, "ARP" },
- { ETHER_TYPE_BRCM, "BRCM" },
- { ETHER_TYPE_802_1X, "802.1X" },
- { ETHER_TYPE_WAI, "WAPI" },
- { 0, ""}
-};
-
-static const char *_get_packet_type_str(uint16 type)
-{
- int i;
- int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1;
-
- for (i = 0; i < n; i++) {
- if (packet_type_info[i].type == type)
- return packet_type_info[i].str;
- }
-
- return packet_type_info[n].str;
-}
-
-void
-dhd_trx_dump(struct net_device *ndev, uint8 *dump_data, uint datalen, bool tx)
-{
- uint16 protocol;
- char *ifname;
-
- protocol = (dump_data[12] << 8) | dump_data[13];
- ifname = ndev ? ndev->name : "N/A";
-
- if (protocol != ETHER_TYPE_BRCM) {
- DHD_ERROR(("%s DUMP[%s] - %s\n", tx?"Tx":"Rx", ifname,
- _get_packet_type_str(protocol)));
-#if defined(DHD_TX_FULL_DUMP) || defined(DHD_RX_FULL_DUMP)
- prhex("Data", dump_data, datalen);
-#endif /* DHD_TX_FULL_DUMP || DHD_RX_FULL_DUMP */
- }
+#else
+ spin_unlock_bh(&di->wlfc_spinlock);
+#endif /* BCMDBUS */
+ return 1;
}
-#endif /* DHD_TX_DUMP || DHD_RX_DUMP */
+
+#endif /* PROP_TXSTATUS */
/* This routine do not support Packet chain feature, Currently tested for
* proxy arp feature
*/
bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
__FUNCTION__, __LINE__);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
netif_rx_ni(skb);
-#else
- ulong flags;
- netif_rx(skb);
- local_irq_save(flags);
- RAISE_RX_SOFTIRQ();
- local_irq_restore(flags);
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
}
}
int ret = BCME_OK;
dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
struct ether_header *eh = NULL;
+ bool pkt_ether_type_802_1x = FALSE;
+ uint8 pkt_flow_prio;
+
#if defined(DHD_L2_FILTER)
dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx);
#endif // endif
if (dhdp->busstate == DHD_BUS_SUSPEND) {
DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
PKTCFREE(dhdp->osh, pktbuf, TRUE);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
- return -ENODEV;
-#else
return NETDEV_TX_BUSY;
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) */
}
#endif /* PCIE_FULL_DONGLE */
/* back up 802.1x's priority */
dhdp->prio_8021x = prio;
#endif /* DHD_LOSSLESS_ROAMING */
+ pkt_ether_type_802_1x = TRUE;
DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED);
atomic_inc(&dhd->pend_8021x_cnt);
#if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
wl_handle_wps_states(dhd_idx2net(dhdp, ifidx),
pktdata, PKTLEN(dhdp->osh, pktbuf), TRUE);
#endif /* WL_CFG80211 && WL_WPS_SYNC */
- dhd_dump_eapol_4way_message(dhdp, dhd_ifname(dhdp, ifidx), pktdata, TRUE);
- }
-
- if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
-#ifdef DHD_DHCP_DUMP
- dhd_dhcp_dump(dhd_ifname(dhdp, ifidx), pktdata, TRUE);
-#endif /* DHD_DHCP_DUMP */
-#ifdef DHD_ICMP_DUMP
- dhd_icmp_dump(dhd_ifname(dhdp, ifidx), pktdata, TRUE);
-#endif /* DHD_ICMP_DUMP */
}
+ dhd_dump_pkt(dhdp, ifidx, pktdata,
+ (uint32)PKTLEN(dhdp->osh, pktbuf), TRUE, NULL, NULL);
} else {
- PKTCFREE(dhdp->osh, pktbuf, TRUE);
- return BCME_ERROR;
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ return BCME_ERROR;
}
{
#endif /* !PKTPRIO_OVERRIDE */
}
+ BCM_REFERENCE(pkt_ether_type_802_1x);
+ BCM_REFERENCE(pkt_flow_prio);
+
+#ifdef SUPPORT_SET_TID
+ dhd_set_tid_based_on_uid(dhdp, pktbuf);
+#endif /* SUPPORT_SET_TID */
+
#ifdef PCIE_FULL_DONGLE
/*
* Lkup the per interface hash table, for a matching flowring. If one is not
* available, allocate a unique flowid and add a flowring entry.
* The found or newly created flowid is placed into the pktbuf's tag.
*/
- ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
+
+#ifdef DHD_LOSSLESS_ROAMING
+ /* For LLR override and use flowring with prio 7 for 802.1x packets */
+ if (pkt_ether_type_802_1x) {
+ pkt_flow_prio = PRIO_8021D_NC;
+ } else
+#endif /* DHD_LOSSLESS_ROAMING */
+ {
+ pkt_flow_prio = dhdp->flow_prio_map[(PKTPRIO(pktbuf))];
+ }
+
+ ret = dhd_flowid_update(dhdp, ifidx, pkt_flow_prio, pktbuf);
if (ret != BCME_OK) {
PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
return ret;
}
-#endif // endif
-
-#if defined(DHD_TX_DUMP)
- dhd_trx_dump(dhd_idx2net(dhdp, ifidx), PKTDATA(dhdp->osh, pktbuf),
- PKTLEN(dhdp->osh, pktbuf), TRUE);
-#endif
+#endif /* PCIE_FULL_DONGLE */
/* terence 20150901: Micky add to ajust the 802.1X priority */
/* Set the 802.1X packet with the highest priority 7 */
if (dhdp->conf->pktprio8021x >= 0)
return ret;
}
-#if defined(DHD_LB_TXP)
-
-int BCMFASTPATH
-dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net,
- int ifidx, void *skb)
-{
- DHD_LB_STATS_PERCPU_ARR_INCR(dhd->tx_start_percpu_run_cnt);
-
- /* If the feature is disabled run-time do TX from here */
- if (atomic_read(&dhd->lb_txp_active) == 0) {
- DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
- return __dhd_sendpkt(&dhd->pub, ifidx, skb);
- }
-
- /* Store the address of net device and interface index in the Packet tag */
- DHD_LB_TX_PKTTAG_SET_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), net);
- DHD_LB_TX_PKTTAG_SET_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), ifidx);
-
- /* Enqueue the skb into tx_pend_queue */
- skb_queue_tail(&dhd->tx_pend_queue, skb);
-
- DHD_TRACE(("%s(): Added skb %p for netdev %p \r\n", __FUNCTION__, skb, net));
-
- /* Dispatch the Tx job to be processed by the tx_tasklet */
- dhd_lb_tx_dispatch(&dhd->pub);
-
- return NETDEV_TX_OK;
-}
-#endif /* DHD_LB_TXP */
-
int BCMFASTPATH
dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
{
return -ENODEV;
}
- /* terence 2017029: Reject in early suspend */
- if ((dhd->pub.conf->insuspend & NO_TXDATA_IN_SUSPEND) && dhd->pub.early_suspended) {
- dhd_txflowcontrol(&dhd->pub, ALL_INTERFACES, ON);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
- return -ENODEV;
-#else
- return NETDEV_TX_BUSY;
-#endif
- }
-
DHD_GENERAL_LOCK(&dhd->pub, flags);
DHD_BUS_BUSY_SET_IN_TX(&dhd->pub);
DHD_GENERAL_UNLOCK(&dhd->pub, flags);
#endif /* PCIE_FULL_DONGLE */
dhd_os_busbusy_wake(&dhd->pub);
DHD_GENERAL_UNLOCK(&dhd->pub, flags);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
- return -ENODEV;
-#else
return NETDEV_TX_BUSY;
-#endif // endif
}
#else
if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
DHD_GENERAL_UNLOCK(&dhd->pub, flags);
DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
- return -ENODEV;
-#else
return NETDEV_TX_BUSY;
-#endif // endif
}
ifp = DHD_DEV_IFP(net);
DHD_GENERAL_UNLOCK(&dhd->pub, flags);
DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
- return -ENODEV;
-#else
return NETDEV_TX_BUSY;
-#endif // endif
}
DHD_IF_SET_TX_ACTIVE(ifp, DHD_TX_START_XMIT);
bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
}
+ /* move from dhdsdio_sendfromq(), try to orphan skb early */
+ if (dhd->pub.conf->orphan_move == 2)
+ PKTORPHAN(skb, dhd->pub.conf->tsq);
+ else if (dhd->pub.conf->orphan_move == 3)
+ skb_orphan(skb);
+
/* Convert to packet */
if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
}
#endif /* DHD_PSTA */
+#ifdef DHDTCPSYNC_FLOOD_BLK
+ if (dhd_tcpdata_get_flag(&dhd->pub, pktbuf) == FLAG_SYNCACK) {
+ ifp->tsyncack_txed ++;
+ }
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
#ifdef DHDTCPACK_SUPPRESS
if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
/* If this packet has been hold or got freed, just return */
DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
/* Return ok: we always eat the packet */
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
- return 0;
-#else
return NETDEV_TX_OK;
-#endif // endif
}
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
}
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+static void
+__dhd_txflowcontrol(dhd_pub_t *dhdp, struct net_device *net, bool state)
+{
+
+ if ((state == ON) && (dhdp->txoff == FALSE)) {
+ netif_stop_queue(net);
+ dhd_prot_update_pktid_txq_stop_cnt(dhdp);
+ } else if (state == ON) {
+ DHD_ERROR(("%s: Netif Queue has already stopped\n", __FUNCTION__));
+ }
+ if ((state == OFF) && (dhdp->txoff == TRUE)) {
+ netif_wake_queue(net);
+ dhd_prot_update_pktid_txq_start_cnt(dhdp);
+ } else if (state == OFF) {
+ DHD_ERROR(("%s: Netif Queue has already started\n", __FUNCTION__));
+ }
+}
+
void
dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
{
#endif // endif
if (ifidx == ALL_INTERFACES) {
- /* Flow control on all active interfaces */
- dhdp->txoff = state;
for (i = 0; i < DHD_MAX_IFS; i++) {
if (dhd->iflist[i]) {
net = dhd->iflist[i]->net;
- if (state == ON)
- netif_stop_queue(net);
- else
- netif_wake_queue(net);
+ __dhd_txflowcontrol(dhdp, net, state);
}
}
} else {
if (dhd->iflist[ifidx]) {
net = dhd->iflist[ifidx]->net;
- if (state == ON)
- netif_stop_queue(net);
- else
- netif_wake_queue(net);
+ __dhd_txflowcontrol(dhdp, net, state);
}
}
+ dhdp->txoff = state;
}
#ifdef DHD_MCAST_REGEN
* does netif_rx, disables irq, raise NET_IF_RX softirq and
* enables interrupts back
*/
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
netif_rx_ni(skb);
-#else
- ulong flags;
- netif_rx(skb);
- local_irq_save(flags);
- RAISE_RX_SOFTIRQ();
- local_irq_restore(flags);
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
}
static int
*/
pktlen = skb->len + ETH_HLEN;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
pktdata = (void *)skb_mac_header(skb);
-#else
- pktdata = (void *)skb->mac.raw;
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
-
ret = wl_host_event_get_data(pktdata, pktlen, &evu);
if (ret != BCME_OK) {
dhd_event_logtrace_pkt_process(dhdp, skb);
}
+ /* Dummy sleep so that scheduler kicks in after processing any logprints */
+ OSL_SLEEP(0);
+
/* Send packet up if logtrace_pkt_sendup is TRUE */
if (dhdp->logtrace_pkt_sendup) {
#ifdef DHD_USE_STATIC_CTRLBUF
int ret;
while (1) {
- dhdp->logtrace_thr_ts.entry_time = OSL_SYSUPTIME_US();
+ dhdp->logtrace_thr_ts.entry_time = OSL_LOCALTIME_NS();
if (!binary_sema_down(tsk)) {
- dhdp->logtrace_thr_ts.sem_down_time = OSL_SYSUPTIME_US();
+ dhdp->logtrace_thr_ts.sem_down_time = OSL_LOCALTIME_NS();
SMP_RD_BARRIER_DEPENDS();
if (dhd->pub.dongle_reset == FALSE) {
do {
+ /* Check terminated before processing the items */
+ if (tsk->terminated) {
+ DHD_ERROR(("%s: task terminated\n", __FUNCTION__));
+ goto exit;
+ }
#ifdef EWP_EDL
/* check if EDL is being used */
if (dhd->pub.dongle_edl_support) {
}
} while (ret > 0);
}
- /* Check terminated after processing the items */
- if (tsk->terminated) {
- DHD_ERROR(("%s: task terminated\n", __FUNCTION__));
- break;
- }
if (tsk->flush_ind) {
DHD_ERROR(("%s: flushed\n", __FUNCTION__));
- dhdp->logtrace_thr_ts.flush_time = OSL_SYSUPTIME_US();
+ dhdp->logtrace_thr_ts.flush_time = OSL_LOCALTIME_NS();
tsk->flush_ind = 0;
complete(&tsk->flushed);
}
} else {
DHD_ERROR(("%s: unexpted break\n", __FUNCTION__));
- dhdp->logtrace_thr_ts.unexpected_break_time = OSL_SYSUPTIME_US();
+ dhdp->logtrace_thr_ts.unexpected_break_time = OSL_LOCALTIME_NS();
break;
}
}
+exit:
complete_and_exit(&tsk->completed, 0);
- dhdp->logtrace_thr_ts.complete_time = OSL_SYSUPTIME_US();
+ dhdp->logtrace_thr_ts.complete_time = OSL_LOCALTIME_NS();
}
#else
static void
/* msg = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|<var len data>| */
infobuf = (info_buf_payload_hdr_t *)(msg + sizeof(uint32));
- pktsize = ltoh16(infobuf->length) + sizeof(info_buf_payload_hdr_t) +
- sizeof(uint32);
+ pktsize = (uint32)(ltoh16(infobuf->length) + sizeof(info_buf_payload_hdr_t) +
+ sizeof(uint32));
pkt = PKTGET(dhdp->osh, pktsize, FALSE);
if (!pkt) {
DHD_ERROR(("%s: skb alloc failed ! not sending event log up.\n", __FUNCTION__));
#endif /* DHD_WAKE_STATUS */
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ BCM_REFERENCE(dump_data);
for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
struct ether_header *eh;
eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
+ if (ifidx >= DHD_MAX_IFS) {
+ DHD_ERROR(("%s: ifidx(%d) Out of bound. drop packet\n",
+ __FUNCTION__, ifidx));
+ if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+ PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ } else {
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
+ }
+ continue;
+ }
+
ifp = dhd->iflist[ifidx];
if (ifp == NULL) {
DHD_ERROR(("%s: ifp is NULL. drop packet\n",
}
#endif /* MCAST_REGEN */
+#ifdef DHDTCPSYNC_FLOOD_BLK
+ if (dhd_tcpdata_get_flag(dhdp, pktbuf) == FLAG_SYNC) {
+ int delta_sec;
+ int delta_sync;
+ int sync_per_sec;
+ u64 curr_time = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
+ ifp->tsync_rcvd ++;
+ delta_sync = ifp->tsync_rcvd - ifp->tsyncack_txed;
+ delta_sec = curr_time - ifp->last_sync;
+ if (delta_sec > 1) {
+ sync_per_sec = delta_sync/delta_sec;
+ if (sync_per_sec > TCP_SYNC_FLOOD_LIMIT) {
+ schedule_work(&ifp->blk_tsfl_work);
+ DHD_ERROR(("ifx %d TCP SYNC Flood attack suspected! "
+ "sync recvied %d pkt/sec \n",
+ ifidx, sync_per_sec));
+ }
+ dhd_reset_tcpsync_info_by_ifp(ifp);
+ }
+
+ }
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
#ifdef DHDTCPACK_SUPPRESS
dhd_tcpdata_info_get(dhdp, pktbuf);
#endif // endif
}
}
#endif /* PCIE_FULL_DONGLE */
-
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+ if (IS_STA_IFACE(ndev_to_wdev(ifp->net)) &&
+ (ifp->recv_reassoc_evt == TRUE) && (ifp->post_roam_evt == FALSE) &&
+ (dhd_is_4way_msg((char *)(skb->data)) == EAPOL_4WAY_M1)) {
+ DHD_ERROR(("%s: Reassoc is in progress. "
+ "Drop EAPOL M1 frame\n", __FUNCTION__));
+ PKTFREE(dhdp->osh, pktbuf, FALSE);
+ continue;
+ }
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
/* Get the protocol, maintain skb around eth_type_trans()
* The main reason for this hack is for the limitation of
* Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
*/
eth = skb->data;
len = skb->len;
-
dump_data = skb->data;
-
protocol = (skb->data[12] << 8) | skb->data[13];
+
if (protocol == ETHER_TYPE_802_1X) {
DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
#if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
wl_handle_wps_states(ifp->net, dump_data, len, FALSE);
#endif /* WL_CFG80211 && WL_WPS_SYNC */
- dhd_dump_eapol_4way_message(dhdp, dhd_ifname(dhdp, ifidx), dump_data, FALSE);
- }
-
- if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) {
-#ifdef DHD_DHCP_DUMP
- dhd_dhcp_dump(dhd_ifname(dhdp, ifidx), dump_data, FALSE);
-#endif /* DHD_DHCP_DUMP */
-#ifdef DHD_ICMP_DUMP
- dhd_icmp_dump(dhd_ifname(dhdp, ifidx), dump_data, FALSE);
-#endif /* DHD_ICMP_DUMP */
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ if (dhd_is_4way_msg((uint8 *)(skb->data)) == EAPOL_4WAY_M3) {
+ OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M3_RXED);
+ }
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
}
-#ifdef DHD_RX_DUMP
- dhd_trx_dump(dhd_idx2net(dhdp, ifidx), dump_data, skb->len, FALSE);
-#endif /* DHD_RX_DUMP */
+ dhd_dump_pkt(dhdp, ifidx, dump_data, len, FALSE, NULL, NULL);
skb->protocol = eth_type_trans(skb, skb->dev);
if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
bcm_event_msg_u_t evu;
- int ret_event;
- int event_type;
+ int ret_event, event_type;
+ void *pkt_data = skb_mac_header(skb);
- ret_event = wl_host_event_get_data(
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
- skb_mac_header(skb),
-#else
- skb->mac.raw,
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
- len, &evu);
+ ret_event = wl_host_event_get_data(pkt_data, len, &evu);
if (ret_event != BCME_OK) {
DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
}
#endif /* SHOW_LOGTRACE */
- ret_event = dhd_wl_host_event(dhd, ifidx,
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
- skb_mac_header(skb),
-#else
- skb->mac.raw,
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
- len, &event, &data);
+ ret_event = dhd_wl_host_event(dhd, ifidx, pkt_data, len, &event, &data);
wl_event_to_host_order(&event);
if (!tout_ctrl)
#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
-#if defined(DHD_LB_RXP)
DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+#if defined(DHD_LB_RXP)
netif_receive_skb(skb);
- DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
#else /* !defined(DHD_LB_RXP) */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
- DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
netif_rx_ni(skb);
+#endif /* defined(DHD_LB_RXP) */
DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
-#else
- ulong flags;
- DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
- netif_rx(skb);
- DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
- local_irq_save(flags);
- RAISE_RX_SOFTIRQ();
- local_irq_restore(flags);
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
-#endif /* !defined(DHD_LB_RXP) */
}
}
}
complete_and_exit(&tsk->completed, 0);
}
-static void dhd_watchdog(
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- struct timer_list *t
-#else
- ulong data
-#endif
-)
+static void dhd_watchdog(ulong data)
{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- dhd_info_t *dhd = from_timer(dhd, t, timer);
-#else
dhd_info_t *dhd = (dhd_info_t *)data;
-#endif
unsigned long flags;
if (dhd->pub.dongle_reset) {
}
if (down_interruptible(&tsk->sema) == 0) {
void *skb;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
- ulong flags;
-#endif // endif
#ifdef ENABLE_ADAPTIVE_SCHED
dhd_sched_policy(dhd_rxf_prio);
#endif /* ENABLE_ADAPTIVE_SCHED */
PKTSETNEXT(pub->osh, skb, NULL);
bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
__FUNCTION__, __LINE__);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
netif_rx_ni(skb);
-#else
- netif_rx(skb);
- local_irq_save(flags);
- RAISE_RX_SOFTIRQ();
- local_irq_restore(flags);
-
-#endif // endif
skb = skbnext;
}
#if defined(WAIT_DEQUEUE)
}
return;
} else {
+ dhd_bus_set_dpc_sched_time(dhdp);
tasklet_schedule(&dhd->tasklet);
}
}
}
#endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
static void
dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
{
struct ethtool_ops dhd_ethtool_ops = {
.get_drvinfo = dhd_ethtool_get_drvinfo
};
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
static int
dhd_ethtool(dhd_info_t *dhd, void *uaddr)
{
return 0;
}
-#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
{
dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
#ifdef BCMPCIE
} else if (dhdp->d3ackcnt_timeout) {
- dhdp->hang_reason = HANG_REASON_D3_ACK_TIMEOUT;
+ dhdp->hang_reason = dhdp->is_sched_error ?
+ HANG_REASON_D3_ACK_TIMEOUT_SCHED_ERROR :
+ HANG_REASON_D3_ACK_TIMEOUT;
#endif /* BCMPCIE */
} else {
- dhdp->hang_reason = HANG_REASON_IOCTL_RESP_TIMEOUT;
+ dhdp->hang_reason = dhdp->is_sched_error ?
+ HANG_REASON_IOCTL_RESP_TIMEOUT_SCHED_ERROR :
+ HANG_REASON_IOCTL_RESP_TIMEOUT;
}
}
+ printf("%s\n", info_string);
net_os_send_hang_message(net);
return TRUE;
}
bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
__FUNCTION__, __LINE__);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
netif_rx_ni(dhd->monitor_skb);
DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
-#else
- ulong flags;
- DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
- netif_rx(dhd->monitor_skb);
- DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
- local_irq_save(flags);
- RAISE_RX_SOFTIRQ();
- local_irq_restore(flags);
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
}
dhd->monitor_skb = NULL;
dhd_pub_t *dhdp = (dhd_pub_t *)handle;
dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
- dhdp->hang_was_sent = 0;
+ dhdp->hang_was_sent = 0;
- dhd_os_send_hang_message(&dhd->pub);
-#else
- DHD_ERROR(("%s: unsupported\n", __FUNCTION__));
-#endif // endif
+ dhd_os_send_hang_message(&dhd->pub);
}
EXPORT_SYMBOL(dhd_bus_retry_hang_recovery);
};
static void
-dhd_add_monitor_if(void *handle, void *event_info, u8 event)
+dhd_add_monitor_if(dhd_info_t *dhd)
{
- dhd_info_t *dhd = handle;
struct net_device *dev;
char *devname;
uint32 scan_suppress = FALSE;
int ret = BCME_OK;
- if (event != DHD_WQ_WORK_IF_ADD) {
- DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
return;
}
- if (!dhd) {
- DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ if (dhd->monitor_dev) {
+ DHD_ERROR(("%s: monitor i/f already exists", __FUNCTION__));
return;
}
dev->type = ARPHRD_IEEE80211_RADIOTAP;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
- dev->hard_start_xmit = dhd_monitor_start;
- dev->do_ioctl = dhd_monitor_ioctl;
- dev->get_stats = dhd_monitor_get_stats;
-#else
dev->netdev_ops = &netdev_monitor_ops;
-#endif // endif
- if (register_netdev(dev)) {
+ if (register_netdevice(dev)) {
DHD_ERROR(("%s, register_netdev failed for %s\n",
__FUNCTION__, dev->name));
free_netdev(dev);
+ return;
}
if (FW_SUPPORTED((&dhd->pub), monitor)) {
}
static void
-dhd_del_monitor_if(void *handle, void *event_info, u8 event)
+dhd_del_monitor_if(dhd_info_t *dhd)
{
- dhd_info_t *dhd = handle;
-
- if (event != DHD_WQ_WORK_IF_DEL) {
- DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
- return;
- }
+ int ret = BCME_OK;
+ uint32 scan_suppress = FALSE;
if (!dhd) {
DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
return;
}
- if (dhd->monitor_dev) {
- unregister_netdev(dhd->monitor_dev);
+ if (!dhd->monitor_dev) {
+ DHD_ERROR(("%s: monitor i/f doesn't exist", __FUNCTION__));
+ return;
+ }
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
- MFREE(dhd->osh, dhd->monitor_dev->priv, DHD_MON_DEV_PRIV_SIZE);
- MFREE(dhd->osh, dhd->monitor_dev, sizeof(struct net_device));
-#else
- free_netdev(dhd->monitor_dev);
-#endif /* 2.6.24 */
+ if (FW_SUPPORTED((&dhd->pub), monitor)) {
+ scan_suppress = FALSE;
+ /* Unset the SCAN SUPPRESS Flag in the firmware to enable scan */
+ ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress,
+ sizeof(scan_suppress), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__, ret));
+ }
+ }
+ if (dhd->monitor_dev) {
+ if (dhd->monitor_dev->reg_state == NETREG_UNINITIALIZED) {
+ free_netdev(dhd->monitor_dev);
+ } else {
+ unregister_netdevice(dhd->monitor_dev);
+ }
dhd->monitor_dev = NULL;
}
}
static void
-dhd_set_monitor(dhd_pub_t *dhd, int ifidx, int val)
+dhd_set_monitor(dhd_pub_t *pub, int ifidx, int val)
{
- dhd_info_t *info = dhd->info;
+ dhd_info_t *dhd = pub->info;
DHD_TRACE(("%s: val %d\n", __FUNCTION__, val));
- if ((val && info->monitor_dev) || (!val && !info->monitor_dev)) {
- DHD_ERROR(("%s: Mismatched params, return\n", __FUNCTION__));
- return;
- }
- /* Delete monitor */
+ dhd_net_if_lock_local(dhd);
if (!val) {
- info->monitor_type = val;
- dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL, DHD_WQ_WORK_IF_DEL,
- dhd_del_monitor_if, DHD_WQ_WORK_PRIORITY_LOW);
- return;
+ /* Delete monitor */
+ dhd_del_monitor_if(dhd);
+ } else {
+ /* Add monitor */
+ dhd_add_monitor_if(dhd);
}
-
- /* Add monitor */
- info->monitor_type = val;
- dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL, DHD_WQ_WORK_IF_ADD,
- dhd_add_monitor_if, DHD_WQ_WORK_PRIORITY_LOW);
+ dhd->monitor_type = val;
+ dhd_net_if_unlock_local(dhd);
}
#endif /* WL_MONITOR */
bcmerror = BCME_UNSUPPORTED;
goto done;
}
+
bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
#ifdef WL_MONITOR
}
#endif /* defined(WL_WIRELESS_EXT) */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
if (cmd == SIOCETHTOOL) {
ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
DHD_PERIM_UNLOCK(&dhd->pub);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
return ret;
}
-#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
if (cmd == SIOCDEVPRIVATE+1) {
ret = wl_android_priv_cmd(net, ifr);
static int dhd_init_cpufreq_fix(dhd_info_t *dhd)
{
if (dhd) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_init(&dhd->cpufreq_fix);
-#endif // endif
dhd->cpufreq_fix_status = FALSE;
}
return 0;
static void dhd_fix_cpu_freq(dhd_info_t *dhd)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_lock(&dhd->cpufreq_fix);
-#endif // endif
if (dhd && !dhd->cpufreq_fix_status) {
pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000);
#ifdef FIX_BUS_MIN_CLOCK
dhd->cpufreq_fix_status = TRUE;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_unlock(&dhd->cpufreq_fix);
-#endif // endif
}
static void dhd_rollback_cpu_freq(dhd_info_t *dhd)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_lock(&dhd ->cpufreq_fix);
-#endif // endif
if (dhd && dhd->cpufreq_fix_status != TRUE) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_unlock(&dhd->cpufreq_fix);
-#endif // endif
return;
}
DHD_ERROR(("pm_qos_add_requests called\n"));
dhd->cpufreq_fix_status = FALSE;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_unlock(&dhd->cpufreq_fix);
-#endif // endif
}
#endif /* FIX_CPU_MIN_CLOCK */
dhd_info_t *dhd = DHD_DEV_INFO(net);
DHD_OS_WAKE_LOCK(&dhd->pub);
DHD_PERIM_LOCK(&dhd->pub);
- printf("%s: Enter %p\n", __FUNCTION__, net);
+ printf("%s: Enter %s\n", __FUNCTION__, net->name);
dhd->pub.rxcnt_timeout = 0;
dhd->pub.txcnt_timeout = 0;
ifidx = dhd_net2idx(dhd, net);
BCM_REFERENCE(ifidx);
- /* Set state and stop OS transmissions */
- netif_stop_queue(net);
+ DHD_ERROR(("%s: ######### dhd_stop called for ifidx=%d #########\n", __FUNCTION__, ifidx));
+
#if defined(WL_STATIC_IF) && defined(WL_CFG80211)
/* If static if is operational, don't reset the chip */
if (IS_CFG80211_STATIC_IF_ACTIVE(cfg)) {
- DHD_INFO(("[STATIC_IF] static if operational. Avoiding chip reset!\n"));
+ DHD_ERROR(("static if operational. skip chip reset.\n"));
skip_reset = true;
+ wl_cfg80211_sta_ifdown(net);
goto exit;
}
#endif /* WL_STATIC_IF && WL_CFG80211 */
+
+ DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
#ifdef WL_CFG80211
/* Disable Runtime PM before interface down */
* when the primary Interface is brought down. [ifconfig wlan0 down]
*/
if (!dhd_download_fw_on_driverload) {
+ DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_OFF), ifidx, 0);
if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
int i;
#ifdef WL_CFG80211_P2P_DEV_IF
wl_cfg80211_del_p2p_wdev(net);
#endif /* WL_CFG80211_P2P_DEV_IF */
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ dhd_cleanup_m4_state_work(&dhd->pub, ifidx);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+#ifdef DHD_PKTDUMP_ROAM
+ dhd_dump_pkt_clear(&dhd->pub);
+#endif /* DHD_PKTDUMP_ROAM */
dhd_net_if_lock_local(dhd);
for (i = 1; i < DHD_MAX_IFS; i++)
OLD_MOD_DEC_USE_COUNT;
exit:
+ if (skip_reset == false) {
#if defined(WL_WIRELESS_EXT)
- if (ifidx == 0) {
+ if (ifidx == 0) {
+ wl_iw_down(net, &dhd->pub);
+ }
+#endif /* defined(WL_WIRELESS_EXT) */
#ifdef WL_ESCAN
- wl_escan_down(&dhd->pub);
-#else
- wl_iw_down(&dhd->pub);
+ if (ifidx == 0) {
+ wl_escan_down(net, &dhd->pub);
+ }
#endif /* WL_ESCAN */
- }
-#endif /* defined(WL_WIRELESS_EXT) */
- if (skip_reset == false) {
if (ifidx == 0 && !dhd_download_fw_on_driverload) {
#if defined(BT_OVER_SDIO)
dhd_bus_put(&dhd->pub, WLAN_MODULE);
wl_android_set_wifi_on_flag(FALSE);
#else
wl_android_wifi_off(net, TRUE);
+#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
#ifdef WL_EXT_IAPSTA
wl_ext_iapsta_dettach_netdev(net, ifidx);
-#endif
+#endif /* WL_EXT_IAPSTA */
+#ifdef WL_ESCAN
+ wl_escan_event_dettach(net, &dhd->pub);
+#endif /* WL_ESCAN */
+ wl_ext_event_dettach_netdev(net, ifidx);
+#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
#endif /* BT_OVER_SDIO */
}
#ifdef SUPPORT_DEEP_SLEEP
}
#endif /* SUPPORT_DEEP_SLEEP */
dhd->pub.hang_was_sent = 0;
+ dhd->pub.hang_was_pending = 0;
/* Clear country spec for for built-in type driver */
if (!dhd_download_fw_on_driverload) {
DHD_OS_WAKE_LOCK_DESTROY(dhd);
dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT;
}
- printf("%s: Exit\n", __FUNCTION__);
+ printf("%s: Exit %s\n", __FUNCTION__, net->name);
mutex_unlock(&dhd->pub.ndev_op_sync);
return 0;
#endif
#if defined(WL_EXT_IAPSTA) && defined(ISAM_PREINIT)
int bytes_written = 0;
- struct dhd_conf *conf;
#endif
mutex_lock(&dhd->pub.ndev_op_sync);
}
}
- printf("%s: Enter %p\n", __FUNCTION__, net);
+ printf("%s: Enter %s\n", __FUNCTION__, net->name);
DHD_MUTEX_LOCK();
/* Init wakelock */
if (!dhd_download_fw_on_driverload) {
DHD_PERIM_LOCK(&dhd->pub);
dhd->pub.dongle_trap_occured = 0;
dhd->pub.hang_was_sent = 0;
+ dhd->pub.hang_was_pending = 0;
dhd->pub.hang_reason = 0;
dhd->pub.iovar_timeout_occured = 0;
#ifdef PCIE_FULL_DONGLE
dhd->pub.d3ack_timeout_occured = 0;
dhd->pub.livelock_occured = 0;
+ dhd->pub.pktid_audit_failed = 0;
#endif /* PCIE_FULL_DONGLE */
-#ifdef DHD_MAP_LOGGING
+ dhd->pub.iface_op_failed = 0;
+ dhd->pub.scan_timeout_occurred = 0;
+ dhd->pub.scan_busy_occurred = 0;
dhd->pub.smmu_fault_occurred = 0;
-#endif /* DHD_MAP_LOGGING */
#ifdef DHD_LOSSLESS_ROAMING
dhd->pub.dequeue_prec_map = ALLPRIO;
atomic_set(&dhd->pend_8021x_cnt, 0);
if (!dhd_download_fw_on_driverload) {
DHD_ERROR(("\n%s\n", dhd_version));
+ DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_ON), ifidx, 0);
+#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
+ wl_ext_event_attach_netdev(net, ifidx, dhd->iflist[ifidx]->bssidx);
+#ifdef WL_ESCAN
+ wl_escan_event_attach(net, &dhd->pub);
+#endif /* WL_ESCAN */
#ifdef WL_EXT_IAPSTA
wl_ext_iapsta_attach_netdev(net, ifidx, dhd->iflist[ifidx]->bssidx);
-#endif
+#endif /* WL_EXT_IAPSTA */
+#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
g_first_broadcast_scan = TRUE;
#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
skb_queue_head_init(&dhd->rx_napi_queue);
} /* rx_napi_netdev == NULL */
#endif /* DHD_LB_RXP */
-#ifdef DHD_LB_IRQSET
- dhd_irq_set_affinity(&dhd->pub);
-#endif /* DHD_LB_IRQSET */
#if defined(DHD_LB_TXP)
/* Use the variant that uses locks */
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
}
+#if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON)
+ dhd_bus_aspm_enable_rc_ep(dhd->pub.bus, TRUE);
+#endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */
+#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
+ dhd_irq_set_affinity(&dhd->pub, cpumask_of(0));
+#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
+#ifdef DHD_LB_IRQSET
+ dhd_irq_set_affinity(&dhd->pub, dhd->cpumask_primary);
+#endif /* DHD_LB_IRQSET */
#if defined(ARGOS_NOTIFY_CB)
argos_register_notifier_init(net);
#endif // endif
#endif /* NUM_SCB_MAX_PROBE */
#endif /* WL_CFG80211 */
#if defined(WL_WIRELESS_EXT)
+ if (unlikely(wl_iw_up(net, &dhd->pub))) {
+ DHD_ERROR(("%s: failed to bring up wext\n", __FUNCTION__));
+ ret = -1;
+ goto exit;
+ }
+#endif
#ifdef WL_ESCAN
if (unlikely(wl_escan_up(net, &dhd->pub))) {
DHD_ERROR(("%s: failed to bring up escan\n", __FUNCTION__));
ret = -1;
goto exit;
}
-#endif
-#endif
-#if defined(WL_EXT_IAPSTA) && defined(ISAM_PREINIT)
+#endif /* WL_ESCAN */
+#if defined(ISAM_PREINIT)
if (!dhd_download_fw_on_driverload) {
- conf = dhd_get_conf(net);
- if (conf) {
- wl_android_ext_priv_cmd(net, conf->isam_init, 0, &bytes_written);
- wl_android_ext_priv_cmd(net, conf->isam_config, 0, &bytes_written);
- wl_android_ext_priv_cmd(net, conf->isam_enable, 0, &bytes_written);
+ if (dhd->pub.conf) {
+ wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_init, 0, &bytes_written);
+ wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_config, 0, &bytes_written);
+ wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_enable, 0, &bytes_written);
}
}
#endif
}
- /* Allow transmit calls */
- netif_start_queue(net);
dhd->pub.up = 1;
if (wl_event_enable) {
dhd->pub.logtrace_pkt_sendup = false;
}
- OLD_MOD_INC_USE_COUNT;
+ OLD_MOD_INC_USE_COUNT;
+
+#ifdef BCMDBGFS
+ dhd_dbgfs_init(&dhd->pub);
+#endif // endif
+
+exit:
+ mutex_unlock(&dhd->pub.ndev_op_sync);
+ if (ret) {
+ dhd_stop(net);
+ }
+
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ DHD_MUTEX_UNLOCK();
+
+ printf("%s: Exit %s ret=%d\n", __FUNCTION__, net->name, ret);
+ return ret;
+}
+
+/*
+ * ndo_start handler for primary ndev
+ *
+ * Brings the interface up via dhd_open() and, only on success,
+ * re-enables the netdev transmit queue. The dhd_open() return value
+ * is propagated to the network stack either way.
+ */
+static int
+dhd_pri_open(struct net_device *net)
+{
+	s32 ret;
+
+	ret = dhd_open(net);
+	if (unlikely(ret)) {
+		DHD_ERROR(("Failed to open primary dev ret %d\n", ret));
+		/* tx queue is deliberately left stopped on failure */
+		return ret;
+	}
+
+	/* Allow transmit calls */
+	netif_start_queue(net);
+	DHD_ERROR(("[%s] tx queue started\n", net->name));
+	return ret;
+}
-#ifdef BCMDBGFS
- dhd_dbgfs_init(&dhd->pub);
-#endif // endif
+/*
+ * ndo_stop handler for primary ndev
+ *
+ * Stops the netdev transmit queue first, then tears the interface
+ * down via dhd_stop(); the dhd_stop() return value is propagated
+ * to the network stack.
+ */
+static int
+dhd_pri_stop(struct net_device *net)
+{
+	s32 ret;
-exit:
-	mutex_unlock(&dhd->pub.ndev_op_sync);
-	if (ret) {
-		dhd_stop(net);
-	}
+	/* stop tx queue */
+	netif_stop_queue(net);
+	DHD_ERROR(("[%s] tx queue stopped\n", net->name));
-	DHD_PERIM_UNLOCK(&dhd->pub);
-	DHD_OS_WAKE_UNLOCK(&dhd->pub);
-	DHD_MUTEX_UNLOCK();
+	ret = dhd_stop(net);
+	if (unlikely(ret)) {
+		DHD_ERROR(("dhd_stop failed: %d\n", ret));
+		return ret;
+	}
	return ret;
}
goto done;
}
- DHD_INFO(("[%s][STATIC_IF] Enter \n", net->name));
+ printf("%s: Enter %s\n", __FUNCTION__, net->name);
/* Ensure fw is initialized. If it is already initialized,
* dhd_open will return success.
*/
netif_start_queue(net);
}
done:
+ printf("%s: Exit %s ret=%d\n", __FUNCTION__, net->name, ret);
return ret;
}
struct bcm_cfg80211 *cfg;
struct net_device *primary_netdev = NULL;
int ret = BCME_OK;
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
- DHD_INFO(("[%s][STATIC_IF] Enter \n", net->name));
+ printf("%s: Enter %s\n", __FUNCTION__, net->name);
/* Ensure queue is disabled */
netif_tx_disable(net);
ret = wl_cfg80211_static_if_close(net);
+ if (dhd->pub.up == 0) {
+ /* If fw is down, return */
+ DHD_ERROR(("fw down\n"));
+ return BCME_OK;
+ }
/* If STA iface is not in operational, invoke dhd_close from this
* context.
*/
primary_netdev = bcmcfg_to_prmry_ndev(cfg);
if (!(primary_netdev->flags & IFF_UP)) {
ret = dhd_stop(primary_netdev);
+ } else {
+ DHD_ERROR(("Skipped dhd_stop, as sta is operational\n"));
}
+ printf("%s: Exit %s ret=%d\n", __FUNCTION__, net->name, ret);
return ret;
}
} else if (ndev->name[0] != '\0') {
strlcpy(ifp->dngl_name, ndev->name, IFNAMSIZ);
}
- if (mac != NULL)
- memcpy_s(&ifp->mac_addr, ETHER_ADDR_LEN, mac, ETHER_ADDR_LEN);
+ if (mac != NULL) {
+ (void)memcpy_s(&ifp->mac_addr, ETHER_ADDR_LEN, mac, ETHER_ADDR_LEN);
+ }
+#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
+ wl_ext_event_attach_netdev(ndev, ifidx, bssidx);
+#ifdef WL_ESCAN
+ wl_escan_event_attach(ndev, dhdp);
+#endif /* WL_ESCAN */
+#ifdef WL_EXT_IAPSTA
+ wl_ext_iapsta_ifadding(ndev, ifidx);
+ wl_ext_iapsta_attach_netdev(ndev, ifidx, bssidx);
+ wl_ext_iapsta_attach_name(ndev, ifidx);
+#endif /* WL_EXT_IAPSTA */
+ } else if (if_state == NDEV_STATE_FW_IF_DELETED) {
+#ifdef WL_EXT_IAPSTA
+ wl_ext_iapsta_dettach_netdev(ndev, cur_idx);
+#endif /* WL_EXT_IAPSTA */
+#ifdef WL_ESCAN
+ wl_escan_event_dettach(ndev, dhdp);
+#endif /* WL_ESCAN */
+ wl_ext_event_dettach_netdev(ndev, cur_idx);
+#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
}
DHD_INFO(("[STATIC_IF] ifp ptr updated for ifidx:%d curidx:%d if_state:%d\n",
ifidx, cur_idx, if_state));
DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ INIT_DELAYED_WORK(&ifp->m4state_work, dhd_m4_state_handler);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+ ifp->recv_reassoc_evt = FALSE;
+ ifp->post_roam_evt = FALSE;
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
+
+#ifdef DHDTCPSYNC_FLOOD_BLK
+ INIT_WORK(&ifp->blk_tsfl_work, dhd_blk_tsfl_handler);
+ dhd_reset_tcpsync_info_by_ifp(ifp);
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
return ifp->net;
fail:
}
static void
-dhd_cleanup_ifp(dhd_pub_t *dhdp, s32 ifidx)
+dhd_cleanup_ifp(dhd_pub_t *dhdp, dhd_if_t *ifp)
{
- dhd_if_t *ifp;
- dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
#ifdef PCIE_FULL_DONGLE
+ s32 ifidx = 0;
if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
#endif /* PCIE_FULL_DONGLE */
- ifp = dhdinfo->iflist[ifidx];
if (ifp != NULL) {
+ if ((ifp->idx < 0) || (ifp->idx >= DHD_MAX_IFS)) {
+ DHD_ERROR(("Wrong idx:%d \n", ifp->idx));
+ ASSERT(0);
+ return;
+ }
#ifdef DHD_L2_FILTER
bcm_l2_filter_arp_table_update(dhdpub->osh, ifp->phnd_arp_table, TRUE,
NULL, FALSE, dhdpub->tickcnt);
dhd_if_del_sta_list(ifp);
#ifdef PCIE_FULL_DONGLE
/* Delete flowrings of virtual interface */
+ ifidx = ifp->idx;
if ((ifidx != 0) && (if_flow_lkup[ifidx].role != WLC_E_IF_ROLE_AP)) {
dhd_flow_rings_delete(dhdp, ifidx);
}
return;
}
- dhd_cleanup_ifp(dhdp, ifp->idx);
+ dhd_cleanup_ifp(dhdp, ifp);
}
/* unregister and free the the net_device interface associated with the indexed
dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
dhd_if_t *ifp;
unsigned long flags;
- u32 timeout;
+ long timeout;
ifp = dhdinfo->iflist[ifidx];
if (ifp != NULL) {
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ cancel_delayed_work_sync(&ifp->m4state_work);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+
+#ifdef DHDTCPSYNC_FLOOD_BLK
+ cancel_work_sync(&ifp->blk_tsfl_work);
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
#ifdef WL_STATIC_IF
/* static IF will be handled in detach */
if (ifp->static_if) {
unregister_netdev(ifp->net);
else
unregister_netdevice(ifp->net);
+#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
#ifdef WL_EXT_IAPSTA
- wl_ext_iapsta_dettach_netdev(ifp->net, ifidx);
-#endif
+ wl_ext_iapsta_dettach_netdev(ifp->net, ifidx);
+#endif /* WL_EXT_IAPSTA */
+#ifdef WL_ESCAN
+ wl_escan_event_dettach(ifp->net, dhdpub);
+#endif /* WL_ESCAN */
+ wl_ext_event_dettach_netdev(ifp->net, ifidx);
+#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
}
ifp->net = NULL;
DHD_GENERAL_LOCK(dhdpub, flags);
ifp->del_in_progress = false;
DHD_GENERAL_UNLOCK(dhdpub, flags);
}
- dhd_cleanup_ifp(dhdpub, ifidx);
+ dhd_cleanup_ifp(dhdpub, ifp);
DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
return BCME_OK;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
static struct net_device_ops dhd_ops_pri = {
- .ndo_open = dhd_open,
- .ndo_stop = dhd_stop,
+ .ndo_open = dhd_pri_open,
+ .ndo_stop = dhd_pri_stop,
.ndo_get_stats = dhd_get_stats,
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
.ndo_do_ioctl = dhd_ioctl_entry_wrapper,
.ndo_set_multicast_list = dhd_set_multicast_list,
#endif // endif
};
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
int
dhd_os_write_file_posn(void *fp, unsigned long *posn, void *buf,
if (!fp || !buf || buflen == 0)
return -1;
- if (vfs_write((struct file *)fp, buf, buflen, &wr_posn) < 0)
+ if (compat_vfs_write((struct file *)fp, buf, buflen, &wr_posn) < 0)
return -1;
*posn = wr_posn;
return error;
} /* dhd_init_static_strs_array */
-static int
-dhd_trace_open_proc(struct inode *inode, struct file *file)
-{
- return single_open(file, 0, NULL);
-}
-
-ssize_t
-dhd_trace_read_proc(struct file *file, char __user *buffer, size_t tt, loff_t *loff)
-{
- trace_buf_info_t *trace_buf_info;
- int ret = BCME_ERROR;
-
- ASSERT(g_dhd_pub);
- mutex_lock(&g_dhd_pub->dhd_trace_lock);
- trace_buf_info = (trace_buf_info_t *)MALLOC(g_dhd_pub->osh,
- sizeof(trace_buf_info_t));
- if (trace_buf_info) {
- dhd_get_read_buf_ptr(g_dhd_pub, trace_buf_info);
- if (copy_to_user(buffer, (void*)trace_buf_info->buf, MIN(trace_buf_info->size, tt)))
- {
- ret = -EFAULT;
- goto exit;
- }
- if (trace_buf_info->availability == BUF_NOT_AVAILABLE)
- ret = BUF_NOT_AVAILABLE;
- else
- ret = trace_buf_info->size;
- } else
- DHD_ERROR(("Memory allocation Failed\n"));
-
-exit:
- if (trace_buf_info) {
- MFREE(g_dhd_pub->osh, trace_buf_info, sizeof(trace_buf_info_t));
- }
- mutex_unlock(&g_dhd_pub->dhd_trace_lock);
- return ret;
-}
#endif /* SHOW_LOGTRACE */
#ifdef DHD_ERPOM
/* save core dump to a file */
if (dhdp->memdump_enabled) {
#ifdef DHD_SSSR_DUMP
- if (dhdp->sssr_inited) {
- dhdp->info->no_wq_sssrdump = TRUE;
- dhd_bus_sssr_dump(dhdp);
- dhdp->info->no_wq_sssrdump = FALSE;
- }
+ dhdp->collect_sssr = TRUE;
#endif /* DHD_SSSR_DUMP */
dhdp->memdump_type = DUMP_TYPE_DUE_TO_BT;
dhd_bus_mem_dump(dhdp);
#endif /* BT_OVER_SDIO */
g_dhd_pub = &dhd->pub;
- DHD_INFO(("%s: g_dhd_pub %p\n", __FUNCTION__, g_dhd_pub));
#ifdef DHD_DEBUG
dll_init(&(dhd->pub.mw_list_head));
/* Link to bus module */
dhd->pub.bus = bus;
dhd->pub.hdrlen = bus_hdrlen;
+ dhd->pub.txoff = FALSE;
/* dhd_conf must be attached after linking dhd to dhd->pub.info,
* because dhd_detech will check .info is NULL or not.
/* initialize the l2_filter_cnt */
dhd->pub.l2_filter_cnt = 0;
#endif // endif
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
- net->open = NULL;
-#else
net->netdev_ops = NULL;
-#endif // endif
mutex_init(&dhd->dhd_iovar_mutex);
sema_init(&dhd->proto_sem, 1);
init_waitqueue_head(&dhd->dmaxfer_wait);
init_waitqueue_head(&dhd->pub.tx_completion_wait);
dhd->pub.dhd_bus_busy_state = 0;
-
/* Initialize the spinlocks */
spin_lock_init(&dhd->sdlock);
spin_lock_init(&dhd->txqlock);
#ifdef WLTDLS
spin_lock_init(&dhd->pub.tdls_lock);
#endif /* WLTDLS */
-#if defined(PCIE_FULL_DONGLE)
- spin_lock_init(&dhd->backplane_access_lock);
-#endif /* defined(PCIE_FULL_DONGLE) */
#if defined(RXFRAME_THREAD)
dhd->rxthread_enabled = TRUE;
#endif /* defined(RXFRAME_THREAD) */
wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
#endif /* CONFIG_HAS_WAKELOCK */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_init(&dhd->dhd_net_if_mutex);
mutex_init(&dhd->dhd_suspend_mutex);
#if defined(PKT_FILTER_SUPPORT) && defined(APF)
mutex_init(&dhd->dhd_apf_mutex);
#endif /* PKT_FILTER_SUPPORT && APF */
-#endif // endif
dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
/* Attach and link in the protocol */
dhd_state |= DHD_ATTACH_STATE_CFG80211;
#endif // endif
-#if defined(WL_WIRELESS_EXT)
+#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
+ if (wl_ext_event_attach(net, &dhd->pub) != 0) {
+ DHD_ERROR(("wl_ext_event_attach failed\n"));
+ goto fail;
+ }
#ifdef WL_ESCAN
+ /* Attach and link in the escan */
if (wl_escan_attach(net, &dhd->pub) != 0) {
DHD_ERROR(("wl_escan_attach failed\n"));
goto fail;
}
-#else
+#endif /* WL_ESCAN */
+#ifdef WL_EXT_IAPSTA
+ if (wl_ext_iapsta_attach(&dhd->pub) != 0) {
+ DHD_ERROR(("wl_ext_iapsta_attach failed\n"));
+ goto fail;
+}
+#endif /* WL_EXT_IAPSTA */
+#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
+#if defined(WL_WIRELESS_EXT)
/* Attach and link in the iw */
if (wl_iw_attach(net, &dhd->pub) != 0) {
DHD_ERROR(("wl_iw_attach failed\n"));
goto fail;
}
dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
-#endif /* WL_ESCAN */
#endif /* defined(WL_WIRELESS_EXT) */
-#ifdef WL_EXT_IAPSTA
- if (wl_ext_iapsta_attach(&dhd->pub) != 0) {
- DHD_ERROR(("wl_ext_iapsta_attach failed\n"));
- goto fail;
- }
-#endif
#ifdef SHOW_LOGTRACE
ret = dhd_init_logstrs_array(osh, &dhd->event_data);
}
#endif /* SHOW_LOGTRACE */
-#ifdef DEBUGABILITY
/* attach debug if support */
if (dhd_os_dbg_attach(&dhd->pub)) {
DHD_ERROR(("%s debug module attach failed\n", __FUNCTION__));
goto fail;
}
-
+#ifdef DEBUGABILITY
#if defined(SHOW_LOGTRACE) && defined(DBG_RING_LOG_INIT_DEFAULT)
/* enable verbose ring to support dump_trace_buf */
dhd_os_start_logging(&dhd->pub, FW_VERBOSE_RING_NAME, 3, 0, 0, 0);
#endif /* DBG_PKT_MON */
#endif /* DEBUGABILITY */
+#ifdef DHD_STATUS_LOGGING
+ dhd->pub.statlog = dhd_attach_statlog(&dhd->pub, MAX_STATLOG_ITEM,
+ MAX_STATLOG_REQ_ITEM, STATLOG_LOGBUF_LEN);
+ if (dhd->pub.statlog == NULL) {
+ DHD_ERROR(("%s: alloc statlog failed\n", __FUNCTION__));
+ }
+#endif /* DHD_STATUS_LOGGING */
+
#ifdef DHD_LOG_DUMP
dhd_log_dump_init(&dhd->pub);
#endif /* DHD_LOG_DUMP */
+#ifdef DHD_PKTDUMP_ROAM
+ dhd_dump_pkt_init(&dhd->pub);
+#endif /* DHD_PKTDUMP_ROAM */
if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA));
#ifndef BCMDBUS
/* Set up the watchdog timer */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- timer_setup(&dhd->timer, dhd_watchdog, 0);
-#else
- init_timer(&dhd->timer);
- dhd->timer.data = (ulong)dhd;
- dhd->timer.function = dhd_watchdog;
-#endif
+ init_timer_compat(&dhd->timer, dhd_watchdog, dhd);
dhd->default_wd_interval = dhd_watchdog_ms;
if (dhd_watchdog_prio >= 0) {
#ifdef SHOW_LOGTRACE
skb_queue_head_init(&dhd->evt_trace_queue);
- if (proc_create("dhd_trace", S_IRUSR, NULL, &proc_file_fops) == NULL)
- DHD_ERROR(("Failed to create /proc/dhd_trace procfs interface\n"));
- mutex_init(&dhd->pub.dhd_trace_lock);
+
+ /* Create ring proc entries */
+ dhd_dbg_ring_proc_create(&dhd->pub);
#endif /* SHOW_LOGTRACE */
/* Set up the bottom half handler */
}
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
+ INIT_WORK(&dhd->dhd_hang_process_work, dhd_hang_process);
#ifdef DEBUG_CPU_FREQ
dhd->new_freq = alloc_percpu(int);
dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
#endif /* DHD_LB_TXP_DEFAULT_ENAB */
#endif /* DHD_LB_TXP */
+#ifdef DHD_LB_RXP
+	/* Turn ON the feature by default */
+ atomic_set(&dhd->lb_rxp_active, 1);
+#endif /* DHD_LB_RXP */
+
/* Initialize the Load Balancing Tasklets and Napi object */
#if defined(DHD_LB_TXC)
tasklet_init(&dhd->tx_compl_tasklet,
INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn);
DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__));
#endif /* DHD_LB_TXC */
-
#if defined(DHD_LB_RXC)
tasklet_init(&dhd->rx_compl_tasklet,
dhd_lb_rx_compl_handler, (ulong)(&dhd->pub));
dhd_state |= DHD_ATTACH_STATE_LB_ATTACH_DONE;
#endif /* DHD_LB */
+#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
+ INIT_WORK(&dhd->axi_error_dispatcher_work, dhd_axi_error_dispatcher_fn);
+#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
+
#if defined(BCMPCIE)
dhd->pub.extended_trap_data = MALLOCZ(osh, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
if (dhd->pub.extended_trap_data == NULL) {
DHD_ERROR(("%s: Failed to alloc extended_trap_data\n", __FUNCTION__));
}
+#ifdef DNGL_AXI_ERROR_LOGGING
+ dhd->pub.axi_err_dump = MALLOCZ(osh, sizeof(dhd_axi_error_dump_t));
+ if (dhd->pub.axi_err_dump == NULL) {
+ DHD_ERROR(("%s: Failed to alloc axi_err_dump\n", __FUNCTION__));
+ }
+#endif /* DNGL_AXI_ERROR_LOGGING */
#endif /* BCMPCIE && ETD */
#ifdef SHOW_LOGTRACE
}
#endif /* DHD_DUMP_MNGR */
#ifdef DHD_FW_COREDUMP
+ /* Set memdump default values */
+ dhd->pub.memdump_enabled = DUMP_MEMFILE_BUGON;
/* Check the memdump capability */
dhd_get_memdump_info(&dhd->pub);
#endif /* DHD_FW_COREDUMP */
DHD_TRACE(("Enter %s:\n", __FUNCTION__));
dhdp->dongle_trap_occured = 0;
+#ifdef DHD_SSSR_DUMP
+ /* Flag to indicate sssr dump is collected */
+ dhdp->sssr_dump_collected = 0;
+#endif /* DHD_SSSR_DUMP */
dhdp->iovar_timeout_occured = 0;
#ifdef PCIE_FULL_DONGLE
dhdp->d3ack_timeout_occured = 0;
dhdp->livelock_occured = 0;
+ dhdp->pktid_audit_failed = 0;
#endif /* PCIE_FULL_DONGLE */
-#ifdef DHD_MAP_LOGGING
+ dhd->pub.iface_op_failed = 0;
+ dhd->pub.scan_timeout_occurred = 0;
+ dhd->pub.scan_busy_occurred = 0;
+ /* Clear induced error during initialize */
+ dhd->pub.dhd_induce_error = DHD_INDUCE_ERROR_CLEAR;
+
+ /* set default value for now. Will be updated again in dhd_preinit_ioctls()
+ * after querying FW
+ */
+ dhdp->event_log_max_sets = NUM_EVENT_LOG_SETS;
+ dhdp->event_log_max_sets_queried = FALSE;
dhdp->smmu_fault_occurred = 0;
-#endif /* DHD_MAP_LOGGING */
+#ifdef DNGL_AXI_ERROR_LOGGING
+ dhdp->axi_error = FALSE;
+#endif /* DNGL_AXI_ERROR_LOGGING */
DHD_PERIM_LOCK(dhdp);
/* try to download image and nvram to the dongle */
if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
/* Indicate FW Download has not yet done */
- dhd->pub.fw_download_done = FALSE;
+ dhd->pub.fw_download_status = FW_DOWNLOAD_IN_PROGRESS;
DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
__FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
#if defined(DHD_DEBUG) && defined(BCMSDIO)
return ret;
}
/* Indicate FW Download has succeeded */
- dhd->pub.fw_download_done = TRUE;
+ dhd->pub.fw_download_status = FW_DOWNLOAD_DONE;
}
if (dhd->pub.busstate != DHD_BUS_LOAD) {
DHD_PERIM_UNLOCK(dhdp);
uint32 reason = ntoh32(event->reason);
unsigned long flags;
+ /* No handling needed for peer discovered reason */
+ if (reason == WLC_E_TDLS_PEER_DISCOVERED) {
+ return BCME_ERROR;
+ }
if (reason == WLC_E_TDLS_PEER_CONNECTED)
connect = TRUE;
else if (reason == WLC_E_TDLS_PEER_DISCONNECTED)
uint32 buf_key_b4_m4 = 1;
uint8 msglen;
eventmsgs_ext_t *eventmask_msg = NULL;
+ uint32 event_log_max_sets = 0;
char* iov_buf = NULL;
int ret2 = 0;
uint32 wnm_cap = 0;
uint32 cac = 1;
#endif /* SUPPORT_SET_CAC */
-#if defined(DHD_NON_DMA_M2M_CORRUPTION)
- dhd_pcie_dmaxfer_lpbk_t pcie_dmaxfer_lpbk;
-#endif /* DHD_NON_DMA_M2M_CORRUPTION */
-
#ifdef DHD_ENABLE_LPC
uint32 lpc = 1;
#endif /* DHD_ENABLE_LPC */
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
uint retry_max = CUSTOM_ASSOC_RETRY_MAX;
#if defined(ARP_OFFLOAD_SUPPORT)
- int arpoe = 1;
+ int arpoe = 0;
#endif // endif
int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
#ifdef BCMPCIE_OOB_HOST_WAKE
uint32 hostwake_oob = 0;
#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef EVENT_LOG_RATE_HC
+ /* threshold number of lines per second */
+#define EVENT_LOG_RATE_HC_THRESHOLD 1000
+ uint32 event_log_rate_hc = EVENT_LOG_RATE_HC_THRESHOLD;
+#endif /* EVENT_LOG_RATE_HC */
+ wl_wlc_version_t wlc_ver;
#ifdef PKT_FILTER_SUPPORT
dhd_pkt_filter_enable = TRUE;
dhd->apf_set = FALSE;
#endif /* APF */
#endif /* PKT_FILTER_SUPPORT */
-#ifdef WLTDLS
- dhd->tdls_enable = FALSE;
- dhd_tdls_set_mode(dhd, false);
-#endif /* WLTDLS */
dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
#ifdef ENABLE_MAX_DTIM_IN_SUSPEND
dhd->max_dtim_enable = TRUE;
#else
dhd->max_dtim_enable = FALSE;
#endif /* ENABLE_MAX_DTIM_IN_SUSPEND */
+ dhd->disable_dtim_in_suspend = FALSE;
+#ifdef SUPPORT_SET_TID
+ dhd->tid_mode = SET_TID_OFF;
+ dhd->target_uid = 0;
+ dhd->target_tid = 0;
+#endif /* SUPPORT_SET_TID */
DHD_TRACE(("Enter %s\n", __FUNCTION__));
#ifdef DHDTCPACK_SUPPRESS
dhd->dhd_cflags &= ~WLAN_PLAT_AP_FLAG;
#endif /* CUSTOM_COUNTRY_CODE && (CUSTOMER_HW2 || BOARD_HIKEY) */
+ /* query for 'ver' to get version info from firmware */
+ memset(buf, 0, sizeof(buf));
+ ptr = buf;
+ ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
+ if (ret < 0)
+ DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+ else {
+ bcmstrtok(&ptr, "\n", 0);
+ /* Print fw version info */
+ strncpy(fw_version, buf, FW_VER_STR_LEN);
+ fw_version[FW_VER_STR_LEN-1] = '\0';
+ }
+
+ /* Set op_mode as MFG_MODE if WLTEST is present in "wl ver" */
+ if (strstr(fw_version, "WLTEST") != NULL) {
+ DHD_ERROR(("%s: wl ver has WLTEST, setting op_mode as DHD_FLAG_MFG_MODE\n",
+ __FUNCTION__));
+ op_mode = DHD_FLAG_MFG_MODE;
+ }
+
if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
(op_mode == DHD_FLAG_MFG_MODE)) {
dhd->op_mode = DHD_FLAG_MFG_MODE;
}
}
#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+#ifdef DNGL_AXI_ERROR_LOGGING
+ ret = dhd_iovar(dhd, 0, "axierror_logbuf_addr", NULL, 0, (char *)&dhd->axierror_logbuf_addr,
+ sizeof(dhd->axierror_logbuf_addr), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: axierror_logbuf_addr IOVAR not present, proceed\n", __FUNCTION__));
+ dhd->axierror_logbuf_addr = 0;
+ } else {
+ DHD_ERROR(("%s: axierror_logbuf_addr : 0x%x\n", __FUNCTION__,
+ dhd->axierror_logbuf_addr));
+ }
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#ifdef EVENT_LOG_RATE_HC
+ ret = dhd_iovar(dhd, 0, "event_log_rate_hc", (char *)&event_log_rate_hc,
+ sizeof(event_log_rate_hc), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s event_log_rate_hc set failed %d\n", __FUNCTION__, ret));
+ } else {
+ DHD_ERROR(("%s event_log_rate_hc set with threshold:%d\n", __FUNCTION__,
+ event_log_rate_hc));
+ }
+#endif /* EVENT_LOG_RATE_HC */
+
#ifdef GET_CUSTOM_MAC_ENABLE
+ memset(hw_ether, 0, sizeof(hw_ether));
ret = wifi_platform_get_mac_addr(dhd->info->adapter, hw_ether);
+#ifdef GET_CUSTOM_MAC_FROM_CONFIG
+	if (!memcmp(&ether_null, &dhd->conf->hw_ether, ETHER_ADDR_LEN)) {
+ ret = 0;
+ } else
+#endif
if (!ret) {
memset(buf, 0, sizeof(buf));
+#ifdef GET_CUSTOM_MAC_FROM_CONFIG
+ memcpy(hw_ether, &dhd->conf->hw_ether, sizeof(dhd->conf->hw_ether));
+#endif
bcopy(hw_ether, ea_addr.octet, sizeof(struct ether_addr));
bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
bcm_mkiovar("hw_ether", hw_ether, sizeof(hw_ether), buf, sizeof(buf));
ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
if (ret) {
- int i;
DHD_ERROR(("%s: can't set MAC address MAC="MACDBG", error=%d\n",
__FUNCTION__, MAC2STRDBG(hw_ether), ret));
- for (i=0; i<sizeof(hw_ether)-ETHER_ADDR_LEN; i++) {
- printf("0x%02x,", hw_ether[i+ETHER_ADDR_LEN]);
- if ((i+1)%8 == 0)
- printf("\n");
- }
+ prhex("MACPAD", &hw_ether[ETHER_ADDR_LEN], sizeof(hw_ether)-ETHER_ADDR_LEN);
ret = BCME_NOTUP;
goto done;
}
arpoe = 0;
#endif // endif
#ifdef PKT_FILTER_SUPPORT
+ if (dhd_conf_get_insuspend(dhd, AP_FILTER_IN_SUSPEND))
+ dhd_pkt_filter_enable = TRUE;
+ else
dhd_pkt_filter_enable = FALSE;
#endif // endif
#ifdef SET_RANDOM_MAC_SOFTAP
(void)concurrent_mode;
#endif // endif
}
-#ifdef BCMSDIO
- if (dhd->conf->sd_f2_blocksize)
- dhdsdio_func_blocksize(dhd, 2, dhd->conf->sd_f2_blocksize);
-#endif
#ifdef DISABLE_PRUNED_SCAN
if (FW_SUPPORTED(dhd, rsdb)) {
#ifdef BCMCCX
ret = dhd_iovar(dhd, 0, "ccx_enable", (char *)&ccx, sizeof(ccx), NULL, 0, TRUE);
#endif /* BCMCCX */
+
#ifdef WLTDLS
-#ifdef ENABLE_TDLS_AUTO_MODE
- /* by default TDLS on and auto mode on */
- _dhd_tdls_enable(dhd, true, true, NULL);
-#else
- /* by default TDLS on and auto mode off */
- _dhd_tdls_enable(dhd, true, false, NULL);
-#endif /* ENABLE_TDLS_AUTO_MODE */
+ dhd->tdls_enable = FALSE;
+ dhd_tdls_set_mode(dhd, false);
#endif /* WLTDLS */
#ifdef DHD_ENABLE_LPC
DHD_ERROR(("%s Set scancache failed %d\n", __FUNCTION__, ret));
}
+ ret = dhd_iovar(dhd, 0, "event_log_max_sets", NULL, 0, (char *)&event_log_max_sets,
+ sizeof(event_log_max_sets), FALSE);
+ if (ret == BCME_OK) {
+ dhd->event_log_max_sets = event_log_max_sets;
+ } else {
+ dhd->event_log_max_sets = NUM_EVENT_LOG_SETS;
+ }
+ /* Make sure max_sets is set first with wmb and then sets_queried,
+ * this will be used during parsing the logsets in the reverse order.
+ */
+ OSL_SMP_WMB();
+ dhd->event_log_max_sets_queried = TRUE;
+ DHD_ERROR(("%s: event_log_max_sets: %d ret: %d\n",
+ __FUNCTION__, dhd->event_log_max_sets, ret));
+
#ifdef DISABLE_TXBFR
ret = dhd_iovar(dhd, 0, "txbf_bfr_cap", (char *)&txbf_bfr_cap, sizeof(txbf_bfr_cap), NULL,
0, TRUE);
DHD_ERROR(("%s ht_features set failed %d\n", __FUNCTION__, ret));
}
#endif /* DISABLE_11N_PROPRIETARY_RATES */
+#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
+#if defined(DISABLE_HE_ENAB)
+ control_he_enab = 0;
+#endif /* DISABLE_HE_ENAB */
+ dhd_control_he_enab(dhd, control_he_enab);
+#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
+
#ifdef CUSTOM_PSPRETEND_THR
/* Turn off MPC in AP mode */
ret = dhd_iovar(dhd, 0, "pspretend_threshold", (char *)&pspretend_thr,
#endif /* PNO_SUPPORT */
/* enable dongle roaming event */
#ifdef WL_CFG80211
+#if !defined(ROAM_EVT_DISABLE)
setbit(eventmask, WLC_E_ROAM);
+#endif /* !ROAM_EVT_DISABLE */
setbit(eventmask, WLC_E_BSSID);
#endif /* WL_CFG80211 */
#ifdef BCMCCX
#ifdef WL_MBO
setbit(eventmask_msg->mask, WLC_E_MBO);
#endif /* WL_MBO */
+#ifdef WL_BCNRECV
+ setbit(eventmask_msg->mask, WLC_E_BCNRECV_ABORTED);
+#endif /* WL_BCNRECV */
+#ifdef WL_CAC_TS
+ setbit(eventmask_msg->mask, WLC_E_ADDTS_IND);
+ setbit(eventmask_msg->mask, WLC_E_DELTS_IND);
+#endif /* WL_CAC_TS */
+#ifdef WL_CHAN_UTIL
+ setbit(eventmask_msg->mask, WLC_E_BSS_LOAD);
+#endif /* WL_CHAN_UTIL */
+
/* Write updated Event mask */
eventmask_msg->ver = EVENTMSGS_VER;
eventmask_msg->command = EVENTMSGS_SET_MASK;
if (ret < 0)
DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
else {
- char *clmver_temp_buf = NULL;
+ char *ver_temp_buf = NULL, *ver_date_buf = NULL;
+ int len;
- if ((clmver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
+ if ((ver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
DHD_ERROR(("Couldn't find \"Data:\"\n"));
} else {
- ptr = (clmver_temp_buf + strlen("Data:"));
- if ((clmver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
+ ver_date_buf = bcmstrstr(buf, "Creation:");
+ ptr = (ver_temp_buf + strlen("Data:"));
+ if ((ver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
DHD_ERROR(("Couldn't find New line character\n"));
} else {
memset(clm_version, 0, CLM_VER_STR_LEN);
- strncpy(clm_version, clmver_temp_buf,
- MIN(strlen(clmver_temp_buf), CLM_VER_STR_LEN - 1));
+ len = snprintf(clm_version, CLM_VER_STR_LEN - 1, "%s", ver_temp_buf);
+ if (ver_date_buf) {
+ ptr = (ver_date_buf + strlen("Creation:"));
+ ver_date_buf = bcmstrtok(&ptr, "\n", 0);
+ if (ver_date_buf)
+ snprintf(clm_version+len, CLM_VER_STR_LEN-1-len,
+ " (%s)", ver_date_buf);
+ }
+ DHD_INFO(("CLM version = %s\n", clm_version));
}
}
+
+ if (strlen(clm_version)) {
+ DHD_INFO(("CLM version = %s\n", clm_version));
+ } else {
+ DHD_ERROR(("Couldn't find CLM version!\n"));
+ }
}
+ dhd_set_version_info(dhd, fw_version);
- /* query for 'ver' to get version info from firmware */
- memset(buf, 0, sizeof(buf));
- ptr = buf;
- ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
+#ifdef WRITE_WLANINFO
+ sec_save_wlinfo(fw_version, EPI_VERSION_STR, dhd->info->nv_path, clm_version);
+#endif /* WRITE_WLANINFO */
+
+ /* query for 'wlc_ver' to get version info from firmware */
+ memset(&wlc_ver, 0, sizeof(wl_wlc_version_t));
+ ret = dhd_iovar(dhd, 0, "wlc_ver", NULL, 0, (char *)&wlc_ver,
+ sizeof(wl_wlc_version_t), FALSE);
if (ret < 0)
DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
else {
- bcmstrtok(&ptr, "\n", 0);
- strncpy(fw_version, buf, FW_VER_STR_LEN);
- fw_version[FW_VER_STR_LEN-1] = '\0';
- dhd_set_version_info(dhd, buf);
-#ifdef WRITE_WLANINFO
- sec_save_wlinfo(buf, EPI_VERSION_STR, dhd->info->nv_path, clm_version);
-#endif /* WRITE_WLANINFO */
+ dhd->wlc_ver_major = wlc_ver.wlc_ver_major;
+ dhd->wlc_ver_minor = wlc_ver.wlc_ver_minor;
}
#ifdef GEN_SOFTAP_INFO_FILE
sec_save_softap_info();
}
}
-#if defined(DHD_NON_DMA_M2M_CORRUPTION)
+#ifdef DHD_NON_DMA_M2M_CORRUPTION
/* check pcie non dma loopback */
- if (dhd->op_mode == DHD_FLAG_MFG_MODE) {
- memset(&pcie_dmaxfer_lpbk, 0, sizeof(dhd_pcie_dmaxfer_lpbk_t));
- pcie_dmaxfer_lpbk.u.length = PCIE_DMAXFER_LPBK_LENGTH;
- pcie_dmaxfer_lpbk.lpbkmode = M2M_NON_DMA_LPBK;
- pcie_dmaxfer_lpbk.wait = TRUE;
-
- if ((ret = dhd_bus_iovar_op(dhd, "pcie_dmaxfer", NULL, 0,
- (char *)&pcie_dmaxfer_lpbk, sizeof(dhd_pcie_dmaxfer_lpbk_t),
- IOV_SET)) < 0) {
- DHD_ERROR(("failed to check PCIe Non DMA Loopback Test!!! Reason : %d\n",
- ret));
- goto done;
- }
-
- if (pcie_dmaxfer_lpbk.u.status != BCME_OK) {
- DHD_ERROR(("failed to check PCIe Non DMA Loopback Test!!! Reason : %d"
- " Status : %d\n", ret, pcie_dmaxfer_lpbk.u.status));
- ret = BCME_ERROR;
+ if (dhd->op_mode == DHD_FLAG_MFG_MODE &&
+ (dhd_bus_dmaxfer_lpbk(dhd, M2M_NON_DMA_LPBK) < 0)) {
goto done;
- } else {
-
- DHD_ERROR(("successful to check PCIe Non DMA Loopback Test\n"));
- }
}
#endif /* DHD_NON_DMA_M2M_CORRUPTION */
| WL_WNM_BSSTRANS | WL_WNM_NOTIF
#endif // endif
;
+#if defined(WL_MBO) && defined(WL_OCE)
+ if (FW_SUPPORTED(dhd, estm)) {
+ wnm_cap |= WL_WNM_ESTM;
+ }
+#endif /* WL_MBO && WL_OCE */
if (dhd_iovar(dhd, 0, "wnm", (char *)&wnm_cap, sizeof(wnm_cap), NULL, 0, TRUE) < 0) {
DHD_ERROR(("failed to set WNM capabilities\n"));
}
if (FW_SUPPORTED(dhd, ecounters) && enable_ecounter) {
- if (dhd_start_ecounters(dhd) != BCME_OK) {
- DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__));
- } else if (dhd_start_event_ecounters(dhd) != BCME_OK) {
- DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__));
- }
-
+ dhd_ecounter_configure(dhd, TRUE);
}
/* store the preserve log set numbers */
}
#endif /* WL_MONITOR */
+#ifdef CONFIG_SILENT_ROAM
+ dhd->sroam_turn_on = TRUE;
+ dhd->sroamed = FALSE;
+#endif /* CONFIG_SILENT_ROAM */
+
dhd_conf_postinit_ioctls(dhd);
done:
if (!ifa || !(ifa->ifa_dev->dev))
return NOTIFY_DONE;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
/* Filter notifications meant for non Broadcom devices */
if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
(ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
#endif /* WL_ENABLE_P2P_IF */
return NOTIFY_DONE;
}
-#endif /* LINUX_VERSION_CODE */
dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
if (!dhd)
__FUNCTION__));
aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
#endif /* AOE_IP_ALIAS_SUPPORT */
+ dhd_conf_set_garp(dhd_pub, idx, ifa->ifa_address, TRUE);
break;
case NETDEV_DOWN:
dhd_aoe_hostip_clr(&dhd->pub, idx);
dhd_aoe_arp_clr(&dhd->pub, idx);
}
+ dhd_conf_set_garp(dhd_pub, idx, ifa->ifa_address, FALSE);
break;
default:
struct ipv6_work_info_t *ndo_info;
int idx;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
/* Filter notifications meant for non Broadcom devices */
if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
return NOTIFY_DONE;
}
-#endif /* LINUX_VERSION_CODE */
dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
if (!dhd) {
}
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+/* Network attach to be invoked from the bus probe handlers.
+ * Registers the primary (ifidx 0) net device via dhd_register_if() and,
+ * when WL_CFG80211 is enabled, attaches it to cfg80211.  If the cfg80211
+ * attach fails, the freshly registered interface is removed again so the
+ * caller sees a clean failure.
+ * Returns BCME_OK on success, BCME_ERROR otherwise.
+ */
+int
+dhd_attach_net(dhd_pub_t *dhdp, bool need_rtnl_lock)
+{
+	struct net_device *primary_ndev;
+	/* silence "unused" warning when WL_CFG80211 is not defined */
+	BCM_REFERENCE(primary_ndev);
+
+	/* Register primary net device */
+	if (dhd_register_if(dhdp, 0, need_rtnl_lock) != 0) {
+		return BCME_ERROR;
+	}
+
+#if defined(WL_CFG80211)
+	primary_ndev = dhd_linux_get_primary_netdev(dhdp);
+	if (wl_cfg80211_net_attach(primary_ndev) < 0) {
+		/* fail the init: undo the interface registration above */
+		dhd_remove_if(dhdp, 0, TRUE);
+		return BCME_ERROR;
+	}
+#endif /* WL_CFG80211 */
+	return BCME_OK;
+}
+
int
dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
{
net = ifp->net;
ASSERT(net && (ifp->idx == ifidx));
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
- ASSERT(!net->open);
- net->get_stats = dhd_get_stats;
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- net->do_ioctl = dhd_ioctl_entry_wrapper;
- net->hard_start_xmit = dhd_start_xmit_wrapper;
-#else
- net->do_ioctl = dhd_ioctl_entry;
- net->hard_start_xmit = dhd_start_xmit;
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
- net->set_mac_address = dhd_set_mac_address;
- net->set_multicast_list = dhd_set_multicast_list;
- net->open = net->stop = NULL;
-#else
ASSERT(!net->netdev_ops);
net->netdev_ops = &dhd_ops_virt;
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
/* Ok, link into the network layer... */
if (ifidx == 0) {
/*
* device functions for the primary interface only
*/
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
- net->open = dhd_open;
- net->stop = dhd_stop;
-#else
net->netdev_ops = &dhd_ops_pri;
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
} else {
}
net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
net->ethtool_ops = &dhd_ethtool_ops;
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
#if defined(WL_WIRELESS_EXT)
#if WIRELESS_EXT < 19
if (ifidx == 0)
printf("%s\n", dhd_version);
+ else {
+#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
+ wl_ext_event_attach_netdev(net, ifidx, ifp->bssidx);
+#ifdef WL_ESCAN
+ wl_escan_event_attach(net, dhdp);
+#endif /* WL_ESCAN */
#ifdef WL_EXT_IAPSTA
- else
wl_ext_iapsta_attach_netdev(net, ifidx, ifp->bssidx);
-#endif
+#endif /* WL_EXT_IAPSTA */
+#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
+ }
if (ifidx != 0) {
if (_dhd_set_mac_address(dhd, ifidx, net->dev_addr) == 0)
DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
goto fail;
}
+ if (ifidx == 0) {
+#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
+ wl_ext_event_attach_netdev(net, ifidx, ifp->bssidx);
+#ifdef WL_ESCAN
+ wl_escan_event_attach(net, dhdp);
+#endif /* WL_ESCAN */
#ifdef WL_EXT_IAPSTA
- if (ifidx == 0)
wl_ext_iapsta_attach_netdev(net, ifidx, ifp->bssidx);
+#endif /* WL_EXT_IAPSTA */
+#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
+ }
+#ifdef WL_EXT_IAPSTA
wl_ext_iapsta_attach_name(net, ifidx);
-#endif
+#endif /* WL_EXT_IAPSTA */
printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
MAC2STRDBG(net->dev_addr));
// wl_iw_iscan_set_scan_broadcast_prep(net, 1);
#endif // endif
-#if (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
- KERNEL_VERSION(2, 6, 27))) || defined(BCMDBUS))
+#if (defined(BCMPCIE) || defined(BCMLXSDMMC) || defined(BCMDBUS))
if (ifidx == 0) {
#if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD)
up(&dhd_registration_sem);
#endif /* BT_OVER_SDIO */
}
#if defined(WL_WIRELESS_EXT)
-#ifdef WL_ESCAN
- wl_escan_down(&dhd->pub);
-#endif /* WL_ESCAN */
+ wl_iw_down(net, &dhd->pub);
#endif /* defined(WL_WIRELESS_EXT) */
}
-#endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */
+#endif /* OEM_ANDROID && (BCMPCIE || BCMLXSDMMC) */
return 0;
fail:
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
- net->open = NULL;
-#else
net->netdev_ops = NULL;
-#endif // endif
return err;
}
DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
+ DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
dhd->pub.up = 0;
if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
/* Give sufficient time for threads to start running in case
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
#if defined(WL_WIRELESS_EXT)
-#ifdef WL_ESCAN
- wl_escan_detach(dhdp);
-#else
if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
/* Detatch and unlink in the iw */
- wl_iw_detach(dhdp);
+ wl_iw_detach(dev, dhdp);
}
-#endif /* WL_ESCAN */
#endif /* defined(WL_WIRELESS_EXT) */
+#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
#ifdef WL_EXT_IAPSTA
wl_ext_iapsta_dettach(dhdp);
-#endif
+#endif /* WL_EXT_IAPSTA */
+#ifdef WL_ESCAN
+ wl_escan_detach(dev, dhdp);
+#endif /* WL_ESCAN */
+ wl_ext_event_dettach(dhdp);
+#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
#ifdef DHD_ULP
dhd_ulp_deinit(dhd->pub.osh, dhdp);
}
#endif /* DHD_LB */
+#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
+ cancel_work_sync(&dhd->axi_error_dispatcher_work);
+#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
+
DHD_SSSR_MEMPOOL_DEINIT(&dhd->pub);
#ifdef WL_CFG80211
dhd_os_dbg_detach_pkt_monitor(dhdp);
dhd_os_spin_lock_deinit(dhd->pub.osh, dhd->pub.dbg->pkt_mon_lock);
#endif /* DBG_PKT_MON */
- dhd_os_dbg_detach(dhdp);
}
#endif /* DEBUGABILITY */
+ if (dhdp->dbg) {
+ dhd_os_dbg_detach(dhdp);
+ }
+#ifdef DHD_STATUS_LOGGING
+ dhd_detach_statlog(dhdp);
+#endif /* DHD_STATUS_LOGGING */
+#ifdef DHD_PKTDUMP_ROAM
+ dhd_dump_pkt_deinit(dhdp);
+#endif /* DHD_PKTDUMP_ROAM */
#ifdef SHOW_LOGTRACE
/* Release the skbs from queue for WLC_E_TRACE event */
dhd_event_logtrace_flush_queue(dhdp);
/* Wait till event logtrace context finishes */
dhd_cancel_logtrace_process_sync(dhd);
- mutex_lock(&dhd->pub.dhd_trace_lock);
- remove_proc_entry("dhd_trace", NULL);
- mutex_unlock(&dhd->pub.dhd_trace_lock);
+
+ /* Remove ring proc entries */
+ dhd_dbg_ring_proc_destroy(&dhd->pub);
if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
if (dhd->event_data.fmts) {
dhd_mw_list_delete(dhdp, &(dhdp->mw_list_head));
#endif /* DHD_DEBUG */
#ifdef WL_MONITOR
- dhd_del_monitor_if(dhd, NULL, DHD_WQ_WORK_IF_DEL);
+ dhd_del_monitor_if(dhd);
#endif /* WL_MONITOR */
#ifdef DHD_ERPOM
}
#endif /* DHD_ERPOM */
+ cancel_work_sync(&dhd->dhd_hang_process_work);
+
/* Prefer adding de-init code above this comment unless necessary.
* The idea is to cancel work queue, sysfs and flags at the end.
*/
MFREE(dhdp->osh, dhdp->extended_trap_data, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
dhdp->extended_trap_data = NULL;
}
+#ifdef DNGL_AXI_ERROR_LOGGING
+ if (dhdp->axi_err_dump)
+ {
+ MFREE(dhdp->osh, dhdp->axi_err_dump, sizeof(dhd_axi_error_dump_t));
+ dhdp->axi_err_dump = NULL;
+ }
+#endif /* DNGL_AXI_ERROR_LOGGING */
#endif /* BCMPCIE */
#ifdef DHD_DUMP_MNGR
}
#endif /* DHD_DUMP_MNGR */
dhd_sysfs_exit(dhd);
- dhd->pub.fw_download_done = FALSE;
+ dhd->pub.fw_download_status = FW_UNLOADED;
#if defined(BT_OVER_SDIO)
mutex_destroy(&dhd->bus_user_lock);
return NOTIFY_DONE;
}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
#if defined(CONFIG_DEFERRED_INITCALLS) && !defined(EXYNOS_PCIE_MODULE_PATCH)
#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998) || \
#else
late_initcall(dhd_module_init);
#endif /* USE_LATE_INITCALL_SYNC */
-#else
-module_init(dhd_module_init);
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
module_exit(dhd_module_exit);
}
int
-dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool resched)
+dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition)
{
dhd_info_t * dhd = (dhd_info_t *)(pub->info);
- int timeout, timeout_tmp = dhd_ioctl_timeout_msec;
-
- if (!resched && pub->conf->ctrl_resched>0 && pub->conf->dhd_ioctl_timeout_msec>0) {
- timeout_tmp = dhd_ioctl_timeout_msec;
- dhd_ioctl_timeout_msec = pub->conf->dhd_ioctl_timeout_msec;
- }
+ int timeout;
	/* Convert timeout in milliseconds to jiffies */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
-#else
- timeout = dhd_ioctl_timeout_msec * HZ / 1000;
-#endif // endif
DHD_PERIM_UNLOCK(pub);
timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
- if (!resched && pub->conf->ctrl_resched>0 && pub->conf->dhd_ioctl_timeout_msec>0) {
- dhd_ioctl_timeout_msec = timeout_tmp;
- }
-
DHD_PERIM_LOCK(pub);
return timeout;
int timeout;
	/* Convert timeout in milliseconds to jiffies */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
timeout = msecs_to_jiffies(D3_ACK_RESP_TIMEOUT);
-#else
- timeout = D3_ACK_RESP_TIMEOUT * HZ / 1000;
-#endif // endif
DHD_PERIM_UNLOCK(pub);
* so that IOCTL timeout should not get affected.
*/
	/* Convert timeout in milliseconds to jiffies */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
-#else
- timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
-#endif // endif
timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);
int timeout;
	/* Convert timeout in milliseconds to jiffies */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
-#else
- timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
-#endif // endif
timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, (*var == condition), timeout);
int timeout;
	/* Convert timeout in milliseconds to jiffies */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
-#else
- timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
-#endif // endif
timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait,
((*var & bitmask) == condition), timeout);
{
int ret = 0;
dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+ int timeout;
+
+ timeout = msecs_to_jiffies(IOCTL_DMAXFER_TIMEOUT);
DHD_PERIM_UNLOCK(pub);
- ret = wait_event_interruptible(dhd->dmaxfer_wait, (*condition));
+ ret = wait_event_timeout(dhd->dmaxfer_wait, (*condition), timeout);
DHD_PERIM_LOCK(pub);
return ret;
wake_up(&dhd->tx_completion_wait);
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
/* Fix compilation error for FC11 */
-INLINE
-#endif // endif
-int
+INLINE int
dhd_os_busbusy_wake(dhd_pub_t *pub)
{
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
dhd_pub_t *pub = bus;
dhd_info_t *dhd = (dhd_info_t *)pub->info;
- if (extend)
- dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
- else
- dhd_os_wd_timer(bus, dhd->default_wd_interval);
-#endif /* !BCMDBUS */
+ if (extend)
+ dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
+ else
+ dhd_os_wd_timer(bus, dhd->default_wd_interval);
+#endif /* !BCMDBUS */
+}
+
+/* Arm, re-arm or stop the DHD watchdog timer.
+ * wdtick == 0 stops a running timer (and, on SDIO, releases the watchdog
+ * wake lock taken when the timer was armed); a non-zero wdtick re-arms the
+ * timer.  The call is a no-op while the bus is still down (firmware not
+ * loaded yet), and the whole body is compiled out for BCMDBUS builds.
+ * NOTE(review): for non-BCMSDIO builds wdtick is never copied into
+ * dhd_watchdog_ms, so the timer re-arms with the existing period — confirm
+ * this is intended.
+ */
+void
+dhd_os_wd_timer(void *bus, uint wdtick)
+{
+#ifndef BCMDBUS
+	dhd_pub_t *pub = bus;
+	dhd_info_t *dhd = (dhd_info_t *)pub->info;
+	unsigned long flags;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_GENERAL_LOCK(pub, flags);
+
+	/* don't start the wd until fw is loaded */
+	if (pub->busstate == DHD_BUS_DOWN) {
+		DHD_GENERAL_UNLOCK(pub, flags);
+#ifdef BCMSDIO
+		if (!wdtick) {
+			/* caller asked to stop: release the wake lock taken on arm */
+			DHD_OS_WD_WAKE_UNLOCK(pub);
+		}
+#endif /* BCMSDIO */
+		return;
+	}
+
+	/* Totally stop the timer */
+	if (!wdtick && dhd->wd_timer_valid == TRUE) {
+		dhd->wd_timer_valid = FALSE;
+		/* drop the lock before del_timer_sync(): it may sleep waiting for
+		 * a concurrently running timer handler to finish
+		 */
+		DHD_GENERAL_UNLOCK(pub, flags);
+		del_timer_sync(&dhd->timer);
+#ifdef BCMSDIO
+		DHD_OS_WD_WAKE_UNLOCK(pub);
+#endif /* BCMSDIO */
+		return;
+	}
+
+	if (wdtick) {
+#ifdef BCMSDIO
+		DHD_OS_WD_WAKE_LOCK(pub);
+		dhd_watchdog_ms = (uint)wdtick;
+#endif /* BCMSDIO */
+		/* Re arm the timer, at last watchdog period */
+		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
+		dhd->wd_timer_valid = TRUE;
+	}
+	DHD_GENERAL_UNLOCK(pub, flags);
+#endif /* !BCMDBUS */
+}
+
+/* Open a firmware/nvram image file for reading.
+ * Validates that the path exists, is a regular file and has a positive
+ * size.  Returns an opaque struct file * handle for use with
+ * dhd_os_get_image_block()/dhd_os_close_image1(), or NULL on any failure.
+ * Fix: the original leaked the open struct file on the "not a regular
+ * file" and "invalid size" error paths (fp was NULLed without filp_close).
+ */
+void *
+dhd_os_open_image1(dhd_pub_t *pub, char *filename)
+{
+	struct file *fp;
+	int size;
+
+	fp = filp_open(filename, O_RDONLY, 0);
+	/*
+	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
+	 * Alternative:
+	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
+	 * ???
+	 */
+	if (IS_ERR(fp)) {
+		fp = NULL;
+		goto err;
+	}
+
+	if (!S_ISREG(file_inode(fp)->i_mode)) {
+		DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename));
+		/* close the handle before dropping it, else it leaks */
+		filp_close(fp, NULL);
+		fp = NULL;
+		goto err;
+	}
+
+	size = i_size_read(file_inode(fp));
+	if (size <= 0) {
+		DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size));
+		/* close the handle before dropping it, else it leaks */
+		filp_close(fp, NULL);
+		fp = NULL;
+		goto err;
+	}
+
+	DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size));
+
+err:
+	return fp;
+}
+
+/* Read up to 'len' bytes from an open image handle into 'buf', advancing
+ * the file position by the amount actually read.
+ * Returns the number of bytes read, 0 for a NULL handle, a negative
+ * errno from the underlying read, or -EIO when the caller requested the
+ * whole file (len >= size) but the read came up short.
+ */
+int
+dhd_os_get_image_block(char *buf, int len, void *image)
+{
+	struct file *fp = (struct file *)image;
+	int rdlen;
+	int size;
+
+	if (!image) {
+		return 0;
+	}
+
+	/* never read past the end of the file */
+	size = i_size_read(file_inode(fp));
+	rdlen = compat_kernel_read(fp, fp->f_pos, buf, MIN(len, size));
+
+	/* a full-file request that came up short is a hard error */
+	if (len >= size && size != rdlen) {
+		return -EIO;
+	}
+
+	if (rdlen > 0) {
+		fp->f_pos += rdlen;
+	}
+
+	return rdlen;
+}
+
+#if defined(BT_OVER_SDIO)
+/* Read a single '\n'-terminated line from an open image handle into 'str'.
+ * On success the file position is advanced past the newline and the read
+ * buffer beyond the line is zeroed.  Returns the line length excluding the
+ * newline, or 0 when the handle is NULL or no newline is found in 'len'
+ * bytes.
+ * NOTE(review): a negative compat_kernel_read() result is not checked
+ * before strnchr() scans the buffer — confirm callers guarantee success.
+ */
+int
+dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image)
+{
+	struct file *fp = (struct file *)image;
+	int rd_len;
+	uint str_len = 0;
+	char *str_end = NULL;
+
+	if (!image)
+		return 0;
+
+	rd_len = compat_kernel_read(fp, fp->f_pos, str, len);
+	str_end = strnchr(str, len, '\n');
+	if (str_end == NULL) {
+		goto err;
+	}
+	str_len = (uint)(str_end - str);
+
+	/* Advance file pointer past the string length */
+	fp->f_pos += str_len + 1;
+	/* zero the tail of the buffer from the newline onwards */
+	bzero(str_end, rd_len - str_len);
+
+err:
+	return str_len;
+}
+#endif /* defined (BT_OVER_SDIO) */
+
+/* Return the size in bytes of an open image handle, or 0 for a NULL
+ * handle.
+ */
+int
+dhd_os_get_image_size(void *image)
+{
+	struct file *fp = (struct file *)image;
+	int size;
+	if (!image) {
+		return 0;
+	}
+
+	size = i_size_read(file_inode(fp));
+
+	return size;
+}
+
+/* Close an image handle previously returned by dhd_os_open_image1().
+ * Safe to call with a NULL handle.
+ */
+void
+dhd_os_close_image1(dhd_pub_t *pub, void *image)
+{
+	if (image) {
+		filp_close((struct file *)image, NULL);
+	}
+}
+
+/* Acquire the SD bus lock.
+ * For non-BCMDBUS builds: a sleeping semaphore when the DPC runs as a
+ * thread (dhd_dpc_prio >= 0), otherwise a bottom-half spinlock (DPC in
+ * tasklet context — presumably cannot sleep; confirm).  BCMDBUS builds
+ * always use the spinlock.  Paired with dhd_os_sdunlock().
+ */
+void
+dhd_os_sdlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+
+#ifdef BCMDBUS
+	spin_lock_bh(&dhd->sdlock);
+#else
+	if (dhd_dpc_prio >= 0)
+		down(&dhd->sdsem);
+	else
+		spin_lock_bh(&dhd->sdlock);
+#endif /* !BCMDBUS */
+}
+
+/* Release the SD bus lock taken by dhd_os_sdlock(); must mirror its
+ * semaphore-vs-spinlock choice exactly.
+ */
+void
+dhd_os_sdunlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+
+#ifdef BCMDBUS
+	spin_unlock_bh(&dhd->sdlock);
+#else
+	if (dhd_dpc_prio >= 0)
+		up(&dhd->sdsem);
+	else
+		spin_unlock_bh(&dhd->sdlock);
+#endif /* !BCMDBUS */
+}
+
+/* Acquire the TX queue lock.  BCMDBUS builds use the irqsave variant
+ * (flags stashed in dhd->txqlock_flags); other builds disable bottom
+ * halves only.  Paired with dhd_os_sdunlock_txq().
+ */
+void
+dhd_os_sdlock_txq(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+#ifdef BCMDBUS
+	spin_lock_irqsave(&dhd->txqlock, dhd->txqlock_flags);
+#else
+	spin_lock_bh(&dhd->txqlock);
+#endif /* BCMDBUS */
+}
+
+/* Release the TX queue lock taken by dhd_os_sdlock_txq(), restoring the
+ * saved irq flags on BCMDBUS builds.
+ */
+void
+dhd_os_sdunlock_txq(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+#ifdef BCMDBUS
+	spin_unlock_irqrestore(&dhd->txqlock, dhd->txqlock_flags);
+#else
+	spin_unlock_bh(&dhd->txqlock);
+#endif /* BCMDBUS */
+}
+
+/* RX queue lock — intentionally a no-op; kept so the bus layer's
+ * lock/unlock call pairs stay symmetric.
+ */
+void
+dhd_os_sdlock_rxq(dhd_pub_t *pub)
+{
+}
+
+/* RX queue unlock — intentionally a no-op, matching dhd_os_sdlock_rxq(). */
+void
+dhd_os_sdunlock_rxq(dhd_pub_t *pub)
+{
+}
+
+static void
+dhd_os_rxflock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)(pub->info);
+ spin_lock_bh(&dhd->rxf_lock);
+
}
-void
-dhd_os_wd_timer(void *bus, uint wdtick)
+static void
+dhd_os_rxfunlock(dhd_pub_t *pub)
{
-#ifndef BCMDBUS
- dhd_pub_t *pub = bus;
- dhd_info_t *dhd = (dhd_info_t *)pub->info;
- unsigned long flags;
+ dhd_info_t *dhd;
- DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ dhd = (dhd_info_t *)(pub->info);
+ spin_unlock_bh(&dhd->rxf_lock);
+}
- if (!dhd) {
- DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
- return;
- }
+#ifdef DHDTCPACK_SUPPRESS
+unsigned long
+dhd_os_tcpacklock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+ unsigned long flags = 0;
- DHD_GENERAL_LOCK(pub, flags);
+ dhd = (dhd_info_t *)(pub->info);
- /* don't start the wd until fw is loaded */
- if (pub->busstate == DHD_BUS_DOWN) {
- DHD_GENERAL_UNLOCK(pub, flags);
+ if (dhd) {
#ifdef BCMSDIO
- if (!wdtick) {
- DHD_OS_WD_WAKE_UNLOCK(pub);
- }
+ spin_lock_bh(&dhd->tcpack_lock);
+#else
+ spin_lock_irqsave(&dhd->tcpack_lock, flags);
#endif /* BCMSDIO */
- return;
}
- /* Totally stop the timer */
- if (!wdtick && dhd->wd_timer_valid == TRUE) {
- dhd->wd_timer_valid = FALSE;
- DHD_GENERAL_UNLOCK(pub, flags);
- del_timer_sync(&dhd->timer);
+ return flags;
+}
+
+void
+dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
+{
+ dhd_info_t *dhd;
+
#ifdef BCMSDIO
- DHD_OS_WD_WAKE_UNLOCK(pub);
+ BCM_REFERENCE(flags);
#endif /* BCMSDIO */
- return;
- }
- if (wdtick) {
+ dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
#ifdef BCMSDIO
- DHD_OS_WD_WAKE_LOCK(pub);
- dhd_watchdog_ms = (uint)wdtick;
+ spin_unlock_bh(&dhd->tcpack_lock);
+#else
+ spin_unlock_irqrestore(&dhd->tcpack_lock, flags);
#endif /* BCMSDIO */
- /* Re arm the timer, at last watchdog period */
- mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
- dhd->wd_timer_valid = TRUE;
}
- DHD_GENERAL_UNLOCK(pub, flags);
-#endif /* !BCMDBUS */
}
+#endif /* DHDTCPACK_SUPPRESS */
-void *
-dhd_os_open_image1(dhd_pub_t *pub, char *filename)
+uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
{
- struct file *fp;
- int size;
+ uint8* buf;
+ gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
- fp = filp_open(filename, O_RDONLY, 0);
- /*
- * 2.6.11 (FC4) supports filp_open() but later revs don't?
- * Alternative:
- * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
- * ???
- */
- if (IS_ERR(fp)) {
- fp = NULL;
- goto err;
- }
+ buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
+ if (buf == NULL && kmalloc_if_fail)
+ buf = kmalloc(size, flags);
- if (!S_ISREG(file_inode(fp)->i_mode)) {
- DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename));
- fp = NULL;
- goto err;
- }
+ return buf;
+}
- size = i_size_read(file_inode(fp));
- if (size <= 0) {
- DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size));
- fp = NULL;
- goto err;
- }
+void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
+{
+}
- DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size));
+#if defined(WL_WIRELESS_EXT)
+struct iw_statistics *
+dhd_get_wireless_stats(struct net_device *dev)
+{
+ int res = 0;
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
-err:
- return fp;
+ if (!dhd->pub.up) {
+ return NULL;
+ }
+
+ res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
+
+ if (res == 0)
+ return &dhd->iw.wstats;
+ else
+ return NULL;
}
+#endif /* defined(WL_WIRELESS_EXT) */
-int
-dhd_os_get_image_block(char *buf, int len, void *image)
+static int
+dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
+ wl_event_msg_t *event, void **data)
{
- struct file *fp = (struct file *)image;
- int rdlen;
- int size;
+ int bcmerror = 0;
+#ifdef WL_CFG80211
+ unsigned long flags = 0;
+#endif /* WL_CFG80211 */
+ ASSERT(dhd != NULL);
- if (!image) {
- return 0;
+#ifdef SHOW_LOGTRACE
+ bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
+ &dhd->event_data);
+#else
+ bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
+ NULL);
+#endif /* SHOW_LOGTRACE */
+ if (unlikely(bcmerror != BCME_OK)) {
+ return bcmerror;
}
- size = i_size_read(file_inode(fp));
- rdlen = compat_kernel_read(fp, fp->f_pos, buf, MIN(len, size));
-
- if (len >= size && size != rdlen) {
- return -EIO;
+ if (ntoh32(event->event_type) == WLC_E_IF) {
+ /* WLC_E_IF event types are consumed by wl_process_host_event.
+ * For ifadd/del ops, the netdev ptr may not be valid at this
+ * point. so return before invoking cfg80211/wext handlers.
+ */
+ return BCME_OK;
}
- if (rdlen > 0) {
- fp->f_pos += rdlen;
+#if defined(WL_EXT_IAPSTA) || defined(USE_IW)
+ wl_ext_event_send(dhd->pub.event_params, event, *data);
+#endif
+
+#ifdef WL_CFG80211
+ if (dhd->iflist[ifidx]->net) {
+ spin_lock_irqsave(&dhd->pub.up_lock, flags);
+ if (dhd->pub.up) {
+ wl_cfg80211_event(dhd->iflist[ifidx]->net, event, *data);
+ }
+ spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
}
+#endif /* defined(WL_CFG80211) */
- return rdlen;
+ return (bcmerror);
}
-#if defined(BT_OVER_SDIO)
-int
-dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image)
+/* send up locally generated event */
+void
+dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
{
- struct file *fp = (struct file *)image;
- int rd_len;
- uint str_len = 0;
- char *str_end = NULL;
+ switch (ntoh32(event->event_type)) {
+ /* Handle error case or further events here */
+ default:
+ break;
+ }
+}
- if (!image)
- return 0;
+#ifdef LOG_INTO_TCPDUMP
+void
+dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
+{
+ struct sk_buff *p, *skb;
+ uint32 pktlen;
+ int len;
+ dhd_if_t *ifp;
+ dhd_info_t *dhd;
+ uchar *skb_data;
+ int ifidx = 0;
+ struct ether_header eth;
- rd_len = compat_kernel_read(fp, fp->f_pos, str, len);
- str_end = strnchr(str, len, '\n');
- if (str_end == NULL) {
- goto err;
- }
- str_len = (uint)(str_end - str);
+ pktlen = sizeof(eth) + data_len;
+ dhd = dhdp->info;
- /* Advance file pointer past the string length */
- fp->f_pos += str_len + 1;
- bzero(str_end, rd_len - str_len);
+ if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
+ ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
-err:
- return str_len;
+ bcopy(&dhdp->mac, ð.ether_dhost, ETHER_ADDR_LEN);
+ bcopy(&dhdp->mac, ð.ether_shost, ETHER_ADDR_LEN);
+ ETHER_TOGGLE_LOCALADDR(ð.ether_shost);
+ eth.ether_type = hton16(ETHER_TYPE_BRCM);
+
+ bcopy((void *)ð, PKTDATA(dhdp->osh, p), sizeof(eth));
+ bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
+ skb = PKTTONATIVE(dhdp->osh, p);
+ skb_data = skb->data;
+ len = skb->len;
+
+ ifidx = dhd_ifname2idx(dhd, "wlan0");
+ ifp = dhd->iflist[ifidx];
+ if (ifp == NULL)
+ ifp = dhd->iflist[0];
+
+ ASSERT(ifp);
+ skb->dev = ifp->net;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ skb->data = skb_data;
+ skb->len = len;
+
+ /* Strip header, count, deliver upward */
+ skb_pull(skb, ETH_HLEN);
+
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
+ /* Send the packet */
+ if (in_interrupt()) {
+ netif_rx(skb);
+ } else {
+ netif_rx_ni(skb);
+ }
+ } else {
+ /* Could not allocate a sk_buf */
+ DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
+ }
}
-#endif /* defined (BT_OVER_SDIO) */
+#endif /* LOG_INTO_TCPDUMP */
-int
-dhd_os_get_image_size(void *image)
+void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
{
- struct file *fp = (struct file *)image;
- int size;
- if (!image) {
- return 0;
- }
+#if defined(BCMSDIO)
+ struct dhd_info *dhdinfo = dhd->info;
- size = i_size_read(file_inode(fp));
+ int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
- return size;
-}
+ dhd_os_sdunlock(dhd);
+ wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
+ dhd_os_sdlock(dhd);
+#endif /* defined(BCMSDIO) */
+ return;
+} /* dhd_wait_for_event */
-void
-dhd_os_close_image1(dhd_pub_t *pub, void *image)
+void dhd_wait_event_wakeup(dhd_pub_t *dhd)
{
- if (image) {
- filp_close((struct file *)image, NULL);
- }
+#if defined(BCMSDIO)
+ struct dhd_info *dhdinfo = dhd->info;
+ if (waitqueue_active(&dhdinfo->ctrl_wait))
+ wake_up(&dhdinfo->ctrl_wait);
+#endif // endif
+ return;
}
-void
-dhd_os_sdlock(dhd_pub_t *pub)
+#if defined(BCMSDIO) || defined(BCMPCIE) || defined(BCMDBUS)
+int
+dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
{
- dhd_info_t *dhd;
+ int ret;
- dhd = (dhd_info_t *)(pub->info);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
-#ifdef BCMDBUS
- spin_lock_bh(&dhd->sdlock);
-#else
- if (dhd_dpc_prio >= 0)
- down(&dhd->sdsem);
- else
- spin_lock_bh(&dhd->sdlock);
-#endif /* !BCMDBUS */
-}
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0)
+ return BCME_ERROR;
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-void
-dhd_os_sdunlock(dhd_pub_t *pub)
-{
- dhd_info_t *dhd;
+ if (flag == TRUE) {
+ /* Issue wl down command before resetting the chip */
+ if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
+ DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
+ }
+#ifdef PROP_TXSTATUS
+ if (dhd->pub.wlfc_enabled) {
+ dhd_wlfc_deinit(&dhd->pub);
+ }
+#endif /* PROP_TXSTATUS */
+#ifdef PNO_SUPPORT
+ if (dhd->pub.pno_state) {
+ dhd_pno_deinit(&dhd->pub);
+ }
+#endif // endif
+#ifdef RTT_SUPPORT
+ if (dhd->pub.rtt_state) {
+ dhd_rtt_deinit(&dhd->pub);
+ }
+#endif /* RTT_SUPPORT */
- dhd = (dhd_info_t *)(pub->info);
+#if defined(DBG_PKT_MON) && !defined(DBG_PKT_MON_INIT_DEFAULT)
+ dhd_os_dbg_detach_pkt_monitor(&dhd->pub);
+#endif /* DBG_PKT_MON */
+ }
-#ifdef BCMDBUS
- spin_unlock_bh(&dhd->sdlock);
-#else
- if (dhd_dpc_prio >= 0)
- up(&dhd->sdsem);
- else
- spin_unlock_bh(&dhd->sdlock);
-#endif /* !BCMDBUS */
-}
+#ifdef BCMSDIO
+ if (!flag) {
+ dhd_update_fw_nv_path(dhd);
+ /* update firmware and nvram path to sdio bus */
+ dhd_bus_update_fw_nv_path(dhd->pub.bus,
+ dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
+ }
+#endif /* BCMSDIO */
-void
-dhd_os_sdlock_txq(dhd_pub_t *pub)
-{
- dhd_info_t *dhd;
+ ret = dhd_bus_devreset(&dhd->pub, flag);
- dhd = (dhd_info_t *)(pub->info);
-#ifdef BCMDBUS
- spin_lock_irqsave(&dhd->txqlock, dhd->txqlock_flags);
-#else
- spin_lock_bh(&dhd->txqlock);
-#endif /* BCMDBUS */
-}
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
+ pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-void
-dhd_os_sdunlock_txq(dhd_pub_t *pub)
-{
- dhd_info_t *dhd;
+ if (flag) {
+ /* Clear some flags for recovery logic */
+ dhd->pub.dongle_trap_occured = 0;
+ dhd->pub.iovar_timeout_occured = 0;
+#ifdef PCIE_FULL_DONGLE
+ dhd->pub.d3ack_timeout_occured = 0;
+ dhd->pub.livelock_occured = 0;
+ dhd->pub.pktid_audit_failed = 0;
+#endif /* PCIE_FULL_DONGLE */
+ dhd->pub.iface_op_failed = 0;
+ dhd->pub.scan_timeout_occurred = 0;
+ dhd->pub.scan_busy_occurred = 0;
+ dhd->pub.smmu_fault_occurred = 0;
+ }
- dhd = (dhd_info_t *)(pub->info);
-#ifdef BCMDBUS
- spin_unlock_irqrestore(&dhd->txqlock, dhd->txqlock_flags);
-#else
- spin_unlock_bh(&dhd->txqlock);
-#endif /* BCMDBUS */
-}
+ if (ret) {
+ DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
+ }
-void
-dhd_os_sdlock_rxq(dhd_pub_t *pub)
-{
+ return ret;
}
-void
-dhd_os_sdunlock_rxq(dhd_pub_t *pub)
+#ifdef BCMSDIO
+int
+dhd_net_bus_suspend(struct net_device *dev)
{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return dhd_bus_suspend(&dhd->pub);
}
-static void
-dhd_os_rxflock(dhd_pub_t *pub)
+int
+dhd_net_bus_resume(struct net_device *dev, uint8 stage)
{
- dhd_info_t *dhd;
-
- dhd = (dhd_info_t *)(pub->info);
- spin_lock_bh(&dhd->rxf_lock);
-
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return dhd_bus_resume(&dhd->pub, stage);
}
-static void
-dhd_os_rxfunlock(dhd_pub_t *pub)
-{
- dhd_info_t *dhd;
-
- dhd = (dhd_info_t *)(pub->info);
- spin_unlock_bh(&dhd->rxf_lock);
-}
+#endif /* BCMSDIO */
+#endif /* BCMSDIO || BCMPCIE || BCMDBUS */
-#ifdef DHDTCPACK_SUPPRESS
-unsigned long
-dhd_os_tcpacklock(dhd_pub_t *pub)
+int net_os_set_suspend_disable(struct net_device *dev, int val)
{
- dhd_info_t *dhd;
- unsigned long flags = 0;
-
- dhd = (dhd_info_t *)(pub->info);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret = 0;
if (dhd) {
-#ifdef BCMSDIO
- spin_lock_bh(&dhd->tcpack_lock);
-#else
- spin_lock_irqsave(&dhd->tcpack_lock, flags);
-#endif /* BCMSDIO */
+ ret = dhd->pub.suspend_disable_flag;
+ dhd->pub.suspend_disable_flag = val;
}
-
- return flags;
+ return ret;
}
-void
-dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
+int net_os_set_suspend(struct net_device *dev, int val, int force)
{
- dhd_info_t *dhd;
-
-#ifdef BCMSDIO
- BCM_REFERENCE(flags);
-#endif /* BCMSDIO */
-
- dhd = (dhd_info_t *)(pub->info);
+ int ret = 0;
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
if (dhd) {
-#ifdef BCMSDIO
- spin_unlock_bh(&dhd->tcpack_lock);
+#ifdef CONFIG_MACH_UNIVERSAL7420
+#endif /* CONFIG_MACH_UNIVERSAL7420 */
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+ ret = dhd_set_suspend(val, &dhd->pub);
#else
- spin_unlock_irqrestore(&dhd->tcpack_lock, flags);
-#endif /* BCMSDIO */
+ ret = dhd_suspend_resume_helper(dhd, val, force);
+#endif // endif
+#ifdef WL_CFG80211
+ wl_cfg80211_update_power_mode(dev);
+#endif // endif
}
+ return ret;
}
-#endif /* DHDTCPACK_SUPPRESS */
-uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
+int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
{
- uint8* buf;
- gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
- buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
- if (buf == NULL && kmalloc_if_fail)
- buf = kmalloc(size, flags);
+ if (dhd) {
+ DHD_ERROR(("%s: Set bcn_li_dtim in suspend %d\n",
+ __FUNCTION__, val));
+ dhd->pub.suspend_bcn_li_dtim = val;
+ }
- return buf;
+ return 0;
}
-void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
+int net_os_set_max_dtim_enable(struct net_device *dev, int val)
{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+ if (dhd) {
+ DHD_ERROR(("%s: use MAX bcn_li_dtim in suspend %s\n",
+ __FUNCTION__, (val ? "Enable" : "Disable")));
+ if (val) {
+ dhd->pub.max_dtim_enable = TRUE;
+ } else {
+ dhd->pub.max_dtim_enable = FALSE;
+ }
+ } else {
+ return -1;
+ }
+
+ return 0;
}
-#if defined(WL_WIRELESS_EXT)
-struct iw_statistics *
-dhd_get_wireless_stats(struct net_device *dev)
+#ifdef DISABLE_DTIM_IN_SUSPEND
+int net_os_set_disable_dtim_in_suspend(struct net_device *dev, int val)
{
- int res = 0;
dhd_info_t *dhd = DHD_DEV_INFO(dev);
- if (!dhd->pub.up) {
- return NULL;
+ if (dhd) {
+ DHD_ERROR(("%s: Disable bcn_li_dtim in suspend %s\n",
+ __FUNCTION__, (val ? "Enable" : "Disable")));
+ if (val) {
+ dhd->pub.disable_dtim_in_suspend = TRUE;
+ } else {
+ dhd->pub.disable_dtim_in_suspend = FALSE;
+ }
+ } else {
+ return -1;
}
- res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
-
- if (res == 0)
- return &dhd->iw.wstats;
- else
- return NULL;
+ return 0;
}
-#endif /* defined(WL_WIRELESS_EXT) */
+#endif /* DISABLE_DTIM_IN_SUSPEND */
-static int
-dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
- wl_event_msg_t *event, void **data)
+#ifdef PKT_FILTER_SUPPORT
+int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
{
- int bcmerror = 0;
-#ifdef WL_CFG80211
- unsigned long flags = 0;
-#endif /* WL_CFG80211 */
- ASSERT(dhd != NULL);
+ int ret = 0;
-#ifdef SHOW_LOGTRACE
- bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
- &dhd->event_data);
-#else
- bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
- NULL);
-#endif /* SHOW_LOGTRACE */
- if (unlikely(bcmerror != BCME_OK)) {
- return bcmerror;
- }
+#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
- if (ntoh32(event->event_type) == WLC_E_IF) {
- /* WLC_E_IF event types are consumed by wl_process_host_event.
- * For ifadd/del ops, the netdev ptr may not be valid at this
- * point. so return before invoking cfg80211/wext handlers.
- */
- return BCME_OK;
+ if (!dhd_master_mode)
+ add_remove = !add_remove;
+ DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
+ if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) {
+ return 0;
}
-#if defined(WL_EXT_IAPSTA)
- wl_ext_iapsta_event(dhd->iflist[ifidx]->net, event, *data);
-#endif /* defined(WL_EXT_IAPSTA) */
-#if defined(WL_WIRELESS_EXT)
- if (event->bsscfgidx == 0) {
- /*
- * Wireless ext is on primary interface only
- */
- ASSERT(dhd->iflist[ifidx] != NULL);
- ASSERT(dhd->iflist[ifidx]->net != NULL);
-
- if (dhd->iflist[ifidx]->net) {
- wl_iw_event(dhd->iflist[ifidx]->net, event, *data);
- }
+#ifdef BLOCK_IPV6_PACKET
+ /* customer want to use NO IPV6 packets only */
+ if (num == DHD_MULTICAST6_FILTER_NUM) {
+ return 0;
}
-#endif /* defined(WL_WIRELESS_EXT) */
+#endif /* BLOCK_IPV6_PACKET */
-#ifdef WL_CFG80211
- if (dhd->iflist[ifidx]->net) {
- spin_lock_irqsave(&dhd->pub.up_lock, flags);
- if (dhd->pub.up) {
- wl_cfg80211_event(dhd->iflist[ifidx]->net, event, *data);
- }
- spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
+ if (num >= dhd->pub.pktfilter_count) {
+ return -EINVAL;
}
-#endif /* defined(WL_CFG80211) */
- return (bcmerror);
+ ret = dhd_packet_filter_add_remove(&dhd->pub, add_remove, num);
+#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
+
+ return ret;
}
-/* send up locally generated event */
-void
-dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
+int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
+
{
- switch (ntoh32(event->event_type)) {
- /* Handle error case or further events here */
- default:
- break;
+ int ret = 0;
+
+	/* Packet filtering is set only if we are still in early-suspend and
+	 * we need either to turn it ON or to turn it OFF
+ * We can always turn it OFF in case of early-suspend, but we turn it
+ * back ON only if suspend_disable_flag was not set
+ */
+ if (dhdp && dhdp->up) {
+ if (dhdp->in_suspend) {
+ if (!val || (val && !dhdp->suspend_disable_flag))
+ dhd_enable_packet_filter(val, dhdp);
+ }
}
+ return ret;
}
-#ifdef LOG_INTO_TCPDUMP
-void
-dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
+/* function to enable/disable packet for Network device */
+int net_os_enable_packet_filter(struct net_device *dev, int val)
{
- struct sk_buff *p, *skb;
- uint32 pktlen;
- int len;
- dhd_if_t *ifp;
- dhd_info_t *dhd;
- uchar *skb_data;
- int ifidx = 0;
- struct ether_header eth;
-
- pktlen = sizeof(eth) + data_len;
- dhd = dhdp->info;
-
- if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
- ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
-
- bcopy(&dhdp->mac, ð.ether_dhost, ETHER_ADDR_LEN);
- bcopy(&dhdp->mac, ð.ether_shost, ETHER_ADDR_LEN);
- ETHER_TOGGLE_LOCALADDR(ð.ether_shost);
- eth.ether_type = hton16(ETHER_TYPE_BRCM);
-
- bcopy((void *)ð, PKTDATA(dhdp->osh, p), sizeof(eth));
- bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
- skb = PKTTONATIVE(dhdp->osh, p);
- skb_data = skb->data;
- len = skb->len;
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
- ifidx = dhd_ifname2idx(dhd, "wlan0");
- ifp = dhd->iflist[ifidx];
- if (ifp == NULL)
- ifp = dhd->iflist[0];
+ DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
+ return dhd_os_enable_packet_filter(&dhd->pub, val);
+}
+#endif /* PKT_FILTER_SUPPORT */
- ASSERT(ifp);
- skb->dev = ifp->net;
- skb->protocol = eth_type_trans(skb, skb->dev);
- skb->data = skb_data;
- skb->len = len;
+int
+dhd_dev_init_ioctl(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret;
- /* Strip header, count, deliver upward */
- skb_pull(skb, ETH_HLEN);
+ if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
+ goto done;
- bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
- __FUNCTION__, __LINE__);
- /* Send the packet */
- if (in_interrupt()) {
- netif_rx(skb);
- } else {
- netif_rx_ni(skb);
- }
- } else {
- /* Could not allocate a sk_buf */
- DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
- }
+done:
+ return ret;
}
-#endif /* LOG_INTO_TCPDUMP */
-void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
+int
+dhd_dev_get_feature_set(struct net_device *dev)
{
-#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
- struct dhd_info *dhdinfo = dhd->info;
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
- int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
-#else
- int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+ dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhd = (&ptr->pub);
+ int feature_set = 0;
- dhd_os_sdunlock(dhd);
- wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
- dhd_os_sdlock(dhd);
-#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
- return;
-} /* dhd_init_static_strs_array */
+ if (FW_SUPPORTED(dhd, sta))
+ feature_set |= WIFI_FEATURE_INFRA;
+ if (FW_SUPPORTED(dhd, dualband))
+ feature_set |= WIFI_FEATURE_INFRA_5G;
+ if (FW_SUPPORTED(dhd, p2p))
+ feature_set |= WIFI_FEATURE_P2P;
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
+ feature_set |= WIFI_FEATURE_SOFT_AP;
+ if (FW_SUPPORTED(dhd, tdls))
+ feature_set |= WIFI_FEATURE_TDLS;
+ if (FW_SUPPORTED(dhd, vsdb))
+ feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
+ if (FW_SUPPORTED(dhd, nan)) {
+ feature_set |= WIFI_FEATURE_NAN;
+		/* NAN is essential for d2d rtt */
+ if (FW_SUPPORTED(dhd, rttd2d))
+ feature_set |= WIFI_FEATURE_D2D_RTT;
+ }
+#ifdef RTT_SUPPORT
+ if (dhd->rtt_supported) {
+ feature_set |= WIFI_FEATURE_D2D_RTT;
+ feature_set |= WIFI_FEATURE_D2AP_RTT;
+ }
+#endif /* RTT_SUPPORT */
+#ifdef LINKSTAT_SUPPORT
+ feature_set |= WIFI_FEATURE_LINKSTAT;
+#endif /* LINKSTAT_SUPPORT */
-void dhd_wait_event_wakeup(dhd_pub_t *dhd)
-{
-#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
- struct dhd_info *dhdinfo = dhd->info;
- if (waitqueue_active(&dhdinfo->ctrl_wait))
- wake_up(&dhdinfo->ctrl_wait);
-#endif // endif
- return;
+#if defined(PNO_SUPPORT) && !defined(DISABLE_ANDROID_PNO)
+ if (dhd_is_pno_supported(dhd)) {
+ feature_set |= WIFI_FEATURE_PNO;
+#ifdef GSCAN_SUPPORT
+ /* terence 20171115: remove to get GTS PASS
+ * com.google.android.gts.wifi.WifiHostTest#testWifiScannerBatchTimestamp
+ */
+// feature_set |= WIFI_FEATURE_GSCAN;
+// feature_set |= WIFI_FEATURE_HAL_EPNO;
+#endif /* GSCAN_SUPPORT */
+ }
+#endif /* PNO_SUPPORT && !DISABLE_ANDROID_PNO */
+#ifdef RSSI_MONITOR_SUPPORT
+ if (FW_SUPPORTED(dhd, rssi_mon)) {
+ feature_set |= WIFI_FEATURE_RSSI_MONITOR;
+ }
+#endif /* RSSI_MONITOR_SUPPORT */
+#ifdef WL11U
+ feature_set |= WIFI_FEATURE_HOTSPOT;
+#endif /* WL11U */
+#ifdef NDO_CONFIG_SUPPORT
+ feature_set |= WIFI_FEATURE_CONFIG_NDO;
+#endif /* NDO_CONFIG_SUPPORT */
+#ifdef KEEP_ALIVE
+ feature_set |= WIFI_FEATURE_MKEEP_ALIVE;
+#endif /* KEEP_ALIVE */
+#ifdef FILTER_IE
+ if (FW_SUPPORTED(dhd, fie)) {
+ feature_set |= WIFI_FEATURE_FILTER_IE;
+ }
+#endif /* FILTER_IE */
+#ifdef ROAMEXP_SUPPORT
+ /* Check if the Android O roam feature is supported by FW */
+ if (!(BCME_UNSUPPORTED == dhd_dev_set_whitelist_ssid(dev, NULL, 0, true))) {
+ feature_set |= WIFI_FEATURE_CONTROL_ROAMING;
+ }
+#endif /* ROAMEXP_SUPPORT */
+ return feature_set;
}
-#if defined(BCMSDIO) || defined(BCMPCIE) || defined(BCMDBUS)
int
-dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
+dhd_dev_get_feature_set_matrix(struct net_device *dev, int num)
{
- int ret;
-
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
-
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0)
- return BCME_ERROR;
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
- if (flag == TRUE) {
- /* Issue wl down command before resetting the chip */
- if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
- DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
- }
-#ifdef PROP_TXSTATUS
- if (dhd->pub.wlfc_enabled) {
- dhd_wlfc_deinit(&dhd->pub);
- }
-#endif /* PROP_TXSTATUS */
-#ifdef PNO_SUPPORT
- if (dhd->pub.pno_state) {
- dhd_pno_deinit(&dhd->pub);
- }
-#endif // endif
-#ifdef RTT_SUPPORT
- if (dhd->pub.rtt_state) {
- dhd_rtt_deinit(&dhd->pub);
- }
-#endif /* RTT_SUPPORT */
+ int feature_set_full;
+ int ret = 0;
-#if defined(DBG_PKT_MON) && !defined(DBG_PKT_MON_INIT_DEFAULT)
- dhd_os_dbg_detach_pkt_monitor(&dhd->pub);
-#endif /* DBG_PKT_MON */
- }
+ feature_set_full = dhd_dev_get_feature_set(dev);
-#ifdef BCMSDIO
- if (!flag) {
- dhd_update_fw_nv_path(dhd);
- /* update firmware and nvram path to sdio bus */
- dhd_bus_update_fw_nv_path(dhd->pub.bus,
- dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
- }
-#endif /* BCMSDIO */
+ /* Common feature set for all interface */
+ ret = (feature_set_full & WIFI_FEATURE_INFRA) |
+ (feature_set_full & WIFI_FEATURE_INFRA_5G) |
+ (feature_set_full & WIFI_FEATURE_D2D_RTT) |
+ (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
+ (feature_set_full & WIFI_FEATURE_RSSI_MONITOR) |
+ (feature_set_full & WIFI_FEATURE_EPR);
- ret = dhd_bus_devreset(&dhd->pub, flag);
+ /* Specific feature group for each interface */
+ switch (num) {
+ case 0:
+ ret |= (feature_set_full & WIFI_FEATURE_P2P) |
+ /* Not supported yet */
+ /* (feature_set_full & WIFI_FEATURE_NAN) | */
+ (feature_set_full & WIFI_FEATURE_TDLS) |
+ (feature_set_full & WIFI_FEATURE_PNO) |
+ (feature_set_full & WIFI_FEATURE_HAL_EPNO) |
+ (feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
+ (feature_set_full & WIFI_FEATURE_GSCAN) |
+ (feature_set_full & WIFI_FEATURE_HOTSPOT) |
+ (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA);
+ break;
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
- pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+ case 1:
+ ret |= (feature_set_full & WIFI_FEATURE_P2P);
+ /* Not yet verified NAN with P2P */
+ /* (feature_set_full & WIFI_FEATURE_NAN) | */
+ break;
- if (flag) {
- /* Clear some flags for recovery logic */
- dhd->pub.dongle_trap_occured = 0;
- dhd->pub.iovar_timeout_occured = 0;
-#ifdef PCIE_FULL_DONGLE
- dhd->pub.d3ack_timeout_occured = 0;
- dhd->pub.livelock_occured = 0;
-#endif /* PCIE_FULL_DONGLE */
-#ifdef DHD_MAP_LOGGING
- dhd->pub.smmu_fault_occurred = 0;
-#endif /* DHD_MAP_LOGGING */
- }
+ case 2:
+ ret |= (feature_set_full & WIFI_FEATURE_NAN) |
+ (feature_set_full & WIFI_FEATURE_TDLS) |
+ (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL);
+ break;
- if (ret) {
- DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
+ default:
+ ret = WIFI_FEATURE_INVALID;
+ DHD_ERROR(("%s: Out of index(%d) for get feature set\n", __FUNCTION__, num));
+ break;
}
return ret;
}
-#ifdef BCMSDIO
+#ifdef CUSTOM_FORCE_NODFS_FLAG
int
-dhd_net_bus_suspend(struct net_device *dev)
+dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
{
dhd_info_t *dhd = DHD_DEV_INFO(dev);
- return dhd_bus_suspend(&dhd->pub);
+
+ if (nodfs)
+ dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
+ else
+ dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
+ dhd->pub.force_country_change = TRUE;
+ return 0;
}
+#endif /* CUSTOM_FORCE_NODFS_FLAG */
+#ifdef NDO_CONFIG_SUPPORT
int
-dhd_net_bus_resume(struct net_device *dev, uint8 stage)
+dhd_dev_ndo_cfg(struct net_device *dev, u8 enable)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- return dhd_bus_resume(&dhd->pub, stage);
-}
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ret = 0;
-#endif /* BCMSDIO */
-#endif /* BCMSDIO || BCMPCIE || BCMDBUS */
+ if (enable) {
+ /* enable ND offload feature (will be enabled in FW on suspend) */
+ dhdp->ndo_enable = TRUE;
-int net_os_set_suspend_disable(struct net_device *dev, int val)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- int ret = 0;
+ /* Update changes of anycast address & DAD failed address */
+ ret = dhd_dev_ndo_update_inet6addr(dev);
+ if ((ret < 0) && (ret != BCME_NORESOURCE)) {
+ DHD_ERROR(("%s: failed to update host ip addr: %d\n", __FUNCTION__, ret));
+ return ret;
+ }
+ } else {
+ /* disable ND offload feature */
+ dhdp->ndo_enable = FALSE;
- if (dhd) {
- ret = dhd->pub.suspend_disable_flag;
- dhd->pub.suspend_disable_flag = val;
+ /* disable ND offload in FW */
+ ret = dhd_ndo_enable(dhdp, FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: failed to disable NDO: %d\n", __FUNCTION__, ret));
+ }
}
return ret;
}
-int net_os_set_suspend(struct net_device *dev, int val, int force)
+/* #pragma used as a WAR to fix build failure,
+* ignore dropping of 'const' qualifier in 'list_entry' macro
+* this pragma disables the warning only for the following function
+*/
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+
+static int
+dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev *inet6)
{
- int ret = 0;
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ struct inet6_ifaddr *ifa;
+ struct ifacaddr6 *acaddr = NULL;
+ int addr_count = 0;
- if (dhd) {
-#ifdef CONFIG_MACH_UNIVERSAL7420
-#endif /* CONFIG_MACH_UNIVERSAL7420 */
-#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
- ret = dhd_set_suspend(val, &dhd->pub);
-#else
- ret = dhd_suspend_resume_helper(dhd, val, force);
-#endif // endif
-#ifdef WL_CFG80211
- wl_cfg80211_update_power_mode(dev);
-#endif // endif
+ /* lock */
+ read_lock_bh(&inet6->lock);
+
+ /* Count valid unicast address */
+ list_for_each_entry(ifa, &inet6->addr_list, if_list) {
+ if ((ifa->flags & IFA_F_DADFAILED) == 0) {
+ addr_count++;
+ }
}
- return ret;
+
+ /* Count anycast address */
+ acaddr = inet6->ac_list;
+ while (acaddr) {
+ addr_count++;
+ acaddr = acaddr->aca_next;
+ }
+
+ /* unlock */
+ read_unlock_bh(&inet6->lock);
+
+ return addr_count;
}
-int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
+int
+dhd_dev_ndo_update_inet6addr(struct net_device *dev)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ dhd_info_t *dhd;
+ dhd_pub_t *dhdp;
+ struct inet6_dev *inet6;
+ struct inet6_ifaddr *ifa;
+ struct ifacaddr6 *acaddr = NULL;
+ struct in6_addr *ipv6_addr = NULL;
+ int cnt, i;
+ int ret = BCME_OK;
- if (dhd)
- dhd->pub.suspend_bcn_li_dtim = val;
+ /*
+	 * this function evaluates host ip addresses in struct inet6_dev
+ * unicast addr in inet6_dev->addr_list
+ * anycast addr in inet6_dev->ac_list
+ * while evaluating inet6_dev, read_lock_bh() is required to prevent
+ * access on null(freed) pointer.
+ */
- return 0;
-}
+ if (dev) {
+ inet6 = dev->ip6_ptr;
+ if (!inet6) {
+ DHD_ERROR(("%s: Invalid inet6_dev\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ dhd = DHD_DEV_INFO(dev);
+ if (!dhd) {
+ DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ dhdp = &dhd->pub;
+
+ if (dhd_net2idx(dhd, dev) != 0) {
+ DHD_ERROR(("%s: Not primary interface\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ } else {
+ DHD_ERROR(("%s: Invalid net_device\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ /* Check host IP overflow */
+ cnt = dhd_dev_ndo_get_valid_inet6addr_count(inet6);
+ if (cnt > dhdp->ndo_max_host_ip) {
+ if (!dhdp->ndo_host_ip_overflow) {
+ dhdp->ndo_host_ip_overflow = TRUE;
+ /* Disable ND offload in FW */
+ DHD_INFO(("%s: Host IP overflow, disable NDO\n", __FUNCTION__));
+ ret = dhd_ndo_enable(dhdp, FALSE);
+ }
+
+ return ret;
+ }
-int net_os_set_max_dtim_enable(struct net_device *dev, int val)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ /*
+ * Allocate ipv6 addr buffer to store addresses to be added/removed.
+ * driver need to lock inet6_dev while accessing structure. but, driver
+ * cannot use ioctl while inet6_dev locked since it requires scheduling
+ * hence, copy addresses to the buffer and do ioctl after unlock.
+ */
+ ipv6_addr = (struct in6_addr *)MALLOC(dhdp->osh,
+ sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
+ if (!ipv6_addr) {
+ DHD_ERROR(("%s: failed to alloc ipv6 addr buffer\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
- if (dhd) {
- DHD_ERROR(("%s: use MAX bcn_li_dtim in suspend %s\n",
- __FUNCTION__, (val ? "Enable" : "Disable")));
- if (val) {
- dhd->pub.max_dtim_enable = TRUE;
- } else {
- dhd->pub.max_dtim_enable = FALSE;
+ /* Find DAD failed unicast address to be removed */
+ cnt = 0;
+ read_lock_bh(&inet6->lock);
+ list_for_each_entry(ifa, &inet6->addr_list, if_list) {
+ /* DAD failed unicast address */
+ if ((ifa->flags & IFA_F_DADFAILED) &&
+ (cnt < dhdp->ndo_max_host_ip)) {
+ memcpy(&ipv6_addr[cnt], &ifa->addr, sizeof(struct in6_addr));
+ cnt++;
}
- } else {
- return -1;
}
+ read_unlock_bh(&inet6->lock);
- return 0;
-}
-
-#if defined(PCIE_FULL_DONGLE)
-void
-dhd_pcie_backplane_access_lock(dhd_pub_t * pub)
-{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- spin_lock_bh(&dhd->backplane_access_lock);
-}
+ /* Remove DAD failed unicast address */
+ for (i = 0; i < cnt; i++) {
+ DHD_INFO(("%s: Remove DAD failed addr\n", __FUNCTION__));
+ ret = dhd_ndo_remove_ip_by_addr(dhdp, (char *)&ipv6_addr[i], 0);
+ if (ret < 0) {
+ goto done;
+ }
+ }
-void
-dhd_pcie_backplane_access_unlock(dhd_pub_t * pub)
-{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- spin_unlock_bh(&dhd->backplane_access_lock);
-}
-#endif /* defined(PCIE_FULL_DONGLE) */
+ /* Remove all anycast address */
+ ret = dhd_ndo_remove_ip_by_type(dhdp, WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
+ if (ret < 0) {
+ goto done;
+ }
-#ifdef PKT_FILTER_SUPPORT
-int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
-{
- int ret = 0;
+ /*
+ * if ND offload was disabled due to host ip overflow,
+ * attempt to add valid unicast address.
+ */
+ if (dhdp->ndo_host_ip_overflow) {
+ /* Find valid unicast address */
+ cnt = 0;
+ read_lock_bh(&inet6->lock);
+ list_for_each_entry(ifa, &inet6->addr_list, if_list) {
+ /* valid unicast address */
+ if (!(ifa->flags & IFA_F_DADFAILED) &&
+ (cnt < dhdp->ndo_max_host_ip)) {
+ memcpy(&ipv6_addr[cnt], &ifa->addr,
+ sizeof(struct in6_addr));
+ cnt++;
+ }
+ }
+ read_unlock_bh(&inet6->lock);
-#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ /* Add valid unicast address */
+ for (i = 0; i < cnt; i++) {
+ ret = dhd_ndo_add_ip_with_type(dhdp,
+ (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_UNICAST, 0);
+ if (ret < 0) {
+ goto done;
+ }
+ }
+ }
- if (!dhd_master_mode)
- add_remove = !add_remove;
- DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
- if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) {
- return 0;
+ /* Find anycast address */
+ cnt = 0;
+ read_lock_bh(&inet6->lock);
+ acaddr = inet6->ac_list;
+ while (acaddr) {
+ if (cnt < dhdp->ndo_max_host_ip) {
+ memcpy(&ipv6_addr[cnt], &acaddr->aca_addr, sizeof(struct in6_addr));
+ cnt++;
+ }
+ acaddr = acaddr->aca_next;
}
+ read_unlock_bh(&inet6->lock);
-#ifdef BLOCK_IPV6_PACKET
- /* customer want to use NO IPV6 packets only */
- if (num == DHD_MULTICAST6_FILTER_NUM) {
- return 0;
+ /* Add anycast address */
+ for (i = 0; i < cnt; i++) {
+ ret = dhd_ndo_add_ip_with_type(dhdp,
+ (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
+ if (ret < 0) {
+ goto done;
+ }
}
-#endif /* BLOCK_IPV6_PACKET */
- if (num >= dhd->pub.pktfilter_count) {
- return -EINVAL;
+	/* Now all host IP addresses were added successfully */
+ if (dhdp->ndo_host_ip_overflow) {
+ dhdp->ndo_host_ip_overflow = FALSE;
+ if (dhdp->in_suspend) {
+			/* driver is in (early) suspend state, need to enable ND offload in FW */
+ DHD_INFO(("%s: enable NDO\n", __FUNCTION__));
+ ret = dhd_ndo_enable(dhdp, TRUE);
+ }
}
- ret = dhd_packet_filter_add_remove(&dhd->pub, add_remove, num);
-#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
+done:
+ if (ipv6_addr) {
+ MFREE(dhdp->osh, ipv6_addr, sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
+ }
return ret;
}
+#pragma GCC diagnostic pop
-int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
+#endif /* NDO_CONFIG_SUPPORT */
+#ifdef PNO_SUPPORT
+/* Linux wrapper to call common dhd_pno_stop_for_ssid */
+int
+dhd_dev_pno_stop_for_ssid(struct net_device *dev)
{
- int ret = 0;
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
- /* Packet filtering is set only if we still in early-suspend and
- * we need either to turn it ON or turn it OFF
- * We can always turn it OFF in case of early-suspend, but we turn it
- * back ON only if suspend_disable_flag was not set
- */
- if (dhdp && dhdp->up) {
- if (dhdp->in_suspend) {
- if (!val || (val && !dhdp->suspend_disable_flag))
- dhd_enable_packet_filter(val, dhdp);
- }
- }
- return ret;
+ return (dhd_pno_stop_for_ssid(&dhd->pub));
}
-/* function to enable/disable packet for Network device */
-int net_os_enable_packet_filter(struct net_device *dev, int val)
+/* Linux wrapper to call common dhd_pno_set_for_ssid */
+int
+dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
+ uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
{
dhd_info_t *dhd = DHD_DEV_INFO(dev);
- DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
- return dhd_os_enable_packet_filter(&dhd->pub, val);
+ return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
+ pno_repeat, pno_freq_expo_max, channel_list, nchan));
}
-#endif /* PKT_FILTER_SUPPORT */
+/* Linux wrapper to call common dhd_pno_enable */
int
-dhd_dev_init_ioctl(struct net_device *dev)
+dhd_dev_pno_enable(struct net_device *dev, int enable)
{
dhd_info_t *dhd = DHD_DEV_INFO(dev);
- int ret;
-
- if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
- goto done;
-done:
- return ret;
+ return (dhd_pno_enable(&dhd->pub, enable));
}
+/* Linux wrapper to call common dhd_pno_set_for_hotlist */
int
-dhd_dev_get_feature_set(struct net_device *dev)
+dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
+ struct dhd_pno_hotlist_params *hotlist_params)
{
- dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
- dhd_pub_t *dhd = (&ptr->pub);
- int feature_set = 0;
-
- if (FW_SUPPORTED(dhd, sta))
- feature_set |= WIFI_FEATURE_INFRA;
- if (FW_SUPPORTED(dhd, dualband))
- feature_set |= WIFI_FEATURE_INFRA_5G;
- if (FW_SUPPORTED(dhd, p2p))
- feature_set |= WIFI_FEATURE_P2P;
- if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
- feature_set |= WIFI_FEATURE_SOFT_AP;
- if (FW_SUPPORTED(dhd, tdls))
- feature_set |= WIFI_FEATURE_TDLS;
- if (FW_SUPPORTED(dhd, vsdb))
- feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
- if (FW_SUPPORTED(dhd, nan)) {
- feature_set |= WIFI_FEATURE_NAN;
- /* NAN is essentail for d2d rtt */
- if (FW_SUPPORTED(dhd, rttd2d))
- feature_set |= WIFI_FEATURE_D2D_RTT;
- }
-#ifdef RTT_SUPPORT
- if (dhd->rtt_supported) {
- feature_set |= WIFI_FEATURE_D2D_RTT;
- feature_set |= WIFI_FEATURE_D2AP_RTT;
- }
-#endif /* RTT_SUPPORT */
-#ifdef LINKSTAT_SUPPORT
- feature_set |= WIFI_FEATURE_LINKSTAT;
-#endif /* LINKSTAT_SUPPORT */
-
-#if defined(PNO_SUPPORT) && !defined(DISABLE_ANDROID_PNO)
- if (dhd_is_pno_supported(dhd)) {
- feature_set |= WIFI_FEATURE_PNO;
-#ifdef GSCAN_SUPPORT
- /* terence 20171115: remove to get GTS PASS
- * com.google.android.gts.wifi.WifiHostTest#testWifiScannerBatchTimestamp
- */
-// feature_set |= WIFI_FEATURE_GSCAN;
-// feature_set |= WIFI_FEATURE_HAL_EPNO;
-#endif /* GSCAN_SUPPORT */
- }
-#endif /* PNO_SUPPORT && !DISABLE_ANDROID_PNO */
-#ifdef RSSI_MONITOR_SUPPORT
- if (FW_SUPPORTED(dhd, rssi_mon)) {
- feature_set |= WIFI_FEATURE_RSSI_MONITOR;
- }
-#endif /* RSSI_MONITOR_SUPPORT */
-#ifdef WL11U
- feature_set |= WIFI_FEATURE_HOTSPOT;
-#endif /* WL11U */
-#ifdef NDO_CONFIG_SUPPORT
- feature_set |= WIFI_FEATURE_CONFIG_NDO;
-#endif /* NDO_CONFIG_SUPPORT */
-#ifdef KEEP_ALIVE
- feature_set |= WIFI_FEATURE_MKEEP_ALIVE;
-#endif /* KEEP_ALIVE */
-#ifdef FILTER_IE
- if (FW_SUPPORTED(dhd, fie)) {
- feature_set |= WIFI_FEATURE_FILTER_IE;
- }
-#endif /* FILTER_IE */
-#ifdef ROAMEXP_SUPPORT
- /* Check if the Android O roam feature is supported by FW */
- if (!(BCME_UNSUPPORTED == dhd_dev_set_whitelist_ssid(dev, NULL, 0, true))) {
- feature_set |= WIFI_FEATURE_CONTROL_ROAMING;
- }
-#endif /* ROAMEXP_SUPPORT */
- return feature_set;
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
+}
+/* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
+int
+dhd_dev_pno_stop_for_batch(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return (dhd_pno_stop_for_batch(&dhd->pub));
}
+/* Linux wrapper to call common dhd_dev_pno_set_for_batch */
int
-dhd_dev_get_feature_set_matrix(struct net_device *dev, int num)
+dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
{
- int feature_set_full;
- int ret = 0;
-
- feature_set_full = dhd_dev_get_feature_set(dev);
-
- /* Common feature set for all interface */
- ret = (feature_set_full & WIFI_FEATURE_INFRA) |
- (feature_set_full & WIFI_FEATURE_INFRA_5G) |
- (feature_set_full & WIFI_FEATURE_D2D_RTT) |
- (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
- (feature_set_full & WIFI_FEATURE_RSSI_MONITOR) |
- (feature_set_full & WIFI_FEATURE_EPR);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
+}
- /* Specific feature group for each interface */
- switch (num) {
- case 0:
- ret |= (feature_set_full & WIFI_FEATURE_P2P) |
- /* Not supported yet */
- /* (feature_set_full & WIFI_FEATURE_NAN) | */
- (feature_set_full & WIFI_FEATURE_TDLS) |
- (feature_set_full & WIFI_FEATURE_PNO) |
- (feature_set_full & WIFI_FEATURE_HAL_EPNO) |
- (feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
- (feature_set_full & WIFI_FEATURE_GSCAN) |
- (feature_set_full & WIFI_FEATURE_HOTSPOT) |
- (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA);
- break;
+/* Linux wrapper to call common dhd_dev_pno_get_for_batch */
+int
+dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
+}
+#endif /* PNO_SUPPORT */
- case 1:
- ret |= (feature_set_full & WIFI_FEATURE_P2P);
- /* Not yet verified NAN with P2P */
- /* (feature_set_full & WIFI_FEATURE_NAN) | */
- break;
+#if defined(PNO_SUPPORT)
+#ifdef GSCAN_SUPPORT
+bool
+dhd_dev_is_legacy_pno_enabled(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- case 2:
- ret |= (feature_set_full & WIFI_FEATURE_NAN) |
- (feature_set_full & WIFI_FEATURE_TDLS) |
- (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL);
- break;
+ return (dhd_is_legacy_pno_enabled(&dhd->pub));
+}
- default:
- ret = WIFI_FEATURE_INVALID;
- DHD_ERROR(("%s: Out of index(%d) for get feature set\n", __FUNCTION__, num));
- break;
+int
+dhd_dev_set_epno(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ if (!dhd) {
+ return BCME_ERROR;
}
-
- return ret;
+ return dhd_pno_set_epno(&dhd->pub);
}
-
-#ifdef CUSTOM_FORCE_NODFS_FLAG
int
-dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
+dhd_dev_flush_fw_epno(struct net_device *dev)
{
dhd_info_t *dhd = DHD_DEV_INFO(dev);
-
- if (nodfs)
- dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
- else
- dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
- dhd->pub.force_country_change = TRUE;
- return 0;
+ if (!dhd) {
+ return BCME_ERROR;
+ }
+ return dhd_pno_flush_fw_epno(&dhd->pub);
}
-#endif /* CUSTOM_FORCE_NODFS_FLAG */
-#ifdef NDO_CONFIG_SUPPORT
+/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
int
-dhd_dev_ndo_cfg(struct net_device *dev, u8 enable)
+dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
+ void *buf, bool flush)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- dhd_pub_t *dhdp = &dhd->pub;
- int ret = 0;
- if (enable) {
- /* enable ND offload feature (will be enabled in FW on suspend) */
- dhdp->ndo_enable = TRUE;
+ return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
+}
- /* Update changes of anycast address & DAD failed address */
- ret = dhd_dev_ndo_update_inet6addr(dev);
- if ((ret < 0) && (ret != BCME_NORESOURCE)) {
- DHD_ERROR(("%s: failed to update host ip addr: %d\n", __FUNCTION__, ret));
- return ret;
- }
- } else {
- /* disable ND offload feature */
- dhdp->ndo_enable = FALSE;
+/* Linux wrapper to call common dhd_wait_batch_results_complete */
+int
+dhd_dev_wait_batch_results_complete(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- /* disable ND offload in FW */
- ret = dhd_ndo_enable(dhdp, FALSE);
- if (ret < 0) {
- DHD_ERROR(("%s: failed to disable NDO: %d\n", __FUNCTION__, ret));
- }
- }
- return ret;
+ return (dhd_wait_batch_results_complete(&dhd->pub));
}
-/* #pragma used as a WAR to fix build failure,
-* ignore dropping of 'const' qualifier in 'list_entry' macro
-* this pragma disables the warning only for the following function
-*/
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
+/* Linux wrapper to call common dhd_pno_lock_batch_results */
+int
+dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
-static int
-dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev *inet6)
+ return (dhd_pno_lock_batch_results(&dhd->pub));
+}
+/* Linux wrapper to call common dhd_pno_unlock_batch_results */
+void
+dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
{
- struct inet6_ifaddr *ifa;
- struct ifacaddr6 *acaddr = NULL;
- int addr_count = 0;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- /* lock */
- read_lock_bh(&inet6->lock);
+ return (dhd_pno_unlock_batch_results(&dhd->pub));
+}
- /* Count valid unicast address */
- list_for_each_entry(ifa, &inet6->addr_list, if_list) {
- if ((ifa->flags & IFA_F_DADFAILED) == 0) {
- addr_count++;
- }
- }
+/* Linux wrapper to call common dhd_pno_initiate_gscan_request */
+int
+dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- /* Count anycast address */
- acaddr = inet6->ac_list;
- while (acaddr) {
- addr_count++;
- acaddr = acaddr->aca_next;
- }
+ return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
+}
- /* unlock */
- read_unlock_bh(&inet6->lock);
+/* Linux wrapper to call common dhd_pno_enable_full_scan_result */
+int
+dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- return addr_count;
+ return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
}
-int
-dhd_dev_ndo_update_inet6addr(struct net_device *dev)
+/* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
+void *
+dhd_dev_hotlist_scan_event(struct net_device *dev,
+ const void *data, int *send_evt_bytes, hotlist_type_t type, u32 *buf_len)
{
- dhd_info_t *dhd;
- dhd_pub_t *dhdp;
- struct inet6_dev *inet6;
- struct inet6_ifaddr *ifa;
- struct ifacaddr6 *acaddr = NULL;
- struct in6_addr *ipv6_addr = NULL;
- int cnt, i;
- int ret = BCME_OK;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- /*
- * this function evaulates host ip address in struct inet6_dev
- * unicast addr in inet6_dev->addr_list
- * anycast addr in inet6_dev->ac_list
- * while evaluating inet6_dev, read_lock_bh() is required to prevent
- * access on null(freed) pointer.
- */
+ return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type, buf_len));
+}
- if (dev) {
- inet6 = dev->ip6_ptr;
- if (!inet6) {
- DHD_ERROR(("%s: Invalid inet6_dev\n", __FUNCTION__));
- return BCME_ERROR;
- }
+/* Linux wrapper to call common dhd_process_full_gscan_result */
+void *
+dhd_dev_process_full_gscan_result(struct net_device *dev,
+const void *data, uint32 len, int *send_evt_bytes)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- dhd = DHD_DEV_INFO(dev);
- if (!dhd) {
- DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__));
- return BCME_ERROR;
- }
- dhdp = &dhd->pub;
+ return (dhd_process_full_gscan_result(&dhd->pub, data, len, send_evt_bytes));
+}
- if (dhd_net2idx(dhd, dev) != 0) {
- DHD_ERROR(("%s: Not primary interface\n", __FUNCTION__));
- return BCME_ERROR;
- }
- } else {
- DHD_ERROR(("%s: Invalid net_device\n", __FUNCTION__));
- return BCME_ERROR;
- }
+void
+dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- /* Check host IP overflow */
- cnt = dhd_dev_ndo_get_valid_inet6addr_count(inet6);
- if (cnt > dhdp->ndo_max_host_ip) {
- if (!dhdp->ndo_host_ip_overflow) {
- dhdp->ndo_host_ip_overflow = TRUE;
- /* Disable ND offload in FW */
- DHD_INFO(("%s: Host IP overflow, disable NDO\n", __FUNCTION__));
- ret = dhd_ndo_enable(dhdp, FALSE);
- }
+ dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);
- return ret;
- }
+ return;
+}
+
+int
+dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- /*
- * Allocate ipv6 addr buffer to store addresses to be added/removed.
- * driver need to lock inet6_dev while accessing structure. but, driver
- * cannot use ioctl while inet6_dev locked since it requires scheduling
- * hence, copy addresses to the buffer and do ioctl after unlock.
- */
- ipv6_addr = (struct in6_addr *)MALLOC(dhdp->osh,
- sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
- if (!ipv6_addr) {
- DHD_ERROR(("%s: failed to alloc ipv6 addr buffer\n", __FUNCTION__));
- return BCME_NOMEM;
- }
+ return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
+}
- /* Find DAD failed unicast address to be removed */
- cnt = 0;
- read_lock_bh(&inet6->lock);
- list_for_each_entry(ifa, &inet6->addr_list, if_list) {
- /* DAD failed unicast address */
- if ((ifa->flags & IFA_F_DADFAILED) &&
- (cnt < dhdp->ndo_max_host_ip)) {
- memcpy(&ipv6_addr[cnt], &ifa->addr, sizeof(struct in6_addr));
- cnt++;
- }
- }
- read_unlock_bh(&inet6->lock);
+/* Linux wrapper to call common dhd_retreive_batch_scan_results */
+int
+dhd_dev_retrieve_batch_scan(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- /* Remove DAD failed unicast address */
- for (i = 0; i < cnt; i++) {
- DHD_INFO(("%s: Remove DAD failed addr\n", __FUNCTION__));
- ret = dhd_ndo_remove_ip_by_addr(dhdp, (char *)&ipv6_addr[i], 0);
- if (ret < 0) {
- goto done;
- }
- }
+ return (dhd_retreive_batch_scan_results(&dhd->pub));
+}
- /* Remove all anycast address */
- ret = dhd_ndo_remove_ip_by_type(dhdp, WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
- if (ret < 0) {
- goto done;
- }
+/* Linux wrapper to call common dhd_pno_process_epno_result */
+void * dhd_dev_process_epno_result(struct net_device *dev,
+ const void *data, uint32 event, int *send_evt_bytes)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- /*
- * if ND offload was disabled due to host ip overflow,
- * attempt to add valid unicast address.
- */
- if (dhdp->ndo_host_ip_overflow) {
- /* Find valid unicast address */
- cnt = 0;
- read_lock_bh(&inet6->lock);
- list_for_each_entry(ifa, &inet6->addr_list, if_list) {
- /* valid unicast address */
- if (!(ifa->flags & IFA_F_DADFAILED) &&
- (cnt < dhdp->ndo_max_host_ip)) {
- memcpy(&ipv6_addr[cnt], &ifa->addr,
- sizeof(struct in6_addr));
- cnt++;
- }
- }
- read_unlock_bh(&inet6->lock);
+ return (dhd_pno_process_epno_result(&dhd->pub, data, event, send_evt_bytes));
+}
- /* Add valid unicast address */
- for (i = 0; i < cnt; i++) {
- ret = dhd_ndo_add_ip_with_type(dhdp,
- (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_UNICAST, 0);
- if (ret < 0) {
- goto done;
- }
- }
- }
+int
+dhd_dev_set_lazy_roam_cfg(struct net_device *dev,
+ wlc_roam_exp_params_t *roam_param)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ wl_roam_exp_cfg_t roam_exp_cfg;
+ int err;
- /* Find anycast address */
- cnt = 0;
- read_lock_bh(&inet6->lock);
- acaddr = inet6->ac_list;
- while (acaddr) {
- if (cnt < dhdp->ndo_max_host_ip) {
- memcpy(&ipv6_addr[cnt], &acaddr->aca_addr, sizeof(struct in6_addr));
- cnt++;
- }
- acaddr = acaddr->aca_next;
+ if (!roam_param) {
+ return BCME_BADARG;
}
- read_unlock_bh(&inet6->lock);
- /* Add anycast address */
- for (i = 0; i < cnt; i++) {
- ret = dhd_ndo_add_ip_with_type(dhdp,
- (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
- if (ret < 0) {
- goto done;
- }
- }
+ DHD_INFO(("a_band_boost_thr %d a_band_penalty_thr %d\n",
+ roam_param->a_band_boost_threshold, roam_param->a_band_penalty_threshold));
+ DHD_INFO(("a_band_boost_factor %d a_band_penalty_factor %d cur_bssid_boost %d\n",
+ roam_param->a_band_boost_factor, roam_param->a_band_penalty_factor,
+ roam_param->cur_bssid_boost));
+ DHD_INFO(("alert_roam_trigger_thr %d a_band_max_boost %d\n",
+ roam_param->alert_roam_trigger_threshold, roam_param->a_band_max_boost));
- /* Now All host IP addr were added successfully */
- if (dhdp->ndo_host_ip_overflow) {
- dhdp->ndo_host_ip_overflow = FALSE;
- if (dhdp->in_suspend) {
- /* drvier is in (early) suspend state, need to enable ND offload in FW */
- DHD_INFO(("%s: enable NDO\n", __FUNCTION__));
- ret = dhd_ndo_enable(dhdp, TRUE);
- }
+ memcpy(&roam_exp_cfg.params, roam_param, sizeof(*roam_param));
+ roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
+ roam_exp_cfg.flags = ROAM_EXP_CFG_PRESENT;
+ if (dhd->pub.lazy_roam_enable) {
+ roam_exp_cfg.flags |= ROAM_EXP_ENABLE_FLAG;
}
-
-done:
- if (ipv6_addr) {
- MFREE(dhdp->osh, ipv6_addr, sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
+ err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
+ (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
+ TRUE);
+ if (err < 0) {
+ DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
}
-
- return ret;
+ return err;
}
-#pragma GCC diagnostic pop
-
-#endif /* NDO_CONFIG_SUPPORT */
-#ifdef PNO_SUPPORT
-/* Linux wrapper to call common dhd_pno_stop_for_ssid */
int
-dhd_dev_pno_stop_for_ssid(struct net_device *dev)
+dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int err;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ wl_roam_exp_cfg_t roam_exp_cfg;
- return (dhd_pno_stop_for_ssid(&dhd->pub));
+ memset(&roam_exp_cfg, 0, sizeof(roam_exp_cfg));
+ roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
+ if (enable) {
+ roam_exp_cfg.flags = ROAM_EXP_ENABLE_FLAG;
+ }
+
+ err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
+ (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
+ TRUE);
+ if (err < 0) {
+ DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
+ } else {
+ dhd->pub.lazy_roam_enable = (enable != 0);
+ }
+ return err;
}
-/* Linux wrapper to call common dhd_pno_set_for_ssid */
int
-dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
- uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
+dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev,
+ wl_bssid_pref_cfg_t *bssid_pref, uint32 flush)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int err;
+ uint len;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
- pno_repeat, pno_freq_expo_max, channel_list, nchan));
+ bssid_pref->version = BSSID_PREF_LIST_VERSION;
+	/* By default, programming the bssid pref flushes out old values */
+ bssid_pref->flags = (flush && !bssid_pref->count) ? ROAM_EXP_CLEAR_BSSID_PREF: 0;
+ len = sizeof(wl_bssid_pref_cfg_t);
+ if (bssid_pref->count) {
+ len += (bssid_pref->count - 1) * sizeof(wl_bssid_pref_list_t);
+ }
+ err = dhd_iovar(&dhd->pub, 0, "roam_exp_bssid_pref",
+ (char *)bssid_pref, len, NULL, 0, TRUE);
+ if (err != BCME_OK) {
+ DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err));
+ }
+ return err;
}
+#endif /* GSCAN_SUPPORT */
-/* Linux wrapper to call common dhd_pno_enable */
+#if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT)
int
-dhd_dev_pno_enable(struct net_device *dev, int enable)
+dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist,
+ uint32 len, uint32 flush)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int err;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int macmode;
- return (dhd_pno_enable(&dhd->pub, enable));
+ if (blacklist) {
+ err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACLIST, (char *)blacklist,
+ len, TRUE, 0);
+ if (err != BCME_OK) {
+ DHD_ERROR(("%s : WLC_SET_MACLIST failed %d\n", __FUNCTION__, err));
+ return err;
+ }
+ }
+	/* By default, programming the blacklist flushes out old values */
+ macmode = (flush && !blacklist) ? WLC_MACMODE_DISABLED : WLC_MACMODE_DENY;
+ err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACMODE, (char *)&macmode,
+ sizeof(macmode), TRUE, 0);
+ if (err != BCME_OK) {
+ DHD_ERROR(("%s : WLC_SET_MACMODE failed %d\n", __FUNCTION__, err));
+ }
+ return err;
}
-/* Linux wrapper to call common dhd_pno_set_for_hotlist */
int
-dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
- struct dhd_pno_hotlist_params *hotlist_params)
+dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *ssid_whitelist,
+ uint32 len, uint32 flush)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
+ int err;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ wl_ssid_whitelist_t whitelist_ssid_flush;
+
+ if (!ssid_whitelist) {
+ if (flush) {
+ ssid_whitelist = &whitelist_ssid_flush;
+ ssid_whitelist->ssid_count = 0;
+ } else {
+ DHD_ERROR(("%s : Nothing to do here\n", __FUNCTION__));
+ return BCME_BADARG;
+ }
+ }
+ ssid_whitelist->version = SSID_WHITELIST_VERSION;
+ ssid_whitelist->flags = flush ? ROAM_EXP_CLEAR_SSID_WHITELIST : 0;
+ err = dhd_iovar(&dhd->pub, 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist, len, NULL,
+ 0, TRUE);
+ if (err != BCME_OK) {
+ DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err));
+ }
+ return err;
}
-/* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
-int
-dhd_dev_pno_stop_for_batch(struct net_device *dev)
+#endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */
+
+#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
+/* Linux wrapper to call common dhd_pno_get_gscan */
+void *
+dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
+ void *info, uint32 *len)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- return (dhd_pno_stop_for_batch(&dhd->pub));
-}
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
-/* Linux wrapper to call common dhd_dev_pno_set_for_batch */
-int
-dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
+ return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
}
+#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
+#endif // endif
-/* Linux wrapper to call common dhd_dev_pno_get_for_batch */
+#ifdef RSSI_MONITOR_SUPPORT
int
-dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
+dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start,
+ int8 max_rssi, int8 min_rssi)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
+ int err;
+ wl_rssi_monitor_cfg_t rssi_monitor;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ rssi_monitor.version = RSSI_MONITOR_VERSION;
+ rssi_monitor.max_rssi = max_rssi;
+ rssi_monitor.min_rssi = min_rssi;
+ rssi_monitor.flags = start ? 0: RSSI_MONITOR_STOP;
+ err = dhd_iovar(&dhd->pub, 0, "rssi_monitor", (char *)&rssi_monitor, sizeof(rssi_monitor),
+ NULL, 0, TRUE);
+ if (err < 0 && err != BCME_UNSUPPORTED) {
+ DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__, err));
+ }
+ return err;
}
-#endif /* PNO_SUPPORT */
+#endif /* RSSI_MONITOR_SUPPORT */
-#if defined(PNO_SUPPORT)
-#ifdef GSCAN_SUPPORT
-bool
-dhd_dev_is_legacy_pno_enabled(struct net_device *dev)
+#ifdef DHDTCPACK_SUPPRESS
+int
+dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable)
{
+ int err;
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- return (dhd_is_legacy_pno_enabled(&dhd->pub));
+ err = dhd_tcpack_suppress_set(&dhd->pub, enable);
+ if (err != BCME_OK) {
+ DHD_ERROR(("%s : Failed to set tcpack_suppress mode: %d\n", __FUNCTION__, err));
+ }
+ return err;
}
+#endif /* DHDTCPACK_SUPPRESS */
int
-dhd_dev_set_epno(struct net_device *dev)
+dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- if (!dhd) {
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhdp = &dhd->pub;
+
+ if (!dhdp || !oui) {
+ DHD_ERROR(("NULL POINTER : %s\n",
+ __FUNCTION__));
return BCME_ERROR;
}
- return dhd_pno_set_epno(&dhd->pub);
+ if (ETHER_ISMULTI(oui)) {
+ DHD_ERROR(("Expected unicast OUI\n"));
+ return BCME_ERROR;
+ } else {
+ uint8 *rand_mac_oui = dhdp->rand_mac_oui;
+ memcpy(rand_mac_oui, oui, DOT11_OUI_LEN);
+ DHD_ERROR(("Random MAC OUI to be used - "MACOUIDBG"\n",
+ MACOUI2STRDBG(rand_mac_oui)));
+ }
+ return BCME_OK;
}
+
int
-dhd_dev_flush_fw_epno(struct net_device *dev)
+dhd_set_rand_mac_oui(dhd_pub_t *dhd)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- if (!dhd) {
- return BCME_ERROR;
+ int err;
+ wl_pfn_macaddr_cfg_t wl_cfg;
+ uint8 *rand_mac_oui = dhd->rand_mac_oui;
+
+ memset(&wl_cfg.macaddr, 0, ETHER_ADDR_LEN);
+ memcpy(&wl_cfg.macaddr, rand_mac_oui, DOT11_OUI_LEN);
+ wl_cfg.version = WL_PFN_MACADDR_CFG_VER;
+ if (ETHER_ISNULLADDR(&wl_cfg.macaddr)) {
+ wl_cfg.flags = 0;
+ } else {
+ wl_cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK);
}
- return dhd_pno_flush_fw_epno(&dhd->pub);
+
+ DHD_ERROR(("Setting rand mac oui to FW - "MACOUIDBG"\n",
+ MACOUI2STRDBG(rand_mac_oui)));
+
+ err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&wl_cfg, sizeof(wl_cfg), NULL, 0, TRUE);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to execute pfn_macaddr %d\n", __FUNCTION__, err));
+ }
+ return err;
}
+#if defined(RTT_SUPPORT) && defined(WL_CFG80211)
/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
int
-dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
- void *buf, bool flush)
+dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
+ return (dhd_rtt_set_cfg(&dhd->pub, buf));
}
-/* Linux wrapper to call common dhd_wait_batch_results_complete */
int
-dhd_dev_wait_batch_results_complete(struct net_device *dev)
+dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- return (dhd_wait_batch_results_complete(&dhd->pub));
+ return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
}
-/* Linux wrapper to call common dhd_pno_lock_batch_results */
int
-dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
+dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- return (dhd_pno_lock_batch_results(&dhd->pub));
+ return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
}
-/* Linux wrapper to call common dhd_pno_unlock_batch_results */
-void
-dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
+
+int
+dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- return (dhd_pno_unlock_batch_results(&dhd->pub));
+ return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
}
-/* Linux wrapper to call common dhd_pno_initiate_gscan_request */
int
-dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
+dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
+ return (dhd_rtt_capability(&dhd->pub, capa));
}
-/* Linux wrapper to call common dhd_pno_enable_full_scan_result */
int
-dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
+dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
-
- return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
+ return (dhd_rtt_avail_channel(&dhd->pub, channel_info));
}
-/* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
-void *
-dhd_dev_hotlist_scan_event(struct net_device *dev,
- const void *data, int *send_evt_bytes, hotlist_type_t type, u32 *buf_len)
+int
+dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
-
- return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type, buf_len));
+ return (dhd_rtt_enable_responder(&dhd->pub, channel_info));
}
-/* Linux wrapper to call common dhd_process_full_gscan_result */
-void *
-dhd_dev_process_full_gscan_result(struct net_device *dev,
-const void *data, uint32 len, int *send_evt_bytes)
+int dhd_dev_rtt_cancel_responder(struct net_device *dev)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ return (dhd_rtt_cancel_responder(&dhd->pub));
+}
+
+#endif /* RTT_SUPPORT */
+
+#ifdef KEEP_ALIVE
+#define KA_TEMP_BUF_SIZE 512
+#define KA_FRAME_SIZE 300
+
+int
+dhd_dev_start_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id, uint8 *ip_pkt,
+ uint16 ip_pkt_len, uint8* src_mac, uint8* dst_mac, uint32 period_msec)
+{
+ const int ETHERTYPE_LEN = 2;
+ char *pbuf = NULL;
+ const char *str;
+ wl_mkeep_alive_pkt_t mkeep_alive_pkt;
+ wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL;
+ int buf_len = 0;
+ int str_len = 0;
+ int res = BCME_ERROR;
+ int len_bytes = 0;
+ int i = 0;
+
+ /* ether frame to have both max IP pkt (256 bytes) and ether header */
+ char *pmac_frame = NULL;
+ char *pmac_frame_begin = NULL;
+
+ /*
+ * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
+ * dongle shall reject a mkeep_alive request.
+ */
+ if (!dhd_support_sta_mode(dhd_pub))
+ return res;
+
+ DHD_TRACE(("%s execution\n", __FUNCTION__));
+
+ if ((pbuf = MALLOCZ(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) {
+ DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
+ res = BCME_NOMEM;
+ return res;
+ }
+
+ if ((pmac_frame = MALLOCZ(dhd_pub->osh, KA_FRAME_SIZE)) == NULL) {
+ DHD_ERROR(("failed to allocate mac_frame with size %d\n", KA_FRAME_SIZE));
+ res = BCME_NOMEM;
+ goto exit;
+ }
+ pmac_frame_begin = pmac_frame;
+
+ /*
+ * Get current mkeep-alive status.
+ */
+ res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id, sizeof(mkeep_alive_id), pbuf,
+ KA_TEMP_BUF_SIZE, FALSE);
+ if (res < 0) {
+ DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
+ goto exit;
+ } else {
+ /* Check available ID whether it is occupied */
+ mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
+ if (dtoh32(mkeep_alive_pktp->period_msec != 0)) {
+ DHD_ERROR(("%s: Get mkeep_alive failed, ID %u is in use.\n",
+ __FUNCTION__, mkeep_alive_id));
+
+ /* Current occupied ID info */
+ DHD_ERROR(("%s: mkeep_alive\n", __FUNCTION__));
+ DHD_ERROR((" Id : %d\n"
+ " Period: %d msec\n"
+ " Length: %d\n"
+ " Packet: 0x",
+ mkeep_alive_pktp->keep_alive_id,
+ dtoh32(mkeep_alive_pktp->period_msec),
+ dtoh16(mkeep_alive_pktp->len_bytes)));
+
+ for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
+ DHD_ERROR(("%02x", mkeep_alive_pktp->data[i]));
+ }
+ DHD_ERROR(("\n"));
+
+ res = BCME_NOTFOUND;
+ goto exit;
+ }
+ }
+
+ /* Request the specified ID */
+ memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
+ memset(pbuf, 0, KA_TEMP_BUF_SIZE);
+ str = "mkeep_alive";
+ str_len = strlen(str);
+ strncpy(pbuf, str, str_len);
+ pbuf[str_len] = '\0';
+
+ mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (pbuf + str_len + 1);
+ mkeep_alive_pkt.period_msec = htod32(period_msec);
+ buf_len = str_len + 1;
+ mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
+ mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+
+ /* ID assigned */
+ mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;
+
+ buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
+
+ /*
+ * Build up Ethernet Frame
+ */
+
+ /* Mapping dest mac addr */
+ memcpy(pmac_frame, dst_mac, ETHER_ADDR_LEN);
+ pmac_frame += ETHER_ADDR_LEN;
- return (dhd_process_full_gscan_result(&dhd->pub, data, len, send_evt_bytes));
-}
+ /* Mapping src mac addr */
+ memcpy(pmac_frame, src_mac, ETHER_ADDR_LEN);
+ pmac_frame += ETHER_ADDR_LEN;
-void
-dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ /* Mapping Ethernet type (ETHERTYPE_IP: 0x0800) */
+ *(pmac_frame++) = 0x08;
+ *(pmac_frame++) = 0x00;
- dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);
+ /* Mapping IP pkt */
+ memcpy(pmac_frame, ip_pkt, ip_pkt_len);
+ pmac_frame += ip_pkt_len;
- return;
-}
+ /*
+ * Length of ether frame (assume to be all hexa bytes)
+ * = src mac + dst mac + ether type + ip pkt len
+ */
+ len_bytes = ETHER_ADDR_LEN*2 + ETHERTYPE_LEN + ip_pkt_len;
+ memcpy(mkeep_alive_pktp->data, pmac_frame_begin, len_bytes);
+ buf_len += len_bytes;
+ mkeep_alive_pkt.len_bytes = htod16(len_bytes);
-int
-dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ /*
+ * Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
+ * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
+ * guarantee that the buffer is properly aligned.
+ */
+ memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
- return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
+ res = dhd_wl_ioctl_cmd(dhd_pub, WLC_SET_VAR, pbuf, buf_len, TRUE, 0);
+exit:
+ if (pmac_frame_begin) {
+ MFREE(dhd_pub->osh, pmac_frame_begin, KA_FRAME_SIZE);
+ pmac_frame_begin = NULL;
+ }
+ if (pbuf) {
+ MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE);
+ pbuf = NULL;
+ }
+ return res;
}
-/* Linux wrapper to call common dhd_retreive_batch_scan_results */
int
-dhd_dev_retrieve_batch_scan(struct net_device *dev)
+dhd_dev_stop_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ char *pbuf = NULL;
+ wl_mkeep_alive_pkt_t mkeep_alive_pkt;
+ wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL;
+ int res = BCME_ERROR;
+ int i = 0;
- return (dhd_retreive_batch_scan_results(&dhd->pub));
-}
+ /*
+ * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
+ * dongle shall reject a mkeep_alive request.
+ */
+ if (!dhd_support_sta_mode(dhd_pub))
+ return res;
-/* Linux wrapper to call common dhd_pno_process_epno_result */
-void * dhd_dev_process_epno_result(struct net_device *dev,
- const void *data, uint32 event, int *send_evt_bytes)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ DHD_TRACE(("%s execution\n", __FUNCTION__));
- return (dhd_pno_process_epno_result(&dhd->pub, data, event, send_evt_bytes));
-}
+ /*
+ * Get current mkeep-alive status. Skip ID 0 which is being used for NULL pkt.
+ */
+ if ((pbuf = MALLOC(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) {
+ DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
+ return res;
+ }
-int
-dhd_dev_set_lazy_roam_cfg(struct net_device *dev,
- wlc_roam_exp_params_t *roam_param)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- wl_roam_exp_cfg_t roam_exp_cfg;
- int err;
+ res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id,
+ sizeof(mkeep_alive_id), pbuf, KA_TEMP_BUF_SIZE, FALSE);
+ if (res < 0) {
+ DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
+ goto exit;
+ } else {
+ /* Check occupied ID */
+ mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
+ DHD_INFO(("%s: mkeep_alive\n", __FUNCTION__));
+ DHD_INFO((" Id : %d\n"
+ " Period: %d msec\n"
+ " Length: %d\n"
+ " Packet: 0x",
+ mkeep_alive_pktp->keep_alive_id,
+ dtoh32(mkeep_alive_pktp->period_msec),
+ dtoh16(mkeep_alive_pktp->len_bytes)));
- if (!roam_param) {
- return BCME_BADARG;
+ for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
+ DHD_INFO(("%02x", mkeep_alive_pktp->data[i]));
+ }
+ DHD_INFO(("\n"));
}
- DHD_INFO(("a_band_boost_thr %d a_band_penalty_thr %d\n",
- roam_param->a_band_boost_threshold, roam_param->a_band_penalty_threshold));
- DHD_INFO(("a_band_boost_factor %d a_band_penalty_factor %d cur_bssid_boost %d\n",
- roam_param->a_band_boost_factor, roam_param->a_band_penalty_factor,
- roam_param->cur_bssid_boost));
- DHD_INFO(("alert_roam_trigger_thr %d a_band_max_boost %d\n",
- roam_param->alert_roam_trigger_threshold, roam_param->a_band_max_boost));
+ /* Make it stop if available */
+ if (dtoh32(mkeep_alive_pktp->period_msec != 0)) {
+ DHD_INFO(("stop mkeep_alive on ID %d\n", mkeep_alive_id));
+ memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
- memcpy(&roam_exp_cfg.params, roam_param, sizeof(*roam_param));
- roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
- roam_exp_cfg.flags = ROAM_EXP_CFG_PRESENT;
- if (dhd->pub.lazy_roam_enable) {
- roam_exp_cfg.flags |= ROAM_EXP_ENABLE_FLAG;
+ mkeep_alive_pkt.period_msec = 0;
+ mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
+ mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+ mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;
+
+ res = dhd_iovar(dhd_pub, 0, "mkeep_alive",
+ (char *)&mkeep_alive_pkt,
+ WL_MKEEP_ALIVE_FIXED_LEN, NULL, 0, TRUE);
+ } else {
+ DHD_ERROR(("%s: ID %u does not exist.\n", __FUNCTION__, mkeep_alive_id));
+ res = BCME_NOTFOUND;
}
- err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
- (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
- TRUE);
- if (err < 0) {
- DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
+exit:
+ if (pbuf) {
+ MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE);
+ pbuf = NULL;
}
- return err;
+ return res;
}
+#endif /* KEEP_ALIVE */
-int
-dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable)
+#if defined(PKT_FILTER_SUPPORT) && defined(APF)
+static void _dhd_apf_lock_local(dhd_info_t *dhd)
{
- int err;
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- wl_roam_exp_cfg_t roam_exp_cfg;
-
- memset(&roam_exp_cfg, 0, sizeof(roam_exp_cfg));
- roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
- if (enable) {
- roam_exp_cfg.flags = ROAM_EXP_ENABLE_FLAG;
+ if (dhd) {
+ mutex_lock(&dhd->dhd_apf_mutex);
}
+}
- err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
- (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
- TRUE);
- if (err < 0) {
- DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
- } else {
- dhd->pub.lazy_roam_enable = (enable != 0);
+static void _dhd_apf_unlock_local(dhd_info_t *dhd)
+{
+ if (dhd) {
+ mutex_unlock(&dhd->dhd_apf_mutex);
}
- return err;
}
-int
-dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev,
- wl_bssid_pref_cfg_t *bssid_pref, uint32 flush)
+static int
+__dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
+ u8* program, uint32 program_len)
{
- int err;
- uint len;
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ wl_pkt_filter_t * pkt_filterp;
+ wl_apf_program_t *apf_program;
+ char *buf;
+ u32 cmd_len, buf_len;
+ int ifidx, ret;
+ char cmd[] = "pkt_filter_add";
- bssid_pref->version = BSSID_PREF_LIST_VERSION;
- /* By default programming bssid pref flushes out old values */
- bssid_pref->flags = (flush && !bssid_pref->count) ? ROAM_EXP_CLEAR_BSSID_PREF: 0;
- len = sizeof(wl_bssid_pref_cfg_t);
- if (bssid_pref->count) {
- len += (bssid_pref->count - 1) * sizeof(wl_bssid_pref_list_t);
+ ifidx = dhd_net2idx(dhd, ndev);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
}
- err = dhd_iovar(&dhd->pub, 0, "roam_exp_bssid_pref",
- (char *)bssid_pref, len, NULL, 0, TRUE);
- if (err != BCME_OK) {
- DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err));
+
+ cmd_len = sizeof(cmd);
+
+ /* Check if the program_len is more than the expected len
+ * and if the program is NULL return from here.
+ */
+ if ((program_len > WL_APF_PROGRAM_MAX_SIZE) || (program == NULL)) {
+ DHD_ERROR(("%s Invalid program_len: %d, program: %pK\n",
+ __FUNCTION__, program_len, program));
+ return -EINVAL;
}
- return err;
-}
-#endif /* GSCAN_SUPPORT */
+ buf_len = cmd_len + WL_PKT_FILTER_FIXED_LEN +
+ WL_APF_PROGRAM_FIXED_LEN + program_len;
-#if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT)
-int
-dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist,
- uint32 len, uint32 flush)
-{
- int err;
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- int macmode;
+ buf = MALLOCZ(dhdp->osh, buf_len);
+ if (unlikely(!buf)) {
+ DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
+ return -ENOMEM;
+ }
- if (blacklist) {
- err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACLIST, (char *)blacklist,
- len, TRUE, 0);
- if (err != BCME_OK) {
- DHD_ERROR(("%s : WLC_SET_MACLIST failed %d\n", __FUNCTION__, err));
- return err;
- }
+ memcpy(buf, cmd, cmd_len);
+
+ pkt_filterp = (wl_pkt_filter_t *) (buf + cmd_len);
+ pkt_filterp->id = htod32(filter_id);
+ pkt_filterp->negate_match = htod32(FALSE);
+ pkt_filterp->type = htod32(WL_PKT_FILTER_TYPE_APF_MATCH);
+
+ apf_program = &pkt_filterp->u.apf_program;
+ apf_program->version = htod16(WL_APF_INTERNAL_VERSION);
+ apf_program->instr_len = htod16(program_len);
+ memcpy(apf_program->instrs, program, program_len);
+
+ ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
+ if (unlikely(ret)) {
+ DHD_ERROR(("%s: failed to add APF filter, id=%d, ret=%d\n",
+ __FUNCTION__, filter_id, ret));
}
- /* By default programming blacklist flushes out old values */
- macmode = (flush && !blacklist) ? WLC_MACMODE_DISABLED : WLC_MACMODE_DENY;
- err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACMODE, (char *)&macmode,
- sizeof(macmode), TRUE, 0);
- if (err != BCME_OK) {
- DHD_ERROR(("%s : WLC_SET_MACMODE failed %d\n", __FUNCTION__, err));
+
+ if (buf) {
+ MFREE(dhdp->osh, buf, buf_len);
}
- return err;
+ return ret;
}
-int
-dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *ssid_whitelist,
- uint32 len, uint32 flush)
+static int
+__dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
+ uint32 mode, uint32 enable)
{
- int err;
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- wl_ssid_whitelist_t whitelist_ssid_flush;
+ dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ wl_pkt_filter_enable_t * pkt_filterp;
+ char *buf;
+ u32 cmd_len, buf_len;
+ int ifidx, ret;
+ char cmd[] = "pkt_filter_enable";
- if (!ssid_whitelist) {
- if (flush) {
- ssid_whitelist = &whitelist_ssid_flush;
- ssid_whitelist->ssid_count = 0;
- } else {
- DHD_ERROR(("%s : Nothing to do here\n", __FUNCTION__));
- return BCME_BADARG;
- }
+ ifidx = dhd_net2idx(dhd, ndev);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
}
- ssid_whitelist->version = SSID_WHITELIST_VERSION;
- ssid_whitelist->flags = flush ? ROAM_EXP_CLEAR_SSID_WHITELIST : 0;
- err = dhd_iovar(&dhd->pub, 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist, len, NULL,
- 0, TRUE);
- if (err != BCME_OK) {
- DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err));
+
+ cmd_len = sizeof(cmd);
+ buf_len = cmd_len + sizeof(*pkt_filterp);
+
+ buf = MALLOCZ(dhdp->osh, buf_len);
+ if (unlikely(!buf)) {
+ DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
+ return -ENOMEM;
}
- return err;
-}
-#endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */
-#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
-/* Linux wrapper to call common dhd_pno_get_gscan */
-void *
-dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
- void *info, uint32 *len)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ memcpy(buf, cmd, cmd_len);
- return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
+ pkt_filterp = (wl_pkt_filter_enable_t *) (buf + cmd_len);
+ pkt_filterp->id = htod32(filter_id);
+ pkt_filterp->enable = htod32(enable);
+
+ ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
+ if (unlikely(ret)) {
+ DHD_ERROR(("%s: failed to enable APF filter, id=%d, ret=%d\n",
+ __FUNCTION__, filter_id, ret));
+ goto exit;
+ }
+
+ ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_mode", dhd_master_mode,
+ WLC_SET_VAR, TRUE, ifidx);
+ if (unlikely(ret)) {
+ DHD_ERROR(("%s: failed to set APF filter mode, id=%d, ret=%d\n",
+ __FUNCTION__, filter_id, ret));
+ }
+
+exit:
+ if (buf) {
+ MFREE(dhdp->osh, buf, buf_len);
+ }
+ return ret;
}
-#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
-#endif // endif
-#ifdef RSSI_MONITOR_SUPPORT
-int
-dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start,
- int8 max_rssi, int8 min_rssi)
+static int
+__dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id)
{
- int err;
- wl_rssi_monitor_cfg_t rssi_monitor;
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ifidx, ret;
- rssi_monitor.version = RSSI_MONITOR_VERSION;
- rssi_monitor.max_rssi = max_rssi;
- rssi_monitor.min_rssi = min_rssi;
- rssi_monitor.flags = start ? 0: RSSI_MONITOR_STOP;
- err = dhd_iovar(&dhd->pub, 0, "rssi_monitor", (char *)&rssi_monitor, sizeof(rssi_monitor),
- NULL, 0, TRUE);
- if (err < 0 && err != BCME_UNSUPPORTED) {
- DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__, err));
+ ifidx = dhd_net2idx(dhd, ndev);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
}
- return err;
+
+ ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_delete",
+ htod32(filter_id), WLC_SET_VAR, TRUE, ifidx);
+ if (unlikely(ret)) {
+ DHD_ERROR(("%s: failed to delete APF filter, id=%d, ret=%d\n",
+ __FUNCTION__, filter_id, ret));
+ }
+
+ return ret;
}
-#endif /* RSSI_MONITOR_SUPPORT */
-#ifdef DHDTCPACK_SUPPRESS
-int
-dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable)
+void dhd_apf_lock(struct net_device *dev)
{
- int err;
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ _dhd_apf_lock_local(dhd);
+}
- err = dhd_tcpack_suppress_set(&dhd->pub, enable);
- if (err != BCME_OK) {
- DHD_ERROR(("%s : Failed to set tcpack_suppress mode: %d\n", __FUNCTION__, err));
- }
- return err;
+void dhd_apf_unlock(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ _dhd_apf_unlock_local(dhd);
}
-#endif /* DHDTCPACK_SUPPRESS */
int
-dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui)
+dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(ndev);
dhd_pub_t *dhdp = &dhd->pub;
+ int ifidx, ret;
- if (!dhdp || !oui) {
- DHD_ERROR(("NULL POINTER : %s\n",
- __FUNCTION__));
- return BCME_ERROR;
+ if (!FW_SUPPORTED(dhdp, apf)) {
+ DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
+
+ /*
+ * Notify Android framework that APF is not supported by setting
+ * version as zero.
+ */
+ *version = 0;
+ return BCME_OK;
}
- if (ETHER_ISMULTI(oui)) {
- DHD_ERROR(("Expected unicast OUI\n"));
- return BCME_ERROR;
- } else {
- uint8 *rand_mac_oui = dhdp->rand_mac_oui;
- memcpy(rand_mac_oui, oui, DOT11_OUI_LEN);
- DHD_ERROR(("Random MAC OUI to be used - "MACOUIDBG"\n",
- MACOUI2STRDBG(rand_mac_oui)));
+
+ ifidx = dhd_net2idx(dhd, ndev);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
}
- return BCME_OK;
+
+ ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_ver", version,
+ WLC_GET_VAR, FALSE, ifidx);
+ if (unlikely(ret)) {
+ DHD_ERROR(("%s: failed to get APF version, ret=%d\n",
+ __FUNCTION__, ret));
+ }
+
+ return ret;
}
int
-dhd_set_rand_mac_oui(dhd_pub_t *dhd)
+dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len)
{
- int err;
- wl_pfn_macaddr_cfg_t wl_cfg;
- uint8 *rand_mac_oui = dhd->rand_mac_oui;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ifidx, ret;
- memset(&wl_cfg.macaddr, 0, ETHER_ADDR_LEN);
- memcpy(&wl_cfg.macaddr, rand_mac_oui, DOT11_OUI_LEN);
- wl_cfg.version = WL_PFN_MACADDR_CFG_VER;
- if (ETHER_ISNULLADDR(&wl_cfg.macaddr)) {
- wl_cfg.flags = 0;
- } else {
- wl_cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK);
+ if (!FW_SUPPORTED(dhdp, apf)) {
+ DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
+ *max_len = 0;
+ return BCME_OK;
}
- DHD_ERROR(("Setting rand mac oui to FW - "MACOUIDBG"\n",
- MACOUI2STRDBG(rand_mac_oui)));
+ ifidx = dhd_net2idx(dhd, ndev);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
+ }
- err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&wl_cfg, sizeof(wl_cfg), NULL, 0, TRUE);
- if (err < 0) {
- DHD_ERROR(("%s : failed to execute pfn_macaddr %d\n", __FUNCTION__, err));
+ ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_size_limit", max_len,
+ WLC_GET_VAR, FALSE, ifidx);
+ if (unlikely(ret)) {
+ DHD_ERROR(("%s: failed to get APF size limit, ret=%d\n",
+ __FUNCTION__, ret));
}
- return err;
+
+ return ret;
}
-#ifdef RTT_SUPPORT
-/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
int
-dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
+dhd_dev_apf_add_filter(struct net_device *ndev, u8* program,
+ uint32 program_len)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ret;
- return (dhd_rtt_set_cfg(&dhd->pub, buf));
-}
+ DHD_APF_LOCK(ndev);
-int
-dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ /* delete, if filter already exists */
+ if (dhdp->apf_set) {
+ ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
+ if (unlikely(ret)) {
+ goto exit;
+ }
+ dhdp->apf_set = FALSE;
+ }
- return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
-}
+ ret = __dhd_apf_add_filter(ndev, PKT_FILTER_APF_ID, program, program_len);
+ if (ret) {
+ goto exit;
+ }
+ dhdp->apf_set = TRUE;
-int
-dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ if (dhdp->in_suspend && dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ /* Driver is still in (early) suspend state, enable APF filter back */
+ ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
+ PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
+ }
+exit:
+ DHD_APF_UNLOCK(ndev);
- return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
+ return ret;
}
int
-dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn)
+dhd_dev_apf_enable_filter(struct net_device *ndev)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ret = 0;
+ bool nan_dp_active = false;
- return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
-}
+ DHD_APF_LOCK(ndev);
+#ifdef WL_NAN
+ nan_dp_active = wl_cfgnan_is_dp_active(ndev);
+#endif /* WL_NAN */
+ if (dhdp->apf_set && (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE) &&
+ !nan_dp_active)) {
+ ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
+ PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
+ }
-int
-dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ DHD_APF_UNLOCK(ndev);
- return (dhd_rtt_capability(&dhd->pub, capa));
+ return ret;
}
int
-dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info)
+dhd_dev_apf_disable_filter(struct net_device *ndev)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- return (dhd_rtt_avail_channel(&dhd->pub, channel_info));
-}
+ dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ret = 0;
-int
-dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- return (dhd_rtt_enable_responder(&dhd->pub, channel_info));
-}
+ DHD_APF_LOCK(ndev);
-int dhd_dev_rtt_cancel_responder(struct net_device *dev)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- return (dhd_rtt_cancel_responder(&dhd->pub));
-}
+ if (dhdp->apf_set) {
+ ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
+ PKT_FILTER_MODE_FORWARD_ON_MATCH, FALSE);
+ }
-#endif /* RTT_SUPPORT */
+ DHD_APF_UNLOCK(ndev);
-#ifdef KEEP_ALIVE
-#define KA_TEMP_BUF_SIZE 512
-#define KA_FRAME_SIZE 300
+ return ret;
+}
int
-dhd_dev_start_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id, uint8 *ip_pkt,
- uint16 ip_pkt_len, uint8* src_mac, uint8* dst_mac, uint32 period_msec)
+dhd_dev_apf_delete_filter(struct net_device *ndev)
{
- const int ETHERTYPE_LEN = 2;
- char *pbuf = NULL;
- const char *str;
- wl_mkeep_alive_pkt_t mkeep_alive_pkt;
- wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL;
- int buf_len = 0;
- int str_len = 0;
- int res = BCME_ERROR;
- int len_bytes = 0;
- int i = 0;
-
- /* ether frame to have both max IP pkt (256 bytes) and ether header */
- char *pmac_frame = NULL;
- char *pmac_frame_begin = NULL;
-
- /*
- * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
- * dongle shall reject a mkeep_alive request.
- */
- if (!dhd_support_sta_mode(dhd_pub))
- return res;
-
- DHD_TRACE(("%s execution\n", __FUNCTION__));
-
- if ((pbuf = MALLOCZ(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) {
- DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
- res = BCME_NOMEM;
- return res;
- }
-
- if ((pmac_frame = MALLOCZ(dhd_pub->osh, KA_FRAME_SIZE)) == NULL) {
- DHD_ERROR(("failed to allocate mac_frame with size %d\n", KA_FRAME_SIZE));
- res = BCME_NOMEM;
- goto exit;
- }
- pmac_frame_begin = pmac_frame;
-
- /*
- * Get current mkeep-alive status.
- */
- res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id, sizeof(mkeep_alive_id), pbuf,
- KA_TEMP_BUF_SIZE, FALSE);
- if (res < 0) {
- DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
- goto exit;
- } else {
- /* Check available ID whether it is occupied */
- mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
- if (dtoh32(mkeep_alive_pktp->period_msec != 0)) {
- DHD_ERROR(("%s: Get mkeep_alive failed, ID %u is in use.\n",
- __FUNCTION__, mkeep_alive_id));
-
- /* Current occupied ID info */
- DHD_ERROR(("%s: mkeep_alive\n", __FUNCTION__));
- DHD_ERROR((" Id : %d\n"
- " Period: %d msec\n"
- " Length: %d\n"
- " Packet: 0x",
- mkeep_alive_pktp->keep_alive_id,
- dtoh32(mkeep_alive_pktp->period_msec),
- dtoh16(mkeep_alive_pktp->len_bytes)));
+ dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ret = 0;
- for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
- DHD_ERROR(("%02x", mkeep_alive_pktp->data[i]));
- }
- DHD_ERROR(("\n"));
+ DHD_APF_LOCK(ndev);
- res = BCME_NOTFOUND;
- goto exit;
+ if (dhdp->apf_set) {
+ ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
+ if (!ret) {
+ dhdp->apf_set = FALSE;
}
}
- /* Request the specified ID */
- memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
- memset(pbuf, 0, KA_TEMP_BUF_SIZE);
- str = "mkeep_alive";
- str_len = strlen(str);
- strncpy(pbuf, str, str_len);
- pbuf[str_len] = '\0';
-
- mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (pbuf + str_len + 1);
- mkeep_alive_pkt.period_msec = htod32(period_msec);
- buf_len = str_len + 1;
- mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
- mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
-
- /* ID assigned */
- mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;
-
- buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
-
- /*
- * Build up Ethernet Frame
- */
-
- /* Mapping dest mac addr */
- memcpy(pmac_frame, dst_mac, ETHER_ADDR_LEN);
- pmac_frame += ETHER_ADDR_LEN;
-
- /* Mapping src mac addr */
- memcpy(pmac_frame, src_mac, ETHER_ADDR_LEN);
- pmac_frame += ETHER_ADDR_LEN;
+ DHD_APF_UNLOCK(ndev);
- /* Mapping Ethernet type (ETHERTYPE_IP: 0x0800) */
- *(pmac_frame++) = 0x08;
- *(pmac_frame++) = 0x00;
+ return ret;
+}
+#endif /* PKT_FILTER_SUPPORT && APF */
- /* Mapping IP pkt */
- memcpy(pmac_frame, ip_pkt, ip_pkt_len);
- pmac_frame += ip_pkt_len;
+static void dhd_hang_process(struct work_struct *work_data)
+{
+ struct net_device *dev;
+#ifdef IFACE_HANG_FORCE_DEV_CLOSE
+ struct net_device *ndev;
+ uint8 i = 0;
+#endif /* IFACE_HANG_FORCE_DEV_CLOSE */
+/* Ignore compiler warnings due to -Werror=cast-qual */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ struct dhd_info *dhd =
+ container_of(work_data, dhd_info_t, dhd_hang_process_work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
- /*
- * Length of ether frame (assume to be all hexa bytes)
- * = src mac + dst mac + ether type + ip pkt len
- */
- len_bytes = ETHER_ADDR_LEN*2 + ETHERTYPE_LEN + ip_pkt_len;
- memcpy(mkeep_alive_pktp->data, pmac_frame_begin, len_bytes);
- buf_len += len_bytes;
- mkeep_alive_pkt.len_bytes = htod16(len_bytes);
+ if (!dhd || !dhd->iflist[0])
+ return;
+ dev = dhd->iflist[0]->net;
+ if (dev) {
+#if defined(WL_WIRELESS_EXT)
+ wl_iw_send_priv_event(dev, "HANG");
+#endif // endif
+#if defined(WL_CFG80211)
+ wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
+#endif // endif
+ }
+#ifdef IFACE_HANG_FORCE_DEV_CLOSE
/*
- * Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
- * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
- * guarantee that the buffer is properly aligned.
+ * For HW2, dev_close need to be done to recover
+ * from upper layer after hang. For Interposer skip
+ * dev_close so that dhd iovars can be used to take
+ * socramdump after crash, also skip for HW4 as
+ * handling of hang event is different
*/
- memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
- res = dhd_wl_ioctl_cmd(dhd_pub, WLC_SET_VAR, pbuf, buf_len, TRUE, 0);
-exit:
- if (pmac_frame_begin) {
- MFREE(dhd_pub->osh, pmac_frame_begin, KA_FRAME_SIZE);
- pmac_frame_begin = NULL;
- }
- if (pbuf) {
- MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE);
- pbuf = NULL;
+ rtnl_lock();
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ ndev = dhd->iflist[i] ? dhd->iflist[i]->net : NULL;
+ if (ndev && (ndev->flags & IFF_UP)) {
+ DHD_ERROR(("ndev->name : %s dev close\n",
+ ndev->name));
+ dev_close(ndev);
+ }
}
- return res;
+ rtnl_unlock();
+#endif /* IFACE_HANG_FORCE_DEV_CLOSE */
}
-int
-dhd_dev_stop_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id)
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+extern dhd_pub_t *link_recovery;
+void dhd_host_recover_link(void)
{
- char *pbuf = NULL;
- wl_mkeep_alive_pkt_t mkeep_alive_pkt;
- wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL;
- int res = BCME_ERROR;
- int i = 0;
-
- /*
- * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
- * dongle shall reject a mkeep_alive request.
- */
- if (!dhd_support_sta_mode(dhd_pub))
- return res;
+ DHD_ERROR(("****** %s ******\n", __FUNCTION__));
+ link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN_RC_DETECT;
+ dhd_bus_set_linkdown(link_recovery, TRUE);
+ dhd_os_send_hang_message(link_recovery);
+}
+EXPORT_SYMBOL(dhd_host_recover_link);
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
- DHD_TRACE(("%s execution\n", __FUNCTION__));
+int dhd_os_send_hang_message(dhd_pub_t *dhdp)
+{
+ int ret = 0;
+#ifdef WL_CFG80211
+ struct net_device *primary_ndev;
+ struct bcm_cfg80211 *cfg;
+#ifdef DHD_FILE_DUMP_EVENT
+ dhd_info_t *dhd_info = NULL;
+#endif /* DHD_FILE_DUMP_EVENT */
+#endif /* WL_CFG80211 */
- /*
- * Get current mkeep-alive status. Skip ID 0 which is being used for NULL pkt.
- */
- if ((pbuf = MALLOC(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) {
- DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
- return res;
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is null\n", __FUNCTION__));
+ return -EINVAL;
}
- res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id,
- sizeof(mkeep_alive_id), pbuf, KA_TEMP_BUF_SIZE, FALSE);
- if (res < 0) {
- DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
- goto exit;
- } else {
- /* Check occupied ID */
- mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
- DHD_INFO(("%s: mkeep_alive\n", __FUNCTION__));
- DHD_INFO((" Id : %d\n"
- " Period: %d msec\n"
- " Length: %d\n"
- " Packet: 0x",
- mkeep_alive_pktp->keep_alive_id,
- dtoh32(mkeep_alive_pktp->period_msec),
- dtoh16(mkeep_alive_pktp->len_bytes)));
+#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
+ dhd_info = (dhd_info_t *)dhdp->info;
- for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
- DHD_INFO(("%02x", mkeep_alive_pktp->data[i]));
- }
- DHD_INFO(("\n"));
+ if (dhd_info->scheduled_memdump) {
+ DHD_ERROR_RLMT(("[DUMP]:%s, memdump in progress. return\n", __FUNCTION__));
+ dhdp->hang_was_pending = 1;
+ return BCME_OK;
}
+#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
- /* Make it stop if available */
- if (dtoh32(mkeep_alive_pktp->period_msec != 0)) {
- DHD_INFO(("stop mkeep_alive on ID %d\n", mkeep_alive_id));
- memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
-
- mkeep_alive_pkt.period_msec = 0;
- mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
- mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
- mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;
+#ifdef WL_CFG80211
+ primary_ndev = dhd_linux_get_primary_netdev(dhdp);
+ if (!primary_ndev) {
+ DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
+ return -ENODEV;
+ }
+ cfg = wl_get_cfg(primary_ndev);
+ if (!cfg) {
+ DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
+ return -EINVAL;
+ }
- res = dhd_iovar(dhd_pub, 0, "mkeep_alive",
- (char *)&mkeep_alive_pkt,
- WL_MKEEP_ALIVE_FIXED_LEN, NULL, 0, TRUE);
- } else {
- DHD_ERROR(("%s: ID %u does not exist.\n", __FUNCTION__, mkeep_alive_id));
- res = BCME_NOTFOUND;
+ /* Skip sending HANG event to framework if driver is not ready */
+ if (!wl_get_drv_status(cfg, READY, primary_ndev)) {
+ DHD_ERROR(("%s: device is not ready\n", __FUNCTION__));
+ return -ENODEV;
}
-exit:
- if (pbuf) {
- MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE);
- pbuf = NULL;
+#endif /* WL_CFG80211 */
+
+ if (!dhdp->hang_was_sent) {
+#if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG)
+ dhdp->hang_counts++;
+ if (dhdp->hang_counts >= MAX_CONSECUTIVE_HANG_COUNTS) {
+ DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n",
+ __func__, dhdp->hang_counts));
+ BUG_ON(1);
+ }
+#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
+#ifdef DHD_DEBUG_UART
+	/* If the PCIe lane has broken, execute the debug uart application
+	 * to gather ramdump data from the dongle via uart
+	 */
+ if (!dhdp->info->duart_execute) {
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
+ (void *)dhdp, DHD_WQ_WORK_DEBUG_UART_DUMP,
+ dhd_debug_uart_exec_rd, DHD_WQ_WORK_PRIORITY_HIGH);
+ }
+#endif /* DHD_DEBUG_UART */
+ dhdp->hang_was_sent = 1;
+#ifdef BT_OVER_SDIO
+ dhdp->is_bt_recovery_required = TRUE;
+#endif // endif
+ schedule_work(&dhdp->info->dhd_hang_process_work);
+ DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d s=%d\n", __FUNCTION__,
+ dhdp->rxcnt_timeout, dhdp->txcnt_timeout, dhdp->busstate));
}
- return res;
+ return ret;
}
-#endif /* KEEP_ALIVE */
-#if defined(PKT_FILTER_SUPPORT) && defined(APF)
-static void _dhd_apf_lock_local(dhd_info_t *dhd)
+int net_os_send_hang_message(struct net_device *dev)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret = 0;
+
if (dhd) {
- mutex_lock(&dhd->dhd_apf_mutex);
+ /* Report FW problem when enabled */
+ if (dhd->pub.hang_report) {
+#ifdef BT_OVER_SDIO
+ if (netif_running(dev)) {
+#endif /* BT_OVER_SDIO */
+ ret = dhd_os_send_hang_message(&dhd->pub);
+#ifdef BT_OVER_SDIO
+ }
+ DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__));
+ bcmsdh_btsdio_process_dhd_hang_notification(!netif_running(dev));
+#endif /* BT_OVER_SDIO */
+ } else {
+ DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
+ __FUNCTION__));
+ }
}
-#endif // endif
+ return ret;
}
-static void _dhd_apf_unlock_local(dhd_info_t *dhd)
+int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ dhd_info_t *dhd = NULL;
+ dhd_pub_t *dhdp = NULL;
+ int reason;
+
+ dhd = DHD_DEV_INFO(dev);
if (dhd) {
- mutex_unlock(&dhd->dhd_apf_mutex);
+ dhdp = &dhd->pub;
}
-#endif // endif
+
+ if (!dhd || !dhdp) {
+ return 0;
+ }
+
+ reason = bcm_strtoul(string_num, NULL, 0);
+ DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason));
+
+ if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
+ reason = 0;
+ }
+
+ dhdp->hang_reason = reason;
+
+ return net_os_send_hang_message(dev);
}
-static int
-__dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
- u8* program, uint32 program_len)
+int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
{
- dhd_info_t *dhd = DHD_DEV_INFO(ndev);
- dhd_pub_t *dhdp = &dhd->pub;
- wl_pkt_filter_t * pkt_filterp;
- wl_apf_program_t *apf_program;
- char *buf;
- u32 cmd_len, buf_len;
- int ifidx, ret;
- char cmd[] = "pkt_filter_add";
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return wifi_platform_set_power(dhd->adapter, on, delay_msec);
+}
- ifidx = dhd_net2idx(dhd, ndev);
- if (ifidx == DHD_BAD_IF) {
- DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
- return -ENODEV;
- }
+bool dhd_force_country_change(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
- cmd_len = sizeof(cmd);
+ if (dhd && dhd->pub.up)
+ return dhd->pub.force_country_change;
+ return FALSE;
+}
- /* Check if the program_len is more than the expected len
- * and if the program is NULL return from here.
- */
- if ((program_len > WL_APF_PROGRAM_MAX_SIZE) || (program == NULL)) {
- DHD_ERROR(("%s Invalid program_len: %d, program: %pK\n",
- __FUNCTION__, program_len, program));
- return -EINVAL;
+void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
+ wl_country_t *cspec)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+ if (!dhd->pub.is_blob)
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+ {
+#if defined(CUSTOM_COUNTRY_CODE)
+ get_customized_country_code(dhd->adapter, country_iso_code, cspec,
+ dhd->pub.dhd_cflags);
+#else
+ get_customized_country_code(dhd->adapter, country_iso_code, cspec);
+#endif /* CUSTOM_COUNTRY_CODE */
}
- buf_len = cmd_len + WL_PKT_FILTER_FIXED_LEN +
- WL_APF_PROGRAM_FIXED_LEN + program_len;
-
- buf = MALLOCZ(dhdp->osh, buf_len);
- if (unlikely(!buf)) {
- DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
- return -ENOMEM;
+#if defined(DHD_BLOB_EXISTENCE_CHECK) && !defined(CUSTOM_COUNTRY_CODE)
+ else {
+ /* Replace the ccode to XZ if ccode is undefined country */
+ if (strncmp(country_iso_code, "", WLC_CNTRY_BUF_SZ) == 0) {
+ strlcpy(country_iso_code, "XZ", WLC_CNTRY_BUF_SZ);
+ strlcpy(cspec->country_abbrev, country_iso_code, WLC_CNTRY_BUF_SZ);
+ strlcpy(cspec->ccode, country_iso_code, WLC_CNTRY_BUF_SZ);
+ DHD_ERROR(("%s: ccode change to %s\n", __FUNCTION__, country_iso_code));
+ }
}
+#endif /* DHD_BLOB_EXISTENCE_CHECK && !CUSTOM_COUNTRY_CODE */
- memcpy(buf, cmd, cmd_len);
-
- pkt_filterp = (wl_pkt_filter_t *) (buf + cmd_len);
- pkt_filterp->id = htod32(filter_id);
- pkt_filterp->negate_match = htod32(FALSE);
- pkt_filterp->type = htod32(WL_PKT_FILTER_TYPE_APF_MATCH);
+ BCM_REFERENCE(dhd);
+}
- apf_program = &pkt_filterp->u.apf_program;
- apf_program->version = htod16(WL_APF_INTERNAL_VERSION);
- apf_program->instr_len = htod16(program_len);
- memcpy(apf_program->instrs, program, program_len);
+void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+#ifdef WL_CFG80211
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#endif // endif
- ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
- if (unlikely(ret)) {
- DHD_ERROR(("%s: failed to add APF filter, id=%d, ret=%d\n",
- __FUNCTION__, filter_id, ret));
+ if (dhd && dhd->pub.up) {
+ memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
+#ifdef WL_CFG80211
+ wl_update_wiphybands(cfg, notify);
+#endif // endif
}
+}
- if (buf) {
- MFREE(dhdp->osh, buf, buf_len);
+void dhd_bus_band_set(struct net_device *dev, uint band)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+#ifdef WL_CFG80211
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#endif // endif
+ if (dhd && dhd->pub.up) {
+#ifdef WL_CFG80211
+ wl_update_wiphybands(cfg, true);
+#endif // endif
}
- return ret;
}
-static int
-__dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
- uint32 mode, uint32 enable)
+int dhd_net_set_fw_path(struct net_device *dev, char *fw)
{
- dhd_info_t *dhd = DHD_DEV_INFO(ndev);
- dhd_pub_t *dhdp = &dhd->pub;
- wl_pkt_filter_enable_t * pkt_filterp;
- char *buf;
- u32 cmd_len, buf_len;
- int ifidx, ret;
- char cmd[] = "pkt_filter_enable";
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
- ifidx = dhd_net2idx(dhd, ndev);
- if (ifidx == DHD_BAD_IF) {
- DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
- return -ENODEV;
- }
+ if (!fw || fw[0] == '\0')
+ return -EINVAL;
- cmd_len = sizeof(cmd);
- buf_len = cmd_len + sizeof(*pkt_filterp);
+ strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
+ dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
- buf = MALLOCZ(dhdp->osh, buf_len);
- if (unlikely(!buf)) {
- DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
- return -ENOMEM;
+#if defined(SOFTAP)
+ if (strstr(fw, "apsta") != NULL) {
+ DHD_INFO(("GOT APSTA FIRMWARE\n"));
+ ap_fw_loaded = TRUE;
+ } else {
+ DHD_INFO(("GOT STA FIRMWARE\n"));
+ ap_fw_loaded = FALSE;
}
+#endif // endif
+ return 0;
+}
- memcpy(buf, cmd, cmd_len);
+void dhd_net_if_lock(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ dhd_net_if_lock_local(dhd);
+}
- pkt_filterp = (wl_pkt_filter_enable_t *) (buf + cmd_len);
- pkt_filterp->id = htod32(filter_id);
- pkt_filterp->enable = htod32(enable);
+void dhd_net_if_unlock(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ dhd_net_if_unlock_local(dhd);
+}
- ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
- if (unlikely(ret)) {
- DHD_ERROR(("%s: failed to enable APF filter, id=%d, ret=%d\n",
- __FUNCTION__, filter_id, ret));
- goto exit;
- }
+static void dhd_net_if_lock_local(dhd_info_t *dhd)
+{
+ if (dhd)
+ mutex_lock(&dhd->dhd_net_if_mutex);
+}
- ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_mode", dhd_master_mode,
- WLC_SET_VAR, TRUE, ifidx);
- if (unlikely(ret)) {
- DHD_ERROR(("%s: failed to set APF filter mode, id=%d, ret=%d\n",
- __FUNCTION__, filter_id, ret));
- }
+static void dhd_net_if_unlock_local(dhd_info_t *dhd)
+{
+ if (dhd)
+ mutex_unlock(&dhd->dhd_net_if_mutex);
+}
-exit:
- if (buf) {
- MFREE(dhdp->osh, buf, buf_len);
- }
- return ret;
+static void dhd_suspend_lock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ if (dhd)
+ mutex_lock(&dhd->dhd_suspend_mutex);
}
-static int
-__dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id)
+static void dhd_suspend_unlock(dhd_pub_t *pub)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
- dhd_pub_t *dhdp = &dhd->pub;
- int ifidx, ret;
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ if (dhd)
+ mutex_unlock(&dhd->dhd_suspend_mutex);
+}
- ifidx = dhd_net2idx(dhd, ndev);
- if (ifidx == DHD_BAD_IF) {
- DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
- return -ENODEV;
- }
+unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags = 0;
- ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_delete",
- htod32(filter_id), WLC_SET_VAR, TRUE, ifidx);
- if (unlikely(ret)) {
- DHD_ERROR(("%s: failed to delete APF filter, id=%d, ret=%d\n",
- __FUNCTION__, filter_id, ret));
- }
+ if (dhd)
+ spin_lock_irqsave(&dhd->dhd_lock, flags);
- return ret;
+ return flags;
}
-void dhd_apf_lock(struct net_device *dev)
+void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- _dhd_apf_lock_local(dhd);
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd)
+ spin_unlock_irqrestore(&dhd->dhd_lock, flags);
}
-void dhd_apf_unlock(struct net_device *dev)
+/* Linux specific multipurpose spinlock API */
+void *
+dhd_os_spin_lock_init(osl_t *osh)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- _dhd_apf_unlock_local(dhd);
+ /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
+ /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
+ /* and this results in kernel asserts in internal builds */
+ spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
+ if (lock)
+ spin_lock_init(lock);
+ return ((void *)lock);
+}
+void
+dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
+{
+ if (lock)
+ MFREE(osh, lock, sizeof(spinlock_t) + 4);
}
+unsigned long
+dhd_os_spin_lock(void *lock)
+{
+ unsigned long flags = 0;
-int
-dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version)
+ if (lock)
+ spin_lock_irqsave((spinlock_t *)lock, flags);
+
+ return flags;
+}
+void
+dhd_os_spin_unlock(void *lock, unsigned long flags)
{
- dhd_info_t *dhd = DHD_DEV_INFO(ndev);
- dhd_pub_t *dhdp = &dhd->pub;
- int ifidx, ret;
+ if (lock)
+ spin_unlock_irqrestore((spinlock_t *)lock, flags);
+}
- if (!FW_SUPPORTED(dhdp, apf)) {
- DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
+void *
+dhd_os_dbgring_lock_init(osl_t *osh)
+{
+ struct mutex *mtx = NULL;
- /*
- * Notify Android framework that APF is not supported by setting
- * version as zero.
- */
- *version = 0;
- return BCME_OK;
- }
+ mtx = MALLOCZ(osh, sizeof(*mtx));
+ if (mtx)
+ mutex_init(mtx);
- ifidx = dhd_net2idx(dhd, ndev);
- if (ifidx == DHD_BAD_IF) {
- DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
- return -ENODEV;
- }
+ return mtx;
+}
- ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_ver", version,
- WLC_GET_VAR, FALSE, ifidx);
- if (unlikely(ret)) {
- DHD_ERROR(("%s: failed to get APF version, ret=%d\n",
- __FUNCTION__, ret));
+void
+dhd_os_dbgring_lock_deinit(osl_t *osh, void *mtx)
+{
+ if (mtx) {
+ mutex_destroy(mtx);
+ MFREE(osh, mtx, sizeof(struct mutex));
}
+}
- return ret;
+static int
+dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
+{
+ return (atomic_read(&dhd->pend_8021x_cnt));
}
+#define MAX_WAIT_FOR_8021X_TX 100
+
int
-dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len)
+dhd_wait_pend8021x(struct net_device *dev)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
- dhd_pub_t *dhdp = &dhd->pub;
- int ifidx, ret;
-
- if (!FW_SUPPORTED(dhdp, apf)) {
- DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
- *max_len = 0;
- return BCME_OK;
- }
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int timeout = msecs_to_jiffies(10);
+ int ntimes = MAX_WAIT_FOR_8021X_TX;
+ int pend = dhd_get_pend_8021x_cnt(dhd);
- ifidx = dhd_net2idx(dhd, ndev);
- if (ifidx == DHD_BAD_IF) {
- DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
- return -ENODEV;
+ while (ntimes && pend) {
+ if (pend) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ schedule_timeout(timeout);
+ DHD_PERIM_LOCK(&dhd->pub);
+ set_current_state(TASK_RUNNING);
+ ntimes--;
+ }
+ pend = dhd_get_pend_8021x_cnt(dhd);
}
-
- ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_size_limit", max_len,
- WLC_GET_VAR, FALSE, ifidx);
- if (unlikely(ret)) {
- DHD_ERROR(("%s: failed to get APF size limit, ret=%d\n",
- __FUNCTION__, ret));
+ if (ntimes == 0)
+ {
+ atomic_set(&dhd->pend_8021x_cnt, 0);
+ DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
}
-
- return ret;
+ return pend;
}
-int
-dhd_dev_apf_add_filter(struct net_device *ndev, u8* program,
- uint32 program_len)
+#if defined(DHD_DEBUG)
+int write_file(const char * file_name, uint32 flags, uint8 *buf, int size)
{
- dhd_info_t *dhd = DHD_DEV_INFO(ndev);
- dhd_pub_t *dhdp = &dhd->pub;
- int ret;
+ int ret = 0;
+ struct file *fp = NULL;
+ mm_segment_t old_fs;
+ loff_t pos = 0;
+ /* change to KERNEL_DS address limit */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
- DHD_APF_LOCK(ndev);
+ /* open file to write */
+ fp = filp_open(file_name, flags, 0664);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("open file error, err = %ld\n", PTR_ERR(fp)));
+ goto exit;
+ }
- /* delete, if filter already exists */
- if (dhdp->apf_set) {
- ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
- if (unlikely(ret)) {
- goto exit;
- }
- dhdp->apf_set = FALSE;
+ /* Write buf to file */
+ ret = compat_vfs_write(fp, buf, size, &pos);
+ if (ret < 0) {
+ DHD_ERROR(("write file error, err = %d\n", ret));
+ goto exit;
}
- ret = __dhd_apf_add_filter(ndev, PKT_FILTER_APF_ID, program, program_len);
- if (ret) {
+ /* Sync file from filesystem to physical media */
+ ret = vfs_fsync(fp, 0);
+ if (ret < 0) {
+ DHD_ERROR(("sync file error, error = %d\n", ret));
goto exit;
}
- dhdp->apf_set = TRUE;
+ ret = BCME_OK;
- if (dhdp->in_suspend && dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
- /* Driver is still in (early) suspend state, enable APF filter back */
- ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
- PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
- }
exit:
- DHD_APF_UNLOCK(ndev);
+ /* close file before return */
+ if (!IS_ERR(fp))
+ filp_close(fp, current->files);
+
+ /* restore previous address limit */
+ set_fs(old_fs);
return ret;
}
+#endif // endif
-int
-dhd_dev_apf_enable_filter(struct net_device *ndev)
+#ifdef DHD_DEBUG
+static void
+dhd_convert_memdump_type_to_str(uint32 type, char *buf, int substr_type)
{
- dhd_info_t *dhd = DHD_DEV_INFO(ndev);
- dhd_pub_t *dhdp = &dhd->pub;
- int ret = 0;
- bool nan_dp_active = false;
+ char *type_str = NULL;
- DHD_APF_LOCK(ndev);
-#ifdef WL_NAN
- nan_dp_active = wl_cfgnan_is_dp_active(ndev);
-#endif /* WL_NAN */
- if (dhdp->apf_set && (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE) &&
- !nan_dp_active)) {
- ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
- PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
+ switch (type) {
+ case DUMP_TYPE_RESUMED_ON_TIMEOUT:
+ type_str = "resumed_on_timeout";
+ break;
+ case DUMP_TYPE_D3_ACK_TIMEOUT:
+ type_str = "D3_ACK_timeout";
+ break;
+ case DUMP_TYPE_DONGLE_TRAP:
+ type_str = "Dongle_Trap";
+ break;
+ case DUMP_TYPE_MEMORY_CORRUPTION:
+ type_str = "Memory_Corruption";
+ break;
+ case DUMP_TYPE_PKTID_AUDIT_FAILURE:
+ type_str = "PKTID_AUDIT_Fail";
+ break;
+ case DUMP_TYPE_PKTID_INVALID:
+ type_str = "PKTID_INVALID";
+ break;
+ case DUMP_TYPE_SCAN_TIMEOUT:
+ type_str = "SCAN_timeout";
+ break;
+ case DUMP_TYPE_SCAN_BUSY:
+ type_str = "SCAN_Busy";
+ break;
+ case DUMP_TYPE_BY_SYSDUMP:
+ if (substr_type == CMD_UNWANTED) {
+ type_str = "BY_SYSDUMP_FORUSER_unwanted";
+ } else if (substr_type == CMD_DISCONNECTED) {
+ type_str = "BY_SYSDUMP_FORUSER_disconnected";
+ } else {
+ type_str = "BY_SYSDUMP_FORUSER";
+ }
+ break;
+ case DUMP_TYPE_BY_LIVELOCK:
+ type_str = "BY_LIVELOCK";
+ break;
+ case DUMP_TYPE_AP_LINKUP_FAILURE:
+ type_str = "BY_AP_LINK_FAILURE";
+ break;
+ case DUMP_TYPE_AP_ABNORMAL_ACCESS:
+ type_str = "INVALID_ACCESS";
+ break;
+ case DUMP_TYPE_RESUMED_ON_TIMEOUT_RX:
+ type_str = "ERROR_RX_TIMED_OUT";
+ break;
+ case DUMP_TYPE_RESUMED_ON_TIMEOUT_TX:
+ type_str = "ERROR_TX_TIMED_OUT";
+ break;
+ case DUMP_TYPE_CFG_VENDOR_TRIGGERED:
+ type_str = "CFG_VENDOR_TRIGGERED";
+ break;
+ case DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR:
+ type_str = "BY_INVALID_RING_RDWR";
+ break;
+ case DUMP_TYPE_IFACE_OP_FAILURE:
+ type_str = "BY_IFACE_OP_FAILURE";
+ break;
+ case DUMP_TYPE_TRANS_ID_MISMATCH:
+ type_str = "BY_TRANS_ID_MISMATCH";
+ break;
+#ifdef DEBUG_DNGL_INIT_FAIL
+ case DUMP_TYPE_DONGLE_INIT_FAILURE:
+ type_str = "DONGLE_INIT_FAIL";
+ break;
+#endif /* DEBUG_DNGL_INIT_FAIL */
+ case DUMP_TYPE_DONGLE_HOST_EVENT:
+ type_str = "BY_DONGLE_HOST_EVENT";
+ break;
+ case DUMP_TYPE_SMMU_FAULT:
+ type_str = "SMMU_FAULT";
+ break;
+ case DUMP_TYPE_BY_USER:
+ type_str = "BY_USER";
+ break;
+#ifdef DHD_ERPOM
+ case DUMP_TYPE_DUE_TO_BT:
+ type_str = "DUE_TO_BT";
+ break;
+#endif /* DHD_ERPOM */
+ case DUMP_TYPE_LOGSET_BEYOND_RANGE:
+ type_str = "LOGSET_BEYOND_RANGE";
+ break;
+ case DUMP_TYPE_CTO_RECOVERY:
+ type_str = "CTO_RECOVERY";
+ break;
+ case DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR:
+ type_str = "SEQUENTIAL_PRIVCMD_ERROR";
+ break;
+ case DUMP_TYPE_PROXD_TIMEOUT:
+ type_str = "PROXD_TIMEOUT";
+ break;
+ case DUMP_TYPE_PKTID_POOL_DEPLETED:
+ type_str = "PKTID_POOL_DEPLETED";
+ break;
+ default:
+ type_str = "Unknown_type";
+ break;
}
- DHD_APF_UNLOCK(ndev);
-
- return ret;
+ strncpy(buf, type_str, strlen(type_str));
+ buf[strlen(type_str)] = 0;
}
-int
-dhd_dev_apf_disable_filter(struct net_device *ndev)
+void
+dhd_get_memdump_filename(struct net_device *ndev, char *memdump_path, int len, char *fname)
{
- dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+ char memdump_type[32];
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
dhd_pub_t *dhdp = &dhd->pub;
- int ret = 0;
-
- DHD_APF_LOCK(ndev);
- if (dhdp->apf_set) {
- ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
- PKT_FILTER_MODE_FORWARD_ON_MATCH, FALSE);
+ /* Init file name */
+ memset(memdump_path, 0, len);
+ memset(memdump_type, 0, sizeof(memdump_type));
+ dhd_convert_memdump_type_to_str(dhdp->memdump_type, memdump_type, dhdp->debug_dump_subcmd);
+ clear_debug_dump_time(dhdp->debug_dump_time_str);
+ get_debug_dump_time(dhdp->debug_dump_time_str);
+ snprintf(memdump_path, len, "%s%s_%s_" "%s",
+ DHD_COMMON_DUMP_PATH, fname, memdump_type, dhdp->debug_dump_time_str);
+ if (strstr(fname, "sssr_dump")) {
+ DHD_SSSR_PRINT_FILEPATH(dhdp, memdump_path);
+ } else {
+ DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__,
+ memdump_path, FILE_NAME_HAL_TAG));
}
-
- DHD_APF_UNLOCK(ndev);
-
- return ret;
}
int
-dhd_dev_apf_delete_filter(struct net_device *ndev)
+write_dump_to_file(dhd_pub_t *dhd, uint8 *buf, int size, char *fname)
{
- dhd_info_t *dhd = DHD_DEV_INFO(ndev);
- dhd_pub_t *dhdp = &dhd->pub;
int ret = 0;
+ char memdump_path[128];
+ char memdump_type[32];
+ uint32 file_mode;
- DHD_APF_LOCK(ndev);
-
- if (dhdp->apf_set) {
- ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
- if (!ret) {
- dhdp->apf_set = FALSE;
- }
- }
-
- DHD_APF_UNLOCK(ndev);
+ /* Init file name */
+ memset(memdump_path, 0, sizeof(memdump_path));
+ memset(memdump_type, 0, sizeof(memdump_type));
+ dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type, dhd->debug_dump_subcmd);
+ clear_debug_dump_time(dhd->debug_dump_time_str);
+ get_debug_dump_time(dhd->debug_dump_time_str);
+ snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
+ DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
+ file_mode = O_CREAT | O_WRONLY | O_SYNC;
- return ret;
-}
-#endif /* PKT_FILTER_SUPPORT && APF */
+ /* print SOCRAM dump file path */
+ DHD_ERROR(("%s: file_path = %s\n", __FUNCTION__, memdump_path));
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
-static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
-{
- dhd_info_t *dhd;
- struct net_device *dev;
+#ifdef DHD_LOG_DUMP
+ dhd_print_buf_addr(dhd, "write_dump_to_file", buf, size);
+#endif /* DHD_LOG_DUMP */
- dhd = (dhd_info_t *)dhd_info;
- if (!dhd || !dhd->iflist[0])
- return;
- dev = dhd->iflist[0]->net;
+ /* Write file */
+ ret = write_file(memdump_path, file_mode, buf, size);
- if (dev) {
- /*
- * For HW2, dev_close need to be done to recover
- * from upper layer after hang. For Interposer skip
- * dev_close so that dhd iovars can be used to take
- * socramdump after crash, also skip for HW4 as
- * handling of hang event is different
- */
-#if defined(WL_WIRELESS_EXT)
- wl_iw_send_priv_event(dev, "HANG");
-#endif // endif
-#if defined(WL_CFG80211)
- wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
-#endif // endif
+#ifdef DHD_DUMP_MNGR
+ if (ret == BCME_OK) {
+ dhd_dump_file_manage_enqueue(dhd, memdump_path, fname);
}
-}
+#endif /* DHD_DUMP_MNGR */
-#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
-extern dhd_pub_t *link_recovery;
-void dhd_host_recover_link(void)
-{
- DHD_ERROR(("****** %s ******\n", __FUNCTION__));
- link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
- dhd_bus_set_linkdown(link_recovery, TRUE);
- dhd_os_send_hang_message(link_recovery);
+ return ret;
}
-EXPORT_SYMBOL(dhd_host_recover_link);
-#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
+#endif /* DHD_DEBUG */
-int dhd_os_send_hang_message(dhd_pub_t *dhdp)
+int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
int ret = 0;
- if (dhdp) {
-#ifdef WL_CFG80211
- struct net_device *primary_ndev;
- struct bcm_cfg80211 *cfg;
-
- primary_ndev = dhd_linux_get_primary_netdev(dhdp);
- if (!primary_ndev) {
- DHD_ERROR(("%s: Cannot find primary netdev\n",
- __FUNCTION__));
- return -ENODEV;
- }
-
- cfg = wl_get_cfg(primary_ndev);
- if (!cfg) {
- DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
- return -EINVAL;
- }
-
- /* Skip sending HANG event to framework if driver is not ready */
- if (!wl_get_drv_status(cfg, READY, primary_ndev)) {
- DHD_ERROR(("%s: device is not ready\n", __FUNCTION__));
- return -ENODEV;
- }
-#endif /* WL_CFG80211 */
-
- if (!dhdp->hang_was_sent) {
-#if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG)
- dhdp->hang_counts++;
- if (dhdp->hang_counts >= MAX_CONSECUTIVE_HANG_COUNTS) {
- DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n",
- __func__, dhdp->hang_counts));
- BUG_ON(1);
- }
-#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
-#ifdef DHD_DEBUG_UART
- /* If PCIe lane has broken, execute the debug uart application
- * to gether a ramdump data from dongle via uart
- */
- if (!dhdp->info->duart_execute) {
- dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
- (void *)dhdp, DHD_WQ_WORK_DEBUG_UART_DUMP,
- dhd_debug_uart_exec_rd, DHD_WQ_WORK_PRIORITY_HIGH);
- }
-#endif /* DHD_DEBUG_UART */
- dhdp->hang_was_sent = 1;
-#ifdef BT_OVER_SDIO
- dhdp->is_bt_recovery_required = TRUE;
+ if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
+ dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
+#ifdef CONFIG_HAS_WAKELOCK
+ if (dhd->wakelock_rx_timeout_enable)
+ wake_lock_timeout(&dhd->wl_rxwake,
+ msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
+ if (dhd->wakelock_ctrl_timeout_enable)
+ wake_lock_timeout(&dhd->wl_ctrlwake,
+ msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
#endif // endif
- dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
- DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WQ_WORK_PRIORITY_HIGH);
- DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d s=%d\n", __FUNCTION__,
- dhdp->rxcnt_timeout, dhdp->txcnt_timeout, dhdp->busstate));
- }
+ dhd->wakelock_rx_timeout_enable = 0;
+ dhd->wakelock_ctrl_timeout_enable = 0;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
return ret;
}
-int net_os_send_hang_message(struct net_device *dev)
+int net_os_wake_lock_timeout(struct net_device *dev)
{
dhd_info_t *dhd = DHD_DEV_INFO(dev);
int ret = 0;
- if (dhd) {
- /* Report FW problem when enabled */
- if (dhd->pub.hang_report) {
-#ifdef BT_OVER_SDIO
- if (netif_running(dev)) {
-#endif /* BT_OVER_SDIO */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
- ret = dhd_os_send_hang_message(&dhd->pub);
-#else
- ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
-#endif // endif
-#ifdef BT_OVER_SDIO
- }
- DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__));
- bcmsdh_btsdio_process_dhd_hang_notification(!netif_running(dev));
-#endif /* BT_OVER_SDIO */
- } else {
- DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
- __FUNCTION__));
- }
- }
+ if (dhd)
+ ret = dhd_os_wake_lock_timeout(&dhd->pub);
return ret;
}
-int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num)
+int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
{
- dhd_info_t *dhd = NULL;
- dhd_pub_t *dhdp = NULL;
- int reason;
-
- dhd = DHD_DEV_INFO(dev);
- if (dhd) {
- dhdp = &dhd->pub;
- }
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
- if (!dhd || !dhdp) {
- return 0;
+ if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ if (val > dhd->wakelock_rx_timeout_enable)
+ dhd->wakelock_rx_timeout_enable = val;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
+ return 0;
+}
- reason = bcm_strtoul(string_num, NULL, 0);
- DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason));
+int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
- if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
- reason = 0;
+ if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ if (val > dhd->wakelock_ctrl_timeout_enable)
+ dhd->wakelock_ctrl_timeout_enable = val;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
+ return 0;
+}
- dhdp->hang_reason = reason;
+int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
- return net_os_send_hang_message(dev);
+ if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ dhd->wakelock_ctrl_timeout_enable = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+ if (wake_lock_active(&dhd->wl_ctrlwake))
+ wake_unlock(&dhd->wl_ctrlwake);
+#endif // endif
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ }
+ return 0;
}
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
-int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
+int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
{
dhd_info_t *dhd = DHD_DEV_INFO(dev);
- return wifi_platform_set_power(dhd->adapter, on, delay_msec);
+ int ret = 0;
+
+ if (dhd)
+ ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
+ return ret;
}
-bool dhd_force_country_change(struct net_device *dev)
+int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
{
dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret = 0;
- if (dhd && dhd->pub.up)
- return dhd->pub.force_country_change;
- return FALSE;
+ if (dhd)
+ ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
+ return ret;
}
-void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
- wl_country_t *cspec)
+#if defined(DHD_TRACE_WAKE_LOCK)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+#include <linux/hashtable.h>
+#else
+#include <linux/hash.h>
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+/* Define 2^5 = 32 bucket size hash table */
+DEFINE_HASHTABLE(wklock_history, 5);
+#else
+/* Define 2^5 = 32 bucket size hash table */
+struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+
+atomic_t trace_wklock_onoff;
+typedef enum dhd_wklock_type {
+ DHD_WAKE_LOCK,
+ DHD_WAKE_UNLOCK,
+ DHD_WAIVE_LOCK,
+ DHD_RESTORE_LOCK
+} dhd_wklock_t;
+
+struct wk_trace_record {
+ unsigned long addr; /* Address of the instruction */
+ dhd_wklock_t lock_type; /* lock_type */
+ unsigned long long counter; /* counter information */
+ struct hlist_node wklock_node; /* hash node */
+};
+
+static struct wk_trace_record *find_wklock_entry(unsigned long addr)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
-#if defined(DHD_BLOB_EXISTENCE_CHECK)
- if (!dhd->pub.is_blob)
-#endif /* DHD_BLOB_EXISTENCE_CHECK */
- {
-#if defined(CUSTOM_COUNTRY_CODE)
- get_customized_country_code(dhd->adapter, country_iso_code, cspec,
- dhd->pub.dhd_cflags);
+ struct wk_trace_record *wklock_info;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
#else
- get_customized_country_code(dhd->adapter, country_iso_code, cspec);
-#endif /* CUSTOM_COUNTRY_CODE */
- }
-#if defined(DHD_BLOB_EXISTENCE_CHECK) && !defined(CUSTOM_COUNTRY_CODE)
- else {
- /* Replace the ccode to XZ if ccode is undefined country */
- if (strncmp(country_iso_code, "", WLC_CNTRY_BUF_SZ) == 0) {
- strlcpy(country_iso_code, "XZ", WLC_CNTRY_BUF_SZ);
- strlcpy(cspec->country_abbrev, country_iso_code, WLC_CNTRY_BUF_SZ);
- strlcpy(cspec->ccode, country_iso_code, WLC_CNTRY_BUF_SZ);
- DHD_ERROR(("%s: ccode change to %s\n", __FUNCTION__, country_iso_code));
+ struct hlist_node *entry;
+ int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
+ hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+ {
+ if (wklock_info->addr == addr) {
+ return wklock_info;
}
}
-#endif /* DHD_BLOB_EXISTENCE_CHECK && !CUSTOM_COUNTRY_CODE */
-
- BCM_REFERENCE(dhd);
+ return NULL;
}
-void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
-#ifdef WL_CFG80211
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
-#endif // endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+#define HASH_ADD(hashtable, node, key) \
+ do { \
+ hash_add(hashtable, node, key); \
+ } while (0);
+#else
+#define HASH_ADD(hashtable, node, key) \
+ do { \
+ int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
+ hlist_add_head(node, &hashtable[index]); \
+ } while (0);
+#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
- if (dhd && dhd->pub.up) {
- memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
-#ifdef WL_CFG80211
- wl_update_wiphybands(cfg, notify);
-#endif // endif
- }
-}
+#define STORE_WKLOCK_RECORD(wklock_type) \
+ do { \
+ struct wk_trace_record *wklock_info = NULL; \
+ unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
+ wklock_info = find_wklock_entry(func_addr); \
+ if (wklock_info) { \
+ if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
+ wklock_info->counter = dhd->wakelock_counter; \
+ } else { \
+ wklock_info->counter++; \
+ } \
+ } else { \
+ wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
+ if (!wklock_info) {\
+ printk("Can't allocate wk_trace_record \n"); \
+ } else { \
+ wklock_info->addr = func_addr; \
+ wklock_info->lock_type = wklock_type; \
+ if (wklock_type == DHD_WAIVE_LOCK || \
+ wklock_type == DHD_RESTORE_LOCK) { \
+ wklock_info->counter = dhd->wakelock_counter; \
+ } else { \
+ wklock_info->counter++; \
+ } \
+ HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
+ } \
+ } \
+ } while (0);
-void dhd_bus_band_set(struct net_device *dev, uint band)
+static inline void dhd_wk_lock_rec_dump(void)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
-#ifdef WL_CFG80211
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
-#endif // endif
- if (dhd && dhd->pub.up) {
-#ifdef WL_CFG80211
- wl_update_wiphybands(cfg, true);
-#endif // endif
- }
+ int bkt;
+ struct wk_trace_record *wklock_info;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
+#else
+ struct hlist_node *entry = NULL;
+ int max_index = ARRAY_SIZE(wklock_history);
+ for (bkt = 0; bkt < max_index; bkt++)
+ hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node)
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+ {
+ switch (wklock_info->lock_type) {
+ case DHD_WAKE_LOCK:
+ printk("wakelock lock : %pS lock_counter : %llu \n",
+ (void *)wklock_info->addr, wklock_info->counter);
+ break;
+ case DHD_WAKE_UNLOCK:
+ printk("wakelock unlock : %pS, unlock_counter : %llu \n",
+ (void *)wklock_info->addr, wklock_info->counter);
+ break;
+ case DHD_WAIVE_LOCK:
+ printk("wakelock waive : %pS before_waive : %llu \n",
+ (void *)wklock_info->addr, wklock_info->counter);
+ break;
+ case DHD_RESTORE_LOCK:
+ printk("wakelock restore : %pS, after_waive : %llu \n",
+ (void *)wklock_info->addr, wklock_info->counter);
+ break;
+ }
+ }
}
-int dhd_net_set_fw_path(struct net_device *dev, char *fw)
+static void dhd_wk_lock_trace_init(struct dhd_info *dhd)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
-
- if (!fw || fw[0] == '\0')
- return -EINVAL;
-
- strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
- dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
+ unsigned long flags;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
+ int i;
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
-#if defined(SOFTAP)
- if (strstr(fw, "apsta") != NULL) {
- DHD_INFO(("GOT APSTA FIRMWARE\n"));
- ap_fw_loaded = TRUE;
- } else {
- DHD_INFO(("GOT STA FIRMWARE\n"));
- ap_fw_loaded = FALSE;
- }
-#endif // endif
- return 0;
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ hash_init(wklock_history);
+#else
+ for (i = 0; i < ARRAY_SIZE(wklock_history); i++)
+ INIT_HLIST_HEAD(&wklock_history[i]);
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ atomic_set(&trace_wklock_onoff, 1);
}
-void dhd_net_if_lock(struct net_device *dev)
+static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- dhd_net_if_lock_local(dhd);
-}
+ int bkt;
+ struct wk_trace_record *wklock_info;
+ struct hlist_node *tmp;
+ unsigned long flags;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
+ struct hlist_node *entry = NULL;
+ int max_index = ARRAY_SIZE(wklock_history);
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
-void dhd_net_if_unlock(struct net_device *dev)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- dhd_net_if_unlock_local(dhd);
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node)
+#else
+ for (bkt = 0; bkt < max_index; bkt++)
+ hlist_for_each_entry_safe(wklock_info, entry, tmp,
+ &wklock_history[bkt], wklock_node)
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
+ {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ hash_del(&wklock_info->wklock_node);
+#else
+ hlist_del_init(&wklock_info->wklock_node);
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
+ kfree(wklock_info);
+ }
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
-static void dhd_net_if_lock_local(dhd_info_t *dhd)
+void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
- if (dhd)
- mutex_lock(&dhd->dhd_net_if_mutex);
-#endif // endif
-}
+ dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+ unsigned long flags;
-static void dhd_net_if_unlock_local(dhd_info_t *dhd)
-{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
- if (dhd)
- mutex_unlock(&dhd->dhd_net_if_mutex);
-#endif // endif
-}
+ printk(KERN_ERR"DHD Printing wl_wake Lock/Unlock Record \r\n");
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ dhd_wk_lock_rec_dump();
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
-static void dhd_suspend_lock(dhd_pub_t *pub)
-{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- if (dhd)
- mutex_lock(&dhd->dhd_suspend_mutex);
-#endif // endif
}
+#else
+#define STORE_WKLOCK_RECORD(wklock_type)
+#endif /* ! DHD_TRACE_WAKE_LOCK */
-static void dhd_suspend_unlock(dhd_pub_t *pub)
+int dhd_os_wake_lock(dhd_pub_t *pub)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- if (dhd)
- mutex_unlock(&dhd->dhd_suspend_mutex);
+ unsigned long flags;
+ int ret = 0;
+
+ if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock(&dhd->wl_wifi);
+#elif defined(BCMSDIO)
+ dhd_bus_dev_pm_stay_awake(pub);
#endif // endif
+ }
+#ifdef DHD_TRACE_WAKE_LOCK
+ if (atomic_read(&trace_wklock_onoff)) {
+ STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
+ }
+#endif /* DHD_TRACE_WAKE_LOCK */
+ dhd->wakelock_counter++;
+ ret = dhd->wakelock_counter;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ }
+
+ return ret;
}
-unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
+void dhd_event_wake_lock(dhd_pub_t *pub)
{
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- unsigned long flags = 0;
-
- if (dhd)
- spin_lock_irqsave(&dhd->dhd_lock, flags);
- return flags;
+ if (dhd) {
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock(&dhd->wl_evtwake);
+#elif defined(BCMSDIO)
+ dhd_bus_dev_pm_stay_awake(pub);
+#endif // endif
+ }
}
-void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
+void
+dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val)
{
+#ifdef CONFIG_HAS_WAKELOCK
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- if (dhd)
- spin_unlock_irqrestore(&dhd->dhd_lock, flags);
+ if (dhd) {
+ wake_lock_timeout(&dhd->wl_pmwake, msecs_to_jiffies(val));
+ }
+#endif /* CONFIG_HAS_WAKE_LOCK */
}
-/* Linux specific multipurpose spinlock API */
-void *
-dhd_os_spin_lock_init(osl_t *osh)
-{
- /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
- /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
- /* and this results in kernel asserts in internal builds */
- spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
- if (lock)
- spin_lock_init(lock);
- return ((void *)lock);
-}
void
-dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
-{
- if (lock)
- MFREE(osh, lock, sizeof(spinlock_t) + 4);
-}
-unsigned long
-dhd_os_spin_lock(void *lock)
+dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val)
{
- unsigned long flags = 0;
-
- if (lock)
- spin_lock_irqsave((spinlock_t *)lock, flags);
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- return flags;
-}
-void
-dhd_os_spin_unlock(void *lock, unsigned long flags)
-{
- if (lock)
- spin_unlock_irqrestore((spinlock_t *)lock, flags);
+ if (dhd) {
+ wake_lock_timeout(&dhd->wl_txflwake, msecs_to_jiffies(val));
+ }
+#endif /* CONFIG_HAS_WAKE_LOCK */
}
-void *
-dhd_os_dbgring_lock_init(osl_t *osh)
+int net_os_wake_lock(struct net_device *dev)
{
- struct mutex *mtx = NULL;
-
- mtx = MALLOCZ(osh, sizeof(*mtx));
- if (mtx)
- mutex_init(mtx);
-
- return mtx;
-}
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret = 0;
-void
-dhd_os_dbgring_lock_deinit(osl_t *osh, void *mtx)
-{
- if (mtx) {
- mutex_destroy(mtx);
- MFREE(osh, mtx, sizeof(struct mutex));
- }
+ if (dhd)
+ ret = dhd_os_wake_lock(&dhd->pub);
+ return ret;
}
-static int
-dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
+int dhd_os_wake_unlock(dhd_pub_t *pub)
{
- return (atomic_read(&dhd->pend_8021x_cnt));
-}
-
-#define MAX_WAIT_FOR_8021X_TX 100
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
-int
-dhd_wait_pend8021x(struct net_device *dev)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- int timeout = msecs_to_jiffies(10);
- int ntimes = MAX_WAIT_FOR_8021X_TX;
- int pend = dhd_get_pend_8021x_cnt(dhd);
+ dhd_os_wake_lock_timeout(pub);
+ if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
- while (ntimes && pend) {
- if (pend) {
- set_current_state(TASK_INTERRUPTIBLE);
- DHD_PERIM_UNLOCK(&dhd->pub);
- schedule_timeout(timeout);
- DHD_PERIM_LOCK(&dhd->pub);
- set_current_state(TASK_RUNNING);
- ntimes--;
+ if (dhd->wakelock_counter > 0) {
+ dhd->wakelock_counter--;
+#ifdef DHD_TRACE_WAKE_LOCK
+ if (atomic_read(&trace_wklock_onoff)) {
+ STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK);
+ }
+#endif /* DHD_TRACE_WAKE_LOCK */
+ if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_unlock(&dhd->wl_wifi);
+#elif defined(BCMSDIO)
+ dhd_bus_dev_pm_relax(pub);
+#endif // endif
+ }
+ ret = dhd->wakelock_counter;
}
- pend = dhd_get_pend_8021x_cnt(dhd);
- }
- if (ntimes == 0)
- {
- atomic_set(&dhd->pend_8021x_cnt, 0);
- DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
- return pend;
+ return ret;
}
-#if defined(DHD_DEBUG)
-int write_file(const char * file_name, uint32 flags, uint8 *buf, int size)
+void dhd_event_wake_unlock(dhd_pub_t *pub)
{
- int ret = 0;
- struct file *fp = NULL;
- mm_segment_t old_fs;
- loff_t pos = 0;
- /* change to KERNEL_DS address limit */
- old_fs = get_fs();
- set_fs(KERNEL_DS);
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- /* open file to write */
- fp = filp_open(file_name, flags, 0664);
- if (IS_ERR(fp)) {
- DHD_ERROR(("open file error, err = %ld\n", PTR_ERR(fp)));
- goto exit;
+ if (dhd) {
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_unlock(&dhd->wl_evtwake);
+#elif defined(BCMSDIO)
+ dhd_bus_dev_pm_relax(pub);
+#endif // endif
}
+}
- /* Write buf to file */
- ret = vfs_write(fp, buf, size, &pos);
- if (ret < 0) {
- DHD_ERROR(("write file error, err = %d\n", ret));
- goto exit;
+void dhd_pm_wake_unlock(dhd_pub_t *pub)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ /* if wl_pmwake is active, unlock it */
+ if (wake_lock_active(&dhd->wl_pmwake)) {
+ wake_unlock(&dhd->wl_pmwake);
+ }
}
+#endif /* CONFIG_HAS_WAKELOCK */
+}
- /* Sync file from filesystem to physical media */
- ret = vfs_fsync(fp, 0);
- if (ret < 0) {
- DHD_ERROR(("sync file error, error = %d\n", ret));
- goto exit;
+void dhd_txfl_wake_unlock(dhd_pub_t *pub)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ /* if wl_txflwake is active, unlock it */
+ if (wake_lock_active(&dhd->wl_txflwake)) {
+ wake_unlock(&dhd->wl_txflwake);
+ }
}
- ret = BCME_OK;
+#endif /* CONFIG_HAS_WAKELOCK */
+}
-exit:
- /* close file before return */
- if (!IS_ERR(fp))
- filp_close(fp, current->files);
+int dhd_os_check_wakelock(dhd_pub_t *pub)
+{
+#if defined(CONFIG_HAS_WAKELOCK) || defined(BCMSDIO)
+ dhd_info_t *dhd;
- /* restore previous address limit */
- set_fs(old_fs);
+ if (!pub)
+ return 0;
+ dhd = (dhd_info_t *)(pub->info);
+#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
- return ret;
-}
+#ifdef CONFIG_HAS_WAKELOCK
+ /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
+ if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
+ (wake_lock_active(&dhd->wl_wdwake))))
+ return 1;
+#elif defined(BCMSDIO)
+ if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
+ return 1;
#endif // endif
+ return 0;
+}
-#ifdef DHD_DEBUG
-static void
-dhd_convert_memdump_type_to_str(uint32 type, char *buf, int substr_type)
+int
+dhd_os_check_wakelock_all(dhd_pub_t *pub)
{
- char *type_str = NULL;
+#if defined(CONFIG_HAS_WAKELOCK) || defined(BCMSDIO)
+#if defined(CONFIG_HAS_WAKELOCK)
+ int l1, l2, l3, l4, l7, l8, l9;
+ int l5 = 0, l6 = 0;
+ int c, lock_active;
+#endif /* CONFIG_HAS_WAKELOCK */
+ dhd_info_t *dhd;
- switch (type) {
- case DUMP_TYPE_RESUMED_ON_TIMEOUT:
- type_str = "resumed_on_timeout";
- break;
- case DUMP_TYPE_D3_ACK_TIMEOUT:
- type_str = "D3_ACK_timeout";
- break;
- case DUMP_TYPE_DONGLE_TRAP:
- type_str = "Dongle_Trap";
- break;
- case DUMP_TYPE_MEMORY_CORRUPTION:
- type_str = "Memory_Corruption";
- break;
- case DUMP_TYPE_PKTID_AUDIT_FAILURE:
- type_str = "PKTID_AUDIT_Fail";
- break;
- case DUMP_TYPE_PKTID_INVALID:
- type_str = "PKTID_INVALID";
- break;
- case DUMP_TYPE_SCAN_TIMEOUT:
- type_str = "SCAN_timeout";
- break;
- case DUMP_TYPE_SCAN_BUSY:
- type_str = "SCAN_Busy";
- break;
- case DUMP_TYPE_BY_SYSDUMP:
- if (substr_type == CMD_UNWANTED) {
- type_str = "BY_SYSDUMP_FORUSER_unwanted";
- } else if (substr_type == CMD_DISCONNECTED) {
- type_str = "BY_SYSDUMP_FORUSER_disconnected";
- } else {
- type_str = "BY_SYSDUMP_FORUSER";
- }
- break;
- case DUMP_TYPE_BY_LIVELOCK:
- type_str = "BY_LIVELOCK";
- break;
- case DUMP_TYPE_AP_LINKUP_FAILURE:
- type_str = "BY_AP_LINK_FAILURE";
- break;
- case DUMP_TYPE_AP_ABNORMAL_ACCESS:
- type_str = "INVALID_ACCESS";
- break;
- case DUMP_TYPE_RESUMED_ON_TIMEOUT_RX:
- type_str = "ERROR_RX_TIMED_OUT";
- break;
- case DUMP_TYPE_RESUMED_ON_TIMEOUT_TX:
- type_str = "ERROR_TX_TIMED_OUT";
- break;
- case DUMP_TYPE_CFG_VENDOR_TRIGGERED:
- type_str = "CFG_VENDOR_TRIGGERED";
- break;
- case DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR:
- type_str = "BY_INVALID_RING_RDWR";
- break;
- case DUMP_TYPE_IFACE_OP_FAILURE:
- type_str = "BY_IFACE_OP_FAILURE";
- break;
- case DUMP_TYPE_TRANS_ID_MISMATCH:
- type_str = "BY_TRANS_ID_MISMATCH";
- break;
-#ifdef DEBUG_DNGL_INIT_FAIL
- case DUMP_TYPE_DONGLE_INIT_FAILURE:
- type_str = "DONGLE_INIT_FAIL";
- break;
-#endif /* DEBUG_DNGL_INIT_FAIL */
- case DUMP_TYPE_DONGLE_HOST_EVENT:
- type_str = "BY_DONGLE_HOST_EVENT";
- break;
- case DUMP_TYPE_SMMU_FAULT:
- type_str = "SMMU_FAULT";
- break;
- case DUMP_TYPE_BY_USER:
- type_str = "BY_USER";
- break;
-#ifdef DHD_ERPOM
- case DUMP_TYPE_DUE_TO_BT:
- type_str = "DUE_TO_BT";
- break;
-#endif /* DHD_ERPOM */
- case DUMP_TYPE_LOGSET_BEYOND_RANGE:
- type_str = "LOGSET_BEYOND_RANGE";
- break;
- default:
- type_str = "Unknown_type";
- break;
+ if (!pub) {
+ return 0;
+ }
+ dhd = (dhd_info_t *)(pub->info);
+ if (!dhd) {
+ return 0;
}
+#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
- strncpy(buf, type_str, strlen(type_str));
- buf[strlen(type_str)] = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+ c = dhd->wakelock_counter;
+ l1 = wake_lock_active(&dhd->wl_wifi);
+ l2 = wake_lock_active(&dhd->wl_wdwake);
+ l3 = wake_lock_active(&dhd->wl_rxwake);
+ l4 = wake_lock_active(&dhd->wl_ctrlwake);
+ l7 = wake_lock_active(&dhd->wl_evtwake);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ l5 = wake_lock_active(&dhd->wl_intrwake);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+ l6 = wake_lock_active(&dhd->wl_scanwake);
+#endif /* DHD_USE_SCAN_WAKELOCK */
+ l8 = wake_lock_active(&dhd->wl_pmwake);
+ l9 = wake_lock_active(&dhd->wl_txflwake);
+ lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7 || l8 || l9);
+
+ /* Indicate to the Host to avoid going to suspend if internal locks are up */
+ if (lock_active) {
+ DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
+ "ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d\n",
+ __FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7, l8, l9));
+ return 1;
+ }
+#elif defined(BCMSDIO)
+ if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
+ return 1;
+ }
+#endif /* defined(BCMSDIO) */
+ return 0;
}
-int
-write_dump_to_file(dhd_pub_t *dhd, uint8 *buf, int size, char *fname)
+int net_os_wake_unlock(struct net_device *dev)
{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
int ret = 0;
- char memdump_path[128];
- char memdump_type[32];
- struct timeval curtime;
- uint32 file_mode;
-
- /* Init file name */
- memset(memdump_path, 0, sizeof(memdump_path));
- memset(memdump_type, 0, sizeof(memdump_type));
- do_gettimeofday(&curtime);
- dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type, dhd->debug_dump_subcmd);
- snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
- "/data/misc/wifi/", fname, memdump_type,
- (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
- file_mode = O_CREAT | O_WRONLY | O_SYNC;
-
- /* print SOCRAM dump file path */
- DHD_ERROR(("%s: file_path = %s\n", __FUNCTION__, memdump_path));
-#ifdef DHD_LOG_DUMP
- dhd_print_buf_addr(dhd, "write_dump_to_file", buf, size);
-#endif /* DHD_LOG_DUMP */
+ if (dhd)
+ ret = dhd_os_wake_unlock(&dhd->pub);
+ return ret;
+}
- /* Write file */
- ret = write_file(memdump_path, file_mode, buf, size);
+int dhd_os_wd_wake_lock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
-#ifdef DHD_DUMP_MNGR
- if (ret == BCME_OK) {
- dhd_dump_file_manage_enqueue(dhd, memdump_path, fname);
+ if (dhd) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ if (dhd->wakelock_wd_counter == 0 && !dhd->waive_wakelock) {
+#ifdef CONFIG_HAS_WAKELOCK
+ /* if wakelock_wd_counter was never used : lock it at once */
+ wake_lock(&dhd->wl_wdwake);
+#endif // endif
+ }
+ dhd->wakelock_wd_counter++;
+ ret = dhd->wakelock_wd_counter;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
-#endif /* DHD_DUMP_MNGR */
-
return ret;
}
-#endif /* DHD_DEBUG */
-int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
+int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
{
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
unsigned long flags;
int ret = 0;
- if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ if (dhd) {
spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
- ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
- dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
+ if (dhd->wakelock_wd_counter > 0) {
+ dhd->wakelock_wd_counter = 0;
+ if (!dhd->waive_wakelock) {
#ifdef CONFIG_HAS_WAKELOCK
- if (dhd->wakelock_rx_timeout_enable)
- wake_lock_timeout(&dhd->wl_rxwake,
- msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
- if (dhd->wakelock_ctrl_timeout_enable)
- wake_lock_timeout(&dhd->wl_ctrlwake,
- msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
+ wake_unlock(&dhd->wl_wdwake);
#endif // endif
- dhd->wakelock_rx_timeout_enable = 0;
- dhd->wakelock_ctrl_timeout_enable = 0;
+ }
+ }
spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
return ret;
}
-int net_os_wake_lock_timeout(struct net_device *dev)
+#ifdef BCMPCIE_OOB_HOST_WAKE
+void
+dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- int ret = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- if (dhd)
- ret = dhd_os_wake_lock_timeout(&dhd->pub);
- return ret;
+ if (dhd) {
+ wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
+ }
+#endif /* CONFIG_HAS_WAKELOCK */
}
-int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
+void
+dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
{
+#ifdef CONFIG_HAS_WAKELOCK
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- unsigned long flags;
- if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
- if (val > dhd->wakelock_rx_timeout_enable)
- dhd->wakelock_rx_timeout_enable = val;
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ if (dhd) {
+ /* if wl_intrwake is active, unlock it */
+ if (wake_lock_active(&dhd->wl_intrwake)) {
+ wake_unlock(&dhd->wl_intrwake);
+ }
}
- return 0;
+#endif /* CONFIG_HAS_WAKELOCK */
}
+#endif /* BCMPCIE_OOB_HOST_WAKE */
-int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
+#ifdef DHD_USE_SCAN_WAKELOCK
+void
+dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val)
{
+#ifdef CONFIG_HAS_WAKELOCK
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- unsigned long flags;
- if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
- if (val > dhd->wakelock_ctrl_timeout_enable)
- dhd->wakelock_ctrl_timeout_enable = val;
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ if (dhd) {
+ wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
}
- return 0;
+#endif /* CONFIG_HAS_WAKELOCK */
}
-int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
+void
+dhd_os_scan_wake_unlock(dhd_pub_t *pub)
{
+#ifdef CONFIG_HAS_WAKELOCK
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- unsigned long flags;
- if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
- dhd->wakelock_ctrl_timeout_enable = 0;
-#ifdef CONFIG_HAS_WAKELOCK
- if (wake_lock_active(&dhd->wl_ctrlwake))
- wake_unlock(&dhd->wl_ctrlwake);
-#endif // endif
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ if (dhd) {
+ /* if wl_scanwake is active, unlock it */
+ if (wake_lock_active(&dhd->wl_scanwake)) {
+ wake_unlock(&dhd->wl_scanwake);
+ }
}
- return 0;
+#endif /* CONFIG_HAS_WAKELOCK */
}
+#endif /* DHD_USE_SCAN_WAKELOCK */
-int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
+/* waive wakelocks for operations such as IOVARs in suspend function, must be closed
+ * by a paired function call to dhd_wakelock_restore. returns current wakelock counter
+ */
+int dhd_os_wake_lock_waive(dhd_pub_t *pub)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
int ret = 0;
- if (dhd)
- ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
+ if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+
+ /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
+ if (dhd->waive_wakelock == FALSE) {
+#ifdef DHD_TRACE_WAKE_LOCK
+ if (atomic_read(&trace_wklock_onoff)) {
+ STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK);
+ }
+#endif /* DHD_TRACE_WAKE_LOCK */
+ /* record current lock status */
+ dhd->wakelock_before_waive = dhd->wakelock_counter;
+ dhd->waive_wakelock = TRUE;
+ }
+ ret = dhd->wakelock_wd_counter;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ }
return ret;
}
-int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
+int dhd_os_wake_lock_restore(dhd_pub_t *pub)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
int ret = 0;
- if (dhd)
- ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
- return ret;
-}
-
-#if defined(DHD_TRACE_WAKE_LOCK)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
-#include <linux/hashtable.h>
-#else
-#include <linux/hash.h>
-#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+ if (!dhd)
+ return 0;
+ if ((dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) == 0)
+ return 0;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
-/* Define 2^5 = 32 bucket size hash table */
-DEFINE_HASHTABLE(wklock_history, 5);
-#else
-/* Define 2^5 = 32 bucket size hash table */
-struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
-#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
-int trace_wklock_onoff = 1;
-typedef enum dhd_wklock_type {
- DHD_WAKE_LOCK,
- DHD_WAKE_UNLOCK,
- DHD_WAIVE_LOCK,
- DHD_RESTORE_LOCK
-} dhd_wklock_t;
+ /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
+ if (!dhd->waive_wakelock)
+ goto exit;
-struct wk_trace_record {
- unsigned long addr; /* Address of the instruction */
- dhd_wklock_t lock_type; /* lock_type */
- unsigned long long counter; /* counter information */
- struct hlist_node wklock_node; /* hash node */
-};
+ dhd->waive_wakelock = FALSE;
+ /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
+ * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
+ * the lock in between, do the same by calling wake_unlock or pm_relax
+ */
+#ifdef DHD_TRACE_WAKE_LOCK
+ if (atomic_read(&trace_wklock_onoff)) {
+ STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK);
+ }
+#endif /* DHD_TRACE_WAKE_LOCK */
-static struct wk_trace_record *find_wklock_entry(unsigned long addr)
-{
- struct wk_trace_record *wklock_info;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
- hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
-#else
- struct hlist_node *entry;
- int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
- hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
-#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
- {
- if (wklock_info->addr == addr) {
- return wklock_info;
- }
+ if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock(&dhd->wl_wifi);
+#elif defined(BCMSDIO)
+ dhd_bus_dev_pm_stay_awake(&dhd->pub);
+#endif // endif
+ } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_unlock(&dhd->wl_wifi);
+#elif defined(BCMSDIO)
+ dhd_bus_dev_pm_relax(&dhd->pub);
+#endif // endif
}
- return NULL;
+ dhd->wakelock_before_waive = 0;
+exit:
+ ret = dhd->wakelock_wd_counter;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ return ret;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
-#define HASH_ADD(hashtable, node, key) \
- do { \
- hash_add(hashtable, node, key); \
- } while (0);
-#else
-#define HASH_ADD(hashtable, node, key) \
- do { \
- int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
- hlist_add_head(node, &hashtable[index]); \
- } while (0);
-#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
-
-#define STORE_WKLOCK_RECORD(wklock_type) \
- do { \
- struct wk_trace_record *wklock_info = NULL; \
- unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
- wklock_info = find_wklock_entry(func_addr); \
- if (wklock_info) { \
- if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
- wklock_info->counter = dhd->wakelock_counter; \
- } else { \
- wklock_info->counter++; \
- } \
- } else { \
- wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
- if (!wklock_info) {\
- printk("Can't allocate wk_trace_record \n"); \
- } else { \
- wklock_info->addr = func_addr; \
- wklock_info->lock_type = wklock_type; \
- if (wklock_type == DHD_WAIVE_LOCK || \
- wklock_type == DHD_RESTORE_LOCK) { \
- wklock_info->counter = dhd->wakelock_counter; \
- } else { \
- wklock_info->counter++; \
- } \
- HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
- } \
- } \
- } while (0);
+void dhd_os_wake_lock_init(struct dhd_info *dhd)
+{
+ DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__));
+ dhd->wakelock_counter = 0;
+ dhd->wakelock_rx_timeout_enable = 0;
+ dhd->wakelock_ctrl_timeout_enable = 0;
+ /* wakelocks prevent a system from going into a low power state */
+#ifdef CONFIG_HAS_WAKELOCK
+ // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry
+ wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
+ wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
+ wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake");
+ wake_lock_init(&dhd->wl_pmwake, WAKE_LOCK_SUSPEND, "wlan_pm_wake");
+ wake_lock_init(&dhd->wl_txflwake, WAKE_LOCK_SUSPEND, "wlan_txfl_wake");
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+ wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
+#endif /* DHD_USE_SCAN_WAKELOCK */
+#endif /* CONFIG_HAS_WAKELOCK */
+#ifdef DHD_TRACE_WAKE_LOCK
+ dhd_wk_lock_trace_init(dhd);
+#endif /* DHD_TRACE_WAKE_LOCK */
+}
-static inline void dhd_wk_lock_rec_dump(void)
+void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
{
- int bkt;
- struct wk_trace_record *wklock_info;
+ DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd->wakelock_counter = 0;
+ dhd->wakelock_rx_timeout_enable = 0;
+ dhd->wakelock_ctrl_timeout_enable = 0;
+ // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry
+ wake_lock_destroy(&dhd->wl_rxwake);
+ wake_lock_destroy(&dhd->wl_ctrlwake);
+ wake_lock_destroy(&dhd->wl_evtwake);
+ wake_lock_destroy(&dhd->wl_pmwake);
+ wake_lock_destroy(&dhd->wl_txflwake);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ wake_lock_destroy(&dhd->wl_intrwake);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+ wake_lock_destroy(&dhd->wl_scanwake);
+#endif /* DHD_USE_SCAN_WAKELOCK */
+#ifdef DHD_TRACE_WAKE_LOCK
+ dhd_wk_lock_trace_deinit(dhd);
+#endif /* DHD_TRACE_WAKE_LOCK */
+#endif /* CONFIG_HAS_WAKELOCK */
+}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
- hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
-#else
- struct hlist_node *entry = NULL;
- int max_index = ARRAY_SIZE(wklock_history);
- for (bkt = 0; bkt < max_index; bkt++)
- hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node)
-#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
- {
- switch (wklock_info->lock_type) {
- case DHD_WAKE_LOCK:
- printk("wakelock lock : %pS lock_counter : %llu \n",
- (void *)wklock_info->addr, wklock_info->counter);
- break;
- case DHD_WAKE_UNLOCK:
- printk("wakelock unlock : %pS, unlock_counter : %llu \n",
- (void *)wklock_info->addr, wklock_info->counter);
- break;
- case DHD_WAIVE_LOCK:
- printk("wakelock waive : %pS before_waive : %llu \n",
- (void *)wklock_info->addr, wklock_info->counter);
- break;
- case DHD_RESTORE_LOCK:
- printk("wakelock restore : %pS, after_waive : %llu \n",
- (void *)wklock_info->addr, wklock_info->counter);
- break;
- }
- }
+bool dhd_os_check_if_up(dhd_pub_t *pub)
+{
+ if (!pub)
+ return FALSE;
+ return pub->up;
}
-static void dhd_wk_lock_trace_init(struct dhd_info *dhd)
+/* function to collect firmware, chip id and chip version info */
+void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
{
- unsigned long flags;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
int i;
-#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
- hash_init(wklock_history);
-#else
- for (i = 0; i < ARRAY_SIZE(wklock_history); i++)
- INIT_HLIST_HEAD(&wklock_history[i]);
-#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ i = snprintf(info_string, sizeof(info_string),
+ " Driver: %s\n Firmware: %s\n CLM: %s ", EPI_VERSION_STR, fw, clm_version);
+ printf("%s\n", info_string);
+
+ if (!dhdp)
+ return;
+
+ i = snprintf(&info_string[i], sizeof(info_string) - i,
+ "\n Chip: %x Rev %x", dhd_conf_get_chip(dhdp),
+ dhd_conf_get_chiprev(dhdp));
}
-static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd)
+int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
{
- int bkt;
- struct wk_trace_record *wklock_info;
- struct hlist_node *tmp;
- unsigned long flags;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
- struct hlist_node *entry = NULL;
- int max_index = ARRAY_SIZE(wklock_history);
-#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+ int ifidx;
+ int ret = 0;
+ dhd_info_t *dhd = NULL;
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
- hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node)
-#else
- for (bkt = 0; bkt < max_index; bkt++)
- hlist_for_each_entry_safe(wklock_info, entry, tmp,
- &wklock_history[bkt], wklock_node)
-#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
- {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
- hash_del(&wklock_info->wklock_node);
-#else
- hlist_del_init(&wklock_info->wklock_node);
-#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
- kfree(wklock_info);
- }
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ if (!net || !DEV_PRIV(net)) {
+ DHD_ERROR(("%s invalid parameter net %p dev_priv %p\n",
+ __FUNCTION__, net, DEV_PRIV(net)));
+ return -EINVAL;
+ }
+
+ dhd = DHD_DEV_INFO(net);
+ if (!dhd)
+ return -EINVAL;
+
+ ifidx = dhd_net2idx(dhd, net);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ DHD_PERIM_LOCK(&dhd->pub);
+
+ ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
+ dhd_check_hang(net, &dhd->pub, ret);
+
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+ return ret;
}
-void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp)
+bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
{
- dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
- unsigned long flags;
+ struct net_device *net;
- printk(KERN_ERR"DHD Printing wl_wake Lock/Unlock Record \r\n");
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
- dhd_wk_lock_rec_dump();
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ net = dhd_idx2net(dhdp, ifidx);
+ if (!net) {
+ DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
+ return -EINVAL;
+ }
+ return dhd_check_hang(net, dhdp, ret);
}
-#else
-#define STORE_WKLOCK_RECORD(wklock_type)
-#endif /* ! DHD_TRACE_WAKE_LOCK */
-int dhd_os_wake_lock(dhd_pub_t *pub)
+/* Return instance */
+int dhd_get_instance(dhd_pub_t *dhdp)
{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- unsigned long flags;
+ return dhdp->info->unit;
+}
+
+#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
+#define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
+int dhd_deepsleep(struct net_device *dev, int flag)
+{
+ char iovbuf[20];
+ uint powervar = 0;
+ dhd_info_t *dhd;
+ dhd_pub_t *dhdp;
+ int cnt = 0;
int ret = 0;
- if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
- if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
-#ifdef CONFIG_HAS_WAKELOCK
- wake_lock(&dhd->wl_wifi);
-#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
- dhd_bus_dev_pm_stay_awake(pub);
-#endif // endif
- }
-#ifdef DHD_TRACE_WAKE_LOCK
- if (trace_wklock_onoff) {
- STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
- }
-#endif /* DHD_TRACE_WAKE_LOCK */
- dhd->wakelock_counter++;
- ret = dhd->wakelock_counter;
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ dhd = DHD_DEV_INFO(dev);
+ dhdp = &dhd->pub;
+
+ switch (flag) {
+ case 1 : /* Deepsleep on */
+ DHD_ERROR(("[WiFi] Deepsleep On\n"));
+ /* give some time to sysioc_work before deepsleep */
+ OSL_SLEEP(200);
+#ifdef PKT_FILTER_SUPPORT
+ /* disable pkt filter */
+ dhd_enable_packet_filter(0, dhdp);
+#endif /* PKT_FILTER_SUPPORT */
+ /* Disable MPC */
+ powervar = 0;
+ ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL,
+ 0, TRUE);
+
+ /* Enable Deepsleep */
+ powervar = 1;
+ ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar, sizeof(powervar),
+ NULL, 0, TRUE);
+ break;
+
+ case 0: /* Deepsleep Off */
+ DHD_ERROR(("[WiFi] Deepsleep Off\n"));
+
+ /* Disable Deepsleep */
+ for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
+ powervar = 0;
+ ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
+ sizeof(powervar), NULL, 0, TRUE);
+
+ ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
+ sizeof(powervar), iovbuf, sizeof(iovbuf), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("the error of dhd deepsleep status"
+ " ret value :%d\n", ret));
+ } else {
+ if (!(*(int *)iovbuf)) {
+ DHD_ERROR(("deepsleep mode is 0,"
+ " count: %d\n", cnt));
+ break;
+ }
+ }
+ }
+
+ /* Enable MPC */
+ powervar = 1;
+ ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL,
+ 0, TRUE);
+ break;
}
- return ret;
+ return 0;
}
+#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
-void dhd_event_wake_lock(dhd_pub_t *pub)
-{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+#ifdef PROP_TXSTATUS
- if (dhd) {
-#ifdef CONFIG_HAS_WAKELOCK
- wake_lock(&dhd->wl_evtwake);
-#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
- dhd_bus_dev_pm_stay_awake(pub);
-#endif // endif
- }
+void dhd_wlfc_plat_init(void *dhd)
+{
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+ dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+ return;
}
-void
-dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val)
+void dhd_wlfc_plat_deinit(void *dhd)
{
-#ifdef CONFIG_HAS_WAKELOCK
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
-
- if (dhd) {
- wake_lock_timeout(&dhd->wl_pmwake, msecs_to_jiffies(val));
- }
-#endif /* CONFIG_HAS_WAKE_LOCK */
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+ dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+ return;
}
-void
-dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val)
+bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx)
{
-#ifdef CONFIG_HAS_WAKELOCK
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+#ifdef SKIP_WLFC_ON_CONCURRENT
- if (dhd) {
- wake_lock_timeout(&dhd->wl_txflwake, msecs_to_jiffies(val));
- }
-#endif /* CONFIG_HAS_WAKE_LOCK */
+#ifdef WL_CFG80211
+ struct net_device * net = dhd_idx2net((dhd_pub_t *)dhdp, idx);
+ if (net)
+ /* enable flow control in vsdb mode */
+ return !(wl_cfg80211_is_concurrent_mode(net));
+#else
+ return TRUE; /* skip flow control */
+#endif /* WL_CFG80211 */
+
+#else
+ return FALSE;
+#endif /* SKIP_WLFC_ON_CONCURRENT */
+ return FALSE;
}
+#endif /* PROP_TXSTATUS */
-int net_os_wake_lock(struct net_device *dev)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- int ret = 0;
+#ifdef BCMDBGFS
+#include <linux/debugfs.h>
- if (dhd)
- ret = dhd_os_wake_lock(&dhd->pub);
- return ret;
-}
+typedef struct dhd_dbgfs {
+ struct dentry *debugfs_dir;
+ struct dentry *debugfs_mem;
+ dhd_pub_t *dhdp;
+ uint32 size;
+} dhd_dbgfs_t;
-int dhd_os_wake_unlock(dhd_pub_t *pub)
-{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- unsigned long flags;
- int ret = 0;
+dhd_dbgfs_t g_dbgfs;
- dhd_os_wake_lock_timeout(pub);
- if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+extern uint32 dhd_readregl(void *bp, uint32 addr);
+extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
- if (dhd->wakelock_counter > 0) {
- dhd->wakelock_counter--;
-#ifdef DHD_TRACE_WAKE_LOCK
- if (trace_wklock_onoff) {
- STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK);
- }
-#endif /* DHD_TRACE_WAKE_LOCK */
- if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
-#ifdef CONFIG_HAS_WAKELOCK
- wake_unlock(&dhd->wl_wifi);
-#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
- dhd_bus_dev_pm_relax(pub);
-#endif // endif
- }
- ret = dhd->wakelock_counter;
- }
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
- }
- return ret;
+static int
+dhd_dbg_state_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
}
-void dhd_event_wake_unlock(dhd_pub_t *pub)
+static ssize_t
+dhd_dbg_state_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ ssize_t rval;
+ uint32 tmp;
+ loff_t pos = *ppos;
+ size_t ret;
- if (dhd) {
-#ifdef CONFIG_HAS_WAKELOCK
- wake_unlock(&dhd->wl_evtwake);
-#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
- dhd_bus_dev_pm_relax(pub);
-#endif // endif
- }
-}
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= g_dbgfs.size || !count)
+ return 0;
+ if (count > g_dbgfs.size - pos)
+ count = g_dbgfs.size - pos;
-void dhd_pm_wake_unlock(dhd_pub_t *pub)
-{
-#ifdef CONFIG_HAS_WAKELOCK
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
+ tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
- if (dhd) {
- /* if wl_pmwake is active, unlock it */
- if (wake_lock_active(&dhd->wl_pmwake)) {
- wake_unlock(&dhd->wl_pmwake);
- }
- }
-#endif /* CONFIG_HAS_WAKELOCK */
-}
+ ret = copy_to_user(ubuf, &tmp, 4);
+ if (ret == count)
+ return -EFAULT;
-void dhd_txfl_wake_unlock(dhd_pub_t *pub)
-{
-#ifdef CONFIG_HAS_WAKELOCK
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ count -= ret;
+ *ppos = pos + count;
+ rval = count;
- if (dhd) {
- /* if wl_txflwake is active, unlock it */
- if (wake_lock_active(&dhd->wl_txflwake)) {
- wake_unlock(&dhd->wl_txflwake);
- }
- }
-#endif /* CONFIG_HAS_WAKELOCK */
+ return rval;
}
-int dhd_os_check_wakelock(dhd_pub_t *pub)
+static ssize_t
+dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
{
-#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
- KERNEL_VERSION(2, 6, 36)))
- dhd_info_t *dhd;
+ loff_t pos = *ppos;
+ size_t ret;
+ uint32 buf;
- if (!pub)
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= g_dbgfs.size || !count)
return 0;
- dhd = (dhd_info_t *)(pub->info);
-#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
+ if (count > g_dbgfs.size - pos)
+ count = g_dbgfs.size - pos;
-#ifdef CONFIG_HAS_WAKELOCK
- /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
- if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
- (wake_lock_active(&dhd->wl_wdwake))))
- return 1;
-#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
- if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
- return 1;
-#endif // endif
- return 0;
+ ret = copy_from_user(&buf, ubuf, sizeof(uint32));
+ if (ret == count)
+ return -EFAULT;
+
+ /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
+ dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
+
+ return count;
}
-int
-dhd_os_check_wakelock_all(dhd_pub_t *pub)
+loff_t
+dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
{
-#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
- KERNEL_VERSION(2, 6, 36)))
-#if defined(CONFIG_HAS_WAKELOCK)
- int l1, l2, l3, l4, l7, l8, l9;
- int l5 = 0, l6 = 0;
- int c, lock_active;
-#endif /* CONFIG_HAS_WAKELOCK */
- dhd_info_t *dhd;
+ loff_t pos = -1;
- if (!pub) {
- return 0;
- }
- dhd = (dhd_info_t *)(pub->info);
- if (!dhd) {
- return 0;
+ switch (whence) {
+ case 0:
+ pos = off;
+ break;
+ case 1:
+ pos = file->f_pos + off;
+ break;
+ case 2:
+ pos = g_dbgfs.size - off;
}
-#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
+ return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
+}
-#ifdef CONFIG_HAS_WAKELOCK
- c = dhd->wakelock_counter;
- l1 = wake_lock_active(&dhd->wl_wifi);
- l2 = wake_lock_active(&dhd->wl_wdwake);
- l3 = wake_lock_active(&dhd->wl_rxwake);
- l4 = wake_lock_active(&dhd->wl_ctrlwake);
- l7 = wake_lock_active(&dhd->wl_evtwake);
-#ifdef BCMPCIE_OOB_HOST_WAKE
- l5 = wake_lock_active(&dhd->wl_intrwake);
-#endif /* BCMPCIE_OOB_HOST_WAKE */
-#ifdef DHD_USE_SCAN_WAKELOCK
- l6 = wake_lock_active(&dhd->wl_scanwake);
-#endif /* DHD_USE_SCAN_WAKELOCK */
- l8 = wake_lock_active(&dhd->wl_pmwake);
- l9 = wake_lock_active(&dhd->wl_txflwake);
- lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7 || l8 || l9);
+static const struct file_operations dhd_dbg_state_ops = {
+ .read = dhd_dbg_state_read,
+ .write = dhd_debugfs_write,
+ .open = dhd_dbg_state_open,
+ .llseek = dhd_debugfs_lseek
+};
- /* Indicate to the Host to avoid going to suspend if internal locks are up */
- if (lock_active) {
- DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
- "ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d\n",
- __FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7, l8, l9));
- return 1;
- }
-#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
- if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
- return 1;
+static void dhd_dbgfs_create(void)
+{
+ if (g_dbgfs.debugfs_dir) {
+ g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
+ NULL, &dhd_dbg_state_ops);
}
-#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
- return 0;
}
-int net_os_wake_unlock(struct net_device *dev)
+void dhd_dbgfs_init(dhd_pub_t *dhdp)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- int ret = 0;
+ g_dbgfs.dhdp = dhdp;
+ g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
- if (dhd)
- ret = dhd_os_wake_unlock(&dhd->pub);
- return ret;
+ g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
+ if (IS_ERR(g_dbgfs.debugfs_dir)) {
+ g_dbgfs.debugfs_dir = NULL;
+ return;
+ }
+
+ dhd_dbgfs_create();
+
+ return;
}
-int dhd_os_wd_wake_lock(dhd_pub_t *pub)
+void dhd_dbgfs_remove(void)
{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- unsigned long flags;
- int ret = 0;
+ debugfs_remove(g_dbgfs.debugfs_mem);
+ debugfs_remove(g_dbgfs.debugfs_dir);
- if (dhd) {
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
- if (dhd->wakelock_wd_counter == 0 && !dhd->waive_wakelock) {
-#ifdef CONFIG_HAS_WAKELOCK
- /* if wakelock_wd_counter was never used : lock it at once */
- wake_lock(&dhd->wl_wdwake);
-#endif // endif
- }
- dhd->wakelock_wd_counter++;
- ret = dhd->wakelock_wd_counter;
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
- }
- return ret;
+ bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
}
+#endif /* BCMDBGFS */
-int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
+#ifdef CUSTOM_SET_CPUCORE
+void dhd_set_cpucore(dhd_pub_t *dhd, int set)
{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- unsigned long flags;
- int ret = 0;
+ int e_dpc = 0, e_rxf = 0, retry_set = 0;
- if (dhd) {
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
- if (dhd->wakelock_wd_counter > 0) {
- dhd->wakelock_wd_counter = 0;
- if (!dhd->waive_wakelock) {
-#ifdef CONFIG_HAS_WAKELOCK
- wake_unlock(&dhd->wl_wdwake);
-#endif // endif
+ if (!(dhd->chan_isvht80)) {
+ DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
+ return;
+ }
+
+ if (DPC_CPUCORE) {
+ do {
+ if (set == TRUE) {
+ e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
+ cpumask_of(DPC_CPUCORE));
+ } else {
+ e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
+ cpumask_of(PRIMARY_CPUCORE));
}
- }
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
+ DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
+ return;
+ }
+ if (e_dpc < 0)
+ OSL_SLEEP(1);
+ } while (e_dpc < 0);
}
- return ret;
+ if (RXF_CPUCORE) {
+ do {
+ if (set == TRUE) {
+ e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
+ cpumask_of(RXF_CPUCORE));
+ } else {
+ e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
+ cpumask_of(PRIMARY_CPUCORE));
+ }
+ if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
+ DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
+ return;
+ }
+ if (e_rxf < 0)
+ OSL_SLEEP(1);
+ } while (e_rxf < 0);
+ }
+ DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
+
+ return;
}
+#endif /* CUSTOM_SET_CPUCORE */
-#ifdef BCMPCIE_OOB_HOST_WAKE
-void
-dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
+#ifdef DHD_MCAST_REGEN
+/* Get interface specific ap_isolate configuration */
+int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx)
{
-#ifdef CONFIG_HAS_WAKELOCK
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
- if (dhd) {
- wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
- }
-#endif /* CONFIG_HAS_WAKELOCK */
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+
+ return ifp->mcast_regen_bss_enable;
}
-void
-dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
+/* Set interface specific mcast_regen configuration */
+int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val)
{
-#ifdef CONFIG_HAS_WAKELOCK
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
- if (dhd) {
- /* if wl_intrwake is active, unlock it */
- if (wake_lock_active(&dhd->wl_intrwake)) {
- wake_unlock(&dhd->wl_intrwake);
- }
- }
-#endif /* CONFIG_HAS_WAKELOCK */
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+
+ ifp->mcast_regen_bss_enable = val;
+
+ /* Disable rx_pkt_chain feature for interface, if mcast_regen feature
+ * is enabled
+ */
+ dhd_update_rx_pkt_chainable_state(dhdp, idx);
+ return BCME_OK;
}
-#endif /* BCMPCIE_OOB_HOST_WAKE */
+#endif /* DHD_MCAST_REGEN */
-#ifdef DHD_USE_SCAN_WAKELOCK
-void
-dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val)
+/* Get interface specific ap_isolate configuration */
+int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
{
-#ifdef CONFIG_HAS_WAKELOCK
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
- if (dhd) {
- wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
- }
-#endif /* CONFIG_HAS_WAKELOCK */
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+
+ return ifp->ap_isolate;
}
-void
-dhd_os_scan_wake_unlock(dhd_pub_t *pub)
+/* Set interface specific ap_isolate configuration */
+int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
{
-#ifdef CONFIG_HAS_WAKELOCK
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
- if (dhd) {
- /* if wl_scanwake is active, unlock it */
- if (wake_lock_active(&dhd->wl_scanwake)) {
- wake_unlock(&dhd->wl_scanwake);
- }
- }
-#endif /* CONFIG_HAS_WAKELOCK */
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+
+ if (ifp)
+ ifp->ap_isolate = val;
+
+ return 0;
}
-#endif /* DHD_USE_SCAN_WAKELOCK */
-/* waive wakelocks for operations such as IOVARs in suspend function, must be closed
- * by a paired function call to dhd_wakelock_restore. returns current wakelock counter
- */
-int dhd_os_wake_lock_waive(dhd_pub_t *pub)
+#ifdef DHD_FW_COREDUMP
+void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- unsigned long flags;
- int ret = 0;
+ unsigned long flags = 0;
+ dhd_dump_t *dump = NULL;
+ dhd_info_t *dhd_info = NULL;
+#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
+ log_dump_type_t type = DLD_BUF_TYPE_ALL;
+#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
- if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ dhd_info = (dhd_info_t *)dhdp->info;
+ dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
+ if (dump == NULL) {
+ DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__));
+ return;
+ }
+ dump->buf = buf;
+ dump->bufsize = size;
+#ifdef BCMPCIE
+ dhd_get_hscb_info(dhdp, (void*)(&dump->hscb_buf),
+ (uint32 *)(&dump->hscb_bufsize));
+#else /* BCMPCIE */
+ dump->hscb_bufsize = 0;
+#endif /* BCMPCIE */
- /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
- if (dhd->waive_wakelock == FALSE) {
-#ifdef DHD_TRACE_WAKE_LOCK
- if (trace_wklock_onoff) {
- STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK);
- }
-#endif /* DHD_TRACE_WAKE_LOCK */
- /* record current lock status */
- dhd->wakelock_before_waive = dhd->wakelock_counter;
- dhd->waive_wakelock = TRUE;
+#ifdef DHD_LOG_DUMP
+ dhd_print_buf_addr(dhdp, "memdump", buf, size);
+#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
+	/* Print out buffer information */
+ dhd_log_dump_buf_addr(dhdp, &type);
+#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+#endif /* DHD_LOG_DUMP */
+
+ if (dhdp->memdump_enabled == DUMP_MEMONLY) {
+ BUG_ON(1);
+ }
+
+#if defined(DEBUG_DNGL_INIT_FAIL) || defined(DHD_ERPOM) || \
+ defined(DNGL_AXI_ERROR_LOGGING)
+ if (
+#if defined(DEBUG_DNGL_INIT_FAIL)
+ (dhdp->memdump_type == DUMP_TYPE_DONGLE_INIT_FAILURE) ||
+#endif /* DEBUG_DNGL_INIT_FAIL */
+#ifdef DHD_ERPOM
+ (dhdp->memdump_type == DUMP_TYPE_DUE_TO_BT) ||
+#endif /* DHD_ERPOM */
+#ifdef DNGL_AXI_ERROR_LOGGING
+ (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT) ||
+#endif /* DNGL_AXI_ERROR_LOGGING */
+ FALSE)
+ {
+#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) && defined(DHD_LOG_DUMP)
+ log_dump_type_t *flush_type = NULL;
+#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL && DHD_LOG_DUMP */
+ dhd_info->scheduled_memdump = FALSE;
+ (void)dhd_mem_dump((void *)dhdp->info, (void *)dump, 0);
+#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) && defined(DHD_LOG_DUMP)
+ /* for dongle init fail cases, 'dhd_mem_dump' does
+ * not call 'dhd_log_dump', so call it here.
+ */
+ flush_type = MALLOCZ(dhdp->osh,
+ sizeof(log_dump_type_t));
+ if (flush_type) {
+ *flush_type = DLD_BUF_TYPE_ALL;
+ DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
+ dhd_log_dump(dhdp->info, flush_type, 0);
}
- ret = dhd->wakelock_wd_counter;
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL && DHD_LOG_DUMP */
+ return;
}
- return ret;
+#endif /* DEBUG_DNGL_INIT_FAIL || DHD_ERPOM || DNGL_AXI_ERROR_LOGGING */
+
+ dhd_info->scheduled_memdump = TRUE;
+ /* bus busy bit for mem dump will be cleared in mem dump
+ * work item context, after mem dump file is written
+ */
+ DHD_GENERAL_LOCK(dhdp, flags);
+ DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__));
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
+ DHD_WQ_WORK_SOC_RAM_DUMP, (void *)dhd_mem_dump, DHD_WQ_WORK_PRIORITY_HIGH);
}
-int dhd_os_wake_lock_restore(dhd_pub_t *pub)
+static int
+dhd_mem_dump(void *handle, void *event_info, u8 event)
{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- unsigned long flags;
+ dhd_info_t *dhd = handle;
+ dhd_pub_t *dhdp = NULL;
+ unsigned long flags = 0;
int ret = 0;
+ dhd_dump_t *dump = NULL;
- if (!dhd)
- return 0;
- if ((dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) == 0)
- return 0;
+ DHD_ERROR(("%s: ENTER, memdump type %u\n", __FUNCTION__, dhd->pub.memdump_type));
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return -ENODEV;
+ }
- /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
- if (!dhd->waive_wakelock)
+ dhdp = &dhd->pub;
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ DHD_GENERAL_LOCK(dhdp, flags);
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__));
+ ret = -ENODEV;
goto exit;
+ }
+ DHD_GENERAL_UNLOCK(dhdp, flags);
- dhd->waive_wakelock = FALSE;
- /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
- * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
- * the lock in between, do the same by calling wake_unlock or pm_relax
+#ifdef DHD_SSSR_DUMP
+ if (dhdp->sssr_inited && dhdp->collect_sssr) {
+ dhdpcie_sssr_dump(dhdp);
+ }
+ dhdp->collect_sssr = FALSE;
+#endif /* DHD_SSSR_DUMP */
+#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
+ dhd_wait_for_file_dump(dhdp);
+#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
+
+ dump = (dhd_dump_t *)event_info;
+ if (!dump) {
+ DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__));
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /*
+ * If kernel does not have file write access enabled
+ * then skip writing dumps to files.
+ * The dumps will be pushed to HAL layer which will
+ * write into files
*/
-#ifdef DHD_TRACE_WAKE_LOCK
- if (trace_wklock_onoff) {
- STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK);
+#ifdef DHD_DUMP_FILE_WRITE_FROM_KERNEL
+
+ if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) {
+ DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
+#ifdef DHD_DEBUG_UART
+ dhd->pub.memdump_success = FALSE;
+#endif /* DHD_DEBUG_UART */
}
-#endif /* DHD_TRACE_WAKE_LOCK */
- if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
-#ifdef CONFIG_HAS_WAKELOCK
- wake_lock(&dhd->wl_wifi);
-#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
- dhd_bus_dev_pm_stay_awake(&dhd->pub);
-#endif // endif
- } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
-#ifdef CONFIG_HAS_WAKELOCK
- wake_unlock(&dhd->wl_wifi);
-#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
- dhd_bus_dev_pm_relax(&dhd->pub);
-#endif // endif
+ /* directly call dhd_log_dump for debug_dump collection from the mem_dump work queue
+ * context, no need to schedule another work queue for log dump. In case of
+ * user initiated DEBUG_DUMP wpa_cli command (DUMP_TYPE_BY_SYSDUMP),
+ * cfg layer is itself scheduling the log_dump work queue.
+ * that path is not disturbed. If 'dhd_mem_dump' is called directly then we will not
+ * collect debug_dump as it may be called from non-sleepable context.
+ */
+#ifdef DHD_LOG_DUMP
+ if (dhd->scheduled_memdump &&
+ dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
+ log_dump_type_t *flush_type = MALLOCZ(dhdp->osh,
+ sizeof(log_dump_type_t));
+ if (flush_type) {
+ *flush_type = DLD_BUF_TYPE_ALL;
+ DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
+ dhd_log_dump(dhd, flush_type, 0);
+ }
}
- dhd->wakelock_before_waive = 0;
-exit:
- ret = dhd->wakelock_wd_counter;
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
- return ret;
-}
+#endif /* DHD_LOG_DUMP */
-void dhd_os_wake_lock_init(struct dhd_info *dhd)
-{
- DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__));
- dhd->wakelock_counter = 0;
- dhd->wakelock_rx_timeout_enable = 0;
- dhd->wakelock_ctrl_timeout_enable = 0;
- /* wakelocks prevent a system from going into a low power state */
-#ifdef CONFIG_HAS_WAKELOCK
- // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry
- wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
- wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
- wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake");
- wake_lock_init(&dhd->wl_pmwake, WAKE_LOCK_SUSPEND, "wlan_pm_wake");
- wake_lock_init(&dhd->wl_txflwake, WAKE_LOCK_SUSPEND, "wlan_txfl_wake");
-#ifdef BCMPCIE_OOB_HOST_WAKE
- wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
-#endif /* BCMPCIE_OOB_HOST_WAKE */
-#ifdef DHD_USE_SCAN_WAKELOCK
- wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
-#endif /* DHD_USE_SCAN_WAKELOCK */
-#endif /* CONFIG_HAS_WAKELOCK */
-#ifdef DHD_TRACE_WAKE_LOCK
- dhd_wk_lock_trace_init(dhd);
-#endif /* DHD_TRACE_WAKE_LOCK */
-}
+ clear_debug_dump_time(dhdp->debug_dump_time_str);
-void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
-{
- DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
-#ifdef CONFIG_HAS_WAKELOCK
- dhd->wakelock_counter = 0;
- dhd->wakelock_rx_timeout_enable = 0;
- dhd->wakelock_ctrl_timeout_enable = 0;
- // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry
- wake_lock_destroy(&dhd->wl_rxwake);
- wake_lock_destroy(&dhd->wl_ctrlwake);
- wake_lock_destroy(&dhd->wl_evtwake);
- wake_lock_destroy(&dhd->wl_pmwake);
- wake_lock_destroy(&dhd->wl_txflwake);
-#ifdef BCMPCIE_OOB_HOST_WAKE
- wake_lock_destroy(&dhd->wl_intrwake);
-#endif /* BCMPCIE_OOB_HOST_WAKE */
-#ifdef DHD_USE_SCAN_WAKELOCK
- wake_lock_destroy(&dhd->wl_scanwake);
-#endif /* DHD_USE_SCAN_WAKELOCK */
-#ifdef DHD_TRACE_WAKE_LOCK
- dhd_wk_lock_trace_deinit(dhd);
-#endif /* DHD_TRACE_WAKE_LOCK */
-#endif /* CONFIG_HAS_WAKELOCK */
-}
+	/* Before calling BUG_ON(), wait for other logs to be dumped.
+	 * We cannot wait when dhd_mem_dump is called directly,
+	 * as it may not be in a sleepable context.
+	 */
+ if (dhd->scheduled_memdump) {
+ uint bitmask = 0;
+ int timeleft = 0;
+#ifdef DHD_SSSR_DUMP
+ bitmask |= DHD_BUS_BUSY_IN_SSSRDUMP;
+#endif // endif
+ if (bitmask != 0) {
+ DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n",
+ __FUNCTION__, dhdp->dhd_bus_busy_state));
+ timeleft = dhd_os_busbusy_wait_bitmask(dhdp,
+ &dhdp->dhd_bus_busy_state, bitmask, 0);
+ if ((timeleft == 0) || (timeleft == 1)) {
+ DHD_ERROR(("%s: Timed out dhd_bus_busy_state=0x%x\n",
+ __FUNCTION__, dhdp->dhd_bus_busy_state));
+ }
+ }
+ }
-bool dhd_os_check_if_up(dhd_pub_t *pub)
-{
- if (!pub)
- return FALSE;
- return pub->up;
-}
+ if (dump->hscb_buf && dump->hscb_bufsize) {
+ DHD_ERROR(("%s: write HSCB dump... \n", __FUNCTION__));
+ if (write_dump_to_file(&dhd->pub, dump->hscb_buf,
+ dump->hscb_bufsize, "mem_dump_hscb")) {
+ DHD_ERROR(("%s: writing HSCB dump to the file failed\n", __FUNCTION__));
+#ifdef DHD_DEBUG_UART
+ dhd->pub.memdump_success = FALSE;
+#endif /* DHD_DEBUG_UART */
+ }
+ }
+#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
-/* function to collect firmware, chip id and chip version info */
-void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
-{
- int i;
+ DHD_ERROR(("%s: memdump type %u\n", __FUNCTION__, dhd->pub.memdump_type));
+ if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
+#ifdef DHD_LOG_DUMP
+ dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
+#endif /* DHD_LOG_DUMP */
+ dhd->pub.memdump_type != DUMP_TYPE_BY_USER &&
+#ifdef DHD_DEBUG_UART
+ dhd->pub.memdump_success == TRUE &&
+#endif /* DHD_DEBUG_UART */
+#ifdef DNGL_EVENT_SUPPORT
+ dhd->pub.memdump_type != DUMP_TYPE_DONGLE_HOST_EVENT &&
+#endif /* DNGL_EVENT_SUPPORT */
+ dhd->pub.memdump_type != DUMP_TYPE_CFG_VENDOR_TRIGGERED) {
- i = snprintf(info_string, sizeof(info_string),
- " Driver: %s\n Firmware: %s\n CLM: %s ", EPI_VERSION_STR, fw, clm_version);
- printf("%s\n", info_string);
+#ifdef SHOW_LOGTRACE
+ /* Wait till logtrace context is flushed */
+ dhd_flush_logtrace_process(dhd);
+#endif /* SHOW_LOGTRACE */
- if (!dhdp)
- return;
+ DHD_ERROR(("%s: call BUG_ON \n", __FUNCTION__));
+ //BUG_ON(1);
+ }
+ DHD_ERROR(("%s: No BUG ON, memdump type %u \n", __FUNCTION__, dhd->pub.memdump_type));
- i = snprintf(&info_string[i], sizeof(info_string) - i,
- "\n Chip: %x Rev %x", dhd_conf_get_chip(dhdp),
- dhd_conf_get_chiprev(dhdp));
+exit:
+ if (dump) {
+ MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
+ }
+ DHD_GENERAL_LOCK(dhdp, flags);
+ DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(&dhd->pub);
+ dhd_os_busbusy_wake(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ dhd->scheduled_memdump = FALSE;
+ if (dhdp->hang_was_pending) {
+ DHD_ERROR(("%s: Send pending HANG event...\n", __FUNCTION__));
+ dhd_os_send_hang_message(dhdp);
+ dhdp->hang_was_pending = 0;
+ }
+ DHD_ERROR(("%s: EXIT \n", __FUNCTION__));
+ return ret;
}
+#endif /* DHD_FW_COREDUMP */
-int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
+#ifdef DHD_SSSR_DUMP
+int
+dhd_sssr_dump_dig_buf_before(void *dev, const void *user_buf, uint32 len)
{
- int ifidx;
- int ret = 0;
- dhd_info_t *dhd = NULL;
+ dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhd_pub_t *dhdp = &dhd_info->pub;
+ int pos = 0, ret = BCME_ERROR;
+ uint dig_buf_size = 0;
- if (!net || !DEV_PRIV(net)) {
- DHD_ERROR(("%s invalid parameter net %p dev_priv %p\n",
- __FUNCTION__, net, DEV_PRIV(net)));
- return -EINVAL;
+ if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) {
+ dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
+ } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
+ dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) {
+ dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
}
- dhd = DHD_DEV_INFO(net);
- if (!dhd)
- return -EINVAL;
-
- ifidx = dhd_net2idx(dhd, net);
- if (ifidx == DHD_BAD_IF) {
- DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
- return -ENODEV;
+ if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
+ ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_before,
+ NULL, user_buf, dig_buf_size, &pos);
}
+ return ret;
+}
- DHD_OS_WAKE_LOCK(&dhd->pub);
- DHD_PERIM_LOCK(&dhd->pub);
-
- ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
- dhd_check_hang(net, &dhd->pub, ret);
-
- DHD_PERIM_UNLOCK(&dhd->pub);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
+int
+dhd_sssr_dump_dig_buf_after(void *dev, const void *user_buf, uint32 len)
+{
+ dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhd_pub_t *dhdp = &dhd_info->pub;
+ int pos = 0, ret = BCME_ERROR;
+ uint dig_buf_size = 0;
+
+ if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) {
+ dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
+ } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
+ dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) {
+ dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
+ }
+ if (dhdp->sssr_dig_buf_after) {
+ ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_after,
+ NULL, user_buf, dig_buf_size, &pos);
+ }
return ret;
}
-bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
+int
+dhd_sssr_dump_d11_buf_before(void *dev, const void *user_buf, uint32 len, int core)
{
- struct net_device *net;
+ dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhd_pub_t *dhdp = &dhd_info->pub;
+ int pos = 0, ret = BCME_ERROR;
- net = dhd_idx2net(dhdp, ifidx);
- if (!net) {
- DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
- return -EINVAL;
+ if (dhdp->sssr_d11_before[core] &&
+ dhdp->sssr_d11_outofreset[core] &&
+ (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
+ ret = dhd_export_debug_data((char *)dhdp->sssr_d11_before[core],
+ NULL, user_buf, len, &pos);
}
-
- return dhd_check_hang(net, dhdp, ret);
+ return ret;
}
-/* Return instance */
-int dhd_get_instance(dhd_pub_t *dhdp)
+int
+dhd_sssr_dump_d11_buf_after(void *dev, const void *user_buf, uint32 len, int core)
{
- return dhdp->info->unit;
+ dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhd_pub_t *dhdp = &dhd_info->pub;
+ int pos = 0, ret = BCME_ERROR;
+
+ if (dhdp->sssr_d11_after[core] &&
+ dhdp->sssr_d11_outofreset[core]) {
+ ret = dhd_export_debug_data((char *)dhdp->sssr_d11_after[core],
+ NULL, user_buf, len, &pos);
+ }
+ return ret;
}
-#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
-#define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
-int dhd_deepsleep(struct net_device *dev, int flag)
+static void
+dhd_sssr_dump_to_file(dhd_info_t* dhdinfo)
{
- char iovbuf[20];
- uint powervar = 0;
- dhd_info_t *dhd;
+ dhd_info_t *dhd = dhdinfo;
dhd_pub_t *dhdp;
- int cnt = 0;
- int ret = 0;
+ int i;
+ char before_sr_dump[128];
+ char after_sr_dump[128];
+ unsigned long flags = 0;
+ uint dig_buf_size = 0;
- dhd = DHD_DEV_INFO(dev);
- dhdp = &dhd->pub;
+ DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
- switch (flag) {
- case 1 : /* Deepsleep on */
- DHD_ERROR(("[WiFi] Deepsleep On\n"));
- /* give some time to sysioc_work before deepsleep */
- OSL_SLEEP(200);
-#ifdef PKT_FILTER_SUPPORT
- /* disable pkt filter */
- dhd_enable_packet_filter(0, dhdp);
-#endif /* PKT_FILTER_SUPPORT */
- /* Disable MPC */
- powervar = 0;
- ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL,
- 0, TRUE);
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return;
+ }
- /* Enable Deepsleep */
- powervar = 1;
- ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar, sizeof(powervar),
- NULL, 0, TRUE);
- break;
+ dhdp = &dhd->pub;
- case 0: /* Deepsleep Off */
- DHD_ERROR(("[WiFi] Deepsleep Off\n"));
+ DHD_GENERAL_LOCK(dhdp, flags);
+ DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp);
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ DHD_ERROR(("%s: bus is down! can't collect sssr dump. \n", __FUNCTION__));
+ goto exit;
+ }
+ DHD_GENERAL_UNLOCK(dhdp, flags);
- /* Disable Deepsleep */
- for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
- powervar = 0;
- ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
- sizeof(powervar), NULL, 0, TRUE);
+ for (i = 0; i < MAX_NUM_D11CORES; i++) {
+ /* Init file name */
+ memset(before_sr_dump, 0, sizeof(before_sr_dump));
+ memset(after_sr_dump, 0, sizeof(after_sr_dump));
- ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
- sizeof(powervar), iovbuf, sizeof(iovbuf), FALSE);
- if (ret < 0) {
- DHD_ERROR(("the error of dhd deepsleep status"
- " ret value :%d\n", ret));
- } else {
- if (!(*(int *)iovbuf)) {
- DHD_ERROR(("deepsleep mode is 0,"
- " count: %d\n", cnt));
- break;
- }
- }
+ snprintf(before_sr_dump, sizeof(before_sr_dump), "%s_%d_%s",
+ "sssr_dump_core", i, "before_SR");
+ snprintf(after_sr_dump, sizeof(after_sr_dump), "%s_%d_%s",
+ "sssr_dump_core", i, "after_SR");
+
+ if (dhdp->sssr_d11_before[i] && dhdp->sssr_d11_outofreset[i] &&
+ (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
+ if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_before[i],
+ dhdp->sssr_reg_info.mac_regs[i].sr_size, before_sr_dump)) {
+ DHD_ERROR(("%s: writing SSSR MAIN dump before to the file failed\n",
+ __FUNCTION__));
+ }
+ }
+ if (dhdp->sssr_d11_after[i] && dhdp->sssr_d11_outofreset[i]) {
+ if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_after[i],
+ dhdp->sssr_reg_info.mac_regs[i].sr_size, after_sr_dump)) {
+ DHD_ERROR(("%s: writing SSSR AUX dump after to the file failed\n",
+ __FUNCTION__));
}
+ }
+ }
- /* Enable MPC */
- powervar = 1;
- ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL,
- 0, TRUE);
- break;
+ if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) {
+ dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
+ } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
+ dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) {
+ dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
}
- return 0;
-}
-#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
+ if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
+ if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_before,
+ dig_buf_size, "sssr_dump_dig_before_SR")) {
+ DHD_ERROR(("%s: writing SSSR Dig dump before to the file failed\n",
+ __FUNCTION__));
+ }
+ }
-#ifdef PROP_TXSTATUS
+ if (dhdp->sssr_dig_buf_after) {
+ if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_after,
+ dig_buf_size, "sssr_dump_dig_after_SR")) {
+ DHD_ERROR(("%s: writing SSSR Dig VASIP dump after to the file failed\n",
+ __FUNCTION__));
+ }
+ }
-void dhd_wlfc_plat_init(void *dhd)
-{
-#ifdef USE_DYNAMIC_F2_BLKSIZE
- dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
-#endif /* USE_DYNAMIC_F2_BLKSIZE */
- return;
+exit:
+ DHD_GENERAL_LOCK(dhdp, flags);
+ DHD_BUS_BUSY_CLEAR_IN_SSSRDUMP(dhdp);
+ dhd_os_busbusy_wake(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
}
-void dhd_wlfc_plat_deinit(void *dhd)
+void
+dhd_write_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode)
{
-#ifdef USE_DYNAMIC_F2_BLKSIZE
- dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
-#endif /* USE_DYNAMIC_F2_BLKSIZE */
+ dhdp->sssr_dump_mode = dump_mode;
+
+ /*
+ * If kernel does not have file write access enabled
+ * then skip writing dumps to files.
+ * The dumps will be pushed to HAL layer which will
+ * write into files
+ */
+#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
return;
+#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+
+ /*
+ * dhd_mem_dump -> dhd_sssr_dump -> dhd_write_sssr_dump
+ * Without workqueue -
+ * DUMP_TYPE_DONGLE_INIT_FAILURE/DUMP_TYPE_DUE_TO_BT/DUMP_TYPE_SMMU_FAULT
+ * : These are called in own handler, not in the interrupt context
+ * With workqueue - all other DUMP_TYPEs : dhd_mem_dump is called in workqueue
+	 * Thus, it doesn't need to dump SSSR in workqueue
+ */
+ DHD_ERROR(("%s: writing sssr dump to file... \n", __FUNCTION__));
+ dhd_sssr_dump_to_file(dhdp->info);
+
}
+#endif /* DHD_SSSR_DUMP */
-bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx)
+#ifdef DHD_LOG_DUMP
+static void
+dhd_log_dump(void *handle, void *event_info, u8 event)
{
-#ifdef SKIP_WLFC_ON_CONCURRENT
+ dhd_info_t *dhd = handle;
+ log_dump_type_t *type = (log_dump_type_t *)event_info;
+
+ if (!dhd || !type) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return;
+ }
#ifdef WL_CFG80211
- struct net_device * net = dhd_idx2net((dhd_pub_t *)dhdp, idx);
- if (net)
- /* enable flow control in vsdb mode */
- return !(wl_cfg80211_is_concurrent_mode(net));
-#else
- return TRUE; /* skip flow control */
-#endif /* WL_CFG80211 */
+ /* flush the fw side logs */
+ wl_flush_fw_log_buffer(dhd_linux_get_primary_netdev(&dhd->pub),
+ FW_LOGSET_MASK_ALL);
+#endif // endif
+ /* there are currently 3 possible contexts from which
+ * log dump can be scheduled -
+ * 1.TRAP 2.supplicant DEBUG_DUMP pvt driver command
+ * 3.HEALTH CHECK event
+ * The concise debug info buffer is a shared resource
+ * and in case a trap is one of the contexts then both the
+ * scheduled work queues need to run because trap data is
+ * essential for debugging. Hence a mutex lock is acquired
+ * before calling do_dhd_log_dump().
+ */
+ DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
+ dhd_os_logdump_lock(&dhd->pub);
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ if (do_dhd_log_dump(&dhd->pub, type) != BCME_OK) {
+ DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
+ }
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ dhd_os_logdump_unlock(&dhd->pub);
+}
-#else
- return FALSE;
-#endif /* SKIP_WLFC_ON_CONCURRENT */
- return FALSE;
+void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type)
+{
+ DHD_ERROR(("%s: scheduling log dump.. \n", __FUNCTION__));
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
+ type, DHD_WQ_WORK_DHD_LOG_DUMP,
+ dhd_log_dump, DHD_WQ_WORK_PRIORITY_HIGH);
}
-#endif /* PROP_TXSTATUS */
-#ifdef BCMDBGFS
-#include <linux/debugfs.h>
+static void
+dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size)
+{
+#if 0
+#ifdef DHD_FW_COREDUMP
+ if ((dhdp->memdump_enabled == DUMP_MEMONLY) ||
+ (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON) ||
+ (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT))
+#else
+ if (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT)
+#endif
+ {
+#if defined(CONFIG_ARM64)
+ DHD_ERROR(("-------- %s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n",
+ name, (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
+#elif defined(__ARM_ARCH_7A__)
+ DHD_ERROR(("-------- %s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n",
+ name, (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
+#endif /* __ARM_ARCH_7A__ */
+ }
+#endif
+}
-typedef struct dhd_dbgfs {
- struct dentry *debugfs_dir;
- struct dentry *debugfs_mem;
- dhd_pub_t *dhdp;
- uint32 size;
-} dhd_dbgfs_t;
+static void
+dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type)
+{
+ int i;
+ unsigned long wr_size = 0;
+ struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0];
+ size_t log_size = 0;
+ char buf_name[DHD_PRINT_BUF_NAME_LEN];
+ dhd_dbg_ring_t *ring = NULL;
-dhd_dbgfs_t g_dbgfs;
+ BCM_REFERENCE(ring);
-extern uint32 dhd_readregl(void *bp, uint32 addr);
-extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
+ for (i = 0; i < DLD_BUFFER_NUM; i++) {
+ dld_buf = &g_dld_buf[i];
+ log_size = (unsigned long)dld_buf->max -
+ (unsigned long)dld_buf->buffer;
+ if (dld_buf->wraparound) {
+ wr_size = log_size;
+ } else {
+ wr_size = (unsigned long)dld_buf->present -
+ (unsigned long)dld_buf->front;
+ }
+ scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d]", i);
+ dhd_print_buf_addr(dhdp, buf_name, dld_buf, dld_buf_size[i]);
+ scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] buffer", i);
+ dhd_print_buf_addr(dhdp, buf_name, dld_buf->buffer, wr_size);
+ scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] present", i);
+ dhd_print_buf_addr(dhdp, buf_name, dld_buf->present, wr_size);
+ scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] front", i);
+ dhd_print_buf_addr(dhdp, buf_name, dld_buf->front, wr_size);
+ }
-static int
-dhd_dbg_state_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
+#ifdef EWP_ECNTRS_LOGGING
+ /* periodic flushing of ecounters is NOT supported */
+ if (*type == DLD_BUF_TYPE_ALL &&
+ logdump_ecntr_enable &&
+ dhdp->ecntr_dbg_ring) {
-static ssize_t
-dhd_dbg_state_read(struct file *file, char __user *ubuf,
- size_t count, loff_t *ppos)
-{
- ssize_t rval;
- uint32 tmp;
- loff_t pos = *ppos;
- size_t ret;
+ ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring;
+ dhd_print_buf_addr(dhdp, "ecntr_dbg_ring", ring, LOG_DUMP_ECNTRS_MAX_BUFSIZE);
+ dhd_print_buf_addr(dhdp, "ecntr_dbg_ring ring_buf", ring->ring_buf,
+ LOG_DUMP_ECNTRS_MAX_BUFSIZE);
+ }
+#endif /* EWP_ECNTRS_LOGGING */
- if (pos < 0)
- return -EINVAL;
- if (pos >= g_dbgfs.size || !count)
- return 0;
- if (count > g_dbgfs.size - pos)
- count = g_dbgfs.size - pos;
+#ifdef DHD_STATUS_LOGGING
+ if (dhdp->statlog) {
+ dhd_print_buf_addr(dhdp, "statlog_logbuf", dhd_statlog_get_logbuf(dhdp),
+ dhd_statlog_get_logbuf_len(dhdp));
+ }
+#endif /* DHD_STATUS_LOGGING */
- /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
- tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
+#ifdef EWP_RTT_LOGGING
+	/* periodic flushing of rtt logs is NOT supported */
+ if (*type == DLD_BUF_TYPE_ALL &&
+ logdump_rtt_enable &&
+ dhdp->rtt_dbg_ring) {
- ret = copy_to_user(ubuf, &tmp, 4);
- if (ret == count)
- return -EFAULT;
+ ring = (dhd_dbg_ring_t *)dhdp->rtt_dbg_ring;
+ dhd_print_buf_addr(dhdp, "rtt_dbg_ring", ring, LOG_DUMP_RTT_MAX_BUFSIZE);
+ dhd_print_buf_addr(dhdp, "rtt_dbg_ring ring_buf", ring->ring_buf,
+ LOG_DUMP_RTT_MAX_BUFSIZE);
+ }
+#endif /* EWP_RTT_LOGGING */
- count -= ret;
- *ppos = pos + count;
- rval = count;
+#ifdef BCMPCIE
+ if (dhdp->dongle_trap_occured && dhdp->extended_trap_data) {
+ dhd_print_buf_addr(dhdp, "extended_trap_data", dhdp->extended_trap_data,
+ BCMPCIE_EXT_TRAP_DATA_MAXLEN);
+ }
+#endif /* BCMPCIE */
- return rval;
+#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
+ /* if health check event was received */
+ if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
+ dhd_print_buf_addr(dhdp, "health_chk_event_data", dhdp->health_chk_event_data,
+ HEALTH_CHK_BUF_SIZE);
+ }
+#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
+
+ /* append the concise debug information */
+ if (dhdp->concise_dbg_buf) {
+ dhd_print_buf_addr(dhdp, "concise_dbg_buf", dhdp->concise_dbg_buf,
+ CONCISE_DUMP_BUFLEN);
+ }
}
-static ssize_t
-dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
+#ifdef DHD_SSSR_DUMP
+int
+dhdpcie_sssr_dump_get_before_after_len(dhd_pub_t *dhd, uint32 *arr_len)
{
- loff_t pos = *ppos;
- size_t ret;
- uint32 buf;
+ int i = 0;
- if (pos < 0)
- return -EINVAL;
- if (pos >= g_dbgfs.size || !count)
- return 0;
- if (count > g_dbgfs.size - pos)
- count = g_dbgfs.size - pos;
+ DHD_ERROR(("%s\n", __FUNCTION__));
- ret = copy_from_user(&buf, ubuf, sizeof(uint32));
- if (ret == count)
- return -EFAULT;
+ /* core 0 */
+ i = 0;
+ if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
+ (dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
+ arr_len[SSSR_C0_D11_BEFORE] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
+ DHD_ERROR(("%s: arr_len[SSSR_C0_D11_BEFORE] : %d\n", __FUNCTION__,
+ arr_len[SSSR_C0_D11_BEFORE]));
+#ifdef DHD_LOG_DUMP
+ dhd_print_buf_addr(dhd, "SSSR_C0_D11_BEFORE",
+ dhd->sssr_d11_before[i], arr_len[SSSR_C0_D11_BEFORE]);
+#endif /* DHD_LOG_DUMP */
+ }
+ if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
+ arr_len[SSSR_C0_D11_AFTER] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
+ DHD_ERROR(("%s: arr_len[SSSR_C0_D11_AFTER] : %d\n", __FUNCTION__,
+ arr_len[SSSR_C0_D11_AFTER]));
+#ifdef DHD_LOG_DUMP
+ dhd_print_buf_addr(dhd, "SSSR_C0_D11_AFTER",
+ dhd->sssr_d11_after[i], arr_len[SSSR_C0_D11_AFTER]);
+#endif /* DHD_LOG_DUMP */
+ }
- /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
- dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
+ /* core 1 */
+ i = 1;
+ if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
+ (dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
+ arr_len[SSSR_C1_D11_BEFORE] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
+ DHD_ERROR(("%s: arr_len[SSSR_C1_D11_BEFORE] : %d\n", __FUNCTION__,
+ arr_len[SSSR_C1_D11_BEFORE]));
+#ifdef DHD_LOG_DUMP
+ dhd_print_buf_addr(dhd, "SSSR_C1_D11_BEFORE",
+ dhd->sssr_d11_before[i], arr_len[SSSR_C1_D11_BEFORE]);
+#endif /* DHD_LOG_DUMP */
+ }
+ if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
+ arr_len[SSSR_C1_D11_AFTER] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
+ DHD_ERROR(("%s: arr_len[SSSR_C1_D11_AFTER] : %d\n", __FUNCTION__,
+ arr_len[SSSR_C1_D11_AFTER]));
+#ifdef DHD_LOG_DUMP
+ dhd_print_buf_addr(dhd, "SSSR_C1_D11_AFTER",
+ dhd->sssr_d11_after[i], arr_len[SSSR_C1_D11_AFTER]);
+#endif /* DHD_LOG_DUMP */
+ }
- return count;
+ if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
+ arr_len[SSSR_DIG_BEFORE] = (dhd->sssr_reg_info.vasip_regs.vasip_sr_size);
+ arr_len[SSSR_DIG_AFTER] = (dhd->sssr_reg_info.vasip_regs.vasip_sr_size);
+ DHD_ERROR(("%s: arr_len[SSSR_DIG_BEFORE] : %d\n", __FUNCTION__,
+ arr_len[SSSR_DIG_BEFORE]));
+ DHD_ERROR(("%s: arr_len[SSSR_DIG_AFTER] : %d\n", __FUNCTION__,
+ arr_len[SSSR_DIG_AFTER]));
+#ifdef DHD_LOG_DUMP
+ if (dhd->sssr_dig_buf_before) {
+ dhd_print_buf_addr(dhd, "SSSR_DIG_BEFORE",
+ dhd->sssr_dig_buf_before, arr_len[SSSR_DIG_BEFORE]);
+ }
+ if (dhd->sssr_dig_buf_after) {
+ dhd_print_buf_addr(dhd, "SSSR_DIG_AFTER",
+ dhd->sssr_dig_buf_after, arr_len[SSSR_DIG_AFTER]);
+ }
+#endif /* DHD_LOG_DUMP */
+ } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
+ dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
+ arr_len[SSSR_DIG_BEFORE] = (dhd->sssr_reg_info.dig_mem_info.dig_sr_size);
+ arr_len[SSSR_DIG_AFTER] = (dhd->sssr_reg_info.dig_mem_info.dig_sr_size);
+ DHD_ERROR(("%s: arr_len[SSSR_DIG_BEFORE] : %d\n", __FUNCTION__,
+ arr_len[SSSR_DIG_BEFORE]));
+ DHD_ERROR(("%s: arr_len[SSSR_DIG_AFTER] : %d\n", __FUNCTION__,
+ arr_len[SSSR_DIG_AFTER]));
+#ifdef DHD_LOG_DUMP
+ if (dhd->sssr_dig_buf_before) {
+ dhd_print_buf_addr(dhd, "SSSR_DIG_BEFORE",
+ dhd->sssr_dig_buf_before, arr_len[SSSR_DIG_BEFORE]);
+ }
+ if (dhd->sssr_dig_buf_after) {
+ dhd_print_buf_addr(dhd, "SSSR_DIG_AFTER",
+ dhd->sssr_dig_buf_after, arr_len[SSSR_DIG_AFTER]);
+ }
+#endif /* DHD_LOG_DUMP */
+ }
+ return BCME_OK;
}
-loff_t
-dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
+void
+dhd_nla_put_sssr_dump_len(void *ndev, uint32 *arr_len)
{
- loff_t pos = -1;
+ dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhd_pub_t *dhdp = &dhd_info->pub;
- switch (whence) {
- case 0:
- pos = off;
- break;
- case 1:
- pos = file->f_pos + off;
- break;
- case 2:
- pos = g_dbgfs.size - off;
+ if (dhdp->sssr_dump_collected) {
+ dhdpcie_sssr_dump_get_before_after_len(dhdp, arr_len);
}
- return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
}
+#endif /* DHD_SSSR_DUMP */
-static const struct file_operations dhd_dbg_state_ops = {
- .read = dhd_dbg_state_read,
- .write = dhd_debugfs_write,
- .open = dhd_dbg_state_open,
- .llseek = dhd_debugfs_lseek
-};
-
-static void dhd_dbgfs_create(void)
+uint32
+dhd_get_time_str_len()
{
- if (g_dbgfs.debugfs_dir) {
- g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
- NULL, &dhd_dbg_state_ops);
- }
+ char *ts = NULL, time_str[128];
+
+ ts = dhd_log_dump_get_timestamp();
+ snprintf(time_str, sizeof(time_str),
+ "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
+ return strlen(time_str);
}
-void dhd_dbgfs_init(dhd_pub_t *dhdp)
+#if defined(BCMPCIE)
+uint32
+dhd_get_ext_trap_len(void *ndev, dhd_pub_t *dhdp)
{
- g_dbgfs.dhdp = dhdp;
- g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
+ int length = 0;
+ log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd_info;
- g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
- if (IS_ERR(g_dbgfs.debugfs_dir)) {
- g_dbgfs.debugfs_dir = NULL;
- return;
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
}
- dhd_dbgfs_create();
+ if (!dhdp)
+ return length;
- return;
+ if (dhdp->extended_trap_data) {
+ length = (strlen(EXT_TRAP_LOG_HDR)
+ + sizeof(sec_hdr) + BCMPCIE_EXT_TRAP_DATA_MAXLEN);
+ }
+ return length;
}
+#endif
-void dhd_dbgfs_remove(void)
+#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
+uint32
+dhd_get_health_chk_len(void *ndev, dhd_pub_t *dhdp)
{
- debugfs_remove(g_dbgfs.debugfs_mem);
- debugfs_remove(g_dbgfs.debugfs_dir);
+ int length = 0;
+ log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd_info;
- bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
+ }
+
+ if (!dhdp)
+ return length;
+
+ if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
+ length = (strlen(HEALTH_CHK_LOG_HDR)
+ + sizeof(sec_hdr) + HEALTH_CHK_BUF_SIZE);
+ }
+ return length;
}
-#endif /* BCMDBGFS */
+#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
-#ifdef CUSTOM_SET_CPUCORE
-void dhd_set_cpucore(dhd_pub_t *dhd, int set)
+uint32
+dhd_get_dhd_dump_len(void *ndev, dhd_pub_t *dhdp)
{
- int e_dpc = 0, e_rxf = 0, retry_set = 0;
+ int length = 0;
+ log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd_info;
+ uint32 remain_len = 0;
- if (!(dhd->chan_isvht80)) {
- DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
- return;
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
}
- if (DPC_CPUCORE) {
- do {
- if (set == TRUE) {
- e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
- cpumask_of(DPC_CPUCORE));
- } else {
- e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
- cpumask_of(PRIMARY_CPUCORE));
- }
- if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
- DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
- return;
- }
- if (e_dpc < 0)
- OSL_SLEEP(1);
- } while (e_dpc < 0);
- }
- if (RXF_CPUCORE) {
- do {
- if (set == TRUE) {
- e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
- cpumask_of(RXF_CPUCORE));
- } else {
- e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
- cpumask_of(PRIMARY_CPUCORE));
- }
- if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
- DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
- return;
- }
- if (e_rxf < 0)
- OSL_SLEEP(1);
- } while (e_rxf < 0);
- }
-#ifdef DHD_OF_SUPPORT
- interrupt_set_cpucore(set, DPC_CPUCORE, PRIMARY_CPUCORE);
-#endif /* DHD_OF_SUPPORT */
- DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
+ if (!dhdp)
+ return length;
- return;
+ if (dhdp->concise_dbg_buf) {
+ remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
+ if (remain_len <= 0) {
+ DHD_ERROR(("%s: error getting concise debug info !\n",
+ __FUNCTION__));
+ return length;
+ }
+ length = (strlen(DHD_DUMP_LOG_HDR) + sizeof(sec_hdr) +
+ (CONCISE_DUMP_BUFLEN - remain_len));
+ }
+ return length;
}
-#endif /* CUSTOM_SET_CPUCORE */
-#ifdef DHD_MCAST_REGEN
-/* Get interface specific ap_isolate configuration */
-int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx)
+uint32
+dhd_get_cookie_log_len(void *ndev, dhd_pub_t *dhdp)
{
- dhd_info_t *dhd = dhdp->info;
- dhd_if_t *ifp;
+ int length = 0;
+ dhd_info_t *dhd_info;
+
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
+ }
- ASSERT(idx < DHD_MAX_IFS);
+ if (!dhdp)
+ return length;
- ifp = dhd->iflist[idx];
+ if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
+ length = dhd_log_dump_cookie_len(dhdp);
+ }
+ return length;
- return ifp->mcast_regen_bss_enable;
}
-/* Set interface specific mcast_regen configuration */
-int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val)
+#ifdef DHD_DUMP_PCIE_RINGS
+uint32
+dhd_get_flowring_len(void *ndev, dhd_pub_t *dhdp)
{
- dhd_info_t *dhd = dhdp->info;
- dhd_if_t *ifp;
+ int length = 0;
+ log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd_info;
+ uint16 h2d_flowrings_total;
+ uint32 remain_len = 0;
- ASSERT(idx < DHD_MAX_IFS);
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
+ }
- ifp = dhd->iflist[idx];
+ if (!dhdp)
+ return length;
- ifp->mcast_regen_bss_enable = val;
+ if (dhdp->concise_dbg_buf) {
+ remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
+ if (remain_len <= 0) {
+ DHD_ERROR(("%s: error getting concise debug info !\n",
+ __FUNCTION__));
+ return length;
+ }
+ }
- /* Disable rx_pkt_chain feature for interface, if mcast_regen feature
- * is enabled
- */
- dhd_update_rx_pkt_chainable_state(dhdp, idx);
- return BCME_OK;
+ length += strlen(FLOWRING_DUMP_HDR);
+ length += CONCISE_DUMP_BUFLEN - remain_len;
+ length += sizeof(sec_hdr);
+ h2d_flowrings_total = dhd_get_max_flow_rings(dhdp);
+ length += ((H2DRING_TXPOST_ITEMSIZE
+ * H2DRING_TXPOST_MAX_ITEM * h2d_flowrings_total)
+ + (D2HRING_TXCMPLT_ITEMSIZE * D2HRING_TXCMPLT_MAX_ITEM)
+ + (H2DRING_RXPOST_ITEMSIZE * H2DRING_RXPOST_MAX_ITEM)
+ + (D2HRING_RXCMPLT_ITEMSIZE * D2HRING_RXCMPLT_MAX_ITEM)
+ + (H2DRING_CTRL_SUB_ITEMSIZE * H2DRING_CTRL_SUB_MAX_ITEM)
+ + (D2HRING_CTRL_CMPLT_ITEMSIZE * D2HRING_CTRL_CMPLT_MAX_ITEM)
+#ifdef EWP_EDL
+ + (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM));
+#else
+ + (H2DRING_INFO_BUFPOST_ITEMSIZE * H2DRING_DYNAMIC_INFO_MAX_ITEM)
+ + (D2HRING_INFO_BUFCMPLT_ITEMSIZE * D2HRING_DYNAMIC_INFO_MAX_ITEM));
+#endif /* EWP_EDL */
+ return length;
}
-#endif /* DHD_MCAST_REGEN */
+#endif /* DHD_DUMP_PCIE_RINGS */
-/* Get interface specific ap_isolate configuration */
-int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
+#ifdef EWP_ECNTRS_LOGGING
+uint32
+dhd_get_ecntrs_len(void *ndev, dhd_pub_t *dhdp)
{
- dhd_info_t *dhd = dhdp->info;
- dhd_if_t *ifp;
+ dhd_info_t *dhd_info;
+ log_dump_section_hdr_t sec_hdr;
+ int length = 0;
+ dhd_dbg_ring_t *ring;
- ASSERT(idx < DHD_MAX_IFS);
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
+ }
- ifp = dhd->iflist[idx];
+ if (!dhdp)
+ return length;
- return ifp->ap_isolate;
+ if (logdump_ecntr_enable && dhdp->ecntr_dbg_ring) {
+ ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring;
+ length = ring->ring_size + strlen(ECNTRS_LOG_HDR) + sizeof(sec_hdr);
+ }
+ return length;
}
+#endif /* EWP_ECNTRS_LOGGING */
-/* Set interface specific ap_isolate configuration */
-int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
+#ifdef EWP_RTT_LOGGING
+uint32
+dhd_get_rtt_len(void *ndev, dhd_pub_t *dhdp)
{
- dhd_info_t *dhd = dhdp->info;
- dhd_if_t *ifp;
-
- ASSERT(idx < DHD_MAX_IFS);
+ dhd_info_t *dhd_info;
+ log_dump_section_hdr_t sec_hdr;
+ int length = 0;
+ dhd_dbg_ring_t *ring;
- ifp = dhd->iflist[idx];
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
+ }
- if (ifp)
- ifp->ap_isolate = val;
+ if (!dhdp)
+ return length;
- return 0;
+ if (logdump_rtt_enable && dhdp->rtt_dbg_ring) {
+ ring = (dhd_dbg_ring_t *)dhdp->rtt_dbg_ring;
+ length = ring->ring_size + strlen(RTT_LOG_HDR) + sizeof(sec_hdr);
+ }
+ return length;
}
+#endif /* EWP_RTT_LOGGING */
-#ifdef DHD_FW_COREDUMP
-#if defined(CONFIG_X86)
-#define MEMDUMPINFO_LIVE "/installmedia/.memdump.info"
-#define MEMDUMPINFO_INST "/data/.memdump.info"
-#endif /* CONFIG_X86 && OEM_ANDROID */
-
-#define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
-
-void dhd_get_memdump_info(dhd_pub_t *dhd)
+int
+dhd_get_dld_log_dump(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, int type, void *pos)
{
- struct file *fp = NULL;
- uint32 mem_val = DUMP_MEMFILE_MAX;
- int ret = 0;
- char *filepath = MEMDUMPINFO;
-
-#if !defined(CONFIG_X86)
- mem_val = DUMP_DISABLED;
- goto done;
-#endif
+ int ret = BCME_OK;
+ struct dhd_log_dump_buf *dld_buf;
+ log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd_info;
- /* Read memdump info from the file */
- fp = filp_open(filepath, O_RDONLY, 0);
- if (IS_ERR(fp)) {
- DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
-#if defined(CONFIG_X86)
- /* Check if it is Live Brix Image */
- if (strcmp(filepath, MEMDUMPINFO_LIVE) != 0) {
- goto done;
- }
- /* Try if it is Installed Brix Image */
- filepath = MEMDUMPINFO_INST;
- DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath));
- fp = filp_open(filepath, O_RDONLY, 0);
- if (IS_ERR(fp)) {
- DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
- goto done;
- }
-#else /* Non Brix Android platform */
- goto done;
-#endif /* CONFIG_X86 && OEM_ANDROID */
- }
+ dld_buf = &g_dld_buf[type];
- /* Handle success case */
- ret = compat_kernel_read(fp, 0, (char *)&mem_val, 4);
- if (ret < 0) {
- DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
- filp_close(fp, NULL);
- goto done;
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ } else if (!dhdp) {
+ return BCME_ERROR;
}
- mem_val = bcm_atoi((char *)&mem_val);
-
- filp_close(fp, NULL);
+ DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
-#ifdef DHD_INIT_DEFAULT_MEMDUMP
- if (mem_val == 0 || mem_val == DUMP_MEMFILE_MAX)
- mem_val = DUMP_MEMFILE_BUGON;
-#endif /* DHD_INIT_DEFAULT_MEMDUMP */
+ dhd_init_sec_hdr(&sec_hdr);
-done:
- dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_MEMFILE;
+ /* write the section header first */
+ ret = dhd_export_debug_data(dld_hdrs[type].hdr_str, fp, user_buf,
+ strlen(dld_hdrs[type].hdr_str), pos);
+ if (ret < 0)
+ goto exit;
+ len -= (uint32)strlen(dld_hdrs[type].hdr_str);
+ len -= (uint32)sizeof(sec_hdr);
+ sec_hdr.type = dld_hdrs[type].sec_type;
+ sec_hdr.length = len;
+ ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
+ if (ret < 0)
+ goto exit;
+ ret = dhd_export_debug_data(dld_buf->buffer, fp, user_buf, len, pos);
+ if (ret < 0)
+ goto exit;
- DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, dhd->memdump_enabled));
+exit:
+ return ret;
}
-void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
+static int
+dhd_log_flush(dhd_pub_t *dhdp, log_dump_type_t *type)
{
unsigned long flags = 0;
- dhd_dump_t *dump = NULL;
+#ifdef EWP_EDL
+ int i = 0;
+#endif /* EWP_EDL */
dhd_info_t *dhd_info = NULL;
- dhd_info = (dhd_info_t *)dhdp->info;
- dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
- if (dump == NULL) {
- DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__));
- return;
+
+	/* if dhdp is null, it's extremely unlikely that log dump will be scheduled
+ * so not freeing 'type' here is ok, even if we want to free 'type'
+ * we cannot do so, since 'dhdp->osh' is unavailable
+ * as dhdp is null
+ */
+ if (!dhdp || !type) {
+ if (dhdp) {
+ DHD_GENERAL_LOCK(dhdp, flags);
+ DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
+ dhd_os_busbusy_wake(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ }
+ return BCME_ERROR;
}
- dump->buf = buf;
- dump->bufsize = size;
-#ifdef BCMPCIE
- if (dhdp->hscb_enable) {
- dhd_get_hscb_info(dhdp->prot, (void*)(&dump->hscb_buf),
- (uint32 *)(&dump->hscb_bufsize));
+
+ dhd_info = (dhd_info_t *)dhdp->info;
+ /* in case of trap get preserve logs from ETD */
+#if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS)
+ if (dhdp->dongle_trap_occured &&
+ dhdp->extended_trap_data) {
+ dhdpcie_get_etd_preserve_logs(dhdp, (uint8 *)dhdp->extended_trap_data,
+ &dhd_info->event_data);
}
- else
#endif /* BCMPCIE */
- {
- dump->hscb_bufsize = 0;
+
+ /* flush the event work items to get any fw events/logs
+ * flush_work is a blocking call
+ */
+#ifdef SHOW_LOGTRACE
+#ifdef EWP_EDL
+ if (dhd_info->pub.dongle_edl_support) {
+ /* wait till existing edl items are processed */
+ dhd_flush_logtrace_process(dhd_info);
+ /* dhd_flush_logtrace_process will ensure the work items in the ring
+ * (EDL ring) from rd to wr are processed. But if wr had
+ * wrapped around, only the work items from rd to ring-end are processed.
+ * So to ensure that the work items at the
+ * beginning of ring are also processed in the wrap around case, call
+ * it twice
+ */
+ for (i = 0; i < 2; i++) {
+ /* blocks till the edl items are processed */
+ dhd_flush_logtrace_process(dhd_info);
+ }
+ } else {
+ dhd_flush_logtrace_process(dhd_info);
}
+#else
+ dhd_flush_logtrace_process(dhd_info);
+#endif /* EWP_EDL */
+#endif /* SHOW_LOGTRACE */
-#ifdef DHD_LOG_DUMP
- dhd_print_buf_addr(dhdp, "memdump", buf, size);
-#endif /* DHD_LOG_DUMP */
+ return BCME_OK;
+}
- if (dhdp->memdump_enabled == DUMP_MEMONLY) {
- BUG_ON(1);
+int
+dhd_get_debug_dump_file_name(void *dev, dhd_pub_t *dhdp, char *dump_path, int size)
+{
+ dhd_info_t *dhd_info;
+
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
}
-#if defined(DEBUG_DNGL_INIT_FAIL) || defined(DHD_ERPOM)
- if (
-#if defined(DEBUG_DNGL_INIT_FAIL)
- (dhdp->memdump_type == DUMP_TYPE_DONGLE_INIT_FAILURE) ||
-#endif /* DEBUG_DNGL_INIT_FAIL */
-#ifdef DHD_ERPOM
- (dhdp->memdump_type == DUMP_TYPE_DUE_TO_BT) ||
-#endif /* DHD_ERPOM */
- FALSE)
- {
-#ifdef DHD_LOG_DUMP
- log_dump_type_t *flush_type = NULL;
-#endif // endif
- dhd_info->scheduled_memdump = FALSE;
- dhd_mem_dump((void *)dhdp->info, (void *)dump, 0);
- /* for dongle init fail cases, 'dhd_mem_dump' does
- * not call 'dhd_log_dump', so call it here.
- */
-#ifdef DHD_LOG_DUMP
- flush_type = MALLOCZ(dhdp->osh,
- sizeof(log_dump_type_t));
- if (flush_type) {
- *flush_type = DLD_BUF_TYPE_ALL;
- DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
- dhd_log_dump(dhdp->info, flush_type, 0);
- }
-#endif /* DHD_LOG_DUMP */
- return;
+ if (!dhdp)
+ return BCME_ERROR;
+
+ memset(dump_path, 0, size);
+
+ switch (dhdp->debug_dump_subcmd) {
+ case CMD_UNWANTED:
+ snprintf(dump_path, size, "%s",
+ DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
+ DHD_DUMP_SUBSTR_UNWANTED);
+ break;
+ case CMD_DISCONNECTED:
+ snprintf(dump_path, size, "%s",
+ DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
+ DHD_DUMP_SUBSTR_DISCONNECTED);
+ break;
+ default:
+ snprintf(dump_path, size, "%s",
+ DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE);
}
-#endif /* DEBUG_DNGL_INIT_FAIL || DHD_ERPOM */
- dhd_info->scheduled_memdump = TRUE;
- /* bus busy bit for mem dump will be cleared in mem dump
- * work item context, after mem dump file is written
- */
- DHD_GENERAL_LOCK(dhdp, flags);
- DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp);
- DHD_GENERAL_UNLOCK(dhdp, flags);
- DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__));
- dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
- DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WQ_WORK_PRIORITY_HIGH);
+ if (!dhdp->logdump_periodic_flush) {
+ get_debug_dump_time(dhdp->debug_dump_time_str);
+ snprintf(dump_path + strlen(dump_path),
+ size - strlen(dump_path),
+ "_%s", dhdp->debug_dump_time_str);
+ }
+ return BCME_OK;
+}
+
+uint32
+dhd_get_dld_len(int log_type)
+{
+ unsigned long wr_size = 0;
+ unsigned long buf_size = 0;
+ unsigned long flags = 0;
+ struct dhd_log_dump_buf *dld_buf;
+ log_dump_section_hdr_t sec_hdr;
+
+ /* calculate the length of the log */
+ dld_buf = &g_dld_buf[log_type];
+ buf_size = (unsigned long)dld_buf->max -
+ (unsigned long)dld_buf->buffer;
+
+ if (dld_buf->wraparound) {
+ wr_size = buf_size;
+ } else {
+ /* need to hold the lock before accessing 'present' and 'remain' ptrs */
+ spin_lock_irqsave(&dld_buf->lock, flags);
+ wr_size = (unsigned long)dld_buf->present -
+ (unsigned long)dld_buf->front;
+ spin_unlock_irqrestore(&dld_buf->lock, flags);
+ }
+ return (wr_size + sizeof(sec_hdr) + strlen(dld_hdrs[log_type].hdr_str));
+}
+
+static void
+dhd_get_time_str(dhd_pub_t *dhdp, char *time_str, int size)
+{
+ char *ts = NULL;
+ memset(time_str, 0, size);
+ ts = dhd_log_dump_get_timestamp();
+ snprintf(time_str, size,
+ "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
}
-static void
-dhd_mem_dump(void *handle, void *event_info, u8 event)
+int
+dhd_print_time_str(const void *user_buf, void *fp, uint32 len, void *pos)
{
- dhd_info_t *dhd = handle;
- dhd_pub_t *dhdp = NULL;
- dhd_dump_t *dump = event_info;
- unsigned long flags = 0;
+ char *ts = NULL;
+ int ret = 0;
+ char time_str[128];
- DHD_ERROR(("%s: ENTER, memdump type %u\n", __FUNCTION__, dhd->pub.memdump_type));
+ memset_s(time_str, sizeof(time_str), 0, sizeof(time_str));
+ ts = dhd_log_dump_get_timestamp();
+ snprintf(time_str, sizeof(time_str),
+ "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
- if (!dhd) {
- DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
- return;
+ /* write the timestamp hdr to the file first */
+ ret = dhd_export_debug_data(time_str, fp, user_buf, strlen(time_str), pos);
+ if (ret < 0) {
+ DHD_ERROR(("write file error, err = %d\n", ret));
}
+ return ret;
+}
- dhdp = &dhd->pub;
+#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
+int
+dhd_print_health_chk_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
+{
+ int ret = BCME_OK;
+ log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd_info;
- DHD_GENERAL_LOCK(dhdp, flags);
- if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
- DHD_GENERAL_UNLOCK(dhdp, flags);
- DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__));
- goto exit;
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
}
- DHD_GENERAL_UNLOCK(dhdp, flags);
- if (!dump) {
- DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__));
- goto exit;
- }
+ if (!dhdp)
+ return BCME_ERROR;
- if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) {
- DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
-#ifdef DHD_DEBUG_UART
- dhd->pub.memdump_success = FALSE;
-#endif /* DHD_DEBUG_UART */
+ dhd_init_sec_hdr(&sec_hdr);
+
+ if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
+ /* write the section header first */
+ ret = dhd_export_debug_data(HEALTH_CHK_LOG_HDR, fp, user_buf,
+ strlen(HEALTH_CHK_LOG_HDR), pos);
+ if (ret < 0)
+ goto exit;
+
+ len -= (uint32)strlen(HEALTH_CHK_LOG_HDR);
+ sec_hdr.type = LOG_DUMP_SECTION_HEALTH_CHK;
+ sec_hdr.length = HEALTH_CHK_BUF_SIZE;
+ ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
+ if (ret < 0)
+ goto exit;
+
+ len -= (uint32)sizeof(sec_hdr);
+ /* write the log */
+ ret = dhd_export_debug_data((char *)dhdp->health_chk_event_data, fp,
+ user_buf, len, pos);
+ if (ret < 0)
+ goto exit;
}
+exit:
+ return ret;
+}
+#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
- /* directly call dhd_log_dump for debug_dump collection from the mem_dump work queue
- * context, no need to schedule another work queue for log dump. In case of
- * user initiated DEBUG_DUMP wpa_cli command (DUMP_TYPE_BY_SYSDUMP),
- * cfg layer is itself scheduling the log_dump work queue.
- * that path is not disturbed. If 'dhd_mem_dump' is called directly then we will not
- * collect debug_dump as it may be called from non-sleepable context.
- */
-#ifdef DHD_LOG_DUMP
- if (dhd->scheduled_memdump &&
- dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
- log_dump_type_t *flush_type = MALLOCZ(dhdp->osh,
- sizeof(log_dump_type_t));
- if (flush_type) {
- *flush_type = DLD_BUF_TYPE_ALL;
- DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
- dhd_log_dump(dhd, flush_type, 0);
- }
+#ifdef BCMPCIE
+int
+dhd_print_ext_trap_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
+{
+ int ret = BCME_OK;
+ log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd_info;
+
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
}
-#endif /* DHD_LOG_DUMP */
- clear_debug_dump_time(dhdp->debug_dump_time_str);
+ if (!dhdp)
+ return BCME_ERROR;
- /* before calling bug on, wait for other logs to be dumped.
- * we cannot wait in case dhd_mem_dump is called directly
- * as it may not be in a sleepable context
- */
- if (dhd->scheduled_memdump) {
- uint bitmask = 0;
- int timeleft = 0;
-#ifdef DHD_SSSR_DUMP
- bitmask |= DHD_BUS_BUSY_IN_SSSRDUMP;
-#endif // endif
- if (bitmask != 0) {
- DHD_ERROR(("%s: wait for SSSR dump..\n", __FUNCTION__));
- timeleft = dhd_os_busbusy_wait_bitmask(dhdp,
- &dhdp->dhd_bus_busy_state, bitmask, 0);
- if ((timeleft == 0) || (timeleft == 1)) {
- DHD_ERROR(("%s:Timed out on sssr dump,dhd_bus_busy_state=0x%x\n",
- __FUNCTION__, dhdp->dhd_bus_busy_state));
- }
- }
+ dhd_init_sec_hdr(&sec_hdr);
+
+ /* append extended trap data to the file in case of traps */
+ if (dhdp->dongle_trap_occured &&
+ dhdp->extended_trap_data) {
+ /* write the section header first */
+ ret = dhd_export_debug_data(EXT_TRAP_LOG_HDR, fp, user_buf,
+ strlen(EXT_TRAP_LOG_HDR), pos);
+ if (ret < 0)
+ goto exit;
+
+ len -= (uint32)strlen(EXT_TRAP_LOG_HDR);
+ sec_hdr.type = LOG_DUMP_SECTION_EXT_TRAP;
+ sec_hdr.length = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
+ ret = dhd_export_debug_data((uint8 *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
+ if (ret < 0)
+ goto exit;
+
+ len -= (uint32)sizeof(sec_hdr);
+ /* write the log */
+ ret = dhd_export_debug_data((uint8 *)dhdp->extended_trap_data, fp,
+ user_buf, len, pos);
+ if (ret < 0)
+ goto exit;
}
+exit:
+ return ret;
+}
+#endif /* BCMPCIE */
- if (dump->hscb_buf && dump->hscb_bufsize) {
- DHD_ERROR(("%s: write HSCB dump... \n", __FUNCTION__));
- if (write_dump_to_file(&dhd->pub, dump->hscb_buf,
- dump->hscb_bufsize, "mem_dump_hscb")) {
- DHD_ERROR(("%s: writing HSCB dump to the file failed\n", __FUNCTION__));
-#ifdef DHD_DEBUG_UART
- dhd->pub.memdump_success = FALSE;
-#endif /* DHD_DEBUG_UART */
- }
+int
+dhd_print_dump_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
+{
+ int ret = BCME_OK;
+ log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd_info;
+
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
}
- DHD_ERROR(("%s: memdump type %u\n", __FUNCTION__, dhd->pub.memdump_type));
- if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
-#ifdef DHD_LOG_DUMP
- dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
-#endif /* DHD_LOG_DUMP */
- dhd->pub.memdump_type != DUMP_TYPE_BY_USER &&
-#ifdef DHD_DEBUG_UART
- dhd->pub.memdump_success == TRUE &&
-#endif /* DHD_DEBUG_UART */
-#ifdef DNGL_EVENT_SUPPORT
- dhd->pub.memdump_type != DUMP_TYPE_DONGLE_HOST_EVENT &&
-#endif /* DNGL_EVENT_SUPPORT */
- dhd->pub.memdump_type != DUMP_TYPE_CFG_VENDOR_TRIGGERED) {
+ if (!dhdp)
+ return BCME_ERROR;
-#ifdef SHOW_LOGTRACE
- /* Wait till logtrace context is flushed */
- dhd_flush_logtrace_process(dhd);
-#endif /* SHOW_LOGTRACE */
+ dhd_init_sec_hdr(&sec_hdr);
- DHD_ERROR(("%s: call BUG_ON \n", __FUNCTION__));
- BUG_ON(1);
+ ret = dhd_export_debug_data(DHD_DUMP_LOG_HDR, fp, user_buf, strlen(DHD_DUMP_LOG_HDR), pos);
+ if (ret < 0)
+ goto exit;
+
+ len -= (uint32)strlen(DHD_DUMP_LOG_HDR);
+ sec_hdr.type = LOG_DUMP_SECTION_DHD_DUMP;
+ sec_hdr.length = len;
+ ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
+ if (ret < 0)
+ goto exit;
+
+ len -= (uint32)sizeof(sec_hdr);
+
+ if (dhdp->concise_dbg_buf) {
+ dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
+ ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf, len, pos);
+ if (ret < 0)
+ goto exit;
}
- DHD_ERROR(("%s: No BUG ON, memdump type %u \n", __FUNCTION__, dhd->pub.memdump_type));
exit:
- if (dump)
- MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
- DHD_GENERAL_LOCK(dhdp, flags);
- DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(&dhd->pub);
- dhd_os_busbusy_wake(dhdp);
- DHD_GENERAL_UNLOCK(dhdp, flags);
- dhd->scheduled_memdump = FALSE;
- DHD_ERROR(("%s: EXIT \n", __FUNCTION__));
+ return ret;
}
-#endif /* DHD_FW_COREDUMP */
-
-#ifdef DHD_SSSR_DUMP
-static void
-dhd_sssr_dump(void *handle, void *event_info, u8 event)
+int
+dhd_print_cookie_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
{
- dhd_info_t *dhd = handle;
- dhd_pub_t *dhdp;
- int i;
- char before_sr_dump[128];
- char after_sr_dump[128];
- unsigned long flags = 0;
- uint dig_buf_size = 0;
+ int ret = BCME_OK;
+ dhd_info_t *dhd_info;
- DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ }
- if (!dhd) {
- DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
- return;
+ if (!dhdp)
+ return BCME_ERROR;
+
+ if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
+ ret = dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, (unsigned long *)pos);
}
+ return ret;
+}
- dhdp = &dhd->pub;
+#ifdef DHD_DUMP_PCIE_RINGS
+int
+dhd_print_flowring_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
+{
+ log_dump_section_hdr_t sec_hdr;
+ int ret = BCME_OK;
+ uint32 remain_len = 0;
+ dhd_info_t *dhd_info;
- DHD_GENERAL_LOCK(dhdp, flags);
- if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
- DHD_GENERAL_UNLOCK(dhdp, flags);
- DHD_ERROR(("%s: bus is down! can't collect sssr dump. \n", __FUNCTION__));
- goto exit;
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
}
- DHD_GENERAL_UNLOCK(dhdp, flags);
- for (i = 0; i < MAX_NUM_D11CORES; i++) {
- /* Init file name */
- memset(before_sr_dump, 0, sizeof(before_sr_dump));
- memset(after_sr_dump, 0, sizeof(after_sr_dump));
+ if (!dhdp)
+ return BCME_ERROR;
- snprintf(before_sr_dump, sizeof(before_sr_dump), "%s_%d_%s",
- "sssr_core", i, "before_SR");
- snprintf(after_sr_dump, sizeof(after_sr_dump), "%s_%d_%s",
- "sssr_core", i, "after_SR");
+ dhd_init_sec_hdr(&sec_hdr);
- if (dhdp->sssr_d11_before[i] && dhdp->sssr_d11_outofreset[i] &&
- (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
- if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_before[i],
- dhdp->sssr_reg_info.mac_regs[i].sr_size, before_sr_dump)) {
- DHD_ERROR(("%s: writing SSSR MAIN dump before to the file failed\n",
- __FUNCTION__));
- }
- }
- if (dhdp->sssr_d11_after[i] && dhdp->sssr_d11_outofreset[i]) {
- if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_after[i],
- dhdp->sssr_reg_info.mac_regs[i].sr_size, after_sr_dump)) {
- DHD_ERROR(("%s: writing SSSR AUX dump after to the file failed\n",
- __FUNCTION__));
- }
- }
- }
+ remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
+ memset(dhdp->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);
- if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) {
- dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
- } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
- dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) {
- dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
- }
+ /* write the section header first */
+ ret = dhd_export_debug_data(FLOWRING_DUMP_HDR, fp, user_buf,
+ strlen(FLOWRING_DUMP_HDR), pos);
+ if (ret < 0)
+ goto exit;
- if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
- if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_before,
- dig_buf_size, "sssr_dig_before_SR")) {
- DHD_ERROR(("%s: writing SSSR Dig dump before to the file failed\n",
- __FUNCTION__));
- }
- }
+ /* Write the ring summary */
+ ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf,
+ (CONCISE_DUMP_BUFLEN - remain_len), pos);
+ if (ret < 0)
+ goto exit;
- if (dhdp->sssr_dig_buf_after) {
- if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_after,
- dig_buf_size, "sssr_dig_after_SR")) {
- DHD_ERROR(("%s: writing SSSR Dig VASIP dump after to the file failed\n",
- __FUNCTION__));
- }
- }
+ sec_hdr.type = LOG_DUMP_SECTION_FLOWRING;
+ sec_hdr.length = len;
+ ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
+ if (ret < 0)
+ goto exit;
+
+ /* write the log */
+ ret = dhd_d2h_h2d_ring_dump(dhdp, fp, user_buf, (unsigned long *)pos, TRUE);
+ if (ret < 0)
+ goto exit;
exit:
- DHD_GENERAL_LOCK(dhdp, flags);
- DHD_BUS_BUSY_CLEAR_IN_SSSRDUMP(dhdp);
- dhd_os_busbusy_wake(dhdp);
- DHD_GENERAL_UNLOCK(dhdp, flags);
+ return ret;
}
+#endif /* DHD_DUMP_PCIE_RINGS */
-void
-dhd_schedule_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode)
+#ifdef EWP_ECNTRS_LOGGING
+int
+dhd_print_ecntrs_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
{
- unsigned long flags = 0;
+ log_dump_section_hdr_t sec_hdr;
+ int ret = BCME_OK;
+ dhd_info_t *dhd_info;
- dhdp->sssr_dump_mode = dump_mode;
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ }
- /* bus busy bit for sssr dump will be cleared in sssr dump
- * work item context, after sssr dump files are created
- */
- DHD_GENERAL_LOCK(dhdp, flags);
- DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp);
- DHD_GENERAL_UNLOCK(dhdp, flags);
+ if (!dhdp)
+ return BCME_ERROR;
- if (dhdp->info->no_wq_sssrdump) {
- dhd_sssr_dump(dhdp->info, 0, 0);
- return;
+ dhd_init_sec_hdr(&sec_hdr);
+
+ if (logdump_ecntr_enable &&
+ dhdp->ecntr_dbg_ring) {
+ sec_hdr.type = LOG_DUMP_SECTION_ECNTRS;
+ ret = dhd_dump_debug_ring(dhdp, dhdp->ecntr_dbg_ring,
+ user_buf, &sec_hdr, ECNTRS_LOG_HDR, len, LOG_DUMP_SECTION_ECNTRS);
}
+ return ret;
- DHD_ERROR(("%s: scheduling sssr dump.. \n", __FUNCTION__));
- dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
- DHD_WQ_WORK_SSSR_DUMP, dhd_sssr_dump, DHD_WQ_WORK_PRIORITY_HIGH);
}
-#endif /* DHD_SSSR_DUMP */
+#endif /* EWP_ECNTRS_LOGGING */
-#ifdef DHD_LOG_DUMP
-static void
-dhd_log_dump(void *handle, void *event_info, u8 event)
+#ifdef EWP_RTT_LOGGING
+int
+dhd_print_rtt_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
{
- dhd_info_t *dhd = handle;
- log_dump_type_t *type = (log_dump_type_t *)event_info;
+ log_dump_section_hdr_t sec_hdr;
+ int ret = BCME_OK;
+ dhd_info_t *dhd_info;
- if (!dhd || !type) {
- DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
- return;
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
}
-#ifdef WL_CFG80211
- /* flush the fw side logs */
- wl_flush_fw_log_buffer(dhd_linux_get_primary_netdev(&dhd->pub),
- FW_LOGSET_MASK_ALL);
-#endif // endif
- /* there are currently 3 possible contexts from which
- * log dump can be scheduled -
- * 1.TRAP 2.supplicant DEBUG_DUMP pvt driver command
- * 3.HEALTH CHECK event
- * The concise debug info buffer is a shared resource
- * and in case a trap is one of the contexts then both the
- * scheduled work queues need to run because trap data is
- * essential for debugging. Hence a mutex lock is acquired
- * before calling do_dhd_log_dump().
- */
- DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
- dhd_os_logdump_lock(&dhd->pub);
- DHD_OS_WAKE_LOCK(&dhd->pub);
- if (do_dhd_log_dump(&dhd->pub, type) != BCME_OK) {
- DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
- }
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- dhd_os_logdump_unlock(&dhd->pub);
-}
+ if (!dhdp)
+ return BCME_ERROR;
-void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type)
-{
- DHD_ERROR(("%s: scheduling log dump.. \n", __FUNCTION__));
- dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
- type, DHD_WQ_WORK_DHD_LOG_DUMP,
- dhd_log_dump, DHD_WQ_WORK_PRIORITY_HIGH);
-}
+ dhd_init_sec_hdr(&sec_hdr);
-static void
-dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size)
-{
- if ((dhdp->memdump_enabled == DUMP_MEMONLY) ||
- (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON)) {
-#if defined(CONFIG_ARM64)
- DHD_ERROR(("-------- %s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n",
- name, (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
-#elif defined(__ARM_ARCH_7A__)
- DHD_ERROR(("-------- %s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n",
- name, (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
-#endif /* __ARM_ARCH_7A__ */
+ if (logdump_rtt_enable && dhdp->rtt_dbg_ring) {
+ ret = dhd_dump_debug_ring(dhdp, dhdp->rtt_dbg_ring,
+ user_buf, &sec_hdr, RTT_LOG_HDR, len, LOG_DUMP_SECTION_RTT);
}
+ return ret;
+
}
+#endif /* EWP_RTT_LOGGING */
-static void
-dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type)
+#ifdef DHD_STATUS_LOGGING
+int
+dhd_print_status_log_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
+ void *fp, uint32 len, void *pos)
{
- int i;
- unsigned long wr_size = 0;
- struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0];
- size_t log_size = 0;
- char buf_name[DHD_PRINT_BUF_NAME_LEN];
- dhd_dbg_ring_t *ring = NULL;
+ dhd_info_t *dhd_info;
- BCM_REFERENCE(ring);
+ if (dev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
+ dhdp = &dhd_info->pub;
+ }
- for (i = 0; i < DLD_BUFFER_NUM; i++) {
- dld_buf = &g_dld_buf[i];
- log_size = (unsigned long)dld_buf->max -
- (unsigned long)dld_buf->buffer;
- if (dld_buf->wraparound) {
- wr_size = log_size;
- } else {
- wr_size = (unsigned long)dld_buf->present -
- (unsigned long)dld_buf->front;
- }
- scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d]", i);
- dhd_print_buf_addr(dhdp, buf_name, dld_buf, dld_buf_size[i]);
- scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] buffer", i);
- dhd_print_buf_addr(dhdp, buf_name, dld_buf->buffer, wr_size);
- scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] present", i);
- dhd_print_buf_addr(dhdp, buf_name, dld_buf->present, wr_size);
- scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] front", i);
- dhd_print_buf_addr(dhdp, buf_name, dld_buf->front, wr_size);
+ if (!dhdp) {
+ return BCME_ERROR;
}
-#ifdef EWP_ECNTRS_LOGGING
- /* periodic flushing of ecounters is NOT supported */
- if (*type == DLD_BUF_TYPE_ALL &&
- logdump_ecntr_enable &&
- dhdp->ecntr_dbg_ring) {
+ return dhd_statlog_write_logdump(dhdp, user_buf, fp, len, pos);
+}
- ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring;
- dhd_print_buf_addr(dhdp, "ecntr_dbg_ring", ring, LOG_DUMP_ECNTRS_MAX_BUFSIZE);
- dhd_print_buf_addr(dhdp, "ecntr_dbg_ring ring_buf", ring->ring_buf,
- LOG_DUMP_ECNTRS_MAX_BUFSIZE);
- }
-#endif /* EWP_ECNTRS_LOGGING */
+uint32
+dhd_get_status_log_len(void *ndev, dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd_info;
+ uint32 length = 0;
-#ifdef BCMPCIE
- if (dhdp->dongle_trap_occured && dhdp->extended_trap_data) {
- dhd_print_buf_addr(dhdp, "extended_trap_data", dhdp->extended_trap_data,
- BCMPCIE_EXT_TRAP_DATA_MAXLEN);
+ if (ndev) {
+ dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
+ dhdp = &dhd_info->pub;
}
-#endif /* BCMPCIE */
-#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
- /* if health check event was received */
- if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
- dhd_print_buf_addr(dhdp, "health_chk_event_data", dhdp->health_chk_event_data,
- HEALTH_CHK_BUF_SIZE);
+ if (dhdp) {
+ length = dhd_statlog_get_logbuf_len(dhdp);
}
-#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
- /* append the concise debug information */
- if (dhdp->concise_dbg_buf) {
- dhd_print_buf_addr(dhdp, "concise_dbg_buf", dhdp->concise_dbg_buf,
- CONCISE_DUMP_BUFLEN);
- }
+ return length;
+}
+#endif /* DHD_STATUS_LOGGING */
+
+void
+dhd_init_sec_hdr(log_dump_section_hdr_t *sec_hdr)
+{
+ /* prep the section header */
+ memset(sec_hdr, 0, sizeof(*sec_hdr));
+ sec_hdr->magic = LOG_DUMP_MAGIC;
+ sec_hdr->timestamp = local_clock();
}
/* Must hold 'dhd_os_logdump_lock' before calling this function ! */
struct file *fp = NULL;
mm_segment_t old_fs;
loff_t pos = 0;
- unsigned int wr_size = 0;
char dump_path[128];
uint32 file_mode;
unsigned long flags = 0;
- struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0];
size_t log_size = 0;
size_t fspace_remain = 0;
struct kstat stat;
char time_str[128];
- char *ts = NULL;
- uint32 remain_len = 0;
+ unsigned int len = 0;
log_dump_section_hdr_t sec_hdr;
- dhd_info_t *dhd_info = NULL;
DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
- /* if dhdp is null, its extremely unlikely that log dump will be scheduled
- * so not freeing 'type' here is ok, even if we want to free 'type'
- * we cannot do so, since 'dhdp->osh' is unavailable
- * as dhdp is null
- */
- if (!dhdp || !type) {
- if (dhdp) {
- DHD_GENERAL_LOCK(dhdp, flags);
- DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
- dhd_os_busbusy_wake(dhdp);
- DHD_GENERAL_UNLOCK(dhdp, flags);
- }
- return BCME_ERROR;
- }
-
- DHD_GENERAL_LOCK(dhdp, flags);
- if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
- DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
- dhd_os_busbusy_wake(dhdp);
- DHD_GENERAL_UNLOCK(dhdp, flags);
- MFREE(dhdp->osh, type, sizeof(*type));
- DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__));
- return BCME_ERROR;
- }
- DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp);
- DHD_GENERAL_UNLOCK(dhdp, flags);
-
- dhd_info = (dhd_info_t *)dhdp->info;
- /* in case of trap get preserve logs from ETD */
-#if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS)
- if (dhdp->dongle_trap_occured &&
- dhdp->extended_trap_data) {
- dhdpcie_get_etd_preserve_logs(dhdp, (uint8 *)dhdp->extended_trap_data,
- &dhd_info->event_data);
- }
-#endif /* BCMPCIE */
-
- /* flush the event work items to get any fw events/logs
- * flush_work is a blocking call
- */
-#ifdef EWP_EDL
- if (dhd_info->pub.dongle_edl_support) {
- /* wait till existing edl items are processed */
- dhd_flush_logtrace_process(dhd_info);
- /* dhd_flush_logtrace_process will ensure the work items in the ring
- * (EDL ring) from rd to wr are processed. But if wr had
- * wrapped around, only the work items from rd to ring-end are processed.
- * So to ensure that the work items at the
- * beginning of ring are also processed in the wrap around case, call
- * it twice
- */
- for (i = 0; i < 2; i++) {
- /* blocks till the edl items are processed */
- dhd_flush_logtrace_process(dhd_info);
- }
- } else {
- dhd_flush_logtrace_process(dhd_info);
- }
-#else
- dhd_flush_logtrace_process(dhd_info);
-#endif /* EWP_EDL */
-
- /* change to KERNEL_DS address limit */
- old_fs = get_fs();
- set_fs(KERNEL_DS);
-
- /* Init file name */
- memset(dump_path, 0, sizeof(dump_path));
- switch (dhdp->debug_dump_subcmd) {
- case CMD_UNWANTED:
- snprintf(dump_path, sizeof(dump_path), "%s",
- DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
- DHD_DUMP_SUBSTR_UNWANTED);
- break;
- case CMD_DISCONNECTED:
- snprintf(dump_path, sizeof(dump_path), "%s",
- DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
- DHD_DUMP_SUBSTR_DISCONNECTED);
- break;
- default:
- snprintf(dump_path, sizeof(dump_path), "%s",
- DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE);
+ DHD_GENERAL_LOCK(dhdp, flags);
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__));
+ goto exit1;
}
+ DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
- if (!dhdp->logdump_periodic_flush) {
- get_debug_dump_time(dhdp->debug_dump_time_str);
- snprintf(dump_path, sizeof(dump_path), "%s_" "%s",
- dump_path, dhdp->debug_dump_time_str);
+ if ((ret = dhd_log_flush(dhdp, type)) < 0) {
+ goto exit1;
}
+ /* change to KERNEL_DS address limit */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
- memset(time_str, 0, sizeof(time_str));
- ts = dhd_log_dump_get_timestamp();
- snprintf(time_str, sizeof(time_str),
- "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
+ dhd_get_debug_dump_file_name(NULL, dhdp, dump_path, sizeof(dump_path));
+ DHD_ERROR(("debug_dump_path = %s\n", dump_path));
DHD_ERROR(("DHD version: %s\n", dhd_version));
DHD_ERROR(("F/W version: %s\n", fw_version));
- DHD_ERROR(("debug_dump_path = %s\n", dump_path));
dhd_log_dump_buf_addr(dhdp, type);
+ dhd_get_time_str(dhdp, time_str, 128);
+
/* if this is the first time after dhd is loaded,
* or, if periodic flush is disabled, clear the log file
*/
if (IS_ERR(fp)) {
ret = PTR_ERR(fp);
DHD_ERROR(("open file error, err = %d\n", ret));
- goto exit;
+ goto exit2;
}
DHD_ERROR(("debug_dump_path = %s\n", dump_path));
#else
ret = PTR_ERR(fp);
DHD_ERROR(("open file error, err = %d\n", ret));
- goto exit;
+ goto exit2;
#endif /* CONFIG_X86 && OEM_ANDROID */
}
ret = vfs_stat(dump_path, &stat);
if (ret < 0) {
DHD_ERROR(("file stat error, err = %d\n", ret));
- goto exit;
+ goto exit2;
}
/* if some one else has changed the file */
ret = generic_file_llseek(fp, dhdp->last_file_posn, SEEK_CUR);
if (ret < 0) {
DHD_ERROR(("file seek last posn error ! err = %d \n", ret));
- goto exit;
+ goto exit2;
}
pos = fp->f_pos;
pos = fp->f_pos;
}
}
- /* write the timestamp hdr to the file first */
- ret = vfs_write(fp, time_str, strlen(time_str), &pos);
- if (ret < 0) {
- DHD_ERROR(("write file error, err = %d\n", ret));
- goto exit;
- }
- /* prep the section header */
- memset(&sec_hdr, 0, sizeof(sec_hdr));
- sec_hdr.magic = LOG_DUMP_MAGIC;
- sec_hdr.timestamp = local_clock();
+ dhd_print_time_str(0, fp, len, &pos);
for (i = 0; i < DLD_BUFFER_NUM; ++i) {
- unsigned int buf_size = 0;
if (*type != DLD_BUF_TYPE_ALL && i != *type)
continue;
- /* calculate the length of the log */
- dld_buf = &g_dld_buf[i];
- buf_size = (unsigned long)dld_buf->max -
- (unsigned long)dld_buf->buffer;
- if (dld_buf->wraparound) {
- wr_size = buf_size;
- } else {
- /* need to hold the lock before accessing 'present' and 'remain' ptrs */
- spin_lock_irqsave(&dld_buf->lock, flags);
- wr_size = (unsigned long)dld_buf->present -
- (unsigned long)dld_buf->front;
- spin_unlock_irqrestore(&dld_buf->lock, flags);
- }
-
- /* write the section header first */
- sec_hdr.type = dld_hdrs[i].sec_type;
- sec_hdr.length = wr_size;
- vfs_write(fp, dld_hdrs[i].hdr_str, strlen(dld_hdrs[i].hdr_str), &pos);
- vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos);
- /* write the log */
- ret = vfs_write(fp, dld_buf->buffer, wr_size, &pos);
- if (ret < 0) {
- DHD_ERROR(("write file error, err = %d\n", ret));
- goto exit;
- }
-
- /* re-init dhd_log_dump_buf structure */
- spin_lock_irqsave(&dld_buf->lock, flags);
- dld_buf->wraparound = 0;
- dld_buf->present = dld_buf->front;
- dld_buf->remain = buf_size;
- bzero(dld_buf->buffer, buf_size);
- spin_unlock_irqrestore(&dld_buf->lock, flags);
-
+ len = dhd_get_dld_len(i);
+ dhd_get_dld_log_dump(NULL, dhdp, 0, fp, len, i, &pos);
if (*type != DLD_BUF_TYPE_ALL)
break;
}
logdump_ecntr_enable &&
dhdp->ecntr_dbg_ring) {
dhd_log_dump_ring_to_file(dhdp, dhdp->ecntr_dbg_ring,
- fp, (unsigned long *)&pos, &sec_hdr);
+ fp, (unsigned long *)&pos,
+ &sec_hdr, ECNTRS_LOG_HDR, LOG_DUMP_SECTION_ECNTRS);
}
#endif /* EWP_ECNTRS_LOGGING */
-#ifdef BCMPCIE
- /* append extended trap data to the file in case of traps */
- if (dhdp->dongle_trap_occured &&
- dhdp->extended_trap_data) {
- /* write the section header first */
- vfs_write(fp, EXT_TRAP_LOG_HDR, strlen(EXT_TRAP_LOG_HDR), &pos);
- sec_hdr.type = LOG_DUMP_SECTION_EXT_TRAP;
- sec_hdr.length = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
- vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos);
- /* write the log */
- ret = vfs_write(fp, (char *)dhdp->extended_trap_data,
- BCMPCIE_EXT_TRAP_DATA_MAXLEN, &pos);
- if (ret < 0) {
- DHD_ERROR(("write file error of ext trap info,"
- " err = %d\n", ret));
- goto exit;
+#ifdef DHD_STATUS_LOGGING
+ if (dhdp->statlog) {
+ /* write the statlog */
+ len = dhd_get_status_log_len(NULL, dhdp);
+ if (len) {
+ if (dhd_print_status_log_data(NULL, dhdp, 0, fp,
+ len, &pos) < 0) {
+ goto exit2;
+ }
}
}
+#endif /* DHD_STATUS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
+ /* periodic flushing of RTT logs is NOT supported */
+ if (*type == DLD_BUF_TYPE_ALL &&
+ logdump_rtt_enable &&
+ dhdp->rtt_dbg_ring) {
+ dhd_log_dump_ring_to_file(dhdp, dhdp->rtt_dbg_ring,
+ fp, (unsigned long *)&pos,
+ &sec_hdr, RTT_LOG_HDR, LOG_DUMP_SECTION_RTT);
+ }
+#endif /* EWP_RTT_LOGGING */
+
+#ifdef BCMPCIE
+ len = dhd_get_ext_trap_len(NULL, dhdp);
+ if (len) {
+ if (dhd_print_ext_trap_data(NULL, dhdp, 0, fp, len, &pos) < 0)
+ goto exit2;
+ }
#endif /* BCMPCIE */
#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
- /* if health check event was received, dump to file */
- if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
- /* write the section header first */
- vfs_write(fp, HEALTH_CHK_LOG_HDR, strlen(HEALTH_CHK_LOG_HDR), &pos);
- sec_hdr.type = LOG_DUMP_SECTION_HEALTH_CHK;
- sec_hdr.length = HEALTH_CHK_BUF_SIZE;
- vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos);
- /* write the log */
- ret = vfs_write(fp, (char *)dhdp->health_chk_event_data,
- HEALTH_CHK_BUF_SIZE, &pos);
- if (ret < 0) {
- DHD_ERROR(("write file error of health chk info,"
- " err = %d\n", ret));
- goto exit;
- }
+ len = dhd_get_health_chk_len(NULL, dhdp);
+ if (len) {
+		if (dhd_print_health_chk_data(NULL, dhdp, 0, fp, len, &pos) < 0)
+ goto exit2;
}
#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
-#ifdef DHD_DUMP_PCIE_RINGS
- /* write the section header first */
- vfs_write(fp, FLOWRING_DUMP_HDR, strlen(FLOWRING_DUMP_HDR), &pos);
- /* Write the ring summary */
- ret = vfs_write(fp, dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN - remain_len, &pos);
- if (ret < 0) {
- DHD_ERROR(("write file error of concise debug info,"
- " err = %d\n", ret));
- goto exit;
- }
- sec_hdr.type = LOG_DUMP_SECTION_FLOWRING;
- sec_hdr.length = ((H2DRING_TXPOST_ITEMSIZE
- * H2DRING_TXPOST_MAX_ITEM)
- + (D2HRING_TXCMPLT_ITEMSIZE
- * D2HRING_TXCMPLT_MAX_ITEM)
- + (H2DRING_RXPOST_ITEMSIZE
- * H2DRING_RXPOST_MAX_ITEM)
- + (D2HRING_RXCMPLT_ITEMSIZE
- * D2HRING_RXCMPLT_MAX_ITEM)
- + (H2DRING_CTRL_SUB_ITEMSIZE
- * H2DRING_CTRL_SUB_MAX_ITEM)
- + (D2HRING_CTRL_CMPLT_ITEMSIZE
- * D2HRING_CTRL_CMPLT_MAX_ITEM)
- + (H2DRING_INFO_BUFPOST_ITEMSIZE
- * H2DRING_DYNAMIC_INFO_MAX_ITEM)
- + (D2HRING_INFO_BUFCMPLT_ITEMSIZE
- * D2HRING_DYNAMIC_INFO_MAX_ITEM));
- vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos);
- /* write the log */
- ret = dhd_d2h_h2d_ring_dump(dhdp, fp, (unsigned long *)&pos);
- if (ret < 0) {
- DHD_ERROR(("%s: error dumping ring data!\n",
- __FUNCTION__));
- goto exit;
+ len = dhd_get_dhd_dump_len(NULL, dhdp);
+ if (len) {
+ if (dhd_print_dump_data(NULL, dhdp, 0, fp, len, &pos) < 0)
+ goto exit2;
}
-#endif /* DHD_DUMP_PCIE_RINGS */
- /* append the concise debug information to the file.
- * This is the information which is seen
- * when a 'dhd dump' iovar is fired
- */
- if (dhdp->concise_dbg_buf) {
- remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
- if (remain_len <= 0) {
- DHD_ERROR(("%s: error getting concise debug info !\n",
- __FUNCTION__));
- goto exit;
- } else {
- /* write the section header first */
- vfs_write(fp, DHD_DUMP_LOG_HDR, strlen(DHD_DUMP_LOG_HDR), &pos);
- sec_hdr.type = LOG_DUMP_SECTION_DHD_DUMP;
- sec_hdr.length = CONCISE_DUMP_BUFLEN - remain_len;
- vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos);
- /* write the log */
- ret = vfs_write(fp, dhdp->concise_dbg_buf,
- CONCISE_DUMP_BUFLEN - remain_len, &pos);
- if (ret < 0) {
- DHD_ERROR(("write file error of concise debug info,"
- " err = %d\n", ret));
- goto exit;
- }
- }
+ len = dhd_get_cookie_log_len(NULL, dhdp);
+ if (len) {
+ if (dhd_print_cookie_data(NULL, dhdp, 0, fp, len, &pos) < 0)
+ goto exit2;
}
- if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
- ret = dhd_log_dump_cookie_to_file(dhdp, fp, (unsigned long *)&pos);
- if (ret < 0) {
- DHD_ERROR(("write file error of cooke info, err = %d\n", ret));
- goto exit;
- }
+#ifdef DHD_DUMP_PCIE_RINGS
+ len = dhd_get_flowring_len(NULL, dhdp);
+ if (len) {
+ if (dhd_print_flowring_data(NULL, dhdp, 0, fp, len, &pos) < 0)
+ goto exit2;
}
+#endif // endif
if (dhdp->logdump_periodic_flush) {
/* store the last position written to in the file for future use */
dhdp->last_file_posn = pos;
}
-exit:
- MFREE(dhdp->osh, type, sizeof(*type));
+exit2:
if (!IS_ERR(fp) && fp != NULL) {
filp_close(fp, NULL);
DHD_ERROR(("%s: Finished writing log dump to file - '%s' \n",
__FUNCTION__, dump_path));
}
set_fs(old_fs);
+exit1:
+ if (type) {
+ MFREE(dhdp->osh, type, sizeof(*type));
+ }
DHD_GENERAL_LOCK(dhdp, flags);
DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
dhd_os_busbusy_wake(dhdp);
}
#endif /* DHD_LOG_DUMP */
-#ifdef BCMASSERT_LOG
-#define ASSERTINFO "/data/misc/wifi/.assert.info"
-void dhd_get_assert_info(dhd_pub_t *dhd)
+/* This function writes data to the file pointed by fp, OR
+ * copies data to the user buffer sent by upper layer(HAL).
+ */
+int
+dhd_export_debug_data(void *mem_buf, void *fp, const void *user_buf, int buf_len, void *pos)
{
- struct file *fp = NULL;
- char *filepath = ASSERTINFO;
- int mem_val = -1;
+ int ret = BCME_OK;
- /*
- * Read assert info from the file
- * 0: Trigger Kernel crash by panic()
- * 1: Print out the logs and don't trigger Kernel panic. (default)
- * 2: Trigger Kernel crash by BUG()
- * File doesn't exist: Keep default value (1).
- */
- fp = filp_open(filepath, O_RDONLY, 0);
- if (IS_ERR(fp)) {
- DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
- } else {
- int ret = compat_kernel_read(fp, 0, (char *)&mem_val, 4);
+ if (fp) {
+ ret = compat_vfs_write(fp, mem_buf, buf_len, (loff_t *)pos);
if (ret < 0) {
- DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
- } else {
- mem_val = bcm_atoi((char *)&mem_val);
- DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val));
+ DHD_ERROR(("write file error, err = %d\n", ret));
+ goto exit;
}
- filp_close(fp, NULL);
+ } else {
+#ifdef CONFIG_COMPAT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
+ if (in_compat_syscall()) {
+#else
+ if (is_compat_task()) {
+#endif /* LINUX_VER >= 4.6 */
+ void * usr_ptr = compat_ptr((uintptr_t) user_buf);
+ ret = copy_to_user((void *)((uintptr_t)usr_ptr + (*(int *)pos)),
+ mem_buf, buf_len);
+ if (ret) {
+ DHD_ERROR(("failed to copy into user buffer : %d\n", ret));
+ goto exit;
+ }
+ }
+ else
+#endif /* CONFIG_COMPAT */
+ {
+ ret = copy_to_user((void *)((uintptr_t)user_buf + (*(int *)pos)),
+ mem_buf, buf_len);
+ if (ret) {
+ DHD_ERROR(("failed to copy into user buffer : %d\n", ret));
+ goto exit;
+ }
+ }
+ (*(int *)pos) += buf_len;
}
- /* By default. set to 0, Kernel Panic */
- g_assert_type = (mem_val >= 0) ? mem_val : 0;
+exit:
+ return ret;
}
-#endif /* BCMASSERT_LOG */
/*
* This call is to get the memdump size so that,
orig_len = *size;
if (dhdp->soc_ram) {
if (orig_len >= dhdp->soc_ram_length) {
- memcpy(*buf, dhdp->soc_ram, dhdp->soc_ram_length);
- /* reset the storage of dump */
- memset(dhdp->soc_ram, 0, dhdp->soc_ram_length);
+ *buf = dhdp->soc_ram;
*size = dhdp->soc_ram_length;
} else {
ret = BCME_BUFTOOSHORT;
return BCME_OK;
}
+#ifdef DNGL_AXI_ERROR_LOGGING
+int
+dhd_os_get_axi_error_dump(void *dev, const void *user_buf, uint32 len)
+{
+ int ret = BCME_OK;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ loff_t pos = 0;
+ if (user_buf == NULL) {
+ DHD_ERROR(("%s(): user buffer is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ ret = dhd_export_debug_data((char *)dhdp->axi_err_dump,
+ NULL, user_buf, sizeof(dhd_axi_error_dump_t), &pos);
+
+ if (ret < 0) {
+		DHD_ERROR(("%s(): fail to dump axi error, err = %d\n", __FUNCTION__, ret));
+ return ret;
+ }
+ return ret;
+}
+
+int
+dhd_os_get_axi_error_dump_size(struct net_device *dev)
+{
+ int size = -1;
+
+ size = sizeof(dhd_axi_error_dump_t);
+ if (size < 0) {
+ DHD_ERROR(("%s(): fail to get axi error size, err = %d\n", __FUNCTION__, size));
+ }
+ return size;
+}
+
+void
+dhd_os_get_axi_error_filename(struct net_device *dev, char *dump_path, int len)
+{
+ snprintf(dump_path, len, "%s",
+ DHD_COMMON_DUMP_PATH DHD_DUMP_AXI_ERROR_FILENAME);
+}
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
{
return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE;
#endif // endif
#if defined(ARGOS_NOTIFY_CB)
+
+static int argos_status_notifier_wifi_cb(struct notifier_block *notifier,
+ unsigned long speed, void *v);
+static int argos_status_notifier_p2p_cb(struct notifier_block *notifier,
+ unsigned long speed, void *v);
+
int
argos_register_notifier_init(struct net_device *net)
{
if (dhdp == NULL || !dhdp->up) {
goto exit;
}
-#if defined(ARGOS_NOTIFY_CB)
- if (speed > PCIE_IRQ_AFFINITY_THRESHOLD) {
- if (dhdpcie_get_pcieirq(dhd->pub.bus, &pcie_irq)) {
- DHD_ERROR(("%s, Failed to get PCIe IRQ\n", __FUNCTION__));
- } else {
- DHD_ERROR(("%s, PCIe IRQ:%u set Core %d\n",
- __FUNCTION__, pcie_irq, PCIE_IRQ_CPU_CORE));
- irq_set_affinity(pcie_irq, cpumask_of(PCIE_IRQ_CPU_CORE));
- }
- } else {
- dhd_irq_set_affinity(dhdp);
- }
-#endif /* ARGOS_NOTIFY_CB */
/* Check if reported TPut value is more than threshold value */
if (speed > RPS_TPUT_THRESHOLD) {
if (argos_rps_ctrl_data.argos_rps_cpus_enabled == 0) {
return NULL;
}
-#ifdef DHD_DHCP_DUMP
-static void
-dhd_dhcp_dump(char *ifname, uint8 *pktdata, bool tx)
-{
- struct bootp_fmt *b = (struct bootp_fmt *) &pktdata[ETHER_HDR_LEN];
- struct iphdr *h = &b->ip_header;
- uint8 *ptr, *opt, *end = (uint8 *) b + ntohs(b->ip_header.tot_len);
- int dhcp_type = 0, len, opt_len;
-
- /* check IP header */
- if (h->ihl != 5 || h->version != 4 || h->protocol != IPPROTO_UDP) {
- return;
- }
-
- /* check UDP port for bootp (67, 68) */
- if (b->udp_header.source != htons(67) && b->udp_header.source != htons(68) &&
- b->udp_header.dest != htons(67) && b->udp_header.dest != htons(68)) {
- return;
- }
-
- /* check header length */
- if (ntohs(h->tot_len) < ntohs(b->udp_header.len) + sizeof(struct iphdr)) {
- return;
- }
-
- len = ntohs(b->udp_header.len) - sizeof(struct udphdr);
- opt_len = len
- - (sizeof(*b) - sizeof(struct iphdr) - sizeof(struct udphdr) - sizeof(b->options));
-
- /* parse bootp options */
- if (opt_len >= 4 && !memcmp(b->options, bootp_magic_cookie, 4)) {
- ptr = &b->options[4];
- while (ptr < end && *ptr != 0xff) {
- opt = ptr++;
- if (*opt == 0) {
- continue;
- }
- ptr += *ptr + 1;
- if (ptr >= end) {
- break;
- }
- /* 53 is dhcp type */
- if (*opt == 53) {
- if (opt[1]) {
- dhcp_type = opt[2];
- DHD_ERROR(("DHCP[%s] - %s [%s] [%s]\n",
- ifname, dhcp_types[dhcp_type],
- tx ? "TX" : "RX", dhcp_ops[b->op]));
- break;
- }
- }
- }
- }
-}
-#endif /* DHD_DHCP_DUMP */
-
-#ifdef DHD_ICMP_DUMP
-static void
-dhd_icmp_dump(char *ifname, uint8 *pktdata, bool tx)
-{
- uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN];
- struct iphdr *iph = (struct iphdr *)pkt;
- struct icmphdr *icmph;
-
- /* check IP header */
- if (iph->ihl != 5 || iph->version != 4 || iph->protocol != IP_PROT_ICMP) {
- return;
- }
-
- icmph = (struct icmphdr *)((uint8 *)pkt + sizeof(struct iphdr));
- if (icmph->type == ICMP_ECHO) {
- DHD_ERROR_MEM(("PING REQUEST[%s] [%s] : SEQNUM=%d\n",
- ifname, tx ? "TX" : "RX", ntoh16(icmph->un.echo.sequence)));
- } else if (icmph->type == ICMP_ECHOREPLY) {
- DHD_ERROR_MEM(("PING REPLY[%s] [%s] : SEQNUM=%d\n",
- ifname, tx ? "TX" : "RX", ntoh16(icmph->un.echo.sequence)));
- } else {
- DHD_ERROR_MEM(("ICMP [%s] [%s] : TYPE=%d, CODE=%d\n",
- ifname, tx ? "TX" : "RX", icmph->type, icmph->code));
- }
-}
-#endif /* DHD_ICMP_DUMP */
-
-#ifdef SHOW_LOGTRACE
-void
-dhd_get_read_buf_ptr(dhd_pub_t *dhd_pub, trace_buf_info_t *trace_buf_info)
-{
- dhd_dbg_ring_status_t ring_status;
- uint32 rlen = 0;
-#if defined(DEBUGABILITY)
- rlen = dhd_dbg_pull_single_from_ring(dhd_pub, FW_VERBOSE_RING_ID, trace_buf_info->buf,
- TRACE_LOG_BUF_MAX_SIZE, TRUE);
-#elif defined(EWP_ECNTRS_LOGGING)
- rlen = dhd_dbg_ring_pull_single(dhd_pub->ecntr_dbg_ring, trace_buf_info->buf,
- TRACE_LOG_BUF_MAX_SIZE, TRUE);
-#else
- ASSERT(0);
-#endif /* DEBUGABILITY */
-
- trace_buf_info->size = rlen;
- trace_buf_info->availability = NEXT_BUF_NOT_AVAIL;
- if (rlen == 0) {
- trace_buf_info->availability = BUF_NOT_AVAILABLE;
- return;
- }
- dhd_dbg_get_ring_status(dhd_pub, FW_VERBOSE_RING_ID, &ring_status);
- if (ring_status.written_bytes != ring_status.read_bytes) {
- trace_buf_info->availability = NEXT_BUF_AVAIL;
- }
-}
-#endif /* SHOW_LOGTRACE */
-
-bool
+fw_download_status_t
dhd_fw_download_status(dhd_pub_t * dhd_pub)
{
- return dhd_pub->fw_download_done;
+ return dhd_pub->fw_download_status;
}
-int
+static int
dhd_create_to_notifier_skt(void)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
{
struct nlmsghdr *nlh;
struct sk_buff *skb_out;
+ int ret = BCME_ERROR;
BCM_REFERENCE(skb);
if (sender_pid == 0) {
DHD_INFO(("Invalid PID 0\n"));
- return -1;
+ skb_out = NULL;
+ goto err;
}
if ((skb_out = nlmsg_new(size, 0)) == NULL) {
DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__));
- return -1;
+ ret = BCME_NOMEM;
+ goto err;
}
nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, size, 0);
- NETLINK_CB(skb_out).dst_group = 0; /* Unicast */
- memcpy(nlmsg_data(nlh), (char *)data, size);
-
- if ((nlmsg_unicast(nl_to_event_sk, skb_out, sender_pid)) < 0) {
- DHD_INFO(("Error sending message\n"));
- }
- return 0;
-}
-
-static ssize_t
-show_enable_ecounter(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
- unsigned long onoff;
-
- onoff = enable_ecounter;
- ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
- onoff);
- return ret;
-}
-
-static ssize_t
-ecounter_onoff(struct dhd_info *dev, const char *buf, size_t count)
-{
- unsigned long onoff;
- dhd_info_t *dhd = (dhd_info_t *)dev;
- dhd_pub_t *dhdp;
-
- if (!dhd) {
- DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
- return count;
- }
- dhdp = &dhd->pub;
- if (!FW_SUPPORTED(dhdp, ecounters)) {
- DHD_ERROR(("%s: ecounters not supported by FW\n", __FUNCTION__));
- return count;
- }
-
- onoff = bcm_strtoul(buf, NULL, 10);
-
- sscanf(buf, "%lu", &onoff);
- if (onoff != 0 && onoff != 1) {
- return -EINVAL;
+ if (nlh == NULL) {
+ DHD_ERROR(("%s: nlmsg_put failed\n", __FUNCTION__));
+ goto err;
}
+ NETLINK_CB(skb_out).dst_group = 0; /* Unicast */
+ (void)memcpy_s(nlmsg_data(nlh), size, (char *)data, size);
- if (enable_ecounter == onoff) {
- DHD_ERROR(("%s: ecounters already %d\n", __FUNCTION__, enable_ecounter));
- return count;
+ if ((ret = nlmsg_unicast(nl_to_event_sk, skb_out, sender_pid)) < 0) {
+ DHD_ERROR(("Error sending message, ret:%d\n", ret));
+ /* skb is already freed inside nlmsg_unicast() on error case */
+ /* explicitly making skb_out to NULL to avoid double free */
+ skb_out = NULL;
+ goto err;
}
-
- enable_ecounter = onoff;
- if (enable_ecounter) {
- if (dhd_start_ecounters(dhdp) != BCME_OK) {
- DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__));
- } else if (dhd_start_event_ecounters(dhdp) != BCME_OK) {
- DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__));
- }
- } else {
- if (dhd_stop_ecounters(dhdp) != BCME_OK) {
- DHD_ERROR(("%s Ecounters stop failed\n", __FUNCTION__));
- } else if (dhd_stop_event_ecounters(dhdp) != BCME_OK) {
- DHD_ERROR(("%s Event_Ecounters stop failed\n", __FUNCTION__));
- }
+ return BCME_OK;
+err:
+ if (skb_out) {
+ nlmsg_free(skb_out);
}
-
- return count;
+ return ret;
}
static void
return (bool)logdump_ecntr_enable;
}
+bool
+dhd_log_dump_rtt_enabled(void)
+{
+ return (bool)logdump_rtt_enable;
+}
+
void
dhd_log_dump_init(dhd_pub_t *dhd)
{
ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
ret = dhd_dbg_ring_init(dhd, ring, ECNTR_RING_ID,
ECNTR_RING_NAME, LOG_DUMP_ECNTRS_MAX_BUFSIZE,
- bufptr);
+ bufptr, TRUE);
if (ret != BCME_OK) {
DHD_ERROR(("%s: unable to init ecntr ring !\n",
__FUNCTION__));
bufptr += LOG_DUMP_ECNTRS_MAX_BUFSIZE;
#endif /* EWP_ECNTRS_LOGGING */
+#ifdef EWP_RTT_LOGGING
+	/* now use the rest of the pre-alloc'd memory for the rtt log */
+ dhd->rtt_dbg_ring = MALLOCZ(dhd->osh, sizeof(dhd_dbg_ring_t));
+ if (!dhd->rtt_dbg_ring)
+ goto fail;
+
+ ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
+ ret = dhd_dbg_ring_init(dhd, ring, RTT_RING_ID,
+ RTT_RING_NAME, LOG_DUMP_RTT_MAX_BUFSIZE,
+ bufptr, TRUE);
+ if (ret != BCME_OK) {
+		DHD_ERROR(("%s: unable to init rtt ring !\n",
+ __FUNCTION__));
+ goto fail;
+ }
+ DHD_DBG_RING_LOCK(ring->lock, flags);
+ ring->state = RING_ACTIVE;
+ ring->threshold = 0;
+ DHD_DBG_RING_UNLOCK(ring->lock, flags);
+
+ bufptr += LOG_DUMP_RTT_MAX_BUFSIZE;
+#endif /* EWP_RTT_LOGGING */
+
/* Concise buffer is used as intermediate buffer for following purposes
* a) pull ecounters records temporarily before
* writing it to file
}
#endif /* EWP_ECNTRS_LOGGING */
+#ifdef EWP_RTT_LOGGING
+ if (dhd->rtt_dbg_ring) {
+ ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
+ dhd_dbg_ring_deinit(dhd, ring);
+ ring->ring_buf = NULL;
+ ring->ring_size = 0;
+ MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
+ dhd->rtt_dbg_ring = NULL;
+ }
+#endif /* EWP_RTT_LOGGING */
+
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
if (prealloc_buf) {
DHD_OS_PREFREE(dhd, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE);
}
#endif /* EWP_ECNTRS_LOGGING */
+#ifdef EWP_RTT_LOGGING
+ if (dhd->rtt_dbg_ring) {
+ ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
+ dhd_dbg_ring_deinit(dhd, ring);
+ ring->ring_buf = NULL;
+ ring->ring_size = 0;
+ MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
+ dhd->rtt_dbg_ring = NULL;
+ }
+#endif /* EWP_RTT_LOGGING */
+
/* 'general' buffer points to start of the pre-alloc'd memory */
dld_buf = &g_dld_buf[DLD_BUF_TYPE_GENERAL];
dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL];
}
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-#ifdef DHD_LB_TXP
-#define DHD_LB_TXBOUND 64
-/*
- * Function that performs the TX processing on a given CPU
- */
-bool
-dhd_lb_tx_process(dhd_info_t *dhd)
-{
- struct sk_buff *skb;
- int cnt = 0;
- struct net_device *net;
- int ifidx;
- bool resched = FALSE;
-
- DHD_TRACE(("%s(): TX Processing \r\n", __FUNCTION__));
- if (dhd == NULL) {
- DHD_ERROR((" Null pointer DHD \r\n"));
- return resched;
- }
-
- BCM_REFERENCE(net);
-
- DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
-
- /* Base Loop to perform the actual Tx */
- do {
- skb = skb_dequeue(&dhd->tx_pend_queue);
- if (skb == NULL) {
- DHD_TRACE(("Dequeued a Null Packet \r\n"));
- break;
- }
- cnt++;
-
- net = DHD_LB_TX_PKTTAG_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));
- ifidx = DHD_LB_TX_PKTTAG_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));
-
- DHD_TRACE(("Processing skb %p for net %p index %d \r\n", skb,
- net, ifidx));
-
- __dhd_sendpkt(&dhd->pub, ifidx, skb);
-
- if (cnt >= DHD_LB_TXBOUND) {
- resched = TRUE;
- break;
- }
-
- } while (1);
-
- DHD_INFO(("%s(): Processed %d packets \r\n", __FUNCTION__, cnt));
-
- return resched;
-}
-
-void
-dhd_lb_tx_handler(unsigned long data)
-{
- dhd_info_t *dhd = (dhd_info_t *)data;
-
- if (dhd_lb_tx_process(dhd)) {
- dhd_tasklet_schedule(&dhd->tx_tasklet);
- }
-}
-
-#endif /* DHD_LB_TXP */
-
-/* ----------------------------------------------------------------------------
- * Infrastructure code for sysfs interface support for DHD
- *
- * What is sysfs interface?
- * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt
- *
- * Why sysfs interface?
- * This is the Linux standard way of changing/configuring Run Time parameters
- * for a driver. We can use this interface to control "linux" specific driver
- * parameters.
- *
- * -----------------------------------------------------------------------------
- */
-
-#include <linux/sysfs.h>
-#include <linux/kobject.h>
-
-#if defined(DHD_TRACE_WAKE_LOCK)
-
-/* Function to show the history buffer */
-static ssize_t
-show_wklock_trace(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
- dhd_info_t *dhd = (dhd_info_t *)dev;
-
- buf[ret] = '\n';
- buf[ret+1] = 0;
-
- dhd_wk_lock_stats_dump(&dhd->pub);
- return ret+1;
-}
-
-/* Function to enable/disable wakelock trace */
-static ssize_t
-wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count)
-{
- unsigned long onoff;
- unsigned long flags;
- dhd_info_t *dhd = (dhd_info_t *)dev;
-
- onoff = bcm_strtoul(buf, NULL, 10);
- if (onoff != 0 && onoff != 1) {
- return -EINVAL;
- }
-
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
- trace_wklock_onoff = onoff;
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
- if (trace_wklock_onoff) {
- printk("ENABLE WAKLOCK TRACE\n");
- } else {
- printk("DISABLE WAKELOCK TRACE\n");
- }
-
- return (ssize_t)(onoff+1);
-}
-#endif /* DHD_TRACE_WAKE_LOCK */
-
-#if defined(DHD_LB_TXP)
-static ssize_t
-show_lbtxp(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
- unsigned long onoff;
- dhd_info_t *dhd = (dhd_info_t *)dev;
-
- onoff = atomic_read(&dhd->lb_txp_active);
- ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
- onoff);
- return ret;
-}
-
-static ssize_t
-lbtxp_onoff(struct dhd_info *dev, const char *buf, size_t count)
-{
- unsigned long onoff;
- dhd_info_t *dhd = (dhd_info_t *)dev;
- int i;
-
- onoff = bcm_strtoul(buf, NULL, 10);
-
- sscanf(buf, "%lu", &onoff);
- if (onoff != 0 && onoff != 1) {
- return -EINVAL;
- }
- atomic_set(&dhd->lb_txp_active, onoff);
-
- /* Since the scheme is changed clear the counters */
- for (i = 0; i < NR_CPUS; i++) {
- DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);
- DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);
- }
-
- return count;
-}
-
-#endif /* DHD_LB_TXP */
-
-#ifdef DHD_LOG_DUMP
-static ssize_t
-show_logdump_periodic_flush(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
- unsigned long val;
-
- val = logdump_periodic_flush;
- ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", val);
- return ret;
-}
-
-static ssize_t
-logdump_periodic_flush_onoff(struct dhd_info *dev, const char *buf, size_t count)
-{
- unsigned long val;
-
- val = bcm_strtoul(buf, NULL, 10);
-
- sscanf(buf, "%lu", &val);
- if (val != 0 && val != 1) {
- return -EINVAL;
- }
- logdump_periodic_flush = val;
- return count;
-}
-static ssize_t
-show_logdump_ecntr(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
- unsigned long val;
-
- val = logdump_ecntr_enable;
- ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", val);
- return ret;
-}
-
-static ssize_t
-logdump_ecntr_onoff(struct dhd_info *dev, const char *buf, size_t count)
-{
- unsigned long val;
-
- val = bcm_strtoul(buf, NULL, 10);
-
- sscanf(buf, "%lu", &val);
- if (val != 0 && val != 1) {
- return -EINVAL;
- }
- logdump_ecntr_enable = val;
- return count;
-}
-
-#endif /* DHD_LOG_DUMP */
-/*
- * Generic Attribute Structure for DHD.
- * If we have to add a new sysfs entry under /sys/bcm-dhd/, we have
- * to instantiate an object of type dhd_attr, populate it with
- * the required show/store functions (ex:- dhd_attr_cpumask_primary)
- * and add the object to default_attrs[] array, that gets registered
- * to the kobject of dhd (named bcm-dhd).
- */
-
-struct dhd_attr {
- struct attribute attr;
- ssize_t(*show)(struct dhd_info *, char *);
- ssize_t(*store)(struct dhd_info *, const char *, size_t count);
-};
-
-#if defined(DHD_TRACE_WAKE_LOCK)
-static struct dhd_attr dhd_attr_wklock =
- __ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff);
-#endif /* defined(DHD_TRACE_WAKE_LOCK */
-
-#if defined(DHD_LB_TXP)
-static struct dhd_attr dhd_attr_lbtxp =
- __ATTR(lbtxp, 0660, show_lbtxp, lbtxp_onoff);
-#endif /* DHD_LB_TXP */
-#ifdef DHD_LOG_DUMP
-static struct dhd_attr dhd_attr_logdump_periodic_flush =
- __ATTR(logdump_periodic_flush, 0660, show_logdump_periodic_flush,
- logdump_periodic_flush_onoff);
-static struct dhd_attr dhd_attr_logdump_ecntr =
- __ATTR(logdump_ecntr_enable, 0660, show_logdump_ecntr,
- logdump_ecntr_onoff);
-#endif /* DHD_LOG_DUMP */
-
-static struct dhd_attr dhd_attr_ecounters =
- __ATTR(ecounters, 0660, show_enable_ecounter, ecounter_onoff);
-
-/* Attribute object that gets registered with "bcm-dhd" kobject tree */
-static struct attribute *default_attrs[] = {
-#if defined(DHD_TRACE_WAKE_LOCK)
- &dhd_attr_wklock.attr,
-#endif // endif
-#if defined(DHD_LB_TXP)
- &dhd_attr_lbtxp.attr,
-#endif /* DHD_LB_TXP */
-#ifdef DHD_LOG_DUMP
- &dhd_attr_logdump_periodic_flush.attr,
- &dhd_attr_logdump_ecntr.attr,
-#endif // endif
- &dhd_attr_ecounters.attr,
- NULL
-};
-
-#define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj)
-#define to_attr(a) container_of(a, struct dhd_attr, attr)
-
-/*
- * bcm-dhd kobject show function, the "attr" attribute specifices to which
- * node under "bcm-dhd" the show function is called.
- */
-static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf)
-{
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- dhd_info_t *dhd = to_dhd(kobj);
- struct dhd_attr *d_attr = to_attr(attr);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- int ret;
-
- if (d_attr->show)
- ret = d_attr->show(dhd, buf);
- else
- ret = -EIO;
-
- return ret;
-}
-
-/*
- * bcm-dhd kobject show function, the "attr" attribute specifices to which
- * node under "bcm-dhd" the store function is called.
- */
-static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
-{
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- dhd_info_t *dhd = to_dhd(kobj);
- struct dhd_attr *d_attr = to_attr(attr);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- int ret;
-
- if (d_attr->store)
- ret = d_attr->store(dhd, buf, count);
- else
- ret = -EIO;
-
- return ret;
-
-}
-
-static struct sysfs_ops dhd_sysfs_ops = {
- .show = dhd_show,
- .store = dhd_store,
-};
-
-static struct kobj_type dhd_ktype = {
- .sysfs_ops = &dhd_sysfs_ops,
- .default_attrs = default_attrs,
-};
-
-/* Create a kobject and attach to sysfs interface */
-static int dhd_sysfs_init(dhd_info_t *dhd)
-{
- int ret = -1;
-
- if (dhd == NULL) {
- DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
- return ret;
- }
-
- /* Initialize the kobject */
- ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "bcm-dhd");
- if (ret) {
- kobject_put(&dhd->dhd_kobj);
- DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
- return ret;
- }
-
- /*
- * We are always responsible for sending the uevent that the kobject
- * was added to the system.
- */
- kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD);
-
- return ret;
-}
-
-/* Done with the kobject and detach the sysfs interface */
-static void dhd_sysfs_exit(dhd_info_t *dhd)
-{
- if (dhd == NULL) {
- DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
- return;
- }
-
- /* Releae the kobject */
- if (dhd->dhd_kobj.state_initialized)
- kobject_put(&dhd->dhd_kobj);
-}
-
#ifdef DHD_DEBUG_UART
bool
dhd_debug_uart_is_running(struct net_device *dev)
if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON)
#endif // endif
{
- if (dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN ||
+ if (dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_RC_DETECT ||
+ dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_EP_DETECT ||
#ifdef DHD_FW_COREDUMP
dhdp->memdump_success == FALSE ||
#endif // endif
}
#endif /* PCIE_FULL_DONGLE */
/* ---------------------------- End of sysfs implementation ------------------------------------- */
+
#ifdef SET_PCIE_IRQ_CPU_CORE
void
-dhd_set_irq_cpucore(dhd_pub_t *dhdp, int set)
+dhd_set_irq_cpucore(dhd_pub_t *dhdp, int affinity_cmd)
{
- unsigned int irq;
+ unsigned int pcie_irq = 0;
+
if (!dhdp) {
DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
return;
return;
}
- if (dhdpcie_get_pcieirq(dhdp->bus, &irq)) {
+ DHD_ERROR(("Enter %s, PCIe affinity cmd=0x%x\n", __FUNCTION__, affinity_cmd));
+
+ if (dhdpcie_get_pcieirq(dhdp->bus, &pcie_irq)) {
+ DHD_ERROR(("%s : Can't get interrupt number\n", __FUNCTION__));
return;
}
-
- set_irq_cpucore(irq, set);
+
+	/*
+	 * irq_set_affinity() assigns the PCIe interrupt to a dedicated CPU core.
+	 * If the dedicated CPU core is not online, the PCIe interrupt is
+	 * scheduled on CPU core 0 instead.
+	 */
+ switch (affinity_cmd) {
+ case PCIE_IRQ_AFFINITY_OFF:
+ break;
+ case PCIE_IRQ_AFFINITY_BIG_CORE_ANY:
+#if defined(CONFIG_ARCH_SM8150)
+ irq_set_affinity_hint(pcie_irq, dhdp->info->cpumask_primary);
+ irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary);
+#else /* Exynos and Others */
+ irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary);
+#endif /* CONFIG_ARCH_SM8150 */
+ break;
+#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820)
+ case PCIE_IRQ_AFFINITY_BIG_CORE_EXYNOS:
+ DHD_ERROR(("%s, PCIe IRQ:%u set Core %d\n",
+ __FUNCTION__, pcie_irq, PCIE_IRQ_CPU_CORE));
+ irq_set_affinity(pcie_irq, cpumask_of(PCIE_IRQ_CPU_CORE));
+ break;
+#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 */
+ default:
+ DHD_ERROR(("%s, Unknown PCIe affinity cmd=0x%x\n",
+ __FUNCTION__, affinity_cmd));
+ }
}
#endif /* SET_PCIE_IRQ_CPU_CORE */
ret = BCME_ERROR;
} else {
if (fp->f_mode & FMODE_WRITE) {
- ret = vfs_write(fp, buf, buf_len, &fp->f_pos);
+ ret = compat_vfs_write(fp, buf, buf_len, &fp->f_pos);
if (ret < 0) {
DHD_ERROR(("%s: Couldn't write file '%s'\n",
__FUNCTION__, filepath));
dhd_get_random_bytes(uint8 *buf, uint len)
{
#ifdef BCMPCIE
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
+ int rndlen = get_random_bytes_arch(buf, len);
+ if (rndlen != len) {
+ bzero(buf, len);
+ get_random_bytes(buf, len);
+ }
+#else
get_random_bytes_arch(buf, len);
+#endif // endif
#endif /* BCMPCIE */
return BCME_OK;
}
return;
}
- ret = dhd_bus_perform_flr_with_quiesce(dhdp);
+ ret = dhd_bus_perform_flr_with_quiesce(dhdp, dhdp->bus, FALSE);
if (ret != BCME_DNGL_DEVRESET) {
DHD_ERROR(("%s: dhd_bus_perform_flr_with_quiesce failed with ret: %d,"
"toggle REG_ON\n", __FUNCTION__, ret));
void
get_debug_dump_time(char *str)
{
- struct timeval curtime;
+ struct osl_timespec curtime;
unsigned long local_time;
struct rtc_time tm;
if (!strlen(str)) {
- do_gettimeofday(&curtime);
+ osl_do_gettimeofday(&curtime);
local_time = (u32)(curtime.tv_sec -
(sys_tz.tz_minuteswest * DHD_LOG_DUMP_TS_MULTIPLIER_VALUE));
rtc_time_to_tm(local_time, &tm);
memset(str, 0, DEBUG_DUMP_TIME_BUF_LEN);
}
-#define KIRQ_PRINT_BUF_LEN 256
-
-void
-dhd_print_kirqstats(dhd_pub_t *dhd, unsigned int irq_num)
-{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
- unsigned long flags = 0;
- struct irq_desc *desc;
- int i; /* cpu iterator */
- struct bcmstrbuf strbuf;
- char tmp_buf[KIRQ_PRINT_BUF_LEN];
-
- desc = irq_to_desc(irq_num);
- if (!desc) {
- DHD_ERROR(("%s : irqdesc is not found \n", __FUNCTION__));
- return;
- }
- bcm_binit(&strbuf, tmp_buf, KIRQ_PRINT_BUF_LEN);
- raw_spin_lock_irqsave(&desc->lock, flags);
- bcm_bprintf(&strbuf, "dhd irq %u:", irq_num);
- for_each_online_cpu(i)
- bcm_bprintf(&strbuf, "%10u ",
- desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, i) : 0);
- if (desc->irq_data.chip) {
- if (desc->irq_data.chip->name)
- bcm_bprintf(&strbuf, " %8s", desc->irq_data.chip->name);
- else
- bcm_bprintf(&strbuf, " %8s", "-");
- } else {
- bcm_bprintf(&strbuf, " %8s", "None");
- }
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
- if (desc->irq_data.domain)
- bcm_bprintf(&strbuf, " %d", (int)desc->irq_data.hwirq);
-#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
- bcm_bprintf(&strbuf, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
-#endif // endif
-#endif /* LINUX VERSION > 3.1.0 */
-
- if (desc->name)
- bcm_bprintf(&strbuf, "-%-8s", desc->name);
-
- DHD_ERROR(("%s\n", strbuf.origbuf));
- raw_spin_unlock_irqrestore(&desc->lock, flags);
-#endif /* LINUX VERSION > 2.6.28 */
-}
-
-void
-dhd_show_kirqstats(dhd_pub_t *dhd)
-{
- unsigned int irq = -1;
-#ifdef BCMPCIE
- dhdpcie_get_pcieirq(dhd->bus, &irq);
-#endif /* BCMPCIE */
-#ifdef BCMSDIO
- irq = ((wifi_adapter_info_t *)dhd->info->adapter)->irq_num;
-#endif /* BCMSDIO */
- if (irq != -1) {
-#ifdef BCMPCIE
- DHD_ERROR(("DUMP data kernel irq stats : \n"));
-#endif /* BCMPCIE */
-#ifdef BCMSDIO
- DHD_ERROR(("DUMP data/host wakeup kernel irq stats : \n"));
-#endif /* BCMSDIO */
- dhd_print_kirqstats(dhd, irq);
- }
-#ifdef BCMPCIE_OOB_HOST_WAKE
- irq = dhdpcie_get_oob_irq_num(dhd->bus);
- if (irq) {
- DHD_ERROR(("DUMP PCIE host wakeup kernel irq stats : \n"));
- dhd_print_kirqstats(dhd, irq);
- }
-#endif /* BCMPCIE_OOB_HOST_WAKE */
-}
-
void
dhd_print_tasklet_status(dhd_pub_t *dhd)
{
#define DHD_RING_MAGIC 0x20170910
#define DHD_RING_IDX_INVALID 0xffffffff
+#define DHD_RING_SYNC_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
+#define DHD_RING_SYNC_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
+#define DHD_RING_SYNC_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
+#define DHD_RING_SYNC_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
+
typedef struct {
uint32 elem_size;
uint32 elem_cnt;
void *elem;
} dhd_fixed_ring_info_t;
+typedef struct {
+ uint32 elem_size;
+ uint32 elem_cnt;
+ uint32 idx; /* -1 : not started */
+ uint32 rsvd; /* reserved for future use */
+
+ /* protected elements during serialization */
+ atomic_t ring_locked;
+ /* check the overwriting */
+ uint32 ring_overwrited;
+
+ /* saved data elements */
+ void *elem;
+} dhd_singleidx_ring_info_t;
+
typedef struct {
uint32 magic;
uint32 type;
- struct mutex ring_sync; /* pointer to mutex */
+ void *ring_sync; /* spinlock for sync */
union {
dhd_fixed_ring_info_t fixed;
+ dhd_singleidx_ring_info_t single;
};
} dhd_ring_info_t;
}
void *
-dhd_ring_init(uint8 *buf, uint32 buf_size, uint32 elem_size, uint32 elem_cnt)
+dhd_ring_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size, uint32 elem_size,
+ uint32 elem_cnt, uint32 type)
{
dhd_ring_info_t *ret_ring;
DHD_RING_ERR(("NO RING BUFFER\n"));
return NULL;
}
+
if (buf_size < dhd_ring_get_hdr_size() + elem_size * elem_cnt) {
DHD_RING_ERR(("RING SIZE IS TOO SMALL\n"));
return NULL;
}
+ if (type != DHD_RING_TYPE_FIXED && type != DHD_RING_TYPE_SINGLE_IDX) {
+ DHD_RING_ERR(("UNSUPPORTED RING TYPE\n"));
+ return NULL;
+ }
+
ret_ring = (dhd_ring_info_t *)buf;
- ret_ring->type = DHD_RING_TYPE_FIXED;
- mutex_init(&ret_ring->ring_sync);
- ret_ring->fixed.read_idx = DHD_RING_IDX_INVALID;
- ret_ring->fixed.write_idx = DHD_RING_IDX_INVALID;
- ret_ring->fixed.lock_idx = DHD_RING_IDX_INVALID;
- ret_ring->fixed.elem = buf + sizeof(dhd_ring_info_t);
- ret_ring->fixed.elem_size = elem_size;
- ret_ring->fixed.elem_cnt = elem_cnt;
+ ret_ring->type = type;
+ ret_ring->ring_sync = DHD_RING_SYNC_LOCK_INIT(dhdp->osh);
ret_ring->magic = DHD_RING_MAGIC;
+
+ if (type == DHD_RING_TYPE_FIXED) {
+ ret_ring->fixed.read_idx = DHD_RING_IDX_INVALID;
+ ret_ring->fixed.write_idx = DHD_RING_IDX_INVALID;
+ ret_ring->fixed.lock_idx = DHD_RING_IDX_INVALID;
+ ret_ring->fixed.elem = buf + sizeof(dhd_ring_info_t);
+ ret_ring->fixed.elem_size = elem_size;
+ ret_ring->fixed.elem_cnt = elem_cnt;
+ } else {
+ ret_ring->single.idx = DHD_RING_IDX_INVALID;
+ atomic_set(&ret_ring->single.ring_locked, 0);
+ ret_ring->single.ring_overwrited = 0;
+ ret_ring->single.rsvd = 0;
+ ret_ring->single.elem = buf + sizeof(dhd_ring_info_t);
+ ret_ring->single.elem_size = elem_size;
+ ret_ring->single.elem_cnt = elem_cnt;
+ }
+
return ret_ring;
}
void
-dhd_ring_deinit(void *_ring)
+dhd_ring_deinit(dhd_pub_t *dhdp, void *_ring)
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
- dhd_fixed_ring_info_t *fixed;
if (!ring) {
return;
}
return;
}
- mutex_destroy(&ring->ring_sync);
- fixed = &ring->fixed;
- memset(fixed->elem, 0, fixed->elem_size * fixed->elem_cnt);
- fixed->elem_size = fixed->elem_cnt = 0;
+ if (ring->type != DHD_RING_TYPE_FIXED &&
+ ring->type != DHD_RING_TYPE_SINGLE_IDX) {
+ return;
+ }
+
+ DHD_RING_SYNC_LOCK_DEINIT(dhdp->osh, ring->ring_sync);
+ ring->ring_sync = NULL;
+ if (ring->type == DHD_RING_TYPE_FIXED) {
+ dhd_fixed_ring_info_t *fixed = &ring->fixed;
+ memset(fixed->elem, 0, fixed->elem_size * fixed->elem_cnt);
+ fixed->elem_size = fixed->elem_cnt = 0;
+ } else {
+ dhd_singleidx_ring_info_t *single = &ring->single;
+ memset(single->elem, 0, single->elem_size * single->elem_cnt);
+ single->elem_size = single->elem_cnt = 0;
+ }
ring->type = 0;
ring->magic = 0;
- return;
}
+/*
+ * Map an element pointer back to its index within the ring storage.
+ * Shared by both ring flavours (fixed and single-index); 'type' selects
+ * which geometry fields (elem, elem_size, elem_cnt) to read.
+ * 'sig' is a short caller tag used only to label error logs.
+ * NOTE: an out-of-range index is logged but still RETURNED; every caller
+ * must range-check the result against elem_cnt.
+ */
+static inline uint32
+__dhd_ring_ptr2idx(void *ring, void *ptr, char *sig, uint32 type)
+{
+	uint32 diff;
+	uint32 ret_idx = (uint32)DHD_RING_IDX_INVALID;
+	uint32 elem_size, elem_cnt;
+	void *elem;
+
+	/* pick the geometry matching the ring flavour */
+	if (type == DHD_RING_TYPE_FIXED) {
+		dhd_fixed_ring_info_t *fixed = (dhd_fixed_ring_info_t *)ring;
+		elem_size = fixed->elem_size;
+		elem_cnt = fixed->elem_cnt;
+		elem = fixed->elem;
+	} else if (type == DHD_RING_TYPE_SINGLE_IDX) {
+		dhd_singleidx_ring_info_t *single = (dhd_singleidx_ring_info_t *)ring;
+		elem_size = single->elem_size;
+		elem_cnt = single->elem_cnt;
+		elem = single->elem;
+	} else {
+		DHD_RING_ERR(("UNSUPPORTED RING TYPE %d\n", type));
+		return ret_idx;
+	}
+
+	/* reject pointers before the element array */
+	if (ptr < elem) {
+		DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, elem));
+		return ret_idx;
+	}
+	/* reject pointers not aligned to an element boundary */
+	diff = (uint32)((uint8 *)ptr - (uint8 *)elem);
+	if (diff % elem_size != 0) {
+		DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, elem));
+		return ret_idx;
+	}
+	ret_idx = diff / elem_size;
+	if (ret_idx >= elem_cnt) {
+		DHD_RING_ERR(("INVALID POINTER max:%d cur:%d\n", elem_cnt, ret_idx));
+	}
+	return ret_idx;
+}
+
+/* Sub functions for fixed ring */
/* get counts between two indexes of ring buffer (internal only) */
static inline int
__dhd_fixed_ring_get_count(dhd_fixed_ring_info_t *ring, int start, int end)
return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
}
-static inline uint32
-__dhd_fixed_ring_ptr2idx(dhd_fixed_ring_info_t *ring, void *ptr, char *sig)
-{
- uint32 diff;
- uint32 ret_idx = (uint32)DHD_RING_IDX_INVALID;
-
- if (ptr < ring->elem) {
- DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, ring->elem));
- return ret_idx;
- }
- diff = (uint32)((uint8 *)ptr - (uint8 *)ring->elem);
- if (diff % ring->elem_size != 0) {
- DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, ring->elem));
- return ret_idx;
- }
- ret_idx = diff / ring->elem_size;
- if (ret_idx >= ring->elem_cnt) {
- DHD_RING_ERR(("INVALID POINTER max:%d cur:%d\n", ring->elem_cnt, ret_idx));
- }
- return ret_idx;
-}
-
static inline void *
-__dhd_fixed_ring_get_next(dhd_fixed_ring_info_t *ring, void *prev)
+__dhd_fixed_ring_get_next(dhd_fixed_ring_info_t *ring, void *prev, uint32 type)
{
uint32 cur_idx;
return NULL;
}
- cur_idx = __dhd_fixed_ring_ptr2idx(ring, prev, "NEXT");
+ cur_idx = __dhd_ring_ptr2idx(ring, prev, "NEXT", type);
if (cur_idx >= ring->elem_cnt) {
return NULL;
}
}
static inline void *
-__dhd_fixed_ring_get_prev(dhd_fixed_ring_info_t *ring, void *prev)
+__dhd_fixed_ring_get_prev(dhd_fixed_ring_info_t *ring, void *prev, uint32 type)
{
uint32 cur_idx;
DHD_RING_ERR(("EMPTY RING\n"));
return NULL;
}
- cur_idx = __dhd_fixed_ring_ptr2idx(ring, prev, "PREV");
+ cur_idx = __dhd_ring_ptr2idx(ring, prev, "PREV", type);
if (cur_idx >= ring->elem_cnt) {
return NULL;
}
}
static inline void
-__dhd_fixed_ring_lock(dhd_fixed_ring_info_t *ring, void *first_ptr, void *last_ptr)
+__dhd_fixed_ring_lock(dhd_fixed_ring_info_t *ring, void *first_ptr, void *last_ptr, uint32 type)
{
uint32 first_idx;
uint32 last_idx;
}
if (first_ptr) {
- first_idx = __dhd_fixed_ring_ptr2idx(ring, first_ptr, "LCK FIRST");
+ first_idx = __dhd_ring_ptr2idx(ring, first_ptr, "LCK FIRST", type);
if (first_idx >= ring->elem_cnt) {
return;
}
}
if (last_ptr) {
- last_idx = __dhd_fixed_ring_ptr2idx(ring, last_ptr, "LCK LAST");
+ last_idx = __dhd_ring_ptr2idx(ring, last_ptr, "LCK LAST", type);
if (last_idx >= ring->elem_cnt) {
return;
}
return;
}
+/* Raw setter for the fixed ring read index; caller must hold ring_sync. */
+static inline void
+__dhd_fixed_ring_set_read_idx(dhd_fixed_ring_info_t *ring, uint32 idx)
+{
+	ring->read_idx = idx;
+}
+
+/* Raw setter for the fixed ring write index; caller must hold ring_sync. */
+static inline void
+__dhd_fixed_ring_set_write_idx(dhd_fixed_ring_info_t *ring, uint32 idx)
+{
+	ring->write_idx = idx;
+}
+
+/* Raw getter for the fixed ring read index; caller must hold ring_sync. */
+static inline uint32
+__dhd_fixed_ring_get_read_idx(dhd_fixed_ring_info_t *ring)
+{
+	return ring->read_idx;
+}
+
+/* Raw getter for the fixed ring write index; caller must hold ring_sync. */
+static inline uint32
+__dhd_fixed_ring_get_write_idx(dhd_fixed_ring_info_t *ring)
+{
+	return ring->write_idx;
+}
+
+/* Sub functions for single index ring */
+/*
+ * Return the oldest element of a single-index ring, or NULL if the ring
+ * has never been written. Once the ring has wrapped (ring_overwrited),
+ * the oldest element is the slot just after the newest one.
+ */
+static inline void *
+__dhd_singleidx_ring_get_first(dhd_singleidx_ring_info_t *ring)
+{
+	uint32 tmp_idx = 0;
+
+	if (ring->idx == DHD_RING_IDX_INVALID) {
+		return NULL;
+	}
+
+	if (ring->ring_overwrited) {
+		tmp_idx = (ring->idx + 1) % ring->elem_cnt;
+	}
+
+	return (uint8 *)ring->elem + (ring->elem_size * tmp_idx);
+}
+
+/*
+ * Return the newest element (the one at the current index), or NULL if
+ * the ring has never been written.
+ */
+static inline void *
+__dhd_singleidx_ring_get_last(dhd_singleidx_ring_info_t *ring)
+{
+	if (ring->idx == DHD_RING_IDX_INVALID) {
+		return NULL;
+	}
+
+	return (uint8 *)ring->elem + (ring->elem_size * ring->idx);
+}
+
+/*
+ * Advance the single index and return the slot for the next write.
+ * First-ever write starts at slot 0. Once full, the ring silently
+ * overwrites the oldest slot (ring_overwrited latches on). Returns NULL
+ * while the whole-ring lock is held so a reader can serialize the data.
+ */
+static inline void *
+__dhd_singleidx_ring_get_empty(dhd_singleidx_ring_info_t *ring)
+{
+	if (ring->idx == DHD_RING_IDX_INVALID) {
+		ring->idx = 0;
+		return (uint8 *)ring->elem;
+	}
+
+	/* check the lock is held */
+	if (atomic_read(&ring->ring_locked)) {
+		return NULL;
+	}
+
+	/* check the index rollover */
+	if (!ring->ring_overwrited && ring->idx == (ring->elem_cnt - 1)) {
+		ring->ring_overwrited = 1;
+	}
+
+	ring->idx = (ring->idx + 1) % ring->elem_cnt;
+
+	return (uint8 *)ring->elem + (ring->elem_size * ring->idx);
+}
+
+/*
+ * Return the element following 'prev' in write order, or NULL when
+ * 'prev' is already the newest record (cur_idx == ring->idx) or is not
+ * a valid element pointer.
+ */
+static inline void *
+__dhd_singleidx_ring_get_next(dhd_singleidx_ring_info_t *ring, void *prev, uint32 type)
+{
+	uint32 cur_idx;
+
+	if (ring->idx == DHD_RING_IDX_INVALID) {
+		DHD_RING_ERR(("EMPTY RING\n"));
+		return NULL;
+	}
+
+	cur_idx = __dhd_ring_ptr2idx(ring, prev, "NEXT", type);
+	if (cur_idx >= ring->elem_cnt) {
+		return NULL;
+	}
+
+	if (cur_idx == ring->idx) {
+		/* no more new record */
+		return NULL;
+	}
+
+	cur_idx = (cur_idx + 1) % ring->elem_cnt;
+
+	return (uint8 *)ring->elem + ring->elem_size * cur_idx;
+}
+
+/*
+ * Return the element preceding 'prev' in write order, or NULL when
+ * walking back would leave the valid window: index 0 on an un-wrapped
+ * ring, or stepping back onto the newest slot on a wrapped ring.
+ */
+static inline void *
+__dhd_singleidx_ring_get_prev(dhd_singleidx_ring_info_t *ring, void *prev, uint32 type)
+{
+	uint32 cur_idx;
+
+	if (ring->idx == DHD_RING_IDX_INVALID) {
+		DHD_RING_ERR(("EMPTY RING\n"));
+		return NULL;
+	}
+	cur_idx = __dhd_ring_ptr2idx(ring, prev, "PREV", type);
+	if (cur_idx >= ring->elem_cnt) {
+		return NULL;
+	}
+
+	if (!ring->ring_overwrited && cur_idx == 0) {
+		/* no more new record */
+		return NULL;
+	}
+
+	/* wrap-around step backwards */
+	cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt;
+	if (ring->ring_overwrited && cur_idx == ring->idx) {
+		/* no more new record */
+		return NULL;
+	}
+
+	return (uint8 *)ring->elem + ring->elem_size * cur_idx;
+}
+
+/* Latch the whole-ring lock; while set, writers get NULL from get_empty. */
+static inline void
+__dhd_singleidx_ring_whole_lock(dhd_singleidx_ring_info_t *ring)
+{
+	if (!atomic_read(&ring->ring_locked)) {
+		atomic_set(&ring->ring_locked, 1);
+	}
+}
+
+/* Release the whole-ring lock set by __dhd_singleidx_ring_whole_lock. */
+static inline void
+__dhd_singleidx_ring_whole_unlock(dhd_singleidx_ring_info_t *ring)
+{
+	if (atomic_read(&ring->ring_locked)) {
+		atomic_set(&ring->ring_locked, 0);
+	}
+}
+
/* Get first element : oldest element */
void *
dhd_ring_get_first(void *_ring)
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
void *ret = NULL;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return NULL;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
ret = __dhd_fixed_ring_get_first(&ring->fixed);
}
- mutex_unlock(&ring->ring_sync);
+ if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+ ret = __dhd_singleidx_ring_get_first(&ring->single);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
return ret;
}
dhd_ring_free_first(void *_ring)
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
__dhd_fixed_ring_free_first(&ring->fixed);
}
- mutex_unlock(&ring->ring_sync);
- return;
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+}
+
+/*
+ * Public setter for the read index. Fixed-type rings only; silently a
+ * no-op for single-index rings. Takes the ring spinlock internally.
+ */
+void
+dhd_ring_set_read_idx(void *_ring, uint32 read_idx)
+{
+	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+	unsigned long flags;
+
+	if (!ring || ring->magic != DHD_RING_MAGIC) {
+		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+	if (ring->type == DHD_RING_TYPE_FIXED) {
+		__dhd_fixed_ring_set_read_idx(&ring->fixed, read_idx);
+	}
+	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+}
+
+/*
+ * Public setter for the write index. Fixed-type rings only; silently a
+ * no-op for single-index rings. Takes the ring spinlock internally.
+ */
+void
+dhd_ring_set_write_idx(void *_ring, uint32 write_idx)
+{
+	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+	unsigned long flags;
+
+	if (!ring || ring->magic != DHD_RING_MAGIC) {
+		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+	if (ring->type == DHD_RING_TYPE_FIXED) {
+		__dhd_fixed_ring_set_write_idx(&ring->fixed, write_idx);
+	}
+	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+}
+
+/*
+ * Public getter for the read index. Returns DHD_RING_IDX_INVALID for a
+ * bad ring or a non-fixed ring type. Takes the ring spinlock internally.
+ */
+uint32
+dhd_ring_get_read_idx(void *_ring)
+{
+	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+	uint32 read_idx = DHD_RING_IDX_INVALID;
+	unsigned long flags;
+
+	if (!ring || ring->magic != DHD_RING_MAGIC) {
+		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+		return read_idx;
+	}
+
+	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+	if (ring->type == DHD_RING_TYPE_FIXED) {
+		read_idx = __dhd_fixed_ring_get_read_idx(&ring->fixed);
+	}
+	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+
+	return read_idx;
+}
+
+/*
+ * Public getter for the write index. Returns DHD_RING_IDX_INVALID for a
+ * bad ring or a non-fixed ring type. Takes the ring spinlock internally.
+ */
+uint32
+dhd_ring_get_write_idx(void *_ring)
+{
+	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+	uint32 write_idx = DHD_RING_IDX_INVALID;
+	unsigned long flags;
+
+	if (!ring || ring->magic != DHD_RING_MAGIC) {
+		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+		return write_idx;
+	}
+
+	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+	if (ring->type == DHD_RING_TYPE_FIXED) {
+		write_idx = __dhd_fixed_ring_get_write_idx(&ring->fixed);
+	}
+	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+
+	return write_idx;
}
/* Get latest element */
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
void *ret = NULL;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return NULL;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
ret = __dhd_fixed_ring_get_last(&ring->fixed);
}
- mutex_unlock(&ring->ring_sync);
+ if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+ ret = __dhd_singleidx_ring_get_last(&ring->single);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
return ret;
}
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
void *ret = NULL;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return NULL;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
ret = __dhd_fixed_ring_get_empty(&ring->fixed);
}
- mutex_unlock(&ring->ring_sync);
+ if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+ ret = __dhd_singleidx_ring_get_empty(&ring->single);
+ }
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
return ret;
}
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
void *ret = NULL;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return NULL;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
- ret = __dhd_fixed_ring_get_next(&ring->fixed, cur);
+ ret = __dhd_fixed_ring_get_next(&ring->fixed, cur, ring->type);
+ }
+ if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+ ret = __dhd_singleidx_ring_get_next(&ring->single, cur, ring->type);
}
- mutex_unlock(&ring->ring_sync);
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
return ret;
}
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
void *ret = NULL;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return NULL;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
- ret = __dhd_fixed_ring_get_prev(&ring->fixed, cur);
+ ret = __dhd_fixed_ring_get_prev(&ring->fixed, cur, ring->type);
+ }
+ if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+ ret = __dhd_singleidx_ring_get_prev(&ring->single, cur, ring->type);
}
- mutex_unlock(&ring->ring_sync);
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
return ret;
}
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
int cnt = 0;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return cnt;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
cnt = __dhd_fixed_ring_get_cur_size(&ring->fixed);
}
- mutex_unlock(&ring->ring_sync);
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
return cnt;
}
dhd_ring_lock(void *_ring, void *first_ptr, void *last_ptr)
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
- __dhd_fixed_ring_lock(&ring->fixed, first_ptr, last_ptr);
+ __dhd_fixed_ring_lock(&ring->fixed, first_ptr, last_ptr, ring->type);
}
- mutex_unlock(&ring->ring_sync);
- return;
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}
/* free all lock */
dhd_ring_lock_free(void *_ring)
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
__dhd_fixed_ring_lock_free(&ring->fixed);
}
- mutex_unlock(&ring->ring_sync);
- return;
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}
void *
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
void *ret = NULL;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return NULL;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
ret = __dhd_fixed_ring_lock_get_first(&ring->fixed);
}
- mutex_unlock(&ring->ring_sync);
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
return ret;
}
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
void *ret = NULL;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return NULL;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
ret = __dhd_fixed_ring_lock_get_last(&ring->fixed);
}
- mutex_unlock(&ring->ring_sync);
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
return ret;
}
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
int ret = BCME_ERROR;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return ret;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
ret = __dhd_fixed_ring_lock_get_count(&ring->fixed);
}
- mutex_unlock(&ring->ring_sync);
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
return ret;
}
dhd_ring_lock_free_first(void *_ring)
{
dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+ unsigned long flags;
if (!ring || ring->magic != DHD_RING_MAGIC) {
DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
return;
}
- mutex_lock(&ring->ring_sync);
+ DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
if (ring->type == DHD_RING_TYPE_FIXED) {
__dhd_fixed_ring_lock_free_first(&ring->fixed);
}
- mutex_unlock(&ring->ring_sync);
- return;
+ DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+}
+
+/*
+ * Freeze a single-index ring against further writes (writers see NULL
+ * from dhd_ring_get_empty) so its contents can be read consistently.
+ * No-op for fixed-type rings.
+ */
+void
+dhd_ring_whole_lock(void *_ring)
+{
+	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+	unsigned long flags;
+
+	if (!ring || ring->magic != DHD_RING_MAGIC) {
+		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+		__dhd_singleidx_ring_whole_lock(&ring->single);
+	}
+	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+}
+
+/*
+ * Re-enable writes on a single-index ring previously frozen with
+ * dhd_ring_whole_lock. No-op for fixed-type rings.
+ */
+void
+dhd_ring_whole_unlock(void *_ring)
+{
+	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
+	unsigned long flags;
+
+	if (!ring || ring->magic != DHD_RING_MAGIC) {
+		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
+	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
+		__dhd_singleidx_ring_whole_unlock(&ring->single);
+	}
+	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}
-#ifdef DHD_DUMP_MNGR
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
#define DHD_VFS_INODE(dir) (dir->d_inode)
#else
#else
#define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b, c)
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */
-
-static int
+int
dhd_file_delete(char *path)
{
struct path file_path;
err = kern_path(path, 0, &file_path);
if (err < 0) {
+ DHD_ERROR(("Failed to get kern-path delete file: %s error: %d\n", path, err));
return err;
}
- if (FALSE ||
+ if (
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
!d_is_file(file_path.dentry) ||
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0))
- d_really_is_negative(file_path.dentry)
-#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0) */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 1, 0))
+ d_really_is_negative(file_path.dentry) ||
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(4, 1, 0) */
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */
-)
+ FALSE)
{
err = -EINVAL;
} else {
return err;
}
-
+#ifdef DHD_DUMP_MNGR
static int
dhd_dump_file_manage_idx(dhd_dump_file_manage_t *fm_ptr, char *fname)
{
if (strlen(fm_ptr->elems[fm_idx].type_name) == 0) {
strncpy(fm_ptr->elems[fm_idx].type_name, fname, DHD_DUMP_TYPE_NAME_SIZE);
+ fm_ptr->elems[fm_idx].type_name[DHD_DUMP_TYPE_NAME_SIZE - 1] = '\0';
fm_ptr->elems[fm_idx].file_idx = 0;
}
/* save dump file path */
strncpy(elem->file_path[fp_idx], dump_path, DHD_DUMP_FILE_PATH_SIZE);
+ elem->file_path[fp_idx][DHD_DUMP_FILE_PATH_SIZE - 1] = '\0';
/* change file index to next file index */
elem->file_idx = (elem->file_idx + 1) % DHD_DUMP_FILE_COUNT_MAX;
#ifdef DHD_MAP_LOGGING
/* Will be called from SMMU fault handler */
void
-dhd_debug_info_dump(void)
+dhd_smmu_fault_handler(uint32 axid, ulong fault_addr)
{
dhd_pub_t *dhdp = (dhd_pub_t *)g_dhd_pub;
uint32 irq = (uint32)-1;
DHD_ERROR(("%s: Trigger SMMU Fault\n", __FUNCTION__));
+ DHD_ERROR(("%s: axid:0x%x, fault_addr:0x%lx", __FUNCTION__, axid, fault_addr));
dhdp->smmu_fault_occurred = TRUE;
+#ifdef DNGL_AXI_ERROR_LOGGING
+ dhdp->axi_error = TRUE;
+ dhdp->axi_err_dump->axid = axid;
+ dhdp->axi_err_dump->fault_address = fault_addr;
+#endif /* DNGL_AXI_ERROR_LOGGING */
/* Disable PCIe IRQ */
dhdpcie_get_pcieirq(dhdp->bus, &irq);
disable_irq_nosync(irq);
}
+ /* Take debug information first */
DHD_OS_WAKE_LOCK(dhdp);
- dhd_prot_debug_info_print(dhdp);
- osl_dma_map_dump();
-#ifdef DHD_FW_COREDUMP
- /* Load the dongle side dump to host memory */
- dhdp->memdump_enabled = DUMP_MEMONLY;
- dhdp->memdump_type = DUMP_TYPE_SMMU_FAULT;
- dhd_bus_mem_dump(dhdp);
-#endif /* DHD_FW_COREDUMP */
+ dhd_prot_smmu_fault_dump(dhdp);
DHD_OS_WAKE_UNLOCK(dhdp);
+
+ /* Take AXI information if possible */
+#ifdef DNGL_AXI_ERROR_LOGGING
+#ifdef DHD_USE_WQ_FOR_DNGL_AXI_ERROR
+ dhd_axi_error_dispatch(dhdp);
+#else
+ dhd_axi_error(dhdp);
+#endif /* DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
+#endif /* DNGL_AXI_ERROR_LOGGING */
}
-EXPORT_SYMBOL(dhd_debug_info_dump);
+EXPORT_SYMBOL(dhd_smmu_fault_handler);
#endif /* DHD_MAP_LOGGING */
#ifdef DHD_WIFI_SHUTDOWN
{
return (int)kernel_read(file, addr, (size_t)count, &offset);
}
+/* Kernel >= 4.14 variant: vfs_write was removed, delegate to kernel_write. */
+int
+compat_vfs_write(struct file *file, char *addr, int count, loff_t *offset)
+{
+	return (int)kernel_write(file, addr, count, offset);
+}
#else
int
compat_kernel_read(struct file *file, loff_t offset, char *addr, unsigned long count)
{
return kernel_read(file, offset, addr, count);
}
+/* Pre-4.14 variant: delegate straight to vfs_write. */
+int
+compat_vfs_write(struct file *file, char *addr, int count, loff_t *offset)
+{
+	return (int)vfs_write(file, addr, count, offset);
+}
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */
+#ifdef DHDTCPSYNC_FLOOD_BLK
+/*
+ * Work handler run when a TCP SYN flood is detected on an interface:
+ * in GO/SoftAP modes kick every associated station; in GC/STA modes
+ * drop the current association.
+ */
+static void dhd_blk_tsfl_handler(struct work_struct * work)
+{
+	dhd_if_t *ifp = NULL;
+	dhd_pub_t *dhdp = NULL;
+	/* Ignore compiler warnings due to -Werror=cast-qual */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif /* STRICT_GCC_WARNINGS && __GNUC__ */
+	ifp = container_of(work, dhd_if_t, blk_tsfl_work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif /* STRICT_GCC_WARNINGS && __GNUC__ */
+	if (ifp) {
+		dhdp = &ifp->info->pub;
+		if (dhdp) {
+			if ((dhdp->op_mode & DHD_FLAG_P2P_GO_MODE) ||
+				(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+				DHD_ERROR(("Disassoc due to TCP SYNC FLOOD ATTACK\n"));
+				wl_cfg80211_del_all_sta(ifp->net, WLAN_REASON_UNSPECIFIED);
+			} else if ((dhdp->op_mode & DHD_FLAG_P2P_GC_MODE) ||
+				(dhdp->op_mode & DHD_FLAG_STA_MODE)) {
+				/* fixed log typo: "Diconnect" -> "Disconnect" */
+				DHD_ERROR(("Disconnect due to TCP SYNC FLOOD ATTACK\n"));
+				wl_cfg80211_disassoc(ifp->net, WLAN_REASON_UNSPECIFIED);
+			}
+		}
+	}
+}
+/* Reset SYN-flood counters and restart the detection window for ifp. */
+void dhd_reset_tcpsync_info_by_ifp(dhd_if_t *ifp)
+{
+	ifp->tsync_rcvd = 0;
+	ifp->tsyncack_txed = 0;
+	/* window start, in whole seconds of local uptime */
+	ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
+}
+/*
+ * Net-device flavour of dhd_reset_tcpsync_info_by_ifp; tolerates a NULL
+ * dev or an interface without DHD private state.
+ */
+void dhd_reset_tcpsync_info_by_dev(struct net_device *dev)
+{
+	dhd_if_t *ifp = NULL;
+	if (dev) {
+		ifp = DHD_DEV_IFP(dev);
+	}
+	if (ifp) {
+		ifp->tsync_rcvd = 0;
+		ifp->tsyncack_txed = 0;
+		ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
+	}
+}
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+/*
+ * Delayed-work handler armed by dhd_eap_txcomplete when the EAPOL M4
+ * frame fails to transmit: if the interface is still in M4_TXFAILED
+ * state when the timer fires, force a disassoc so the supplicant can
+ * restart the 4-way handshake.
+ */
+static void dhd_m4_state_handler(struct work_struct *work)
+{
+	dhd_if_t *ifp = NULL;
+	/* Ignore compiler warnings due to -Werror=cast-qual */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+	struct delayed_work *dw = to_delayed_work(work);
+	ifp = container_of(dw, dhd_if_t, m4state_work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+
+	if (ifp && ifp->net &&
+		(OSL_ATOMIC_READ(ifp->info->pub->osh, &ifp->m4state) == M4_TXFAILED)) {
+		DHD_ERROR(("Disassoc for 4WAY_HANDSHAKE_TIMEOUT at %s\n",
+				ifp->net->name));
+		wl_cfg80211_disassoc(ifp->net, WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT);
+	}
+}
+
+/*
+ * TX-complete hook for EAPOL frames. On a failed transmit of the 4-way
+ * handshake M4 message, mark the interface M4_TXFAILED and arm
+ * dhd_m4state_work to disassoc after MAX_4WAY_TIMEOUT_MS unless the
+ * state is cleared first. Successful transmits are ignored.
+ */
+void
+dhd_eap_txcomplete(dhd_pub_t *dhdp, void *txp, bool success, int ifidx)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+	struct ether_header *eh;
+	uint16 type;
+
+	if (!success) {
+		/* strip the protocol header so the ethernet header is visible */
+		dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
+
+		eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
+		type = ntoh16(eh->ether_type);
+		if (type == ETHER_TYPE_802_1X) {
+			if (dhd_is_4way_msg((uint8 *)eh) == EAPOL_4WAY_M4) {
+				dhd_if_t *ifp = NULL;
+				ifp = dhd->iflist[ifidx];
+				if (!ifp || !ifp->net) {
+					return;
+				}
+
+				DHD_INFO(("%s: M4 TX failed on %d.\n",
+					__FUNCTION__, ifidx));
+
+				OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M4_TXFAILED);
+				schedule_delayed_work(&ifp->m4state_work,
+					msecs_to_jiffies(MAX_4WAY_TIMEOUT_MS));
+			}
+		}
+	}
+}
+
+/*
+ * Cancel (and wait for) any pending M4-failure delayed work on the
+ * given interface index; safe to call with an out-of-range ifidx or a
+ * missing interface.
+ */
+void
+dhd_cleanup_m4_state_work(dhd_pub_t *dhdp, int ifidx)
+{
+	dhd_info_t *dhdinfo;
+	dhd_if_t *ifp;
+
+	if ((ifidx < 0) || (ifidx >= DHD_MAX_IFS)) {
+		DHD_ERROR(("%s: invalid ifidx %d\n", __FUNCTION__, ifidx));
+		return;
+	}
+
+	dhdinfo = (dhd_info_t *)(dhdp->info);
+	if (!dhdinfo) {
+		DHD_ERROR(("%s: dhdinfo is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	ifp = dhdinfo->iflist[ifidx];
+	if (ifp) {
+		/* sync-cancel so the handler cannot run after teardown */
+		cancel_delayed_work_sync(&ifp->m4state_work);
+	}
+}
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+
+#ifdef DHD_HP2P
+/*
+ * Acquire the HP2P spinlock (irqsave) and return the saved flags for
+ * the matching dhd_os_hp2punlock call. Returns 0 flags if pub has no
+ * attached dhd_info.
+ */
+unsigned long
+dhd_os_hp2plock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+	unsigned long flags = 0;
+
+	dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->hp2p_lock, flags);
+	}
+
+	return flags;
+}
+
+/* Release the HP2P spinlock with the flags returned by dhd_os_hp2plock. */
+void
+dhd_os_hp2punlock(dhd_pub_t *pub, unsigned long flags)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		spin_unlock_irqrestore(&dhd->hp2p_lock, flags);
+	}
+}
+#endif /* DHD_HP2P */
+#ifdef DNGL_AXI_ERROR_LOGGING
+/*
+ * Deferred-work handler for a dongle AXI error: persist debug state,
+ * schedule a memory dump, then deliberately panic (BUG_ON(1) is reached
+ * on every path, including the error gotos) so the saved information
+ * survives for post-reset processing.
+ */
+static void
+dhd_axi_error_dump(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = (dhd_info_t *)handle;
+	dhd_pub_t *dhdp = NULL;
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+		goto exit;
+	}
+
+	dhdp = &dhd->pub;
+	/* NOTE(review): &dhd->pub can never be NULL here, this check is dead */
+	if (!dhdp) {
+		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+		goto exit;
+	}
+
+	/**
+	 * First save axi error information to a file
+	 * because panic should happen right after this.
+	 * After dhd reset, dhd reads the file, and do hang event process
+	 * to send axi error stored on the file to Bigdata server
+	 */
+	if (dhdp->axi_err_dump->etd_axi_error_v1.version != HND_EXT_TRAP_AXIERROR_VERSION_1) {
+		DHD_ERROR(("%s: Invalid AXI version: 0x%x\n",
+			__FUNCTION__, dhdp->axi_err_dump->etd_axi_error_v1.version));
+	}
+
+	DHD_OS_WAKE_LOCK(dhdp);
+#ifdef DHD_FW_COREDUMP
+#ifdef DHD_SSSR_DUMP
+	dhdp->collect_sssr = TRUE;
+#endif /* DHD_SSSR_DUMP */
+	DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__));
+	dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
+#endif /* DHD_FW_COREDUMP */
+	DHD_OS_WAKE_UNLOCK(dhdp);
+
+exit:
+	/* Trigger kernel panic after taking necessary dumps */
+	BUG_ON(1);
+}
+
+/* Queue dhd_axi_error_dump on the deferred-work queue at high priority. */
+void dhd_schedule_axi_error_dump(dhd_pub_t *dhdp, void *type)
+{
+	DHD_ERROR(("%s: scheduling axi_error_dump.. \n", __FUNCTION__));
+	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
+		type, DHD_WQ_WORK_AXI_ERROR_DUMP,
+		dhd_axi_error_dump, DHD_WQ_WORK_PRIORITY_HIGH);
+}
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#ifdef BCMPCIE
+/*
+ * Deferred-work handler for PCIe CTO (completion timeout) recovery;
+ * panics via BUG_ON if invoked without a dhd context.
+ */
+static void
+dhd_cto_recovery_handler(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	dhd_pub_t *dhdp = NULL;
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+		BUG_ON(1);
+		return;
+	}
+
+	dhdp = &dhd->pub;
+	dhdpcie_cto_recovery_handler(dhdp);
+}
+
+/* Queue dhd_cto_recovery_handler on the deferred-work queue, high priority. */
+void
+dhd_schedule_cto_recovery(dhd_pub_t *dhdp)
+{
+	DHD_ERROR(("%s: scheduling cto recovery.. \n", __FUNCTION__));
+	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
+		NULL, DHD_WQ_WORK_CTO_RECOVERY,
+		dhd_cto_recovery_handler, DHD_WQ_WORK_PRIORITY_HIGH);
+}
+#endif /* BCMPCIE */
+
+#ifdef SUPPORT_SET_TID
+/*
+ * Set custom TID value for UDP frame based on UID value.
+ * This will be triggered by android private command below.
+ * DRIVER SET_TID <Mode:uint8> <Target UID:uint32> <Custom TID:uint8>
+ * Mode 0(SET_TID_OFF) : Disable changing TID
+ * Mode 1(SET_TID_ALL_UDP) : Change TID for all UDP frames
+ * Mode 2(SET_TID_BASED_ON_UID) : Change TID for UDP frames based on target UID
+*/
+/*
+ * Rewrite the 802.1d priority (TID) of an outgoing UDP packet according
+ * to the configured mode: SET_TID_ALL_UDP retags every UDP frame,
+ * SET_TID_BASED_ON_UID retags only frames whose socket belongs to
+ * target_uid. Only best-effort (PRIO_8021D_BE) frames are retagged.
+ * NOTE(review): the ethernet payload is treated as IPv4 without checking
+ * ether_type first — confirm non-IPv4 frames cannot reach this path.
+ */
+void
+dhd_set_tid_based_on_uid(dhd_pub_t *dhdp, void *pkt)
+{
+	struct ether_header *eh = NULL;
+	struct sock *sk = NULL;
+	uint8 *pktdata = NULL;
+	uint8 *ip_hdr = NULL;
+	uint8 cur_prio;
+	uint8 prio;
+	uint32 uid;
+
+	if (dhdp->tid_mode == SET_TID_OFF) {
+		return;
+	}
+
+	pktdata = (uint8 *)PKTDATA(dhdp->osh, pkt);
+	eh = (struct ether_header *) pktdata;
+	ip_hdr = (uint8 *)eh + ETHER_HDR_LEN;
+
+	/* only UDP frames are retagged */
+	if (IPV4_PROT(ip_hdr) != IP_PROT_UDP) {
+		return;
+	}
+
+	cur_prio = PKTPRIO(pkt);
+	prio = dhdp->target_tid;
+	uid = dhdp->target_uid;
+
+	/* skip frames already tagged, or tagged with anything but best-effort */
+	if ((cur_prio == prio) ||
+		(cur_prio != PRIO_8021D_BE)) {
+			return;
+	}
+
+	sk = ((struct sk_buff*)(pkt))->sk;
+
+	if ((dhdp->tid_mode == SET_TID_ALL_UDP) ||
+		(sk && (uid == __kuid_val(sock_i_uid(sk))))) {
+		PKTSETPRIO(pkt, prio);
+	}
+}
+#endif /* SUPPORT_SET_TID */
+
void *dhd_get_pub(struct net_device *dev)
{
dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
}
return dhd->wd_timer_valid;
}
+
+#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
+/* Automatically add/del an interface to/from the bridge device that its
+ * primary interface (same bssidx) is a member of.
+ * dev != NULL: add 'dev' to the primary interface's bridge and bring it up.
+ * dev == NULL: remove the primary interface of 'ifidx' from its bridge.
+ */
+static void dhd_bridge_dev_set(dhd_info_t *dhd, int ifidx, struct net_device *dev)
+{
+	struct net_device *primary_ndev = NULL, *br_dev = NULL;
+	int cmd;
+	struct ifreq ifr;
+
+	/* add new interface to bridge dev */
+	if (dev) {
+		int found = 0, i;
+		DHD_ERROR(("bssidx %d\n", dhd->pub.info->iflist[ifidx]->bssidx));
+		for (i = 0 ; i < ifidx; i++) {
+			DHD_ERROR(("bssidx %d %d\n", i, dhd->pub.info->iflist[i]->bssidx));
+			/* search the primary interface */
+			if (dhd->pub.info->iflist[i]->bssidx == dhd->pub.info->iflist[ifidx]->bssidx) {
+				primary_ndev = dhd->pub.info->iflist[i]->net;
+				DHD_ERROR(("%dst is primary dev %s\n", i, primary_ndev->name));
+				found = 1;
+				break;
+			}
+		}
+		if (found == 0) {
+			DHD_ERROR(("Can not find primary dev %s\n", dev->name));
+			return;
+		}
+		cmd = SIOCBRADDIF;
+		ifr.ifr_ifindex = dev->ifindex;
+	} else { /* del interface from bridge dev */
+		primary_ndev = dhd->pub.info->iflist[ifidx]->net;
+		cmd = SIOCBRDELIF;
+		ifr.ifr_ifindex = primary_ndev->ifindex;
+	}
+	/* if primary net device is bridged */
+	if (primary_ndev->priv_flags & IFF_BRIDGE_PORT) {
+		rtnl_lock();
+		/* get bridge device */
+		br_dev = netdev_master_upper_dev_get(primary_ndev);
+		if (br_dev) {
+			const struct net_device_ops *ops = br_dev->netdev_ops;
+			DHD_ERROR(("br %s pri %s\n", br_dev->name, primary_ndev->name));
+			if (ops) {
+				if (cmd == SIOCBRADDIF) {
+					DHD_ERROR(("br call ndo_add_slave\n"));
+					ops->ndo_add_slave(br_dev, dev);
+					/* Also bring wds0.x interface up automatically */
+					dev_change_flags(dev, dev->flags | IFF_UP);
+				}
+				else {
+					DHD_ERROR(("br call ndo_del_slave\n"));
+					ops->ndo_del_slave(br_dev, primary_ndev);
+				}
+			}
+		}
+		else {
+			DHD_ERROR(("no br dev\n"));
+		}
+		rtnl_unlock();
+	}
+	else {
+		DHD_ERROR(("device %s is not bridged\n", primary_ndev->name));
+	}
+}
+#endif /* defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) */
/*
* DHD Linux header file (dhd_linux exports for cfg80211 and other components)
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_linux.h 767863 2018-06-15 10:19:19Z $
+ * $Id: dhd_linux.h 816392 2019-04-24 14:39:02Z $
*/
/* wifi platform functions for power, interrupt and pre-alloc, either
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
#include <linux/earlysuspend.h>
#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
+#ifdef PCIE_FULL_DONGLE
+#include <etd.h>
+#endif /* PCIE_FULL_DONGLE */
+#ifdef WL_MONITOR
+#include <bcmmsgbuf.h>
+#define MAX_RADIOTAP_SIZE 256 /* Maximum size to hold HE Radiotap header format */
+#define MAX_MON_PKT_SIZE (4096 + MAX_RADIOTAP_SIZE)
+#endif /* WL_MONITOR */
+
+#define FILE_DUMP_MAX_WAIT_TIME 4000
+
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+
+#ifdef BLOCK_IPV6_PACKET
+#define HEX_PREF_STR "0x"
+#define UNI_FILTER_STR "010000000000"
+#define ZERO_ADDR_STR "000000000000"
+#define ETHER_TYPE_STR "0000"
+#define IPV6_FILTER_STR "20"
+#define ZERO_TYPE_STR "00"
+#endif /* BLOCK_IPV6_PACKET */
+
+typedef struct dhd_if_event {
+ struct list_head list;
+ wl_event_data_if_t event;
+ char name[IFNAMSIZ+1];
+ uint8 mac[ETHER_ADDR_LEN];
+} dhd_if_event_t;
+
+/* Interface control information */
+typedef struct dhd_if {
+ struct dhd_info *info; /* back pointer to dhd_info */
+ /* OS/stack specifics */
+ struct net_device *net;
+ int idx; /* iface idx in dongle */
+ uint subunit; /* subunit */
+ uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
+ bool set_macaddress;
+ bool set_multicast;
+ uint8 bssidx; /* bsscfg index for the interface */
+ bool attached; /* Delayed attachment when unset */
+ bool txflowcontrol; /* Per interface flow control indicator */
+ char name[IFNAMSIZ+1]; /* linux interface name */
+ char dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */
+ struct net_device_stats stats;
+#ifdef PCIE_FULL_DONGLE
+ struct list_head sta_list; /* sll of associated stations */
+ spinlock_t sta_list_lock; /* lock for manipulating sll */
+#endif /* PCIE_FULL_DONGLE */
+ uint32 ap_isolate; /* ap-isolation settings */
+#ifdef DHD_L2_FILTER
+ bool parp_enable;
+ bool parp_discard;
+ bool parp_allnode;
+ arp_table_t *phnd_arp_table;
+ /* for Per BSS modification */
+ bool dhcp_unicast;
+ bool block_ping;
+ bool grat_arp;
+ bool block_tdls;
+#endif /* DHD_L2_FILTER */
+#ifdef DHD_MCAST_REGEN
+ bool mcast_regen_bss_enable;
+#endif // endif
+ bool rx_pkt_chainable; /* set all rx packet to chainable config by default */
+ cumm_ctr_t cumm_ctr; /* cummulative queue length of child flowrings */
+ uint8 tx_paths_active;
+ bool del_in_progress;
+ bool static_if; /* used to avoid some operations on static_if */
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ struct delayed_work m4state_work;
+ atomic_t m4state;
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+ bool recv_reassoc_evt;
+ bool post_roam_evt;
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
+#ifdef DHDTCPSYNC_FLOOD_BLK
+ uint32 tsync_rcvd;
+ uint32 tsyncack_txed;
+ u64 last_sync;
+ struct work_struct blk_tsfl_work;
+#endif /* DHDTCPSYNC_FLOOD_BLK */
+} dhd_if_t;
+
+struct ipv6_work_info_t {
+ uint8 if_idx;
+ char ipv6_addr[IPV6_ADDR_LEN];
+ unsigned long event;
+};
+
+typedef struct dhd_dump {
+ uint8 *buf;
+ int bufsize;
+ uint8 *hscb_buf;
+ int hscb_bufsize;
+} dhd_dump_t;
+#ifdef DNGL_AXI_ERROR_LOGGING
+typedef struct dhd_axi_error_dump {
+ ulong fault_address;
+ uint32 axid;
+ struct hnd_ext_trap_axi_error_v1 etd_axi_error_v1;
+} dhd_axi_error_dump_t;
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+struct dhd_rx_tx_work {
+ struct work_struct work;
+ struct sk_buff *skb;
+ struct net_device *net;
+ struct dhd_pub *pub;
+};
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+#if defined(DHD_LB)
+#if !defined(PCIE_FULL_DONGLE)
+#error "DHD Loadbalancing only supported on PCIE_FULL_DONGLE"
+#endif /* !PCIE_FULL_DONGLE */
+#endif /* DHD_LB */
+
+#if defined(DHD_LB_RXP) || defined(DHD_LB_RXC) || defined(DHD_LB_TXC) || \
+ defined(DHD_LB_STATS)
+#if !defined(DHD_LB)
+#error "DHD loadbalance derivatives are supported only if DHD_LB is defined"
+#endif /* !DHD_LB */
+#endif /* DHD_LB_RXP || DHD_LB_RXC || DHD_LB_TXC || DHD_LB_STATS */
+
+#if defined(DHD_LB)
+/* Dynamic CPU selection for load balancing */
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/notifier.h>
+#include <linux/workqueue.h>
+#include <asm/atomic.h>
+
+#if !defined(DHD_LB_PRIMARY_CPUS)
+#define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */
+#endif // endif
+#if !defined(DHD_LB_SECONDARY_CPUS)
+#define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */
+#endif // endif
+
+#define HIST_BIN_SIZE 9
+
+#if defined(DHD_LB_TXP)
+/* Pkttag not compatible with PROP_TXSTATUS or WLFC */
+typedef struct dhd_tx_lb_pkttag_fr {
+ struct net_device *net;
+ int ifidx;
+} dhd_tx_lb_pkttag_fr_t;
+
+#define DHD_LB_TX_PKTTAG_SET_NETDEV(tag, netdevp) ((tag)->net = netdevp)
+#define DHD_LB_TX_PKTTAG_NETDEV(tag) ((tag)->net)
+
+#define DHD_LB_TX_PKTTAG_SET_IFIDX(tag, ifidx) ((tag)->ifidx = ifidx)
+#define DHD_LB_TX_PKTTAG_IFIDX(tag) ((tag)->ifidx)
+#endif /* DHD_LB_TXP */
+
+#endif /* DHD_LB */
+
+#ifdef FILTER_IE
+#define FILTER_IE_PATH "/etc/wifi/filter_ie"
+#define FILTER_IE_BUFSZ 1024 /* ioc buffsize for FILTER_IE */
+#define FILE_BLOCK_READ_SIZE 256
+#define WL_FILTER_IE_IOV_HDR_SIZE OFFSETOF(wl_filter_ie_iov_v1_t, tlvs)
+#endif /* FILTER_IE */
+
+#define NULL_CHECK(p, s, err) \
+ do { \
+ if (!(p)) { \
+ printk("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \
+ err = BCME_ERROR; \
+ return err; \
+ } \
+ } while (0)
/* dongle status */
enum wifi_adapter_status {
#ifdef BCMSDIO
int (*get_wake_irq)(void);
#endif // endif
-#if defined(CUSTOM_COUNTRY_CODE)
+#ifdef CUSTOM_FORCE_NODFS_FLAG
void *(*get_country_code)(char *ccode, u32 flags);
-#else /* defined (CUSTOM_COUNTRY_CODE) */
+#else /* defined (CUSTOM_FORCE_NODFS_FLAG) */
void *(*get_country_code)(char *ccode);
#endif
};
} dhd_sta_t;
typedef dhd_sta_t dhd_sta_pool_t;
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+typedef enum {
+ M3_RXED,
+ M4_TXFAILED
+} msg_4way_state_t;
+#define MAX_4WAY_TIMEOUT_MS 2000
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+
+#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
+extern uint32 report_hang_privcmd_err;
+#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
+
+#if defined(ARGOS_NOTIFY_CB)
+int argos_register_notifier_init(struct net_device *net);
+int argos_register_notifier_deinit(void);
+
+extern int sec_argos_register_notifier(struct notifier_block *n, char *label);
+extern int sec_argos_unregister_notifier(struct notifier_block *n, char *label);
+
+typedef struct {
+ struct net_device *wlan_primary_netdev;
+ int argos_rps_cpus_enabled;
+} argos_rps_ctrl;
+
+#define RPS_TPUT_THRESHOLD 300
+#define DELAY_TO_CLEAR_RPS_CPUS 300
+#endif // endif
+
+#if defined(BT_OVER_SDIO)
+extern void wl_android_set_wifi_on_flag(bool enable);
+#endif /* BT_OVER_SDIO */
+
+#ifdef DHD_LOG_DUMP
+/* 0: DLD_BUF_TYPE_GENERAL, 1: DLD_BUF_TYPE_PRESERVE
+* 2: DLD_BUF_TYPE_SPECIAL
+*/
+#define DLD_BUFFER_NUM 3
+
+#ifndef CUSTOM_LOG_DUMP_BUFSIZE_MB
+#define CUSTOM_LOG_DUMP_BUFSIZE_MB 4 /* DHD_LOG_DUMP_BUF_SIZE 4 MB static memory in kernel */
+#endif /* CUSTOM_LOG_DUMP_BUFSIZE_MB */
+
+#define LOG_DUMP_TOTAL_BUFSIZE (1024 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+
+/*
+ * Below are different sections that use the prealloced buffer
+ * and sum of the sizes of these should not cross LOG_DUMP_TOTAL_BUFSIZE
+ */
+#define LOG_DUMP_GENERAL_MAX_BUFSIZE (256 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+#define LOG_DUMP_PRESERVE_MAX_BUFSIZE (128 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+#define LOG_DUMP_ECNTRS_MAX_BUFSIZE (256 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+#define LOG_DUMP_RTT_MAX_BUFSIZE (256 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+#define LOG_DUMP_FILTER_MAX_BUFSIZE (128 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+
+#if LOG_DUMP_TOTAL_BUFSIZE < (LOG_DUMP_GENERAL_MAX_BUFSIZE + \
+ LOG_DUMP_PRESERVE_MAX_BUFSIZE + LOG_DUMP_ECNTRS_MAX_BUFSIZE + LOG_DUMP_RTT_MAX_BUFSIZE \
+ + LOG_DUMP_FILTER_MAX_BUFSIZE)
+#error "LOG_DUMP_TOTAL_BUFSIZE is lesser than sum of all rings"
+#endif // endif
+
+/* Special buffer is allocated as separately in prealloc */
+#define LOG_DUMP_SPECIAL_MAX_BUFSIZE (8 * 1024)
+
+#define LOG_DUMP_MAX_FILESIZE (8 *1024 * 1024) /* 8 MB default */
+#ifdef CONFIG_LOG_BUF_SHIFT
+/* 15% of kernel log buf size, if for example klog buf size is 512KB
+* 15% of 512KB ~= 80KB
+*/
+#define LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE \
+ (15 * ((1 << CONFIG_LOG_BUF_SHIFT)/100))
+#endif /* CONFIG_LOG_BUF_SHIFT */
+
+#define LOG_DUMP_COOKIE_BUFSIZE 1024u
+
+typedef struct {
+ char *hdr_str;
+ log_dump_section_type_t sec_type;
+} dld_hdr_t;
+
+typedef struct {
+ int attr;
+ char *hdr_str;
+ log_dump_section_type_t sec_type;
+ int log_type;
+} dld_log_hdr_t;
+
+#define DHD_PRINT_BUF_NAME_LEN 30
+#endif /* DHD_LOG_DUMP */
+
int dhd_wifi_platform_register_drv(void);
void dhd_wifi_platform_unregister_drv(void);
wifi_adapter_info_t* dhd_wifi_platform_attach_adapter(uint32 bus_type,
int dhd_enable_adps(dhd_pub_t *dhd, uint8 on);
#endif // endif
+#ifdef DHDTCPSYNC_FLOOD_BLK
+extern void dhd_reset_tcpsync_info_by_ifp(dhd_if_t *ifp);
+extern void dhd_reset_tcpsync_info_by_dev(struct net_device *dev);
+#endif /* DHDTCPSYNC_FLOOD_BLK */
int compat_kernel_read(struct file *file, loff_t offset, char *addr, unsigned long count);
+int compat_vfs_write(struct file *file, char *addr, int count, loff_t *offset);
#endif /* __DHD_LINUX_H__ */
--- /dev/null
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
+ * Basically selected code segments from usb-cdc.c and usb-rndis.c
+ *
+ * Copyright (C) 1999-2019, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_linux_exportfs.c 808905 2019-03-11 10:32:39Z $
+ */
+#include <linux/kobject.h>
+#include <linux/proc_fs.h>
+#include <linux/sysfs.h>
+#include <osl.h>
+#include <dhd_dbg.h>
+#include <dhd_linux_priv.h>
+#ifdef DHD_ADPS_BAM_EXPORT
+#include <wl_bam.h>
+#endif // endif
+
+#ifdef SHOW_LOGTRACE
+extern dhd_pub_t* g_dhd_pub;
+static int dhd_ring_proc_open(struct inode *inode, struct file *file);
+ssize_t dhd_ring_proc_read(struct file *file, char *buffer, size_t tt, loff_t *loff);
+
+/* file_operations shared by all debug-ring procfs entries */
+static const struct file_operations dhd_ring_proc_fops = {
+	.open = dhd_ring_proc_open,
+	.read = dhd_ring_proc_read,
+	.release = single_release,
+};
+
+/*
+ * proc open handler: binds the ring pointer stashed by proc_create_data()
+ * to the seq_file private data so dhd_ring_proc_read() can retrieve it.
+ * Returns BCME_ERROR on a NULL inode, otherwise single_open()'s result.
+ */
+static int
+dhd_ring_proc_open(struct inode *inode, struct file *file)
+{
+	int ret = BCME_ERROR;
+	if (inode) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
+		ret = single_open(file, 0, PDE_DATA(inode));
+#else
+		/* This feature is not supported for lower kernel versions */
+		ret = single_open(file, 0, NULL);
+#endif // endif
+	} else {
+		DHD_ERROR(("%s: inode is NULL\n", __FUNCTION__));
+	}
+	return ret;
+}
+
+/*
+ * proc read handler: copy up to 'tt' bytes from the bound debug ring to
+ * the user buffer via an intermediate trace_buf_info_t.
+ * Returns bytes produced, BUF_NOT_AVAILABLE when the ring is drained,
+ * -EFAULT on copy_to_user failure, or BCME_ERROR on other failures.
+ */
+ssize_t
+dhd_ring_proc_read(struct file *file, char __user *buffer, size_t tt, loff_t *loff)
+{
+	trace_buf_info_t *trace_buf_info;
+	int ret = BCME_ERROR;
+	dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)((struct seq_file *)(file->private_data))->private;
+
+	if (ring == NULL) {
+		DHD_ERROR(("%s: ring is NULL\n", __FUNCTION__));
+		return ret;
+	}
+
+	ASSERT(g_dhd_pub);
+
+	/* scratch buffer for one ring-read; freed below in all paths */
+	trace_buf_info = (trace_buf_info_t *)MALLOCZ(g_dhd_pub->osh, sizeof(trace_buf_info_t));
+	if (trace_buf_info) {
+		dhd_dbg_read_ring_into_trace_buf(ring, trace_buf_info);
+		if (copy_to_user(buffer, (void*)trace_buf_info->buf, MIN(trace_buf_info->size, tt)))
+		{
+			ret = -EFAULT;
+			goto exit;
+		}
+		if (trace_buf_info->availability == BUF_NOT_AVAILABLE)
+			ret = BUF_NOT_AVAILABLE;
+		else
+			ret = trace_buf_info->size;
+	} else
+		DHD_ERROR(("Memory allocation Failed\n"));
+
+exit:
+	if (trace_buf_info) {
+		MFREE(g_dhd_pub->osh, trace_buf_info, sizeof(trace_buf_info_t));
+	}
+	return ret;
+}
+
+/*
+ * Create the /proc entries that expose the debug rings (verbose trace,
+ * ecounters, RTT) through dhd_ring_proc_fops. Each entry gets its ring
+ * pointer attached via proc_create_data(). Failures are only logged.
+ */
+void
+dhd_dbg_ring_proc_create(dhd_pub_t *dhdp)
+{
+#ifdef DEBUGABILITY
+	dhd_dbg_ring_t *dbg_verbose_ring = NULL;
+
+	dbg_verbose_ring = dhd_dbg_get_ring_from_ring_id(dhdp, FW_VERBOSE_RING_ID);
+	if (dbg_verbose_ring) {
+		if (!proc_create_data("dhd_trace", S_IRUSR, NULL, &dhd_ring_proc_fops,
+			dbg_verbose_ring)) {
+			DHD_ERROR(("Failed to create /proc/dhd_trace procfs interface\n"));
+		} else {
+			DHD_ERROR(("Created /proc/dhd_trace procfs interface\n"));
+		}
+	} else {
+		DHD_ERROR(("dbg_verbose_ring is NULL, /proc/dhd_trace not created\n"));
+	}
+#endif /* DEBUGABILITY */
+
+#ifdef EWP_ECNTRS_LOGGING
+	if (!proc_create_data("dhd_ecounters", S_IRUSR, NULL, &dhd_ring_proc_fops,
+		dhdp->ecntr_dbg_ring)) {
+		DHD_ERROR(("Failed to create /proc/dhd_ecounters procfs interface\n"));
+	} else {
+		DHD_ERROR(("Created /proc/dhd_ecounters procfs interface\n"));
+	}
+#endif /* EWP_ECNTRS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
+	if (!proc_create_data("dhd_rtt", S_IRUSR, NULL, &dhd_ring_proc_fops,
+		dhdp->rtt_dbg_ring)) {
+		DHD_ERROR(("Failed to create /proc/dhd_rtt procfs interface\n"));
+	} else {
+		DHD_ERROR(("Created /proc/dhd_rtt procfs interface\n"));
+	}
+#endif /* EWP_RTT_LOGGING */
+}
+
+/* Remove the /proc entries created by dhd_dbg_ring_proc_create(). */
+void
+dhd_dbg_ring_proc_destroy(dhd_pub_t *dhdp)
+{
+#ifdef DEBUGABILITY
+	remove_proc_entry("dhd_trace", NULL);
+#endif /* DEBUGABILITY */
+
+#ifdef EWP_ECNTRS_LOGGING
+	remove_proc_entry("dhd_ecounters", NULL);
+#endif /* EWP_ECNTRS_LOGGING */
+
+#ifdef EWP_RTT_LOGGING
+	remove_proc_entry("dhd_rtt", NULL);
+#endif /* EWP_RTT_LOGGING */
+
+}
+#endif /* SHOW_LOGTRACE */
+
+/* ----------------------------------------------------------------------------
+ * Infrastructure code for sysfs interface support for DHD
+ *
+ * What is sysfs interface?
+ * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt
+ *
+ * Why sysfs interface?
+ * This is the Linux standard way of changing/configuring Run Time parameters
+ * for a driver. We can use this interface to control "linux" specific driver
+ * parameters.
+ *
+ * -----------------------------------------------------------------------------
+ */
+
+#if defined(DHD_TRACE_WAKE_LOCK)
+extern atomic_t trace_wklock_onoff;
+
+/* Function to show the history buffer.
+ * Note: dumps the wake-lock statistics to the kernel log, not into 'buf';
+ * the sysfs read itself yields only a newline.
+ */
+static ssize_t
+show_wklock_trace(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+	dhd_info_t *dhd = (dhd_info_t *)dev;
+
+	buf[ret] = '\n';
+	buf[ret+1] = 0;
+
+	dhd_wk_lock_stats_dump(&dhd->pub);
+	return ret+1;
+}
+
+/* Function to enable/disable wakelock trace.
+ * Accepts "0" or "1"; anything else is rejected with -EINVAL.
+ * NOTE(review): returns onoff+1 (1 or 2) rather than 'count'; sysfs
+ * accepts any positive return, but confirm this is intentional.
+ */
+static ssize_t
+wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+	unsigned long onoff;
+	dhd_info_t *dhd = (dhd_info_t *)dev;
+	BCM_REFERENCE(dhd);
+
+	onoff = bcm_strtoul(buf, NULL, 10);
+	if (onoff != 0 && onoff != 1) {
+		return -EINVAL;
+	}
+
+	atomic_set(&trace_wklock_onoff, onoff);
+	if (atomic_read(&trace_wklock_onoff)) {
+		printk("ENABLE WAKLOCK TRACE\n");
+	} else {
+		printk("DISABLE WAKELOCK TRACE\n");
+	}
+
+	return (ssize_t)(onoff+1);
+}
+#endif /* DHD_TRACE_WAKE_LOCK */
+
+#if defined(DHD_LB_TXP)
+/* sysfs show: current TX-path load-balancing on/off state. */
+static ssize_t
+show_lbtxp(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+	unsigned long onoff;
+	dhd_info_t *dhd = (dhd_info_t *)dev;
+
+	onoff = atomic_read(&dhd->lb_txp_active);
+	ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
+		onoff);
+	return ret;
+}
+
+/* sysfs store: enable/disable TX-path load balancing ("0" or "1") and
+ * reset the per-CPU TX counters since the scheme changed.
+ */
+static ssize_t
+lbtxp_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+	unsigned long onoff;
+	dhd_info_t *dhd = (dhd_info_t *)dev;
+	int i;
+
+	onoff = bcm_strtoul(buf, NULL, 10);
+
+	/* NOTE(review): value parsed twice (bcm_strtoul then sscanf);
+	 * redundant but harmless - the sscanf result wins when it matches. */
+	sscanf(buf, "%lu", &onoff);
+	if (onoff != 0 && onoff != 1) {
+		return -EINVAL;
+	}
+	atomic_set(&dhd->lb_txp_active, onoff);
+
+	/* Since the scheme is changed clear the counters */
+	for (i = 0; i < NR_CPUS; i++) {
+		DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);
+		DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);
+	}
+
+	return count;
+}
+
+#endif /* DHD_LB_TXP */
+
+#if defined(DHD_LB_RXP)
+/* sysfs show: current RX-path load-balancing on/off state. */
+static ssize_t
+show_lbrxp(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+	unsigned long onoff;
+	dhd_info_t *dhd = (dhd_info_t *)dev;
+
+	onoff = atomic_read(&dhd->lb_rxp_active);
+	ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
+		onoff);
+	return ret;
+}
+
+/* sysfs store: enable/disable RX-path load balancing ("0" or "1") and
+ * reset the per-CPU NAPI counters/histograms since the scheme changed.
+ */
+static ssize_t
+lbrxp_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+	unsigned long onoff;
+	dhd_info_t *dhd = (dhd_info_t *)dev;
+	int i, j;
+
+	onoff = bcm_strtoul(buf, NULL, 10);
+
+	/* NOTE(review): value parsed twice (bcm_strtoul then sscanf);
+	 * redundant but harmless. */
+	sscanf(buf, "%lu", &onoff);
+	if (onoff != 0 && onoff != 1) {
+		return -EINVAL;
+	}
+	atomic_set(&dhd->lb_rxp_active, onoff);
+
+	/* Since the scheme is changed clear the counters */
+	for (i = 0; i < NR_CPUS; i++) {
+		DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
+		for (j = 0; j < HIST_BIN_SIZE; j++) {
+			DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]);
+		}
+	}
+
+	return count;
+}
+#endif /* DHD_LB_RXP */
+
+#ifdef DHD_LOG_DUMP
+extern int logdump_periodic_flush;
+extern int logdump_ecntr_enable;
+/* sysfs show: whether periodic flushing of the log dump is enabled. */
+static ssize_t
+show_logdump_periodic_flush(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+	unsigned long val;
+
+	val = logdump_periodic_flush;
+	ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", val);
+	return ret;
+}
+
+/* sysfs store: enable/disable periodic log-dump flushing ("0" or "1"). */
+static ssize_t
+logdump_periodic_flush_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+	unsigned long val;
+
+	val = bcm_strtoul(buf, NULL, 10);
+
+	/* NOTE(review): value parsed twice (bcm_strtoul then sscanf);
+	 * redundant but harmless. */
+	sscanf(buf, "%lu", &val);
+	if (val != 0 && val != 1) {
+		return -EINVAL;
+	}
+	logdump_periodic_flush = val;
+	return count;
+}
+
+/* sysfs show: whether ecounter logging into the log dump is enabled. */
+static ssize_t
+show_logdump_ecntr(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+	unsigned long val;
+
+	val = logdump_ecntr_enable;
+	ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", val);
+	return ret;
+}
+
+/* sysfs store: enable/disable ecounter logging ("0" or "1"). */
+static ssize_t
+logdump_ecntr_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+	unsigned long val;
+
+	val = bcm_strtoul(buf, NULL, 10);
+
+	sscanf(buf, "%lu", &val);
+	if (val != 0 && val != 1) {
+		return -EINVAL;
+	}
+	logdump_ecntr_enable = val;
+	return count;
+}
+
+#endif /* DHD_LOG_DUMP */
+
+extern uint enable_ecounter;
+/* sysfs show: whether firmware ecounters are enabled. */
+static ssize_t
+show_enable_ecounter(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+	unsigned long onoff;
+
+	onoff = enable_ecounter;
+	ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
+		onoff);
+	return ret;
+}
+
+/* sysfs store: enable/disable firmware ecounters ("0" or "1").
+ * No-op (still consumes 'count') when dhd is NULL, the firmware lacks
+ * ecounter support, or the requested state is already active.
+ */
+static ssize_t
+ecounter_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+	unsigned long onoff;
+	dhd_info_t *dhd = (dhd_info_t *)dev;
+	dhd_pub_t *dhdp;
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+		return count;
+	}
+	dhdp = &dhd->pub;
+	if (!FW_SUPPORTED(dhdp, ecounters)) {
+		DHD_ERROR(("%s: ecounters not supported by FW\n", __FUNCTION__));
+		return count;
+	}
+
+	onoff = bcm_strtoul(buf, NULL, 10);
+
+	sscanf(buf, "%lu", &onoff);
+	if (onoff != 0 && onoff != 1) {
+		return -EINVAL;
+	}
+
+	if (enable_ecounter == onoff) {
+		DHD_ERROR(("%s: ecounters already %d\n", __FUNCTION__, enable_ecounter));
+		return count;
+	}
+
+	enable_ecounter = onoff;
+	dhd_ecounter_configure(dhdp, enable_ecounter);
+
+	return count;
+}
+
+/*
+ * Generic Attribute Structure for DHD.
+ * If we have to add a new sysfs entry under /sys/bcm-dhd/, we have
+ * to instantiate an object of type dhd_attr, populate it with
+ * the required show/store functions (ex:- dhd_attr_cpumask_primary)
+ * and add the object to default_attrs[] array, that gets registered
+ * to the kobject of dhd (named bcm-dhd).
+ */
+
+struct dhd_attr {
+ struct attribute attr;
+ ssize_t(*show)(struct dhd_info *, char *);
+ ssize_t(*store)(struct dhd_info *, const char *, size_t count);
+};
+
+#if defined(DHD_TRACE_WAKE_LOCK)
+static struct dhd_attr dhd_attr_wklock =
+	__ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff);
+#endif /* defined(DHD_TRACE_WAKE_LOCK) */
+
+#if defined(DHD_LB_TXP)
+static struct dhd_attr dhd_attr_lbtxp =
+	__ATTR(lbtxp, 0660, show_lbtxp, lbtxp_onoff);
+#endif /* DHD_LB_TXP */
+
+#if defined(DHD_LB_RXP)
+static struct dhd_attr dhd_attr_lbrxp =
+	__ATTR(lbrxp, 0660, show_lbrxp, lbrxp_onoff);
+#endif /* DHD_LB_RXP */
+
+#ifdef DHD_LOG_DUMP
+static struct dhd_attr dhd_attr_logdump_periodic_flush =
+	__ATTR(logdump_periodic_flush, 0660, show_logdump_periodic_flush,
+		logdump_periodic_flush_onoff);
+static struct dhd_attr dhd_attr_logdump_ecntr =
+	__ATTR(logdump_ecntr_enable, 0660, show_logdump_ecntr,
+		logdump_ecntr_onoff);
+#endif /* DHD_LOG_DUMP */
+
+static struct dhd_attr dhd_attr_ecounters =
+	__ATTR(ecounters, 0660, show_enable_ecounter, ecounter_onoff);
+
+/* Attribute object that gets registered with "bcm-dhd" kobject tree */
+static struct attribute *default_attrs[] = {
+#if defined(DHD_TRACE_WAKE_LOCK)
+	&dhd_attr_wklock.attr,
+#endif // endif
+#if defined(DHD_LB_TXP)
+	&dhd_attr_lbtxp.attr,
+#endif /* DHD_LB_TXP */
+#if defined(DHD_LB_RXP)
+	&dhd_attr_lbrxp.attr,
+#endif /* DHD_LB_RXP */
+#ifdef DHD_LOG_DUMP
+	&dhd_attr_logdump_periodic_flush.attr,
+	&dhd_attr_logdump_ecntr.attr,
+#endif // endif
+	&dhd_attr_ecounters.attr,
+	NULL
+};
+
+#define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj)
+#define to_attr(a) container_of(a, struct dhd_attr, attr)
+
+/*
+ * bcm-dhd kobject show function, the "attr" attribute specifices to which
+ * node under "bcm-dhd" the show function is called.
+ */
+/*
+ * bcm-dhd kobject show function, the "attr" attribute specifies to which
+ * node under "bcm-dhd" the show function is called.
+ */
+static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+	dhd_info_t *dhd = to_dhd(kobj);
+	struct dhd_attr *d_attr = to_attr(attr);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+	int ret;
+
+	if (d_attr->show)
+		ret = d_attr->show(dhd, buf);
+	else
+		ret = -EIO;
+
+	return ret;
+}
+
+/*
+ * bcm-dhd kobject show function, the "attr" attribute specifices to which
+ * node under "bcm-dhd" the store function is called.
+ */
+/*
+ * bcm-dhd kobject store function, the "attr" attribute specifies to which
+ * node under "bcm-dhd" the store function is called.
+ */
+static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr,
+	const char *buf, size_t count)
+{
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+	dhd_info_t *dhd = to_dhd(kobj);
+	struct dhd_attr *d_attr = to_attr(attr);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+	int ret;
+
+	if (d_attr->store)
+		ret = d_attr->store(dhd, buf, count);
+	else
+		ret = -EIO;
+
+	return ret;
+
+}
+
+static struct sysfs_ops dhd_sysfs_ops = {
+ .show = dhd_show,
+ .store = dhd_store,
+};
+
+static struct kobj_type dhd_ktype = {
+ .sysfs_ops = &dhd_sysfs_ops,
+ .default_attrs = default_attrs,
+};
+
+#ifdef DHD_MAC_ADDR_EXPORT
+/* MAC address exposed through sysfs; consumed elsewhere by the driver. */
+struct ether_addr sysfs_mac_addr;
+/* sysfs show: print sysfs_mac_addr in colon-separated hex form. */
+static ssize_t
+show_mac_addr(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+
+	ret = scnprintf(buf, PAGE_SIZE - 1, MACF,
+		(uint32)sysfs_mac_addr.octet[0], (uint32)sysfs_mac_addr.octet[1],
+		(uint32)sysfs_mac_addr.octet[2], (uint32)sysfs_mac_addr.octet[3],
+		(uint32)sysfs_mac_addr.octet[4], (uint32)sysfs_mac_addr.octet[5]);
+
+	return ret;
+}
+
+/* sysfs store: parse "xx:xx:xx:xx:xx:xx" into sysfs_mac_addr;
+ * -EINVAL when the string does not parse as a MAC address.
+ */
+static ssize_t
+set_mac_addr(struct dhd_info *dev, const char *buf, size_t count)
+{
+	if (!bcm_ether_atoe(buf, &sysfs_mac_addr)) {
+		DHD_ERROR(("Invalid Mac Address \n"));
+		return -EINVAL;
+	}
+
+	DHD_ERROR(("Mac Address set with "MACDBG"\n", MAC2STRDBG(&sysfs_mac_addr)));
+
+	return count;
+}
+
+static struct dhd_attr dhd_attr_cntl_macaddr =
+	__ATTR(mac_addr, 0660, show_mac_addr, set_mac_addr);
+#endif /* DHD_MAC_ADDR_EXPORT */
+
+#ifdef DHD_FW_COREDUMP
+
+#define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
+
+/*
+ * Read the memdump mode from MEMDUMPINFO (first bytes, decimal ASCII).
+ * Returns the parsed value, or DUMP_MEMFILE_MAX when the file is missing
+ * or unreadable. On x86 Brix images, falls back to alternate file paths.
+ */
+uint32
+get_mem_val_from_file(void)
+{
+	struct file *fp = NULL;
+	uint32 mem_val = DUMP_MEMFILE_MAX;
+	char *p_mem_val = NULL;
+	char *filepath = MEMDUMPINFO;
+	int ret = 0;
+
+	/* Read memdump info from the file */
+	fp = filp_open(filepath, O_RDONLY, 0);
+	if (IS_ERR(fp)) {
+		DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+#if defined(CONFIG_X86)
+		/* Check if it is Live Brix Image */
+		if (strcmp(filepath, MEMDUMPINFO_LIVE) != 0) {
+			goto done;
+		}
+		/* Try if it is Installed Brix Image */
+		filepath = MEMDUMPINFO_INST;
+		DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath));
+		fp = filp_open(filepath, O_RDONLY, 0);
+		if (IS_ERR(fp)) {
+			DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+			goto done;
+		}
+#else /* Non Brix Android platform */
+		goto done;
+#endif /* CONFIG_X86 && OEM_ANDROID */
+	}
+
+	/* Handle success case */
+	ret = compat_kernel_read(fp, 0, (char *)&mem_val, sizeof(uint32));
+	if (ret < 0) {
+		DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
+		filp_close(fp, NULL);
+		goto done;
+	}
+
+	/* reinterpret the raw bytes as a short decimal ASCII string */
+	p_mem_val = (char*)&mem_val;
+	p_mem_val[sizeof(uint32) - 1] = '\0';
+	mem_val = bcm_atoi(p_mem_val);
+
+	filp_close(fp, NULL);
+
+done:
+	return mem_val;
+}
+
+/* Initialize dhd->memdump_enabled, either from MEMDUMPINFO or (with
+ * DHD_EXPORT_CNTL_FILE) from the value already set via sysfs.
+ */
+void dhd_get_memdump_info(dhd_pub_t *dhd)
+{
+#ifndef DHD_EXPORT_CNTL_FILE
+	uint32 mem_val = DUMP_MEMFILE_MAX;
+
+	mem_val = get_mem_val_from_file();
+	if (mem_val != DUMP_MEMFILE_MAX)
+		dhd->memdump_enabled = mem_val;
+#ifdef DHD_INIT_DEFAULT_MEMDUMP
+	/* NOTE(review): this default is computed after memdump_enabled has
+	 * already been set and mem_val is not used again - it likely ought
+	 * to be applied to dhd->memdump_enabled as in the
+	 * DHD_EXPORT_CNTL_FILE branch below; confirm upstream intent.
+	 */
+	if (mem_val == 0 || mem_val == DUMP_MEMFILE_MAX)
+		mem_val = DUMP_MEMFILE_BUGON;
+#endif /* DHD_INIT_DEFAULT_MEMDUMP */
+#else
+#ifdef DHD_INIT_DEFAULT_MEMDUMP
+	if (dhd->memdump_enabled == 0 || dhd->memdump_enabled == DUMP_MEMFILE_MAX)
+		dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
+#endif /* DHD_INIT_DEFAULT_MEMDUMP */
+#endif /* !DHD_EXPORT_CNTL_FILE */
+	DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, dhd->memdump_enabled));
+}
+
+#ifdef DHD_EXPORT_CNTL_FILE
+/* sysfs show: current memdump mode (dhdp->memdump_enabled). */
+static ssize_t
+show_memdump_info(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+	dhd_pub_t *dhdp;
+
+	if (!dev) {
+		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+		return ret;
+	}
+
+	dhdp = &dev->pub;
+	ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", dhdp->memdump_enabled);
+	return ret;
+}
+
+/* sysfs store: parse a decimal memdump mode from 'buf' and write it to
+ * dhdp->memdump_enabled. Always consumes 'count'; a NULL dhd_info leaves
+ * the setting untouched.
+ */
+static ssize_t
+set_memdump_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+	unsigned long memval;
+	dhd_pub_t *dhdp;
+
+	if (!dev) {
+		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+		return count;
+	}
+	dhdp = &dev->pub;
+
+	memval = bcm_strtoul(buf, NULL, 10);
+	sscanf(buf, "%lu", &memval);
+
+	dhdp->memdump_enabled = (uint32)memval;
+
+	/* fix: '%iu' is not a valid conversion - it printed the value via
+	 * '%i' followed by a literal 'u'; '%u' matches the uint32 value and
+	 * the format used by show_memdump_info().
+	 */
+	DHD_ERROR(("%s: MEMDUMP ENABLED = %u\n", __FUNCTION__, dhdp->memdump_enabled));
+	return count;
+}
+
+static struct dhd_attr dhd_attr_cntl_memdump =
+ __ATTR(memdump, 0660, show_memdump_info, set_memdump_info);
+#endif /* DHD_EXPORT_CNTL_FILE */
+#endif /* DHD_FW_COREDUMP */
+
+#ifdef BCMASSERT_LOG
+#define ASSERTINFO "/data/misc/wifi/.assert.info"
+/*
+ * Read the assert behaviour from ASSERTINFO (first bytes, decimal ASCII).
+ * Returns the parsed value clamped to >= 0, or 0 when the file is missing
+ * or unreadable.
+ */
+int
+get_assert_val_from_file(void)
+{
+	struct file *fp = NULL;
+	char *filepath = ASSERTINFO;
+	char *p_mem_val = NULL;
+	int mem_val = -1;
+
+	/*
+	 * Read assert info from the file
+	 * 0: Trigger Kernel crash by panic()
+	 * 1: Print out the logs and don't trigger Kernel panic. (default)
+	 * 2: Trigger Kernel crash by BUG()
+	 * File doesn't exist: Keep default value (1).
+	 */
+	fp = filp_open(filepath, O_RDONLY, 0);
+	if (IS_ERR(fp)) {
+		DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+	} else {
+		int ret = compat_kernel_read(fp, 0, (char *)&mem_val, sizeof(uint32));
+		if (ret < 0) {
+			DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
+		} else {
+			/* reinterpret the raw bytes as a short decimal string */
+			p_mem_val = (char *)&mem_val;
+			p_mem_val[sizeof(uint32) - 1] = '\0';
+			mem_val = bcm_atoi(p_mem_val);
+			DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val));
+		}
+		filp_close(fp, NULL);
+	}
+
+	mem_val = (mem_val >= 0) ? mem_val : 0;
+	return mem_val;
+}
+
+/* Initialize g_assert_type from ASSERTINFO (no-op with DHD_EXPORT_CNTL_FILE,
+ * where the value is controlled via sysfs instead).
+ */
+void dhd_get_assert_info(dhd_pub_t *dhd)
+{
+#ifndef DHD_EXPORT_CNTL_FILE
+	int mem_val = -1;
+
+	mem_val = get_assert_val_from_file();
+
+	g_assert_type = mem_val;
+#endif /* !DHD_EXPORT_CNTL_FILE */
+}
+
+#ifdef DHD_EXPORT_CNTL_FILE
+/* sysfs show: current assert behaviour (g_assert_type). */
+static ssize_t
+show_assert_info(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+
+	if (!dev) {
+		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+		return ret;
+	}
+
+	ret = scnprintf(buf, PAGE_SIZE -1, "%d\n", g_assert_type);
+	return ret;
+
+}
+
+/* sysfs store: parse a decimal value from 'buf' into g_assert_type.
+ * Always consumes 'count'; the value is not range-checked here.
+ */
+static ssize_t
+set_assert_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+	unsigned long assert_val;
+
+	assert_val = bcm_strtoul(buf, NULL, 10);
+	sscanf(buf, "%lu", &assert_val);
+
+	g_assert_type = (uint32)assert_val;
+
+	DHD_ERROR(("%s: ASSERT ENABLED = %lu\n", __FUNCTION__, assert_val));
+	return count;
+
+}
+
+static struct dhd_attr dhd_attr_cntl_assert =
+ __ATTR(assert, 0660, show_assert_info, set_assert_info);
+#endif /* DHD_EXPORT_CNTL_FILE */
+#endif /* BCMASSERT_LOG */
+
+#ifdef DHD_EXPORT_CNTL_FILE
+#if defined(WRITE_WLANINFO)
+/* sysfs show: the stored version_info string. */
+static ssize_t
+show_wifiver_info(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+
+	ret = scnprintf(buf, PAGE_SIZE -1, "%s", version_info);
+	return ret;
+}
+
+/* sysfs store: version info is read-only; writes are rejected. */
+static ssize_t
+set_wifiver_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+	DHD_ERROR(("Do not set version info\n"));
+	return -EINVAL;
+}
+
+static struct dhd_attr dhd_attr_cntl_wifiver =
+ __ATTR(wifiver, 0660, show_wifiver_info, set_wifiver_info);
+#endif /* WRITE_WLANINFO */
+
+#if defined(USE_CID_CHECK)
+/* Vendor/CID information string exported via the "cid" sysfs node. */
+char cidinfostr[MAX_VNAME_LEN];
+
+/* sysfs "cid" show handler: returns the stored CID string. */
+static ssize_t
+show_cid_info(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+
+	ret = scnprintf(buf, PAGE_SIZE -1, "%s", cidinfostr);
+	return ret;
+}
+
+/* sysfs "cid" store handler: copies the user string, truncated to MAX_VNAME_LEN. */
+static ssize_t
+set_cid_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+	int len = strlen(buf) + 1;
+	int maxstrsz;
+	maxstrsz = MAX_VNAME_LEN;
+
+	/* the scnprintf size argument caps the copy so cidinfostr cannot overflow */
+	scnprintf(cidinfostr, ((len > maxstrsz) ? maxstrsz : len), "%s", buf);
+	DHD_INFO(("%s : CID info string\n", cidinfostr));
+	return count;
+}
+
+/* /sys/wifi/cid control node. */
+static struct dhd_attr dhd_attr_cntl_cidinfo =
+	__ATTR(cid, 0660, show_cid_info, set_cid_info);
+#endif /* USE_CID_CHECK */
+
+#if defined(GEN_SOFTAP_INFO_FILE)
+/* SoftAP information string exported via the "softap" sysfs node. */
+char softapinfostr[SOFTAP_INFO_BUF_SZ];
+
+/* sysfs "softap" show handler: returns the generated SoftAP info string. */
+static ssize_t
+show_softap_info(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+
+	ret = scnprintf(buf, PAGE_SIZE -1, "%s", softapinfostr);
+	return ret;
+}
+
+/* sysfs "softap" store handler: the node is read-only, writes are rejected. */
+static ssize_t
+set_softap_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+	/* Fix: log message misspelled "softap" as "sofap" */
+	DHD_ERROR(("Do not set softap related info\n"));
+	return -EINVAL;
+}
+
+/* /sys/wifi/softap control node. */
+static struct dhd_attr dhd_attr_cntl_softapinfo =
+	__ATTR(softap, 0660, show_softap_info, set_softap_info);
+#endif /* GEN_SOFTAP_INFO_FILE */
+
+#if defined(MIMO_ANT_SETTING)
+/* Antenna selection value exported via the "ant" sysfs node (0 = not set). */
+unsigned long antsel;
+
+/* sysfs "ant" show handler: prints the current antenna selection. */
+static ssize_t
+show_ant_info(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+
+	ret = scnprintf(buf, PAGE_SIZE -1, "%lu\n", antsel);
+	return ret;
+}
+
+/* sysfs "ant" store handler: accepts 0..3, where 0 means "not set". */
+static ssize_t
+set_ant_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+	unsigned long ant_val;
+
+	/* bcm_strtoul result is overwritten by sscanf; both parse the same decimal */
+	ant_val = bcm_strtoul(buf, NULL, 10);
+	sscanf(buf, "%lu", &ant_val);
+
+	/*
+	 * Check value
+	 * 0 - Not set, handle same as file not exist
+	 */
+	if (ant_val > 3) {
+		DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %lu \n",
+			__FUNCTION__, ant_val));
+		return -EINVAL;
+	}
+
+	antsel = ant_val;
+	DHD_ERROR(("[WIFI_SEC] %s: Set Antinfo val = %lu \n", __FUNCTION__, antsel));
+	return count;
+}
+
+/* /sys/wifi/ant control node. */
+static struct dhd_attr dhd_attr_cntl_antinfo =
+	__ATTR(ant, 0660, show_ant_info, set_ant_info);
+#endif /* MIMO_ANT_SETTING */
+
+#ifdef DHD_PM_CONTROL_FROM_FILE
+extern bool g_pm_control;
+extern uint32 pmmode_val;
+/* sysfs "pm" show handler: prints the forced PM mode, if control is enabled. */
+static ssize_t
+show_pm_info(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+
+	if (!g_pm_control) {
+		ret = scnprintf(buf, PAGE_SIZE -1, "PM mode is not set\n");
+	} else {
+		ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", pmmode_val);
+	}
+	return ret;
+}
+
+/* sysfs "pm" store handler: accepts 0..2. Writing 0 enables PM override
+ * control (g_pm_control = TRUE); any other valid value disables it.
+ */
+static ssize_t
+set_pm_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+	unsigned long pm_val;
+
+	/* bcm_strtoul result is overwritten by sscanf; both parse the same decimal */
+	pm_val = bcm_strtoul(buf, NULL, 10);
+	sscanf(buf, "%lu", &pm_val);
+
+	if (pm_val > 2) {
+		DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %lu \n",
+			__FUNCTION__, pm_val));
+		return -EINVAL;
+	}
+
+	if (!pm_val) {
+		g_pm_control = TRUE;
+	} else {
+		g_pm_control = FALSE;
+	}
+
+	pmmode_val = (uint32)pm_val;
+	DHD_ERROR(("[WIFI_SEC] %s: Set pminfo val = %u\n", __FUNCTION__, pmmode_val));
+	return count;
+}
+
+/* /sys/wifi/pm control node. */
+static struct dhd_attr dhd_attr_cntl_pminfo =
+	__ATTR(pm, 0660, show_pm_info, set_pm_info);
+#endif /* DHD_PM_CONTROL_FROM_FILE */
+
+#ifdef LOGTRACE_FROM_FILE
+/* Event LOGTRACE on/off knob exported via the "logtrace" sysfs node (default 1). */
+unsigned long logtrace_val = 1;
+
+/* sysfs "logtrace" show handler. */
+static ssize_t
+show_logtrace_info(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+
+	ret = scnprintf(buf, PAGE_SIZE -1, "%lu\n", logtrace_val);
+	return ret;
+}
+
+/* sysfs "logtrace" store handler: accepts 0..2. */
+static ssize_t
+set_logtrace_info(struct dhd_info *dev, const char *buf, size_t count)
+{
+	unsigned long onoff;
+
+	/* bcm_strtoul result is overwritten by sscanf; both parse the same decimal */
+	onoff = bcm_strtoul(buf, NULL, 10);
+	sscanf(buf, "%lu", &onoff);
+
+	if (onoff > 2) {
+		DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %lu \n",
+			__FUNCTION__, onoff));
+		return -EINVAL;
+	}
+
+	logtrace_val = onoff;
+	DHD_ERROR(("[WIFI_SEC] %s: LOGTRACE On/Off from sysfs = %lu\n",
+		__FUNCTION__, logtrace_val));
+	return count;
+}
+
+/* /sys/wifi/logtrace control node. */
+static struct dhd_attr dhd_attr_cntl_logtraceinfo =
+	__ATTR(logtrace, 0660, show_logtrace_info, set_logtrace_info);
+#endif /* LOGTRACE_FROM_FILE */
+
+#ifdef USE_WFA_CERT_CONF
+#ifdef BCMSDIO
+/* WFA cert SDIO tx-glomming knob exported via the "bustxglom" sysfs node. */
+uint32 bus_txglom = VALUENOTSET;
+
+/* sysfs "bustxglom" show handler: prints the configured value, if any. */
+static ssize_t
+show_bustxglom(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+
+	if (bus_txglom == VALUENOTSET) {
+		ret = scnprintf(buf, PAGE_SIZE - 1, "%s\n", "bustxglom not set from sysfs");
+	} else {
+		ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", bus_txglom);
+	}
+	return ret;
+}
+
+/* sysfs "bustxglom" store handler: accepts 0..2 and records the value. */
+static ssize_t
+set_bustxglom(struct dhd_info *dev, const char *buf, size_t count)
+{
+	uint32 onoff;
+
+	/* bcm_atoi result is overwritten by sscanf; both parse the same decimal */
+	onoff = (uint32)bcm_atoi(buf);
+	sscanf(buf, "%u", &onoff);
+
+	if (onoff > 2) {
+		DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n",
+			__FUNCTION__, onoff));
+		return -EINVAL;
+	}
+
+	bus_txglom = onoff;
+	DHD_ERROR(("[WIFI_SEC] %s: BUS TXGLOM On/Off from sysfs = %u\n",
+		__FUNCTION__, bus_txglom));
+	return count;
+}
+
+/* /sys/wifi/bustxglom control node. */
+static struct dhd_attr dhd_attr_cntl_bustxglom =
+	__ATTR(bustxglom, 0660, show_bustxglom, set_bustxglom);
+#endif /* BCMSDIO */
+
+#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
+/* WFA cert roam disable knob exported via the "roamoff" sysfs node. */
+uint32 roam_off = VALUENOTSET;
+
+/* sysfs "roamoff" show handler: prints the configured value, if any. */
+static ssize_t
+show_roamoff(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+
+	if (roam_off == VALUENOTSET) {
+		ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "roam_off not set from sysfs");
+	} else {
+		ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", roam_off);
+	}
+	return ret;
+}
+
+/* sysfs "roamoff" store handler: accepts 0..2 and records the value. */
+static ssize_t
+set_roamoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+	uint32 onoff;
+
+	/* bcm_atoi result is overwritten by sscanf; both parse the same decimal */
+	onoff = bcm_atoi(buf);
+	sscanf(buf, "%u", &onoff);
+
+	if (onoff > 2) {
+		DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n",
+			__FUNCTION__, onoff));
+		return -EINVAL;
+	}
+
+	roam_off = onoff;
+	DHD_ERROR(("[WIFI_SEC] %s: ROAM On/Off from sysfs = %u\n",
+		__FUNCTION__, roam_off));
+	return count;
+}
+
+/* /sys/wifi/roamoff control node. */
+static struct dhd_attr dhd_attr_cntl_roamoff =
+	__ATTR(roamoff, 0660, show_roamoff, set_roamoff);
+#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
+
+#ifdef USE_WL_FRAMEBURST
+/* WFA cert frameburst knob exported via the "frameburst" sysfs node. */
+uint32 frameburst = VALUENOTSET;
+
+/* sysfs "frameburst" show handler: prints the configured value, if any. */
+static ssize_t
+show_frameburst(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+
+	if (frameburst == VALUENOTSET) {
+		ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "frameburst not set from sysfs");
+	} else {
+		ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", frameburst);
+	}
+	return ret;
+}
+
+/* sysfs "frameburst" store handler: accepts 0..2 and records the value. */
+static ssize_t
+set_frameburst(struct dhd_info *dev, const char *buf, size_t count)
+{
+	uint32 onoff;
+
+	/* bcm_atoi result is overwritten by sscanf; both parse the same decimal */
+	onoff = bcm_atoi(buf);
+	sscanf(buf, "%u", &onoff);
+
+	if (onoff > 2) {
+		DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n",
+			__FUNCTION__, onoff));
+		return -EINVAL;
+	}
+
+	frameburst = onoff;
+	DHD_ERROR(("[WIFI_SEC] %s: FRAMEBURST On/Off from sysfs = %u\n",
+		__FUNCTION__, frameburst));
+	return count;
+}
+
+/* /sys/wifi/frameburst control node. */
+static struct dhd_attr dhd_attr_cntl_frameburst =
+	__ATTR(frameburst, 0660, show_frameburst, set_frameburst);
+#endif /* USE_WL_FRAMEBURST */
+
+#ifdef USE_WL_TXBF
+/* WFA cert TX beamforming knob exported via the "txbf" sysfs node. */
+uint32 txbf = VALUENOTSET;
+
+/* sysfs "txbf" show handler: prints the configured value, if any. */
+static ssize_t
+show_txbf(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+
+	if (txbf == VALUENOTSET) {
+		ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "txbf not set from sysfs");
+	} else {
+		ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", txbf);
+	}
+	return ret;
+}
+
+/* sysfs "txbf" store handler: accepts 0..2 and records the value. */
+static ssize_t
+set_txbf(struct dhd_info *dev, const char *buf, size_t count)
+{
+	uint32 onoff;
+
+	/* bcm_atoi result is overwritten by sscanf; both parse the same decimal */
+	onoff = bcm_atoi(buf);
+	sscanf(buf, "%u", &onoff);
+
+	if (onoff > 2) {
+		DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n",
+			__FUNCTION__, onoff));
+		return -EINVAL;
+	}
+
+	txbf = onoff;
+	/* Fix: log message said "FRAMEBURST" (copy/paste from set_frameburst) */
+	DHD_ERROR(("[WIFI_SEC] %s: TXBF On/Off from sysfs = %u\n",
+		__FUNCTION__, txbf));
+	return count;
+}
+
+/* /sys/wifi/txbf control node. */
+static struct dhd_attr dhd_attr_cntl_txbf =
+	__ATTR(txbf, 0660, show_txbf, set_txbf);
+#endif /* USE_WL_TXBF */
+
+#ifdef PROP_TXSTATUS
+/* WFA cert proptxstatus knob exported via the "proptx" sysfs node. */
+uint32 proptx = VALUENOTSET;
+
+/* sysfs "proptx" show handler: prints the configured value, if any. */
+static ssize_t
+show_proptx(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+
+	if (proptx == VALUENOTSET) {
+		ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "proptx not set from sysfs");
+	} else {
+		ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", proptx);
+	}
+	return ret;
+}
+
+/* sysfs "proptx" store handler: accepts 0..2 and records the value. */
+static ssize_t
+set_proptx(struct dhd_info *dev, const char *buf, size_t count)
+{
+	uint32 onoff;
+
+	/* Parse as in the sibling WFA knobs (bcm_atoi result refined by sscanf);
+	 * previously this used bcm_strtoul, inconsistent with set_roamoff et al.
+	 */
+	onoff = (uint32)bcm_atoi(buf);
+	sscanf(buf, "%u", &onoff);
+
+	if (onoff > 2) {
+		DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n",
+			__FUNCTION__, onoff));
+		return -EINVAL;
+	}
+
+	proptx = onoff;
+	/* Fix: log referenced FRAMEBURST and printed the txbf variable (copy/paste
+	 * error); txbf is not even in scope unless USE_WL_TXBF is defined, which
+	 * broke the build for PROP_TXSTATUS-only configurations.
+	 */
+	DHD_ERROR(("[WIFI_SEC] %s: PROPTX On/Off from sysfs = %u\n",
+		__FUNCTION__, proptx));
+	return count;
+}
+
+/* /sys/wifi/proptx control node. */
+static struct dhd_attr dhd_attr_cntl_proptx =
+	__ATTR(proptx, 0660, show_proptx, set_proptx);
+
+#endif /* PROP_TXSTATUS */
+#endif /* USE_WFA_CERT_CONF */
+#endif /* DHD_EXPORT_CNTL_FILE */
+
+#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
+/* When non-zero, private-command errors are reported as HANG events (default on). */
+uint32 report_hang_privcmd_err = 1;
+
+/* sysfs "hang_privcmd_err" show handler. */
+static ssize_t
+show_hang_privcmd_err(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+
+	ret = scnprintf(buf, PAGE_SIZE - 1, "%u\n", report_hang_privcmd_err);
+	return ret;
+}
+
+/* sysfs "hang_privcmd_err" store handler: any non-zero input enables reporting. */
+static ssize_t
+set_hang_privcmd_err(struct dhd_info *dev, const char *buf, size_t count)
+{
+	uint32 val;
+
+	/* bcm_atoi result is overwritten by sscanf; both parse the same decimal */
+	val = bcm_atoi(buf);
+	sscanf(buf, "%u", &val);
+
+	report_hang_privcmd_err = val ? 1 : 0;
+	DHD_INFO(("%s: Set report HANG for private cmd error: %d\n",
+		__FUNCTION__, report_hang_privcmd_err));
+	return count;
+}
+
+/* /sys/wifi/hang_privcmd_err control node. */
+static struct dhd_attr dhd_attr_hang_privcmd_err =
+	__ATTR(hang_privcmd_err, 0660, show_hang_privcmd_err, set_hang_privcmd_err);
+#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
+
+#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
+/* HE (802.11ax) enable control, shared by the disable/custom-control paths. */
+uint8 control_he_enab = 1;
+#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
+
+#if defined(CUSTOM_CONTROL_HE_ENAB)
+/* sysfs "control_he_enab" show handler. */
+static ssize_t
+show_control_he_enab(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+
+	ret = scnprintf(buf, PAGE_SIZE - 1, "%d\n", control_he_enab);
+	return ret;
+}
+
+/* sysfs "control_he_enab" store handler: any non-zero input enables HE. */
+static ssize_t
+set_control_he_enab(struct dhd_info *dev, const char *buf, size_t count)
+{
+	uint32 val;
+
+	val = bcm_atoi(buf);
+
+	control_he_enab = val ? 1 : 0;
+	DHD_ERROR(("%s: Set control he enab: %d\n", __FUNCTION__, control_he_enab));
+	return count;
+}
+
+/* /sys/wifi/control_he_enab control node. */
+static struct dhd_attr dhd_attr_control_he_enab=
+__ATTR(control_he_enab, 0660, show_control_he_enab, set_control_he_enab);
+#endif /* CUSTOM_CONTROL_HE_ENAB */
+/* Attribute object that gets registered with "wifi" kobject tree */
+static struct attribute *control_file_attrs[] = {
+#ifdef DHD_MAC_ADDR_EXPORT
+	&dhd_attr_cntl_macaddr.attr,
+#endif /* DHD_MAC_ADDR_EXPORT */
+#ifdef DHD_EXPORT_CNTL_FILE
+#ifdef DHD_FW_COREDUMP
+	&dhd_attr_cntl_memdump.attr,
+#endif /* DHD_FW_COREDUMP */
+#ifdef BCMASSERT_LOG
+	&dhd_attr_cntl_assert.attr,
+#endif /* BCMASSERT_LOG */
+#ifdef WRITE_WLANINFO
+	&dhd_attr_cntl_wifiver.attr,
+#endif /* WRITE_WLANINFO */
+#ifdef USE_CID_CHECK
+	&dhd_attr_cntl_cidinfo.attr,
+#endif /* USE_CID_CHECK */
+#ifdef GEN_SOFTAP_INFO_FILE
+	&dhd_attr_cntl_softapinfo.attr,
+#endif /* GEN_SOFTAP_INFO_FILE */
+#ifdef MIMO_ANT_SETTING
+	&dhd_attr_cntl_antinfo.attr,
+#endif /* MIMO_ANT_SETTING */
+#ifdef DHD_PM_CONTROL_FROM_FILE
+	&dhd_attr_cntl_pminfo.attr,
+#endif /* DHD_PM_CONTROL_FROM_FILE */
+#ifdef LOGTRACE_FROM_FILE
+	&dhd_attr_cntl_logtraceinfo.attr,
+#endif /* LOGTRACE_FROM_FILE */
+#ifdef USE_WFA_CERT_CONF
+#ifdef BCMSDIO
+	&dhd_attr_cntl_bustxglom.attr,
+#endif /* BCMSDIO */
+/* Fix: dhd_attr_cntl_roamoff is only defined under this guard; referencing it
+ * unconditionally broke the build when neither flag was set.
+ */
+#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
+	&dhd_attr_cntl_roamoff.attr,
+#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
+#ifdef USE_WL_FRAMEBURST
+	&dhd_attr_cntl_frameburst.attr,
+#endif /* USE_WL_FRAMEBURST */
+#ifdef USE_WL_TXBF
+	&dhd_attr_cntl_txbf.attr,
+#endif /* USE_WL_TXBF */
+#ifdef PROP_TXSTATUS
+	&dhd_attr_cntl_proptx.attr,
+#endif /* PROP_TXSTATUS */
+#endif /* USE_WFA_CERT_CONF */
+#endif /* DHD_EXPORT_CNTL_FILE */
+#ifdef DHD_ADPS_BAM_EXPORT
+	&dhd_attr_adps_bam.attr,
+#endif /* DHD_ADPS_BAM_EXPORT */
+#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
+	&dhd_attr_hang_privcmd_err.attr,
+#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
+#if defined(CUSTOM_CONTROL_HE_ENAB)
+	&dhd_attr_control_he_enab.attr,
+#endif /* CUSTOM_CONTROL_HE_ENAB */
+	NULL
+};
+
+/* Map the "wifi" control kobject back to its owning dhd_info. */
+#define to_cntl_dhd(k) container_of(k, struct dhd_info, dhd_conf_file_kobj)
+
+/*
+ * wifi kobject show function, the "attr" attribute specifies to which
+ * node under "sys/wifi" the show function is called.
+ */
+static ssize_t dhd_cntl_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+	dhd_info_t *dhd = to_cntl_dhd(kobj);
+	struct dhd_attr *d_attr = to_attr(attr);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+	int ret;
+
+	/* Dispatch to the per-attribute show handler, if one was registered */
+	if (d_attr->show)
+		ret = d_attr->show(dhd, buf);
+	else
+		ret = -EIO;
+
+	return ret;
+}
+
+/*
+ * wifi kobject store function, the "attr" attribute specifies to which
+ * node under "sys/wifi" the store function is called.
+ * (original comment said "show function" — this is the store path)
+ */
+static ssize_t dhd_cntl_store(struct kobject *kobj, struct attribute *attr,
+	const char *buf, size_t count)
+{
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+	dhd_info_t *dhd = to_cntl_dhd(kobj);
+	struct dhd_attr *d_attr = to_attr(attr);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+	int ret;
+
+	/* Dispatch to the per-attribute store handler, if one was registered */
+	if (d_attr->store)
+		ret = d_attr->store(dhd, buf, count);
+	else
+		ret = -EIO;
+
+	return ret;
+
+}
+
+/* sysfs_ops glue routing show/store calls to the dhd_attr handlers above. */
+static struct sysfs_ops dhd_sysfs_cntl_ops = {
+	.show = dhd_cntl_show,
+	.store = dhd_cntl_store,
+};
+
+/* kobject type for the "wifi" control node.
+ * NOTE(review): .default_attrs was removed from struct kobj_type in newer
+ * kernels (replaced by .default_groups) — confirm against the target kernel.
+ */
+static struct kobj_type dhd_cntl_file_ktype = {
+	.sysfs_ops = &dhd_sysfs_cntl_ops,
+	.default_attrs = control_file_attrs,
+};
+
+/* Create a kobject and attach to sysfs interface */
+/* Registers the "bcm-dhd" and "wifi" kobjects and emits their ADD uevents.
+ * Returns 0 on success or the kobject_init_and_add() error code.
+ * NOTE(review): if the second ("wifi") registration fails, the already-added
+ * "bcm-dhd" kobject is not released here — confirm that the caller always
+ * reaches dhd_sysfs_exit() in that error path, otherwise it leaks.
+ */
+int dhd_sysfs_init(dhd_info_t *dhd)
+{
+	int ret = -1;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
+		return ret;
+	}
+
+	/* Initialize the kobject */
+	ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "bcm-dhd");
+	if (ret) {
+		/* kobject_init_and_add failures still require a put on the kobject */
+		kobject_put(&dhd->dhd_kobj);
+		DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
+		return ret;
+	}
+	ret = kobject_init_and_add(&dhd->dhd_conf_file_kobj, &dhd_cntl_file_ktype, NULL, "wifi");
+	if (ret) {
+		kobject_put(&dhd->dhd_conf_file_kobj);
+		DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
+		return ret;
+	}
+
+	/*
+	 * We are always responsible for sending the uevent that the kobject
+	 * was added to the system.
+	 */
+	kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD);
+	kobject_uevent(&dhd->dhd_conf_file_kobj, KOBJ_ADD);
+
+	return ret;
+}
+
+/* Done with the kobject and detach the sysfs interface */
+void dhd_sysfs_exit(dhd_info_t *dhd)
+{
+	if (dhd == NULL) {
+		DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
+		return;
+	}
+
+	/* Release the kobjects; state_initialized guards against dropping a
+	 * kobject that dhd_sysfs_init() never set up.
+	 */
+	if (dhd->dhd_kobj.state_initialized)
+		kobject_put(&dhd->dhd_kobj);
+	if (dhd->dhd_conf_file_kobj.state_initialized)
+		kobject_put(&dhd->dhd_conf_file_kobj);
+}
--- /dev/null
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
+ * Basically selected code segments from usb-cdc.c and usb-rndis.c
+ *
+ * Copyright (C) 1999-2019, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_linux_lb.c 805819 2019-02-20 10:49:35Z $
+ */
+
+#include <dhd_linux_priv.h>
+
+extern dhd_pub_t* g_dhd_pub;
+
+#if defined(DHD_LB)
+
+/* Assign the compile-time default CPU ids for each load-balancing job. */
+void
+dhd_lb_set_default_cpus(dhd_info_t *dhd)
+{
+	/* Default CPU allocation for the jobs */
+	atomic_set(&dhd->rx_napi_cpu, 1);
+	atomic_set(&dhd->rx_compl_cpu, 2);
+	atomic_set(&dhd->tx_compl_cpu, 2);
+	atomic_set(&dhd->tx_cpu, 2);
+	atomic_set(&dhd->net_tx_cpu, 0);
+}
+
+/* Free every cpumask allocated by dhd_cpumasks_init(). */
+void
+dhd_cpumasks_deinit(dhd_info_t *dhd)
+{
+	free_cpumask_var(dhd->cpumask_curr_avail);
+	free_cpumask_var(dhd->cpumask_primary);
+	free_cpumask_var(dhd->cpumask_primary_new);
+	free_cpumask_var(dhd->cpumask_secondary);
+	free_cpumask_var(dhd->cpumask_secondary_new);
+}
+
+/* Allocate the LB cpumasks and populate the primary/secondary sets from the
+ * DHD_LB_PRIMARY_CPUS / DHD_LB_SECONDARY_CPUS 32-bit bitmaps.
+ * Returns 0 on success or -ENOMEM (all masks freed) on allocation failure.
+ */
+int
+dhd_cpumasks_init(dhd_info_t *dhd)
+{
+	int id;
+	uint32 cpus, num_cpus = num_possible_cpus();
+	int ret = 0;
+
+	DHD_ERROR(("%s CPU masks primary(big)=0x%x secondary(little)=0x%x\n", __FUNCTION__,
+		DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS));
+
+	if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) ||
+		!alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) ||
+		!alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) ||
+		!alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) ||
+		!alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) {
+		DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__));
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask);
+	cpumask_clear(dhd->cpumask_primary);
+	cpumask_clear(dhd->cpumask_secondary);
+
+	/* The primary/secondary sets come from 32-bit bitmaps, so only 32 CPUs
+	 * can be described. NOTE(review): on non-assert builds execution
+	 * continues past this ASSERT and CPUs above 32 are silently ignored.
+	 */
+	if (num_cpus > 32) {
+		DHD_ERROR(("%s max cpus must be 32, %d too big\n", __FUNCTION__, num_cpus));
+		ASSERT(0);
+	}
+
+	cpus = DHD_LB_PRIMARY_CPUS;
+	for (id = 0; id < num_cpus; id++) {
+		if (isset(&cpus, id))
+			cpumask_set_cpu(id, dhd->cpumask_primary);
+	}
+
+	cpus = DHD_LB_SECONDARY_CPUS;
+	for (id = 0; id < num_cpus; id++) {
+		if (isset(&cpus, id))
+			cpumask_set_cpu(id, dhd->cpumask_secondary);
+	}
+
+	return ret;
+fail:
+	dhd_cpumasks_deinit(dhd);
+	return ret;
+}
+
+/*
+ * The CPU Candidacy Algorithm
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * The available CPUs for selection are divided into two groups
+ * Primary Set - A CPU mask that carries the First Choice CPUs
+ * Secondary Set - A CPU mask that carries the Second Choice CPUs.
+ *
+ * There are two types of Job, that needs to be assigned to
+ * the CPUs, from one of the above mentioned CPU group. The Jobs are
+ * 1) Rx Packet Processing - napi_cpu
+ * 2) Completion Processing (Tx, RX) - compl_cpu
+ *
+ * To begin with both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
+ * on-line/off-line the CPU candidacy algorithm is triggered. The candidacy
+ * algo tries to pickup the first available non boot CPU (CPU0) for napi_cpu.
+ * If there are more processors free, it assigns one to compl_cpu.
+ * It also tries to ensure that both napi_cpu and compl_cpu are not on the same
+ * CPU, as much as possible.
+ *
+ * By design, both Tx and Rx completion jobs are run on the same CPU core, as it
+ * would allow Tx completion skb's to be released into a local free pool from
+ * which the rx buffer posts could have been serviced. it is important to note
+ * that a Tx packet may not have a large enough buffer for rx posting.
+ */
+/* Pick CPUs for the napi, completion and tx jobs from the currently-available
+ * CPUs, preferring the primary mask over the secondary one (see the comment
+ * block above for the full algorithm).
+ */
+void dhd_select_cpu_candidacy(dhd_info_t *dhd)
+{
+	uint32 primary_available_cpus; /* count of primary available cpus */
+	uint32 secondary_available_cpus; /* count of secondary available cpus */
+	uint32 napi_cpu = 0; /* cpu selected for napi rx processing */
+	uint32 compl_cpu = 0; /* cpu selected for completion jobs */
+	uint32 tx_cpu = 0; /* cpu selected for tx processing job */
+
+	cpumask_clear(dhd->cpumask_primary_new);
+	cpumask_clear(dhd->cpumask_secondary_new);
+
+	/*
+	 * Now select from the primary mask. Even if a Job is
+	 * already running on a CPU in secondary group, we still move
+	 * to primary CPU. So no conditional checks.
+	 */
+	cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
+		dhd->cpumask_curr_avail);
+
+	cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
+		dhd->cpumask_curr_avail);
+
+	primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);
+
+	if (primary_available_cpus > 0) {
+		napi_cpu = cpumask_first(dhd->cpumask_primary_new);
+
+		/* If no further CPU is available,
+		 * cpumask_next returns >= nr_cpu_ids
+		 */
+		tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
+		if (tx_cpu >= nr_cpu_ids)
+			tx_cpu = 0;
+
+		/* In case there are no more CPUs, do completions & Tx in same CPU */
+		compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_primary_new);
+		if (compl_cpu >= nr_cpu_ids)
+			compl_cpu = tx_cpu;
+	}
+
+	DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
+		__FUNCTION__, napi_cpu, compl_cpu, tx_cpu));
+
+	/* -- Now check for the CPUs from the secondary mask -- */
+	secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);
+
+	DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
+		__FUNCTION__, secondary_available_cpus, nr_cpu_ids));
+
+	if (secondary_available_cpus > 0) {
+		/* At this point if napi_cpu is unassigned it means no CPU
+		 * is online from Primary Group
+		 */
+		if (napi_cpu == 0) {
+			napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
+			tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
+			compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
+		} else if (tx_cpu == 0) {
+			tx_cpu = cpumask_first(dhd->cpumask_secondary_new);
+			compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
+		} else if (compl_cpu == 0) {
+			compl_cpu = cpumask_first(dhd->cpumask_secondary_new);
+		}
+
+		/* If no CPU was available for tx processing, choose CPU 0 */
+		if (tx_cpu >= nr_cpu_ids)
+			tx_cpu = 0;
+
+		/* If no CPU was available for completion, choose CPU 0 */
+		if (compl_cpu >= nr_cpu_ids)
+			compl_cpu = 0;
+	}
+	/* Neither mask has an online CPU: fall back to fixed default ids */
+	if ((primary_available_cpus == 0) &&
+		   (secondary_available_cpus == 0)) {
+		/* No CPUs available from primary or secondary mask */
+		napi_cpu = 1;
+		compl_cpu = 0;
+		tx_cpu = 2;
+	}
+
+	DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
+		__FUNCTION__, napi_cpu, compl_cpu, tx_cpu));
+
+	ASSERT(napi_cpu < nr_cpu_ids);
+	ASSERT(compl_cpu < nr_cpu_ids);
+	ASSERT(tx_cpu < nr_cpu_ids);
+
+	/* Publish the selections for the job dispatchers to pick up */
+	atomic_set(&dhd->rx_napi_cpu, napi_cpu);
+	atomic_set(&dhd->tx_compl_cpu, compl_cpu);
+	atomic_set(&dhd->rx_compl_cpu, compl_cpu);
+	atomic_set(&dhd->tx_cpu, tx_cpu);
+
+	return;
+}
+
+/*
+ * Function to handle CPU Hotplug notifications.
+ * One of the task it does is to trigger the CPU Candidacy algorithm
+ * for load balancing.
+ */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+
+/* CPU-online hotplug callback (kernels >= 4.10): mark the CPU available and
+ * re-run the candidacy algorithm.
+ */
+int dhd_cpu_startup_callback(unsigned int cpu)
+{
+	dhd_info_t *dhd = g_dhd_pub->info;
+
+	DHD_INFO(("%s(): \r\n cpu:%d", __FUNCTION__, cpu));
+	DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
+	cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
+	dhd_select_cpu_candidacy(dhd);
+
+	return 0;
+}
+
+/* CPU-offline hotplug callback (kernels >= 4.10): remove the CPU from the
+ * available mask and re-run the candidacy algorithm.
+ */
+int dhd_cpu_teardown_callback(unsigned int cpu)
+{
+	dhd_info_t *dhd = g_dhd_pub->info;
+
+	DHD_INFO(("%s(): \r\n cpu:%d", __FUNCTION__, cpu));
+	DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
+	cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
+	dhd_select_cpu_candidacy(dhd);
+
+	return 0;
+}
+#else
+/* Legacy (< 4.10) CPU hotplug notifier: keep cpumask_curr_avail in sync and
+ * re-run the candidacy algorithm on online/offline transitions.
+ */
+int
+dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+	unsigned long int cpu = (unsigned long int)hcpu;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+	dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+
+	/* Ignore notifications that arrive before the LB state is attached */
+	if (!dhd || !(dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE)) {
+		DHD_INFO(("%s(): LB data is not initialized yet.\n",
+			__FUNCTION__));
+		return NOTIFY_BAD;
+	}
+
+	switch (action)
+	{
+		case CPU_ONLINE:
+		case CPU_ONLINE_FROZEN:
+			DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
+			cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
+			dhd_select_cpu_candidacy(dhd);
+			break;
+
+		case CPU_DOWN_PREPARE:
+		case CPU_DOWN_PREPARE_FROZEN:
+			DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
+			cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
+			dhd_select_cpu_candidacy(dhd);
+			break;
+		default:
+			break;
+	}
+
+	return NOTIFY_OK;
+}
+#endif /* LINUX_VERSION_CODE < 4.10.0 */
+
+/* Register for CPU hotplug events so candidacy re-runs on online/offline.
+ * Returns 0, or on >= 4.10 kernels the cpuhp_setup_state() result.
+ * NOTE(review): with CPUHP_AP_ONLINE_DYN, cpuhp_setup_state() returns the
+ * dynamically allocated state number on success, which is discarded here;
+ * dhd_unregister_cpuhp_callback() then passes the CPUHP_AP_ONLINE_DYN
+ * constant to cpuhp_remove_state_nocalls() — confirm this matches the
+ * intended teardown per the cpuhp API.
+ */
+int dhd_register_cpuhp_callback(dhd_info_t *dhd)
+{
+	int cpuhp_ret = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+	cpuhp_ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "dhd",
+		dhd_cpu_startup_callback, dhd_cpu_teardown_callback);
+
+	if (cpuhp_ret < 0) {
+		DHD_ERROR(("%s(): cpuhp_setup_state failed %d RX LB won't happen \r\n",
+			__FUNCTION__, cpuhp_ret));
+	}
+#else
+	/*
+	 * If we are able to initialize CPU masks, lets register to the
+	 * CPU Hotplug framework to change the CPU for each job dynamically
+	 * using candidacy algorithm.
+	 */
+	dhd->cpu_notifier.notifier_call = dhd_cpu_callback;
+	register_hotcpu_notifier(&dhd->cpu_notifier); /* Register a callback */
+#endif /* LINUX_VERSION_CODE < 4.10.0 */
+	return cpuhp_ret;
+}
+
+/* Unregister the CPU hotplug callback installed by
+ * dhd_register_cpuhp_callback(). Always returns 0.
+ * NOTE(review): passes the CPUHP_AP_ONLINE_DYN constant rather than the state
+ * number returned by cpuhp_setup_state() — confirm against the cpuhp API.
+ */
+int dhd_unregister_cpuhp_callback(dhd_info_t *dhd)
+{
+	int ret = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+	/* Don't want to call tear down while unregistering */
+	cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
+#else
+	if (dhd->cpu_notifier.notifier_call != NULL) {
+		unregister_cpu_notifier(&dhd->cpu_notifier);
+	}
+#endif // endif
+	return ret;
+}
+
+#if defined(DHD_LB_STATS)
+/* Allocate and zero all per-CPU load-balancing statistics counters and the
+ * per-bin histograms. On any allocation failure this logs and returns early;
+ * pointers not yet reached stay NULL and dhd_lb_stats_deinit() skips NULL
+ * entries, so a partial init can still be torn down safely.
+ */
+void dhd_lb_stats_init(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+	int i, j, num_cpus = num_possible_cpus();
+	int alloc_size = sizeof(uint32) * num_cpus;
+
+	if (dhdp == NULL) {
+		DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n",
+			__FUNCTION__));
+		return;
+	}
+
+	dhd = dhdp->info;
+	if (dhd == NULL) {
+		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+		return;
+	}
+
+	DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
+	DHD_LB_STATS_CLR(dhd->napi_sched_cnt);
+
+	/* One uint32 counter per possible CPU for each statistic below */
+	dhd->napi_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+	if (!dhd->napi_percpu_run_cnt) {
+		DHD_ERROR(("%s(): napi_percpu_run_cnt malloc failed \n",
+			__FUNCTION__));
+		return;
+	}
+	for (i = 0; i < num_cpus; i++)
+		DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
+
+	DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);
+
+	dhd->rxc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+	if (!dhd->rxc_percpu_run_cnt) {
+		DHD_ERROR(("%s(): rxc_percpu_run_cnt malloc failed \n",
+			__FUNCTION__));
+		return;
+	}
+	for (i = 0; i < num_cpus; i++)
+		DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);
+
+	DHD_LB_STATS_CLR(dhd->txc_sched_cnt);
+
+	dhd->txc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+	if (!dhd->txc_percpu_run_cnt) {
+		DHD_ERROR(("%s(): txc_percpu_run_cnt malloc failed \n",
+			__FUNCTION__));
+		return;
+	}
+	for (i = 0; i < num_cpus; i++)
+		DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);
+
+	dhd->cpu_online_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+	if (!dhd->cpu_online_cnt) {
+		DHD_ERROR(("%s(): cpu_online_cnt malloc failed \n",
+			__FUNCTION__));
+		return;
+	}
+	for (i = 0; i < num_cpus; i++)
+		DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]);
+
+	dhd->cpu_offline_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+	if (!dhd->cpu_offline_cnt) {
+		DHD_ERROR(("%s(): cpu_offline_cnt malloc failed \n",
+			__FUNCTION__));
+		return;
+	}
+	for (i = 0; i < num_cpus; i++)
+		DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]);
+
+	dhd->txp_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+	if (!dhd->txp_percpu_run_cnt) {
+		DHD_ERROR(("%s(): txp_percpu_run_cnt malloc failed \n",
+			__FUNCTION__));
+		return;
+	}
+	for (i = 0; i < num_cpus; i++)
+		DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);
+
+	dhd->tx_start_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+	if (!dhd->tx_start_percpu_run_cnt) {
+		DHD_ERROR(("%s(): tx_start_percpu_run_cnt malloc failed \n",
+			__FUNCTION__));
+		return;
+	}
+	for (i = 0; i < num_cpus; i++)
+		DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);
+
+	/* Histograms: HIST_BIN_SIZE bins, each a per-CPU uint32 array */
+	for (j = 0; j < HIST_BIN_SIZE; j++) {
+		dhd->napi_rx_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+		if (!dhd->napi_rx_hist[j]) {
+			DHD_ERROR(("%s(): dhd->napi_rx_hist[%d] malloc failed \n",
+				__FUNCTION__, j));
+			return;
+		}
+		for (i = 0; i < num_cpus; i++) {
+			DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]);
+		}
+	}
+#ifdef DHD_LB_TXC
+	for (j = 0; j < HIST_BIN_SIZE; j++) {
+		dhd->txc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+		if (!dhd->txc_hist[j]) {
+			DHD_ERROR(("%s(): dhd->txc_hist[%d] malloc failed \n",
+				__FUNCTION__, j));
+			return;
+		}
+		for (i = 0; i < num_cpus; i++) {
+			DHD_LB_STATS_CLR(dhd->txc_hist[j][i]);
+		}
+	}
+#endif /* DHD_LB_TXC */
+#ifdef DHD_LB_RXC
+	for (j = 0; j < HIST_BIN_SIZE; j++) {
+		dhd->rxc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+		if (!dhd->rxc_hist[j]) {
+			DHD_ERROR(("%s(): dhd->rxc_hist[%d] malloc failed \n",
+				__FUNCTION__, j));
+			return;
+		}
+		for (i = 0; i < num_cpus; i++) {
+			DHD_LB_STATS_CLR(dhd->rxc_hist[j][i]);
+		}
+	}
+#endif /* DHD_LB_RXC */
+	return;
+}
+
+/* Free every statistics buffer allocated by dhd_lb_stats_init(), NULL-ing each
+ * pointer afterwards so the function tolerates partial init and repeat calls.
+ */
+void dhd_lb_stats_deinit(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+	int j, num_cpus = num_possible_cpus();
+	int alloc_size = sizeof(uint32) * num_cpus;
+
+	if (dhdp == NULL) {
+		DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n",
+			__FUNCTION__));
+		return;
+	}
+
+	dhd = dhdp->info;
+	if (dhd == NULL) {
+		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+		return;
+	}
+
+	if (dhd->napi_percpu_run_cnt) {
+		MFREE(dhdp->osh, dhd->napi_percpu_run_cnt, alloc_size);
+		dhd->napi_percpu_run_cnt = NULL;
+	}
+	if (dhd->rxc_percpu_run_cnt) {
+		MFREE(dhdp->osh, dhd->rxc_percpu_run_cnt, alloc_size);
+		dhd->rxc_percpu_run_cnt = NULL;
+	}
+	if (dhd->txc_percpu_run_cnt) {
+		MFREE(dhdp->osh, dhd->txc_percpu_run_cnt, alloc_size);
+		dhd->txc_percpu_run_cnt = NULL;
+	}
+	if (dhd->cpu_online_cnt) {
+		MFREE(dhdp->osh, dhd->cpu_online_cnt, alloc_size);
+		dhd->cpu_online_cnt = NULL;
+	}
+	if (dhd->cpu_offline_cnt) {
+		MFREE(dhdp->osh, dhd->cpu_offline_cnt, alloc_size);
+		dhd->cpu_offline_cnt = NULL;
+	}
+
+	if (dhd->txp_percpu_run_cnt) {
+		MFREE(dhdp->osh, dhd->txp_percpu_run_cnt, alloc_size);
+		dhd->txp_percpu_run_cnt = NULL;
+	}
+	if (dhd->tx_start_percpu_run_cnt) {
+		MFREE(dhdp->osh, dhd->tx_start_percpu_run_cnt, alloc_size);
+		dhd->tx_start_percpu_run_cnt = NULL;
+	}
+
+	/* Histogram bins are freed individually since each was a separate MALLOC */
+	for (j = 0; j < HIST_BIN_SIZE; j++) {
+		if (dhd->napi_rx_hist[j]) {
+			MFREE(dhdp->osh, dhd->napi_rx_hist[j], alloc_size);
+			dhd->napi_rx_hist[j] = NULL;
+		}
+#ifdef DHD_LB_TXC
+		if (dhd->txc_hist[j]) {
+			MFREE(dhdp->osh, dhd->txc_hist[j], alloc_size);
+			dhd->txc_hist[j] = NULL;
+		}
+#endif /* DHD_LB_TXC */
+#ifdef DHD_LB_RXC
+		if (dhd->rxc_hist[j]) {
+			MFREE(dhdp->osh, dhd->rxc_hist[j], alloc_size);
+			dhd->rxc_hist[j] = NULL;
+		}
+#endif /* DHD_LB_RXC */
+	}
+
+	return;
+}
+
+/* Dump a per-CPU packet histogram (hist[bin][cpu]) into strbuf, followed by
+ * per-CPU totals and an overall total. Bin i counts batches of size 2^i, so
+ * totals weight each bin count by (1 << bin).
+ */
+void dhd_lb_stats_dump_histo(dhd_pub_t *dhdp,
+	struct bcmstrbuf *strbuf, uint32 **hist)
+{
+	int i, j;
+	uint32 *per_cpu_total;
+	uint32 total = 0;
+	uint32 num_cpus = num_possible_cpus();
+
+	per_cpu_total = (uint32 *)MALLOC(dhdp->osh, sizeof(uint32) * num_cpus);
+	if (!per_cpu_total) {
+		DHD_ERROR(("%s(): dhd->per_cpu_total malloc failed \n", __FUNCTION__));
+		return;
+	}
+	bzero(per_cpu_total, sizeof(uint32) * num_cpus);
+
+	bcm_bprintf(strbuf, "CPU: \t\t");
+	for (i = 0; i < num_cpus; i++)
+		bcm_bprintf(strbuf, "%d\t", i);
+	bcm_bprintf(strbuf, "\nBin\n");
+
+	for (i = 0; i < HIST_BIN_SIZE; i++) {
+		bcm_bprintf(strbuf, "%d:\t\t", 1<<i);
+		for (j = 0; j < num_cpus; j++) {
+			bcm_bprintf(strbuf, "%d\t", hist[i][j]);
+		}
+		bcm_bprintf(strbuf, "\n");
+	}
+	bcm_bprintf(strbuf, "Per CPU Total \t");
+	/* total is zero-initialized above; the original redundantly reset it here */
+	for (i = 0; i < num_cpus; i++) {
+		for (j = 0; j < HIST_BIN_SIZE; j++) {
+			per_cpu_total[i] += (hist[j][i] * (1<<j));
+		}
+		bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]);
+		total += per_cpu_total[i];
+	}
+	bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total);
+
+	/* per_cpu_total is provably non-NULL here (allocation failure returned
+	 * early), so the original NULL guard before MFREE was dead code.
+	 */
+	MFREE(dhdp->osh, per_cpu_total, sizeof(uint32) * num_cpus);
+	return;
+}
+
+void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
+{
+ int i, num_cpus = num_possible_cpus();
+
+ bcm_bprintf(strbuf, "CPU: \t");
+ for (i = 0; i < num_cpus; i++)
+ bcm_bprintf(strbuf, "%d\t", i);
+ bcm_bprintf(strbuf, "\n");
+
+ bcm_bprintf(strbuf, "Val: \t");
+ for (i = 0; i < num_cpus; i++)
+ bcm_bprintf(strbuf, "%u\t", *(p+i));
+ bcm_bprintf(strbuf, "\n");
+ return;
+}
+
+void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ dhd_info_t *dhd;
+
+ if (dhdp == NULL || strbuf == NULL) {
+ DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
+ __FUNCTION__, dhdp, strbuf));
+ return;
+ }
+
+ dhd = dhdp->info;
+ if (dhd == NULL) {
+ DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+ return;
+ }
+
+ bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);
+
+ bcm_bprintf(strbuf, "\ncpu_offline_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);
+
+ bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
+ dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
+ dhd->txc_sched_cnt);
+
+#ifdef DHD_LB_RXP
+ bcm_bprintf(strbuf, "\nnapi_percpu_run_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
+ bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
+ dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->napi_rx_hist);
+#endif /* DHD_LB_RXP */
+
+#ifdef DHD_LB_RXC
+ bcm_bprintf(strbuf, "\nrxc_percpu_run_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt);
+ bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n");
+ dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->rxc_hist);
+#endif /* DHD_LB_RXC */
+
+#ifdef DHD_LB_TXC
+ bcm_bprintf(strbuf, "\ntxc_percpu_run_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt);
+ bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n");
+ dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->txc_hist);
+#endif /* DHD_LB_TXC */
+
+#ifdef DHD_LB_TXP
+ bcm_bprintf(strbuf, "\ntxp_percpu_run_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->txp_percpu_run_cnt);
+
+ bcm_bprintf(strbuf, "\ntx_start_percpu_run_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->tx_start_percpu_run_cnt);
+#endif /* DHD_LB_TXP */
+}
+
+/* Given a number 'n' returns 'm', the smallest power of 2 that is >= n */
+static inline uint32 next_larger_power2(uint32 num)
+{
+ num--;
+ num |= (num >> 1);
+ num |= (num >> 2);
+ num |= (num >> 4);
+ num |= (num >> 8);
+ num |= (num >> 16);
+
+ return (num + 1);
+}
+
+void dhd_lb_stats_update_histo(uint32 **bin, uint32 count, uint32 cpu)
+{
+ uint32 bin_power;
+ uint32 *p;
+ bin_power = next_larger_power2(count);
+
+ switch (bin_power) {
+ case 1: p = bin[0] + cpu; break;
+ case 2: p = bin[1] + cpu; break;
+ case 4: p = bin[2] + cpu; break;
+ case 8: p = bin[3] + cpu; break;
+ case 16: p = bin[4] + cpu; break;
+ case 32: p = bin[5] + cpu; break;
+ case 64: p = bin[6] + cpu; break;
+ case 128: p = bin[7] + cpu; break;
+ default : p = bin[8] + cpu; break;
+ }
+
+ *p = *p + 1;
+ return;
+}
+
+void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count)
+{
+ int cpu;
+ dhd_info_t *dhd = dhdp->info;
+
+ cpu = get_cpu();
+ put_cpu();
+ dhd_lb_stats_update_histo(dhd->napi_rx_hist, count, cpu);
+
+ return;
+}
+
+void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count)
+{
+ int cpu;
+ dhd_info_t *dhd = dhdp->info;
+
+ cpu = get_cpu();
+ put_cpu();
+ dhd_lb_stats_update_histo(dhd->txc_hist, count, cpu);
+
+ return;
+}
+
+void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count)
+{
+ int cpu;
+ dhd_info_t *dhd = dhdp->info;
+
+ cpu = get_cpu();
+ put_cpu();
+ dhd_lb_stats_update_histo(dhd->rxc_hist, count, cpu);
+
+ return;
+}
+
+void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt);
+}
+
+void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt);
+}
+#endif /* DHD_LB_STATS */
+
+#endif /* DHD_LB */
+#if defined(DHD_LB)
+/**
+ * dhd_tasklet_schedule - Function that runs in IPI context of the destination
+ * CPU and schedules a tasklet.
+ * @tasklet: opaque pointer to the tasklet
+ */
+INLINE void
+dhd_tasklet_schedule(void *tasklet)
+{
+ tasklet_schedule((struct tasklet_struct *)tasklet);
+}
+/**
+ * dhd_tasklet_schedule_on - Executes the passed tasklet in a given CPU
+ * @tasklet: tasklet to be scheduled
+ * @on_cpu: cpu core id
+ *
+ * If the requested cpu is online, then an IPI is sent to this cpu via the
+ * smp_call_function_single with no wait and the tasklet_schedule function
+ * will be invoked to schedule the specified tasklet on the requested CPU.
+ */
+INLINE void
+dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu)
+{
+ const int wait = 0;
+ smp_call_function_single(on_cpu,
+ dhd_tasklet_schedule, (void *)tasklet, wait);
+}
+
+/**
+ * dhd_work_schedule_on - Executes the passed work in a given CPU
+ * @work: work to be scheduled
+ * @on_cpu: cpu core id
+ *
+ * If the requested cpu is online, then an IPI is sent to this cpu via the
+ * schedule_work_on and the work function
+ * will be invoked to schedule the specified work on the requested CPU.
+ */
+
+INLINE void
+dhd_work_schedule_on(struct work_struct *work, int on_cpu)
+{
+ schedule_work_on(on_cpu, work);
+}
+
+#if defined(DHD_LB_TXC)
+/**
+ * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet
+ * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and
+ * freeing the packets placed in the tx_compl workq
+ */
+void
+dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ int curr_cpu, on_cpu;
+
+ if (dhd->rx_napi_netdev == NULL) {
+ DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_LB_STATS_INCR(dhd->txc_sched_cnt);
+ /*
+ * If the destination CPU is NOT online or is same as current CPU
+ * no need to schedule the work
+ */
+ curr_cpu = get_cpu();
+ put_cpu();
+
+ on_cpu = atomic_read(&dhd->tx_compl_cpu);
+
+ if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
+ dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
+ } else {
+ schedule_work(&dhd->tx_compl_dispatcher_work);
+ }
+}
+
+static void dhd_tx_compl_dispatcher_fn(struct work_struct * work)
+{
+ struct dhd_info *dhd =
+ container_of(work, struct dhd_info, tx_compl_dispatcher_work);
+ int cpu;
+
+ get_online_cpus();
+ cpu = atomic_read(&dhd->tx_compl_cpu);
+ if (!cpu_online(cpu))
+ dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
+ else
+ dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
+ put_online_cpus();
+}
+#endif /* DHD_LB_TXC */
+
+#if defined(DHD_LB_RXC)
+/**
+ * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
+ * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
+ * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
+ * placed in the rx_compl workq.
+ *
+ * @dhdp: pointer to dhd_pub object
+ */
+void
+dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ int curr_cpu, on_cpu;
+
+ if (dhd->rx_napi_netdev == NULL) {
+ DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_LB_STATS_INCR(dhd->rxc_sched_cnt);
+ /*
+ * If the destination CPU is NOT online or is same as current CPU
+ * no need to schedule the work
+ */
+ curr_cpu = get_cpu();
+ put_cpu();
+ on_cpu = atomic_read(&dhd->rx_compl_cpu);
+
+ if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
+ dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
+ } else {
+ schedule_work(&dhd->rx_compl_dispatcher_work);
+ }
+}
+
+void dhd_rx_compl_dispatcher_fn(struct work_struct * work)
+{
+ struct dhd_info *dhd =
+ container_of(work, struct dhd_info, rx_compl_dispatcher_work);
+ int cpu;
+
+ get_online_cpus();
+ cpu = atomic_read(&dhd->rx_compl_cpu);
+ if (!cpu_online(cpu))
+ dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
+ else {
+ dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu);
+ }
+ put_online_cpus();
+}
+#endif /* DHD_LB_RXC */
+
+#if defined(DHD_LB_TXP)
+void dhd_tx_dispatcher_work(struct work_struct * work)
+{
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ struct dhd_info *dhd =
+ container_of(work, struct dhd_info, tx_dispatcher_work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+ dhd_tasklet_schedule(&dhd->tx_tasklet);
+}
+
+void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp)
+{
+ int cpu;
+ int net_tx_cpu;
+ dhd_info_t *dhd = dhdp->info;
+
+ preempt_disable();
+ cpu = atomic_read(&dhd->tx_cpu);
+ net_tx_cpu = atomic_read(&dhd->net_tx_cpu);
+
+ /*
+ * Now if the NET_TX has pushed the packet in the same
+	 * CPU that is chosen for Tx processing, separate it out
+ * i.e run the TX processing tasklet in compl_cpu
+ */
+ if (net_tx_cpu == cpu)
+ cpu = atomic_read(&dhd->tx_compl_cpu);
+
+ if (!cpu_online(cpu)) {
+ /*
+ * Ooohh... but the Chosen CPU is not online,
+ * Do the job in the current CPU itself.
+ */
+ dhd_tasklet_schedule(&dhd->tx_tasklet);
+ } else {
+ /*
+		 * Schedule tx_dispatcher_work on the cpu which
+ * in turn will schedule tx_tasklet.
+ */
+ dhd_work_schedule_on(&dhd->tx_dispatcher_work, cpu);
+ }
+ preempt_enable();
+}
+
+/**
+ * dhd_lb_tx_dispatch - load balance by dispatching the tx_tasklet
+ * on another cpu. The tx_tasklet will take care of actually putting
+ * the skbs into appropriate flow ring and ringing H2D interrupt
+ *
+ * @dhdp: pointer to dhd_pub object
+ */
+void
+dhd_lb_tx_dispatch(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ int curr_cpu;
+
+ curr_cpu = get_cpu();
+ put_cpu();
+
+ /* Record the CPU in which the TX request from Network stack came */
+ atomic_set(&dhd->net_tx_cpu, curr_cpu);
+
+ /* Schedule the work to dispatch ... */
+ dhd_tx_dispatcher_fn(dhdp);
+}
+#endif /* DHD_LB_TXP */
+
+#if defined(DHD_LB_RXP)
+/**
+ * dhd_napi_poll - Load balance napi poll function to process received
+ * packets and send up the network stack using netif_receive_skb()
+ *
+ * @napi: napi object in which context this poll function is invoked
+ * @budget: number of packets to be processed.
+ *
+ * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
+ * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock).
+ * Dequeue each packet from head of rx_process_queue, fetch the ifid from the
+ * packet tag and sendup.
+ */
+int
+dhd_napi_poll(struct napi_struct *napi, int budget)
+{
+ int ifid;
+ const int pkt_count = 1;
+ const int chan = 0;
+ struct sk_buff * skb;
+ unsigned long flags;
+ struct dhd_info *dhd;
+ int processed = 0;
+ struct sk_buff_head rx_process_queue;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ dhd = container_of(napi, struct dhd_info, rx_napi_struct);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+
+ DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
+ __FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));
+ __skb_queue_head_init(&rx_process_queue);
+
+ /* extract the entire rx_napi_queue into local rx_process_queue */
+ spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
+ skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue);
+ spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
+
+ while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) {
+ OSL_PREFETCH(skb->data);
+
+ ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
+
+ DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
+ __FUNCTION__, skb, ifid));
+
+ dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
+ processed++;
+ }
+
+ DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);
+
+ DHD_INFO(("%s processed %d\n", __FUNCTION__, processed));
+ napi_complete(napi);
+
+ return budget - 1;
+}
+
+/**
+ * dhd_napi_schedule - Place the napi struct into the current cpus softnet napi
+ * poll list. This function may be invoked via the smp_call_function_single
+ * from a remote CPU.
+ *
+ * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
+ * after the napi_struct is added to the softnet data's poll_list
+ *
+ * @info: pointer to a dhd_info struct
+ */
+static void
+dhd_napi_schedule(void *info)
+{
+ dhd_info_t *dhd = (dhd_info_t *)info;
+
+ DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
+ __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));
+
+ /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
+ if (napi_schedule_prep(&dhd->rx_napi_struct)) {
+ __napi_schedule(&dhd->rx_napi_struct);
+#ifdef WAKEUP_KSOFTIRQD_POST_NAPI_SCHEDULE
+ raise_softirq(NET_RX_SOFTIRQ);
+#endif /* WAKEUP_KSOFTIRQD_POST_NAPI_SCHEDULE */
+ }
+
+ /*
+ * If the rx_napi_struct was already running, then we let it complete
+ * processing all its packets. The rx_napi_struct may only run on one
+ * core at a time, to avoid out-of-order handling.
+ */
+}
+
+/**
+ * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ
+ * action after placing the dhd's rx_process napi object in the remote CPU's
+ * softnet data's poll_list.
+ *
+ * @dhd: dhd_info which has the rx_process napi object
+ * @on_cpu: desired remote CPU id
+ */
+static INLINE int
+dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
+{
+ int wait = 0; /* asynchronous IPI */
+ DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
+ __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));
+
+ if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
+ DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
+ __FUNCTION__, on_cpu));
+ }
+
+ DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
+
+ return 0;
+}
+
+/*
+ * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on
+ * Why should we do this?
+ * The candidacy algorithm is run from the call back function
+ * registered to CPU hotplug notifier. This call back happens from Worker
+ * context. The dhd_napi_schedule_on is also from worker context.
+ * Note that both of this can run on two different CPUs at the same time.
+ * So we can possibly have a window where a given CPUn is being brought
+ * down from CPUm while we try to run a function on CPUn.
+ * To prevent this, it is better to have the whole code execute an SMP
+ * function under get_online_cpus.
+ * This function call ensures that hotplug mechanism does not kick-in
+ * until we are done dealing with online CPUs
+ * If the hotplug worker is already running, no worries because the
+ * candidacy algo would then reflect the same in dhd->rx_napi_cpu.
+ *
+ * The below mentioned code structure is proposed in
+ * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
+ * for the question
+ * Q: I need to ensure that a particular cpu is not removed when there is some
+ * work specific to this cpu is in progress
+ *
+ * According to the documentation calling get_online_cpus is NOT required, if
+ * we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn can
+ * run from Work Queue context we have to call these functions
+ */
+void dhd_rx_napi_dispatcher_fn(struct work_struct * work)
+{
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ struct dhd_info *dhd =
+ container_of(work, struct dhd_info, rx_napi_dispatcher_work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+
+ dhd_napi_schedule(dhd);
+}
+
+/**
+ * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
+ * to run on another CPU. The rx_napi_struct's poll function will retrieve all
+ * the packets enqueued into the rx_napi_queue and sendup.
+ * The producer's rx packet queue is appended to the rx_napi_queue before
+ * dispatching the rx_napi_struct.
+ */
+void
+dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
+{
+ unsigned long flags;
+ dhd_info_t *dhd = dhdp->info;
+ int curr_cpu;
+ int on_cpu;
+#ifdef DHD_LB_IRQSET
+ cpumask_t cpus;
+#endif /* DHD_LB_IRQSET */
+
+ if (dhd->rx_napi_netdev == NULL) {
+ DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
+ skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));
+
+ /* append the producer's queue of packets to the napi's rx process queue */
+ spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
+ skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
+ spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
+
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt);
+
+ /* if LB RXP is disabled directly schedule NAPI */
+ if (atomic_read(&dhd->lb_rxp_active) == 0) {
+ dhd_napi_schedule(dhd);
+ return;
+ }
+
+ /*
+ * If the destination CPU is NOT online or is same as current CPU
+ * no need to schedule the work
+ */
+ curr_cpu = get_cpu();
+ put_cpu();
+
+ preempt_disable();
+ on_cpu = atomic_read(&dhd->rx_napi_cpu);
+#ifdef DHD_LB_IRQSET
+ if (cpumask_and(&cpus, cpumask_of(curr_cpu), dhd->cpumask_primary) ||
+ (!cpu_online(on_cpu)))
+#else
+ if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu)))
+#endif /* DHD_LB_IRQSET */
+ {
+ DHD_INFO(("%s : curr_cpu : %d, cpumask : 0x%lx\n", __FUNCTION__,
+ curr_cpu, *cpumask_bits(dhd->cpumask_primary)));
+ dhd_napi_schedule(dhd);
+ } else {
+ DHD_INFO(("%s : schedule to curr_cpu : %d, cpumask : 0x%lx\n",
+ __FUNCTION__, curr_cpu, *cpumask_bits(dhd->cpumask_primary)));
+ dhd_work_schedule_on(&dhd->rx_napi_dispatcher_work, on_cpu);
+ DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
+ }
+ preempt_enable();
+}
+
+/**
+ * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
+ */
+void
+dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
+{
+ dhd_info_t *dhd = dhdp->info;
+
+ DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
+ pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
+ DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
+ __skb_queue_tail(&dhd->rx_pend_queue, pkt);
+}
+#endif /* DHD_LB_RXP */
+#endif /* DHD_LB */
+
+#if defined(DHD_LB_IRQSET) || defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
+void
+dhd_irq_set_affinity(dhd_pub_t *dhdp, const struct cpumask *cpumask)
+{
+ unsigned int irq = (unsigned int)-1;
+ int err = BCME_OK;
+
+ if (!dhdp) {
+ DHD_ERROR(("%s : dhdp is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (!dhdp->bus) {
+ DHD_ERROR(("%s : bus is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_ERROR(("%s : irq set affinity cpu:0x%lx\n",
+ __FUNCTION__, *cpumask_bits(cpumask)));
+
+ dhdpcie_get_pcieirq(dhdp->bus, &irq);
+ err = irq_set_affinity(irq, cpumask);
+ if (err)
+ DHD_ERROR(("%s : irq set affinity is failed cpu:0x%lx\n",
+ __FUNCTION__, *cpumask_bits(cpumask)));
+}
+#endif /* DHD_LB_IRQSET || DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
+
+#if defined(DHD_LB_TXP)
+
+int BCMFASTPATH
+dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net,
+ int ifidx, void *skb)
+{
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->tx_start_percpu_run_cnt);
+
+ /* If the feature is disabled run-time do TX from here */
+ if (atomic_read(&dhd->lb_txp_active) == 0) {
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
+ return __dhd_sendpkt(&dhd->pub, ifidx, skb);
+ }
+
+ /* Store the address of net device and interface index in the Packet tag */
+ DHD_LB_TX_PKTTAG_SET_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), net);
+ DHD_LB_TX_PKTTAG_SET_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), ifidx);
+
+ /* Enqueue the skb into tx_pend_queue */
+ skb_queue_tail(&dhd->tx_pend_queue, skb);
+
+ DHD_TRACE(("%s(): Added skb %p for netdev %p \r\n", __FUNCTION__, skb, net));
+
+ /* Dispatch the Tx job to be processed by the tx_tasklet */
+ dhd_lb_tx_dispatch(&dhd->pub);
+
+ return NETDEV_TX_OK;
+}
+#endif /* DHD_LB_TXP */
+
+#ifdef DHD_LB_TXP
+#define DHD_LB_TXBOUND 64
+/*
+ * Function that performs the TX processing on a given CPU
+ */
+bool
+dhd_lb_tx_process(dhd_info_t *dhd)
+{
+ struct sk_buff *skb;
+ int cnt = 0;
+ struct net_device *net;
+ int ifidx;
+ bool resched = FALSE;
+
+ DHD_TRACE(("%s(): TX Processing \r\n", __FUNCTION__));
+ if (dhd == NULL) {
+ DHD_ERROR((" Null pointer DHD \r\n"));
+ return resched;
+ }
+
+ BCM_REFERENCE(net);
+
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
+
+ /* Base Loop to perform the actual Tx */
+ do {
+ skb = skb_dequeue(&dhd->tx_pend_queue);
+ if (skb == NULL) {
+ DHD_TRACE(("Dequeued a Null Packet \r\n"));
+ break;
+ }
+ cnt++;
+
+ net = DHD_LB_TX_PKTTAG_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));
+ ifidx = DHD_LB_TX_PKTTAG_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));
+
+ DHD_TRACE(("Processing skb %p for net %p index %d \r\n", skb,
+ net, ifidx));
+
+ __dhd_sendpkt(&dhd->pub, ifidx, skb);
+
+ if (cnt >= DHD_LB_TXBOUND) {
+ resched = TRUE;
+ break;
+ }
+
+ } while (1);
+
+ DHD_INFO(("%s(): Processed %d packets \r\n", __FUNCTION__, cnt));
+
+ return resched;
+}
+
+void
+dhd_lb_tx_handler(unsigned long data)
+{
+ dhd_info_t *dhd = (dhd_info_t *)data;
+
+ if (dhd_lb_tx_process(dhd)) {
+ dhd_tasklet_schedule(&dhd->tx_tasklet);
+ }
+}
+
+#endif /* DHD_LB_TXP */
--- /dev/null
+/*
+ * Packet dump helper functions
+ *
+ * Copyright (C) 1999-2019, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_linux_pktdump.c 820929 2019-05-21 14:09:11Z $
+ */
+
+#include <typedefs.h>
+#include <ethernet.h>
+#include <bcmutils.h>
+#include <bcmevent.h>
+#include <bcmendian.h>
+#include <bcmtlv.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <bcmip.h>
+#include <bcmudp.h>
+#include <bcmdhcp.h>
+#include <bcmarp.h>
+#include <bcmicmp.h>
+#include <dhd_linux_pktdump.h>
+#include <dhd_config.h>
+
+#define DHD_PKTDUMP(arg) DHD_ERROR(arg)
+#define DHD_PKTDUMP_MEM(arg) DHD_ERROR(arg)
+#define PACKED_STRUCT __attribute__ ((packed))
+
+#define EAPOL_HDR_LEN 4
+
+/* EAPOL types */
+#define EAP_PACKET 0
+#define EAPOL_START 1
+#define EAPOL_LOGOFF 2
+#define EAPOL_KEY 3
+#define EAPOL_ASF 4
+
+/* EAPOL-Key types */
+#define EAPOL_RC4_KEY 1
+#define EAPOL_WPA2_KEY 2 /* 802.11i/WPA2 */
+#define EAPOL_WPA_KEY 254 /* WPA */
+
+/* EAPOL-Key header field size */
+#define AKW_BLOCK_LEN 8
+#define WPA_KEY_REPLAY_LEN 8
+#define WPA_KEY_NONCE_LEN 32
+#define WPA_KEY_IV_LEN 16
+#define WPA_KEY_RSC_LEN 8
+#define WPA_KEY_ID_LEN 8
+#define WPA_KEY_MIC_LEN 16
+#define WPA_MAX_KEY_SIZE 32
+#define WPA_KEY_DATA_LEN (WPA_MAX_KEY_SIZE + AKW_BLOCK_LEN)
+
+/* Key information bit */
+#define KEYINFO_TYPE_MASK (1 << 3)
+#define KEYINFO_INSTALL_MASK (1 << 6)
+#define KEYINFO_KEYACK_MASK (1 << 7)
+#define KEYINFO_KEYMIC_MASK (1 << 8)
+#define KEYINFO_SECURE_MASK (1 << 9)
+#define KEYINFO_ERROR_MASK (1 << 10)
+#define KEYINFO_REQ_MASK (1 << 11)
+
+/* EAP Code */
+#define EAP_CODE_REQUEST 1 /* Request */
+#define EAP_CODE_RESPONSE 2 /* Response */
+#define EAP_CODE_SUCCESS 3 /* Success */
+#define EAP_CODE_FAILURE 4 /* Failure */
+
+/* EAP Type */
+#define EAP_TYPE_RSVD 0 /* Reserved */
+#define EAP_TYPE_IDENT 1 /* Identify */
+#define EAP_TYPE_NOTI 2 /* Notification */
+#define EAP_TYPE_TLS 13 /* EAP-TLS */
+#define EAP_TYPE_LEAP 17 /* Cisco-LEAP */
+#define EAP_TYPE_TTLS 21 /* EAP-TTLS */
+#define EAP_TYPE_AKA 23 /* EAP-AKA */
+#define EAP_TYPE_PEAP 25 /* EAP-PEAP */
+#define EAP_TYPE_FAST 43 /* EAP-FAST */
+#define EAP_TYPE_PSK 47 /* EAP-PSK */
+#define EAP_TYPE_AKAP 50 /* EAP-AKA' */
+#define EAP_TYPE_EXP 254 /* Reserved for Expanded Type */
+
+/* WSC */
+#define EAP_HDR_LEN 5
+#define EAP_WSC_NONCE_OFFSET 10
+#define EAP_WSC_DATA_OFFSET (OFFSETOF(eap_wsc_fmt_t, data))
+#define EAP_WSC_MIN_DATA_LEN ((EAP_HDR_LEN) + (EAP_WSC_DATA_OFFSET))
+#define WFA_VID "\x00\x37\x2A" /* WFA SMI code */
+#define WFA_VID_LEN 3 /* WFA VID length */
+#define WFA_VTYPE 1u /* WFA Vendor type */
+
+/* WSC opcode */
+#define WSC_OPCODE_UPNP 0
+#define WSC_OPCODE_START 1
+#define WSC_OPCODE_ACK 2
+#define WSC_OPCODE_NACK 3
+#define WSC_OPCODE_MSG 4
+#define WSC_OPCODE_DONE 5
+#define WSC_OPCODE_FRAG_ACK 6
+
+/* WSC flag */
+#define WSC_FLAG_MF 1 /* more fragments */
+#define WSC_FLAG_LF 2 /* length field */
+
+/* WSC message code */
+#define WSC_ATTR_MSG 0x1022
+#define WSC_MSG_M1 0x04
+#define WSC_MSG_M2 0x05
+#define WSC_MSG_M3 0x07
+#define WSC_MSG_M4 0x08
+#define WSC_MSG_M5 0x09
+#define WSC_MSG_M6 0x0A
+#define WSC_MSG_M7 0x0B
+#define WSC_MSG_M8 0x0C
+
+/* Debug prints */
+typedef enum pkt_cnt_type {
+ PKT_CNT_TYPE_INVALID = 0,
+ PKT_CNT_TYPE_ARP = 1,
+ PKT_CNT_TYPE_DNS = 2,
+ PKT_CNT_TYPE_MAX = 3
+} pkt_cnt_type_t;
+
+typedef struct pkt_cnt {
+ uint32 tx_cnt;
+ uint32 tx_err_cnt;
+ uint32 rx_cnt;
+} pkt_cnt_t;
+
+typedef struct pkt_cnt_log {
+ bool enabled;
+ uint16 reason;
+ timer_list_compat_t pktcnt_timer;
+ pkt_cnt_t arp_cnt;
+ pkt_cnt_t dns_cnt;
+} pkt_cnts_log_t;
+
+#define PKT_CNT_TIMER_INTERNVAL_MS 5000 /* packet count timeout(ms) */
+#define PKT_CNT_RSN_VALID(rsn) \
+ (((rsn) > (PKT_CNT_RSN_INVALID)) && ((rsn) < (PKT_CNT_RSN_MAX)))
+
+static const char pkt_cnt_msg[][20] = {
+ "INVALID",
+ "ROAM_SUCCESS",
+ "GROUP_KEY_UPDATE",
+ "CONNECT_SUCCESS",
+ "INVALID"
+};
+
+static const char tx_pktfate[][30] = {
+ "TX_PKT_FATE_ACKED", /* 0: WLFC_CTL_PKTFLAG_DISCARD */
+ "TX_PKT_FATE_FW_QUEUED", /* 1: WLFC_CTL_PKTFLAG_D11SUPPRESS */
+ "TX_PKT_FATE_FW_QUEUED", /* 2: WLFC_CTL_PKTFLAG_WLSUPPRESS */
+ "TX_PKT_FATE_FW_DROP_INVALID", /* 3: WLFC_CTL_PKTFLAG_TOSSED_BYWLC */
+ "TX_PKT_FATE_SENT", /* 4: WLFC_CTL_PKTFLAG_DISCARD_NOACK */
+ "TX_PKT_FATE_FW_DROP_OTHER", /* 5: WLFC_CTL_PKTFLAG_SUPPRESS_ACKED */
+ "TX_PKT_FATE_FW_DROP_EXPTIME", /* 6: WLFC_CTL_PKTFLAG_EXPIRED */
+ "TX_PKT_FATE_FW_DROP_OTHER", /* 7: WLFC_CTL_PKTFLAG_DROPPED */
+ "TX_PKT_FATE_FW_PKT_FREE", /* 8: WLFC_CTL_PKTFLAG_MKTFREE */
+};
+
+#define DBGREPLAY " Replay Counter: %02x%02x%02x%02x%02x%02x%02x%02x"
+#define REPLAY_FMT(key) ((const eapol_key_hdr_t *)(key))->replay[0], \
+ ((const eapol_key_hdr_t *)(key))->replay[1], \
+ ((const eapol_key_hdr_t *)(key))->replay[2], \
+ ((const eapol_key_hdr_t *)(key))->replay[3], \
+ ((const eapol_key_hdr_t *)(key))->replay[4], \
+ ((const eapol_key_hdr_t *)(key))->replay[5], \
+ ((const eapol_key_hdr_t *)(key))->replay[6], \
+ ((const eapol_key_hdr_t *)(key))->replay[7]
+#define TXFATE_FMT " TX_PKTHASH:0x%X TX_PKT_FATE:%s"
+#define TX_PKTHASH(pkthash) ((pkthash) ? (*pkthash) : (0))
+#define TX_FATE_STR(fate) (((*fate) <= (WLFC_CTL_PKTFLAG_MKTFREE)) ? \
+ (tx_pktfate[(*fate)]) : "TX_PKT_FATE_FW_DROP_OTHER")
+#define TX_FATE(fate) ((fate) ? (TX_FATE_STR(fate)) : "N/A")
+#define TX_FATE_ACKED(fate) ((fate) ? ((*fate) == (WLFC_CTL_PKTFLAG_DISCARD)) : (0))
+
+#define EAP_PRINT(str) \
+ do { \
+ if (tx) { \
+ DHD_PKTDUMP(("[dhd-%s] ETHER_TYPE_802_1X [TX]: " \
+ str TXFATE_FMT "\n", ifname, \
+ TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP(("[dhd-%s] ETHER_TYPE_802_1X [RX]: " \
+ str "\n", ifname)); \
+ } \
+ } while (0)
+
+#define EAP_PRINT_REPLAY(str) \
+ do { \
+ if (tx) { \
+ DHD_PKTDUMP(("[dhd-%s] ETHER_TYPE_802_1X [TX]: " \
+ str DBGREPLAY TXFATE_FMT "\n", ifname, \
+ REPLAY_FMT(eap_key), TX_PKTHASH(pkthash), \
+ TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP(("[dhd-%s] ETHER_TYPE_802_1X [RX]: " \
+ str DBGREPLAY "\n", ifname, \
+ REPLAY_FMT(eap_key))); \
+ } \
+ } while (0)
+
+#define EAP_PRINT_OTHER(str) \
+ do { \
+ if (tx) { \
+ DHD_PKTDUMP(("[dhd-%s] ETHER_TYPE_802_1X [TX]: " \
+ str "ver %d, type %d" TXFATE_FMT "\n", ifname, \
+ eapol_hdr->version, eapol_hdr->type, \
+ TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP(("[dhd-%s] ETHER_TYPE_802_1X [RX]: " \
+ str "ver %d, type %d\n", ifname, \
+ eapol_hdr->version, eapol_hdr->type)); \
+ } \
+ } while (0)
+
+#define EAP_PRINT_OTHER_4WAY(str) \
+ do { \
+ if (tx) { \
+ DHD_PKTDUMP(("[dhd-%s] ETHER_TYPE_802_1X [TX]: " str \
+ "ver %d type %d keytype %d keyinfo 0x%02X" \
+ TXFATE_FMT "\n", ifname, eapol_hdr->version, \
+ eapol_hdr->type, eap_key->type, \
+ (uint32)hton16(eap_key->key_info), \
+ TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+ } else { \
+ DHD_PKTDUMP(("[dhd-%s] ETHER_TYPE_802_1X [RX]: " str \
+ "ver %d type %d keytype %d keyinfo 0x%02X\n", \
+ ifname, eapol_hdr->version, eapol_hdr->type, \
+ eap_key->type, (uint32)hton16(eap_key->key_info))); \
+ } \
+ } while (0)
+
+/* EAPOL header */
+typedef struct eapol_header {
+ struct ether_header eth; /* 802.3/Ethernet header */
+ uint8 version; /* EAPOL protocol version */
+ uint8 type; /* EAPOL type */
+ uint16 length; /* Length of body */
+ uint8 body[1]; /* Body (optional) */
+} PACKED_STRUCT eapol_header_t;
+
+/* EAP header */
+typedef struct eap_header_fmt {
+ uint8 code;
+ uint8 id;
+ uint16 len;
+ uint8 type;
+ uint8 data[1];
+} PACKED_STRUCT eap_header_fmt_t;
+
+/* WSC EAP format */
+typedef struct eap_wsc_fmt {
+ uint8 oui[3];
+ uint32 ouitype;
+ uint8 opcode;
+ uint8 flags;
+ uint8 data[1];
+} PACKED_STRUCT eap_wsc_fmt_t;
+
+/* EAPOL-Key */
+typedef struct eapol_key_hdr {
+ uint8 type; /* Key Descriptor Type */
+ uint16 key_info; /* Key Information (unaligned) */
+ uint16 key_len; /* Key Length (unaligned) */
+ uint8 replay[WPA_KEY_REPLAY_LEN]; /* Replay Counter */
+ uint8 nonce[WPA_KEY_NONCE_LEN]; /* Nonce */
+ uint8 iv[WPA_KEY_IV_LEN]; /* Key IV */
+ uint8 rsc[WPA_KEY_RSC_LEN]; /* Key RSC */
+ uint8 id[WPA_KEY_ID_LEN]; /* WPA:Key ID, 802.11i/WPA2: Reserved */
+ uint8 mic[WPA_KEY_MIC_LEN]; /* Key MIC */
+ uint16 data_len; /* Key Data Length */
+ uint8 data[WPA_KEY_DATA_LEN]; /* Key data */
+} PACKED_STRUCT eapol_key_hdr_t;
+
+/*
+ * Classify an EAPOL-Key frame as one of the WPA2 4-way / group-key
+ * handshake messages by decoding the Key Information bitfield.
+ * Returns EAPOL_OTHER when the frame is not a WPA2 key descriptor or
+ * matches none of the known flag combinations.
+ * NOTE(review): no length validation is performed on pktdata here —
+ * the caller must guarantee a complete EAPOL-Key header is present.
+ */
+msg_eapol_t
+dhd_is_4way_msg(uint8 *pktdata)
+{
+	eapol_header_t *eapol_hdr;
+	eapol_key_hdr_t *eap_key;
+	msg_eapol_t type = EAPOL_OTHER;
+	bool pair, ack, mic, kerr, req, sec, install;
+	uint16 key_info;
+
+	if (!pktdata) {
+		DHD_PKTDUMP(("%s: pktdata is NULL\n", __FUNCTION__));
+		return type;
+	}
+
+	eapol_hdr = (eapol_header_t *)pktdata;
+	eap_key = (eapol_key_hdr_t *)(eapol_hdr->body);
+	if (eap_key->type != EAPOL_WPA2_KEY) {
+		return type;
+	}
+
+	/* swap from network byte order; hton16 and ntoh16 perform the
+	 * same 16-bit swap, so this is equivalent to ntoh16()
+	 */
+	key_info = hton16(eap_key->key_info);
+	pair = !!(key_info & KEYINFO_TYPE_MASK);
+	ack = !!(key_info & KEYINFO_KEYACK_MASK);
+	mic = !!(key_info & KEYINFO_KEYMIC_MASK);
+	kerr = !!(key_info & KEYINFO_ERROR_MASK);
+	req = !!(key_info & KEYINFO_REQ_MASK);
+	sec = !!(key_info & KEYINFO_SECURE_MASK);
+	install = !!(key_info & KEYINFO_INSTALL_MASK);
+
+	/* flag patterns per the 802.11i EAPOL-Key state machine */
+	if (pair && !install && ack && !mic && !sec && !kerr && !req) {
+		type = EAPOL_4WAY_M1;
+	} else if (pair && !install && !ack && mic && !sec && !kerr && !req) {
+		type = EAPOL_4WAY_M2;
+	} else if (pair && ack && mic && sec && !kerr && !req) {
+		type = EAPOL_4WAY_M3;
+	} else if (pair && !install && !ack && mic && sec && !req && !kerr) {
+		type = EAPOL_4WAY_M4;
+	} else if (!pair && !install && ack && mic && sec && !req && !kerr) {
+		type = EAPOL_GROUPKEY_M1;
+	} else if (!pair && !install && !ack && mic && sec && !req && !kerr) {
+		type = EAPOL_GROUPKEY_M2;
+	} else {
+		type = EAPOL_OTHER;
+	}
+
+	return type;
+}
+
+/*
+ * Top-level packet dump dispatcher: inspect the EtherType and hand the
+ * frame to the protocol-specific dumpers (802.1X, DHCP/ICMP/DNS over
+ * IPv4, ARP), then always run the generic TX/RX dump.
+ * NOTE(review): eh->ether_type is converted once into ether_type but
+ * re-converted inline for the IP and ARP tests — harmless duplication.
+ */
+void
+dhd_dump_pkt(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, uint32 pktlen,
+	bool tx, uint32 *pkthash, uint16 *pktfate)
+{
+	struct ether_header *eh;
+	uint16 ether_type;
+
+	if (!pktdata || pktlen < ETHER_HDR_LEN) {
+		return;
+	}
+
+	eh = (struct ether_header *)pktdata;
+	ether_type = ntoh16(eh->ether_type);
+	if (ether_type == ETHER_TYPE_802_1X) {
+		dhd_dump_eapol_message(dhdp, ifidx, pktdata, pktlen,
+			tx, pkthash, pktfate);
+	}
+	if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
+		dhd_dhcp_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate);
+		dhd_icmp_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate);
+		dhd_dns_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate);
+	}
+	if (ntoh16(eh->ether_type) == ETHER_TYPE_ARP) {
+		dhd_arp_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate);
+	}
+	dhd_trx_pkt_dump(dhdp, ifidx, pktdata, pktlen, tx);
+}
+
+#ifdef DHD_PKTDUMP_ROAM
+/*
+ * Increment the ARP/DNS roam-window counters for one packet.
+ * Counting only happens while the window opened by
+ * dhd_dump_mod_pkt_timer() is active; TX packets are counted only when
+ * a firmware fate is available, split into acked vs. failed.
+ */
+static void
+dhd_dump_pkt_cnts_inc(dhd_pub_t *dhdp, bool tx, uint16 *pktfate, uint16 pkttype)
+{
+	pkt_cnts_log_t *pktcnts;
+	pkt_cnt_t *cnt;
+
+	if (!dhdp) {
+		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
+	if (!pktcnts) {
+		DHD_ERROR(("%s: pktcnts is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (!pktcnts->enabled || (tx && !pktfate)) {
+		return;
+	}
+
+	if (pkttype == PKT_CNT_TYPE_ARP) {
+		cnt = (pkt_cnt_t *)&pktcnts->arp_cnt;
+	} else if (pkttype == PKT_CNT_TYPE_DNS) {
+		cnt = (pkt_cnt_t *)&pktcnts->dns_cnt;
+	} else {
+		/* invalid packet type */
+		return;
+	}
+
+	if (tx) {
+		TX_FATE_ACKED(pktfate) ? cnt->tx_cnt++ : cnt->tx_err_cnt++;
+	} else {
+		cnt->rx_cnt++;
+	}
+}
+
+/*
+ * Timer callback: closes the packet-count window and prints the
+ * ARP/DNS TX/RX summary collected since the window was armed.
+ * The (unsigned long data) signature is the pre-4.15 kernel timer
+ * convention; init_timer_compat() wires it up appropriately.
+ */
+static void
+dhd_dump_pkt_timer(unsigned long data)
+{
+	dhd_pub_t *dhdp = (dhd_pub_t *)data;
+	pkt_cnts_log_t *pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
+
+	pktcnts->enabled = FALSE;
+
+	/* print out the packet counter value */
+	DHD_PKTDUMP(("============= PACKET COUNT SUMMARY ============\n"));
+	DHD_PKTDUMP(("- Reason: %s\n", pkt_cnt_msg[pktcnts->reason]));
+	DHD_PKTDUMP(("- Duration: %d msec(s)\n", PKT_CNT_TIMER_INTERNVAL_MS));
+	DHD_PKTDUMP(("- ARP PACKETS: tx_success:%d tx_fail:%d rx_cnt:%d\n",
+		pktcnts->arp_cnt.tx_cnt, pktcnts->arp_cnt.tx_err_cnt,
+		pktcnts->arp_cnt.rx_cnt));
+	DHD_PKTDUMP(("- DNS PACKETS: tx_success:%d tx_fail:%d rx_cnt:%d\n",
+		pktcnts->dns_cnt.tx_cnt, pktcnts->dns_cnt.tx_err_cnt,
+		pktcnts->dns_cnt.rx_cnt));
+	DHD_PKTDUMP(("============= END OF COUNT SUMMARY ============\n"));
+}
+
+/*
+ * (Re)arm the packet-count window for the given reason code: cancel
+ * any pending expiry, zero the ARP/DNS counters, enable counting and
+ * schedule the summary dump PKT_CNT_TIMER_INTERNVAL_MS ms from now.
+ */
+void
+dhd_dump_mod_pkt_timer(dhd_pub_t *dhdp, uint16 rsn)
+{
+	pkt_cnts_log_t *pktcnts;
+
+	if (!dhdp || !dhdp->pktcnts) {
+		DHD_ERROR(("%s: dhdp or dhdp->pktcnts is NULL\n",
+			__FUNCTION__));
+		return;
+	}
+
+	if (!PKT_CNT_RSN_VALID(rsn)) {
+		DHD_ERROR(("%s: invalid reason code %d\n",
+			__FUNCTION__, rsn));
+		return;
+	}
+
+	pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
+	if (timer_pending(&pktcnts->pktcnt_timer)) {
+		del_timer_sync(&pktcnts->pktcnt_timer);
+	}
+
+	bzero(&pktcnts->arp_cnt, sizeof(pkt_cnt_t));
+	bzero(&pktcnts->dns_cnt, sizeof(pkt_cnt_t));
+	pktcnts->reason = rsn;
+	pktcnts->enabled = TRUE;
+	mod_timer(&pktcnts->pktcnt_timer,
+		jiffies + msecs_to_jiffies(PKT_CNT_TIMER_INTERNVAL_MS));
+	DHD_PKTDUMP(("%s: Arm the pktcnt timer. reason=%d\n",
+		__FUNCTION__, rsn));
+}
+
+/*
+ * Allocate and initialize the packet-count bookkeeping attached to
+ * dhdp->pktcnts. On allocation failure dhdp->pktcnts stays NULL, which
+ * the other roam-dump routines treat as "feature disabled".
+ */
+void
+dhd_dump_pkt_init(dhd_pub_t *dhdp)
+{
+	pkt_cnts_log_t *pktcnts;
+
+	if (!dhdp) {
+		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	pktcnts = (pkt_cnts_log_t *)MALLOCZ(dhdp->osh, sizeof(pkt_cnts_log_t));
+	if (!pktcnts) {
+		DHD_ERROR(("%s: failed to allocate memory for pktcnts\n",
+			__FUNCTION__));
+		return;
+	}
+
+	/* init timers */
+	init_timer_compat(&pktcnts->pktcnt_timer, dhd_dump_pkt_timer, dhdp);
+	dhdp->pktcnts = pktcnts;
+}
+
+/*
+ * Tear down the packet-count bookkeeping: stop the timer synchronously
+ * and free the structure allocated by dhd_dump_pkt_init().
+ */
+void
+dhd_dump_pkt_deinit(dhd_pub_t *dhdp)
+{
+	pkt_cnts_log_t *pktcnts;
+
+	if (!dhdp || !dhdp->pktcnts) {
+		DHD_ERROR(("%s: dhdp or pktcnts is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
+	pktcnts->enabled = FALSE;
+	del_timer_sync(&pktcnts->pktcnt_timer);
+	MFREE(dhdp->osh, dhdp->pktcnts, sizeof(pkt_cnts_log_t));
+	dhdp->pktcnts = NULL;
+}
+
+/*
+ * Abort any active packet-count window without printing a summary:
+ * disable counting, cancel the timer and zero all counters/reason.
+ */
+void
+dhd_dump_pkt_clear(dhd_pub_t *dhdp)
+{
+	pkt_cnts_log_t *pktcnts;
+
+	if (!dhdp || !dhdp->pktcnts) {
+		DHD_ERROR(("%s: dhdp or pktcnts is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
+	pktcnts->enabled = FALSE;
+	del_timer_sync(&pktcnts->pktcnt_timer);
+	pktcnts->reason = 0;
+	bzero(&pktcnts->arp_cnt, sizeof(pkt_cnt_t));
+	bzero(&pktcnts->dns_cnt, sizeof(pkt_cnt_t));
+}
+
+/*
+ * Return TRUE while a packet-count window is active (armed and not yet
+ * expired), FALSE otherwise or when the feature was never initialized.
+ */
+bool
+dhd_dump_pkt_enabled(dhd_pub_t *dhdp)
+{
+	pkt_cnts_log_t *pktcnts;
+
+	if (!dhdp || !dhdp->pktcnts) {
+		return FALSE;
+	}
+
+	pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
+
+	return pktcnts->enabled;
+}
+#else
+/* no-op stubs when roam packet counting is compiled out */
+static INLINE void
+dhd_dump_pkt_cnts_inc(dhd_pub_t *dhdp, bool tx, uint16 *pktfate, uint16 pkttype) { }
+static INLINE bool
+dhd_dump_pkt_enabled(dhd_pub_t *dhdp) { return FALSE; }
+#endif /* DHD_PKTDUMP_ROAM */
+
+#ifdef DHD_8021X_DUMP
+/*
+ * Decode and log an EAP-WSC (WPS) message. Only complete, unfragmented
+ * frames (flags == 0) carrying the WFA vendor OUI/type are parsed; for
+ * WSC_OPCODE_MSG the message number (M1..M8) is extracted from the
+ * WSC_ATTR_MSG big-endian XTLV. Also mirrors the WPS state into
+ * dhd->conf->eapol_status for the rest of the driver.
+ */
+static void
+dhd_dump_wsc_message(dhd_pub_t *dhd, int ifidx, uint8 *pktdata,
+	uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate)
+{
+	eapol_header_t *eapol_hdr;
+	eap_header_fmt_t *eap_hdr;
+	eap_wsc_fmt_t *eap_wsc;
+	char *ifname;
+	uint16 eap_len;
+	bool cond;
+
+	if (!pktdata) {
+		DHD_ERROR(("%s: pktdata is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (pktlen < (ETHER_HDR_LEN + EAPOL_HDR_LEN)) {
+		DHD_ERROR(("%s: invalid pkt length\n", __FUNCTION__));
+		return;
+	}
+
+	eapol_hdr = (eapol_header_t *)pktdata;
+	eap_hdr = (eap_header_fmt_t *)(eapol_hdr->body);
+	if (eap_hdr->type != EAP_TYPE_EXP) {
+		return;
+	}
+
+	eap_len = ntoh16(eap_hdr->len);
+	if (eap_len < EAP_WSC_MIN_DATA_LEN) {
+		return;
+	}
+
+	/* must be the Wi-Fi Alliance expanded EAP vendor (OUI + type) */
+	eap_wsc = (eap_wsc_fmt_t *)(eap_hdr->data);
+	if (bcmp(eap_wsc->oui, (const uint8 *)WFA_VID, WFA_VID_LEN) ||
+		(ntoh32(eap_wsc->ouitype) != WFA_VTYPE)) {
+		return;
+	}
+
+	/* non-zero flags mean a fragmented WSC message; skip those */
+	if (eap_wsc->flags) {
+		return;
+	}
+
+	ifname = dhd_ifname(dhd, ifidx);
+	cond = (tx && pktfate) ? FALSE : TRUE;
+
+	if (eap_wsc->opcode == WSC_OPCODE_MSG) {
+		const uint8 *tlv_buf = (const uint8 *)(eap_wsc->data);
+		const uint8 *msg;
+		uint16 msglen;
+		uint16 wsc_data_len = (uint16)(eap_len - EAP_HDR_LEN - EAP_WSC_DATA_OFFSET);
+		bcm_xtlv_opts_t opt = BCM_XTLV_OPTION_IDBE | BCM_XTLV_OPTION_LENBE;
+
+		/* WSC attributes are big-endian id/len TLVs */
+		msg = bcm_get_data_from_xtlv_buf(tlv_buf, wsc_data_len,
+			WSC_ATTR_MSG, &msglen, opt);
+		if (msg && msglen) {
+			switch (*msg) {
+			case WSC_MSG_M1:
+				dhd->conf->eapol_status = EAPOL_STATUS_WPS_M1;
+				DHD_STATLOG_DATA(dhd, ST(WPS_M1), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, WPS M1");
+				break;
+			case WSC_MSG_M2:
+				dhd->conf->eapol_status = EAPOL_STATUS_WPS_M2;
+				DHD_STATLOG_DATA(dhd, ST(WPS_M2), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, WPS M2");
+				break;
+			case WSC_MSG_M3:
+				dhd->conf->eapol_status = EAPOL_STATUS_WPS_M3;
+				DHD_STATLOG_DATA(dhd, ST(WPS_M3), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, WPS M3");
+				break;
+			case WSC_MSG_M4:
+				dhd->conf->eapol_status = EAPOL_STATUS_WPS_M4;
+				DHD_STATLOG_DATA(dhd, ST(WPS_M4), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, WPS M4");
+				break;
+			case WSC_MSG_M5:
+				dhd->conf->eapol_status = EAPOL_STATUS_WPS_M5;
+				DHD_STATLOG_DATA(dhd, ST(WPS_M5), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, WPS M5");
+				break;
+			case WSC_MSG_M6:
+				dhd->conf->eapol_status = EAPOL_STATUS_WPS_M6;
+				DHD_STATLOG_DATA(dhd, ST(WPS_M6), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, WPS M6");
+				break;
+			case WSC_MSG_M7:
+				dhd->conf->eapol_status = EAPOL_STATUS_WPS_M7;
+				DHD_STATLOG_DATA(dhd, ST(WPS_M7), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, WPS M7");
+				break;
+			case WSC_MSG_M8:
+				dhd->conf->eapol_status = EAPOL_STATUS_WPS_M8;
+				DHD_STATLOG_DATA(dhd, ST(WPS_M8), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, WPS M8");
+				break;
+			default:
+				break;
+			}
+		}
+	} else if (eap_wsc->opcode == WSC_OPCODE_START) {
+		dhd->conf->eapol_status = EAPOL_STATUS_WSC_START;
+		DHD_STATLOG_DATA(dhd, ST(WSC_START), ifidx, tx, cond);
+		EAP_PRINT("EAP Packet, WSC Start");
+	} else if (eap_wsc->opcode == WSC_OPCODE_DONE) {
+		dhd->conf->eapol_status = EAPOL_STATUS_WSC_DONE;
+		DHD_STATLOG_DATA(dhd, ST(WSC_DONE), ifidx, tx, cond);
+		EAP_PRINT("EAP Packet, WSC Done");
+	}
+}
+
+/*
+ * Decode and log a plain EAP packet (code Request/Response/Success/
+ * Failure) by method type; expanded (WSC/WPS) packets are delegated to
+ * dhd_dump_wsc_message(). Identity exchanges also update
+ * dhd->conf->eapol_status.
+ */
+static void
+dhd_dump_eap_packet(dhd_pub_t *dhd, int ifidx, uint8 *pktdata,
+	uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate)
+{
+	eapol_header_t *eapol_hdr;
+	eap_header_fmt_t *eap_hdr;
+	char *ifname;
+	bool cond;
+
+	if (!pktdata) {
+		DHD_PKTDUMP(("%s: pktdata is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	eapol_hdr = (eapol_header_t *)pktdata;
+	eap_hdr = (eap_header_fmt_t *)(eapol_hdr->body);
+	ifname = dhd_ifname(dhd, ifidx);
+	/* cond gates statlog: FALSE only for TX frames with a known fate */
+	cond = (tx && pktfate) ? FALSE : TRUE;
+
+	if (eap_hdr->code == EAP_CODE_REQUEST ||
+		eap_hdr->code == EAP_CODE_RESPONSE) {
+		bool isreq = (eap_hdr->code == EAP_CODE_REQUEST);
+		switch (eap_hdr->type) {
+		case EAP_TYPE_IDENT:
+			if (isreq) {
+				dhd->conf->eapol_status = EAPOL_STATUS_REQID;
+				DHD_STATLOG_DATA(dhd, ST(EAP_REQ_IDENTITY), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, Request, Identity");
+			} else {
+				dhd->conf->eapol_status = EAPOL_STATUS_RSPID;
+				DHD_STATLOG_DATA(dhd, ST(EAP_RESP_IDENTITY), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, Response, Identity");
+			}
+			break;
+		case EAP_TYPE_TLS:
+			if (isreq) {
+				DHD_STATLOG_DATA(dhd, ST(EAP_REQ_TLS), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, Request, TLS");
+			} else {
+				DHD_STATLOG_DATA(dhd, ST(EAP_RESP_TLS), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, Response, TLS");
+			}
+			break;
+		case EAP_TYPE_LEAP:
+			if (isreq) {
+				DHD_STATLOG_DATA(dhd, ST(EAP_REQ_LEAP), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, Request, LEAP");
+			} else {
+				DHD_STATLOG_DATA(dhd, ST(EAP_RESP_LEAP), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, Response, LEAP");
+			}
+			break;
+		case EAP_TYPE_TTLS:
+			if (isreq) {
+				DHD_STATLOG_DATA(dhd, ST(EAP_REQ_TTLS), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, Request, TTLS");
+			} else {
+				DHD_STATLOG_DATA(dhd, ST(EAP_RESP_TTLS), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, Response, TTLS");
+			}
+			break;
+		case EAP_TYPE_AKA:
+			if (isreq) {
+				DHD_STATLOG_DATA(dhd, ST(EAP_REQ_AKA), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, Request, AKA");
+			} else {
+				DHD_STATLOG_DATA(dhd, ST(EAP_RESP_AKA), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, Response, AKA");
+			}
+			break;
+		case EAP_TYPE_PEAP:
+			if (isreq) {
+				DHD_STATLOG_DATA(dhd, ST(EAP_REQ_PEAP), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, Request, PEAP");
+			} else {
+				DHD_STATLOG_DATA(dhd, ST(EAP_RESP_PEAP), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, Response, PEAP");
+			}
+			break;
+		case EAP_TYPE_FAST:
+			if (isreq) {
+				DHD_STATLOG_DATA(dhd, ST(EAP_REQ_FAST), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, Request, FAST");
+			} else {
+				DHD_STATLOG_DATA(dhd, ST(EAP_RESP_FAST), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, Response, FAST");
+			}
+			break;
+		case EAP_TYPE_PSK:
+			if (isreq) {
+				DHD_STATLOG_DATA(dhd, ST(EAP_REQ_PSK), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, Request, PSK");
+			} else {
+				DHD_STATLOG_DATA(dhd, ST(EAP_RESP_PSK), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, Response, PSK");
+			}
+			break;
+		case EAP_TYPE_AKAP:
+			if (isreq) {
+				DHD_STATLOG_DATA(dhd, ST(EAP_REQ_AKAP), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, Request, AKAP");
+			} else {
+				DHD_STATLOG_DATA(dhd, ST(EAP_RESP_AKAP), ifidx, tx, cond);
+				EAP_PRINT("EAP Packet, Response, AKAP");
+			}
+			break;
+		case EAP_TYPE_EXP:
+			/* expanded type: WSC/WPS handling */
+			dhd_dump_wsc_message(dhd, ifidx, pktdata, pktlen, tx,
+				pkthash, pktfate);
+			break;
+		default:
+			break;
+		}
+	} else if (eap_hdr->code == EAP_CODE_SUCCESS) {
+		DHD_STATLOG_DATA(dhd, ST(EAP_SUCCESS), ifidx, tx, cond);
+		EAP_PRINT("EAP Packet, Success");
+	} else if (eap_hdr->code == EAP_CODE_FAILURE) {
+		DHD_STATLOG_DATA(dhd, ST(EAP_FAILURE), ifidx, tx, cond);
+		EAP_PRINT("EAP Packet, Failure");
+	}
+}
+
+/*
+ * Log a WPA2 EAPOL-Key handshake message (4-way M1..M4, group M1/M2),
+ * classified by dhd_is_4way_msg(), and record the handshake stage in
+ * dhd->conf->eapol_status. A successfully-fated group-key M2 on the
+ * primary interface (ifidx 0) arms the roam packet-count window.
+ */
+static void
+dhd_dump_eapol_4way_message(dhd_pub_t *dhd, int ifidx, uint8 *pktdata, bool tx,
+	uint32 *pkthash, uint16 *pktfate)
+{
+	eapol_header_t *eapol_hdr;
+	eapol_key_hdr_t *eap_key;
+	msg_eapol_t type;
+	char *ifname;
+	bool cond;
+
+	if (!pktdata) {
+		DHD_PKTDUMP(("%s: pktdata is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	type = dhd_is_4way_msg(pktdata);
+	ifname = dhd_ifname(dhd, ifidx);
+	eapol_hdr = (eapol_header_t *)pktdata;
+	eap_key = (eapol_key_hdr_t *)(eapol_hdr->body);
+	cond = (tx && pktfate) ? FALSE : TRUE;
+
+	if (eap_key->type != EAPOL_WPA2_KEY) {
+		EAP_PRINT_OTHER("NON EAPOL_WPA2_KEY");
+		return;
+	}
+
+	switch (type) {
+	case EAPOL_4WAY_M1:
+		dhd->conf->eapol_status = EAPOL_STATUS_4WAY_M1;
+		DHD_STATLOG_DATA(dhd, ST(EAPOL_M1), ifidx, tx, cond);
+		EAP_PRINT("EAPOL Packet, 4-way handshake, M1");
+		break;
+	case EAPOL_4WAY_M2:
+		dhd->conf->eapol_status = EAPOL_STATUS_4WAY_M2;
+		DHD_STATLOG_DATA(dhd, ST(EAPOL_M2), ifidx, tx, cond);
+		EAP_PRINT("EAPOL Packet, 4-way handshake, M2");
+		break;
+	case EAPOL_4WAY_M3:
+		dhd->conf->eapol_status = EAPOL_STATUS_4WAY_M3;
+		DHD_STATLOG_DATA(dhd, ST(EAPOL_M3), ifidx, tx, cond);
+		EAP_PRINT("EAPOL Packet, 4-way handshake, M3");
+		break;
+	case EAPOL_4WAY_M4:
+		dhd->conf->eapol_status = EAPOL_STATUS_4WAY_M4;
+		DHD_STATLOG_DATA(dhd, ST(EAPOL_M4), ifidx, tx, cond);
+		EAP_PRINT("EAPOL Packet, 4-way handshake, M4");
+		break;
+	case EAPOL_GROUPKEY_M1:
+		DHD_STATLOG_DATA(dhd, ST(EAPOL_GROUPKEY_M1), ifidx, tx, cond);
+		EAP_PRINT_REPLAY("EAPOL Packet, GROUP Key handshake, M1");
+		break;
+	case EAPOL_GROUPKEY_M2:
+		DHD_STATLOG_DATA(dhd, ST(EAPOL_GROUPKEY_M2), ifidx, tx, cond);
+		EAP_PRINT_REPLAY("EAPOL Packet, GROUP Key handshake, M2");
+		if (ifidx == 0 && tx && pktfate) {
+			/* group key install done: start counting ARP/DNS */
+			dhd_dump_mod_pkt_timer(dhd, PKT_CNT_RSN_GRPKEY_UP);
+		}
+		break;
+	default:
+		DHD_STATLOG_DATA(dhd, ST(8021X_OTHER), ifidx, tx, cond);
+		EAP_PRINT_OTHER("OTHER 4WAY");
+		break;
+	}
+}
+
+/*
+ * Dispatch an 802.1X frame by its EAPOL packet type: EAP packets go to
+ * dhd_dump_eap_packet(), EAPOL-Key to the 4-way dumper, EAPOL-Start
+ * and everything else is logged directly.
+ * NOTE(review): eapol_hdr is initialized at its declaration and again
+ * after the NULL check — the first assignment is a redundant (but
+ * harmless) pointer cast, never dereferenced before the check.
+ */
+void
+dhd_dump_eapol_message(dhd_pub_t *dhd, int ifidx, uint8 *pktdata,
+	uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate)
+{
+	char *ifname;
+	eapol_header_t *eapol_hdr = (eapol_header_t *)pktdata;
+	bool cond;
+
+	if (!pktdata) {
+		DHD_ERROR(("%s: pktdata is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	eapol_hdr = (eapol_header_t *)pktdata;
+	ifname = dhd_ifname(dhd, ifidx);
+	cond = (tx && pktfate) ? FALSE : TRUE;
+
+	if (eapol_hdr->type == EAP_PACKET) {
+		dhd_dump_eap_packet(dhd, ifidx, pktdata, pktlen, tx,
+			pkthash, pktfate);
+	} else if (eapol_hdr->type == EAPOL_START) {
+		DHD_STATLOG_DATA(dhd, ST(EAPOL_START), ifidx, tx, cond);
+		EAP_PRINT("EAP Packet, EAPOL-Start");
+	} else if (eapol_hdr->type == EAPOL_KEY) {
+		dhd_dump_eapol_4way_message(dhd, ifidx, pktdata, tx,
+			pkthash, pktfate);
+	} else {
+		DHD_STATLOG_DATA(dhd, ST(8021X_OTHER), ifidx, tx, cond);
+		EAP_PRINT_OTHER("OTHER 8021X");
+	}
+}
+#endif /* DHD_8021X_DUMP */
+
+#ifdef DHD_DHCP_DUMP
+/* BOOTP fixed-field sizes and DHCP message-type values (RFC 2131/2132) */
+#define BOOTP_CHADDR_LEN		16
+#define BOOTP_SNAME_LEN		64
+#define BOOTP_FILE_LEN		128
+#define BOOTP_MIN_DHCP_OPT_LEN		312
+#define BOOTP_MAGIC_COOKIE_LEN		4
+
+#define DHCP_MSGTYPE_DISCOVER		1
+#define DHCP_MSGTYPE_OFFER		2
+#define DHCP_MSGTYPE_REQUEST		3
+#define DHCP_MSGTYPE_DECLINE		4
+#define DHCP_MSGTYPE_ACK		5
+#define DHCP_MSGTYPE_NAK		6
+#define DHCP_MSGTYPE_RELEASE		7
+#define DHCP_MSGTYPE_INFORM		8
+
+/* log one DHCP packet; TX lines also carry hash/fate from firmware */
+#define DHCP_PRINT(str) \
+	do { \
+		if (tx) { \
+			DHD_PKTDUMP(("[dhd-%s] "str " %s[%s] [TX] -" TXFATE_FMT "\n", \
+				ifname, typestr, opstr, \
+				TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+		} else { \
+			DHD_PKTDUMP(("[dhd-%s] "str" %s[%s] [RX]\n", \
+				ifname, typestr, opstr)); \
+		} \
+	} while (0)
+
+/* BOOTP message layout following the IPv4 + UDP headers */
+typedef struct bootp_fmt {
+	struct ipv4_hdr iph;
+	struct bcmudp_hdr udph;
+	uint8 op;
+	uint8 htype;
+	uint8 hlen;
+	uint8 hops;
+	uint32 transaction_id;
+	uint16 secs;
+	uint16 flags;
+	uint32 client_ip;
+	uint32 assigned_ip;
+	uint32 server_ip;
+	uint32 relay_ip;
+	uint8 hw_address[BOOTP_CHADDR_LEN];
+	uint8 server_name[BOOTP_SNAME_LEN];
+	uint8 file_name[BOOTP_FILE_LEN];
+	uint8 options[BOOTP_MIN_DHCP_OPT_LEN];
+} PACKED_STRUCT bootp_fmt_t;
+
+static const uint8 bootp_magic_cookie[4] = { 99, 130, 83, 99 };
+/* indexed by BOOTP 'op' (1=BOOTREQUEST, 2=BOOTREPLY) */
+static char dhcp_ops[][10] = {
+	"NA", "REQUEST", "REPLY"
+};
+/* indexed by DHCP option-53 message type (1..8) */
+static char dhcp_types[][10] = {
+	"NA", "DISCOVER", "OFFER", "REQUEST", "DECLINE", "ACK", "NAK", "RELEASE", "INFORM"
+};
+
+static const int dhcp_types_stat[9] = {
+	ST(INVALID), ST(DHCP_DISCOVER), ST(DHCP_OFFER), ST(DHCP_REQUEST),
+	ST(DHCP_DECLINE), ST(DHCP_ACK), ST(DHCP_NAK), ST(DHCP_RELEASE),
+	ST(DHCP_INFORM)
+};
+
+/*
+ * Identify a DHCP (BOOTP) packet inside an Ethernet/IPv4/UDP frame and
+ * log its message type (option 53) and BOOTP op. Silently returns for
+ * anything that is not UDP to/from ports 67/68 or fails basic length
+ * checks.
+ * NOTE(review): opt[2] (the DHCP message type from the wire) is used
+ * unvalidated as an index into dhcp_types[9] / dhcp_types_stat[9], and
+ * b->op into dhcp_ops[3] — a malformed packet with type > 8 or op > 2
+ * would cause an out-of-bounds read. Should be range-checked.
+ */
+void
+dhd_dhcp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+	uint32 *pkthash, uint16 *pktfate)
+{
+	bootp_fmt_t *b = (bootp_fmt_t *)&pktdata[ETHER_HDR_LEN];
+	struct ipv4_hdr *iph = &b->iph;
+	uint8 *ptr, *opt, *end = (uint8 *) b + ntohs(b->iph.tot_len);
+	int dhcp_type = 0, len, opt_len;
+	char *ifname = NULL, *typestr = NULL, *opstr = NULL;
+	bool cond;
+
+	/* check IP header */
+	if ((IPV4_HLEN(iph) < IPV4_HLEN_MIN) ||
+		IP_VER(iph) != IP_VER_4 ||
+		IPV4_PROT(iph) != IP_PROT_UDP) {
+		return;
+	}
+
+	/* check UDP port for bootp (67, 68) */
+	if (b->udph.src_port != htons(DHCP_PORT_SERVER) &&
+		b->udph.src_port != htons(DHCP_PORT_CLIENT) &&
+		b->udph.dst_port != htons(DHCP_PORT_SERVER) &&
+		b->udph.dst_port != htons(DHCP_PORT_CLIENT)) {
+		return;
+	}
+
+	/* check header length */
+	if (ntohs(iph->tot_len) < ntohs(b->udph.len) + sizeof(struct bcmudp_hdr)) {
+		return;
+	}
+
+	ifname = dhd_ifname(dhdp, ifidx);
+	cond = (tx && pktfate) ? FALSE : TRUE;
+	len = ntohs(b->udph.len) - sizeof(struct bcmudp_hdr);
+	opt_len = len - (sizeof(*b) - sizeof(struct ipv4_hdr) -
+		sizeof(struct bcmudp_hdr) - sizeof(b->options));
+
+	/* parse bootp options */
+	if (opt_len >= BOOTP_MAGIC_COOKIE_LEN &&
+		!memcmp(b->options, bootp_magic_cookie, BOOTP_MAGIC_COOKIE_LEN)) {
+		ptr = &b->options[BOOTP_MAGIC_COOKIE_LEN];
+		/* walk the option TLVs (code, length, value...) until 0xff end marker */
+		while (ptr < end && *ptr != 0xff) {
+			opt = ptr++;
+			if (*opt == 0) {
+				continue;
+			}
+			ptr += *ptr + 1;
+			if (ptr >= end) {
+				break;
+			}
+			if (*opt == DHCP_OPT_MSGTYPE) {
+				if (opt[1]) {
+					dhcp_type = opt[2];
+					typestr = dhcp_types[dhcp_type];
+					opstr = dhcp_ops[b->op];
+					DHD_STATLOG_DATA(dhdp, dhcp_types_stat[dhcp_type],
+						ifidx, tx, cond);
+					DHCP_PRINT("DHCP");
+					break;
+				}
+			}
+		}
+	}
+}
+#endif /* DHD_DHCP_DUMP */
+
+#ifdef DHD_ICMP_DUMP
+#define ICMP_TYPE_DEST_UNREACH	3
+#define ICMP_ECHO_SEQ_OFFSET	6
+/* 16-bit sequence number of an echo request/reply; the read may be
+ * unaligned since it points into the raw frame
+ */
+#define ICMP_ECHO_SEQ(h) (*(uint16 *)((uint8 *)(h) + (ICMP_ECHO_SEQ_OFFSET)))
+#define ICMP_PING_PRINT(str) \
+	do { \
+		if (tx) { \
+			DHD_PKTDUMP_MEM(("[dhd-%s] "str " [TX] : %s(%s) %s %s(%s) SEQNUM=%d" \
+				TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+				tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, seqnum, \
+				TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+		} else { \
+			DHD_PKTDUMP_MEM(("[dhd-%s] "str " [RX] : %s(%s) %s %s(%s) SEQNUM=%d\n", \
+				ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+				tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, seqnum)); \
+		} \
+	} while (0)
+
+#define ICMP_PRINT(str) \
+	do { \
+		if (tx) { \
+			DHD_PKTDUMP_MEM(("[dhd-%s] "str " [TX] : %s(%s) %s %s(%s) TYPE=%d, CODE=%d" \
+				TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+				tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, type, code, \
+				TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+		} else { \
+			DHD_PKTDUMP_MEM(("[dhd-%s] "str " [RX] : %s(%s) %s %s(%s) TYPE=%d," \
+				" CODE=%d\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+				tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, type, code)); \
+		} \
+	} while (0)
+
+/*
+ * Identify an ICMP packet in an Ethernet/IPv4 frame and log echo
+ * request/reply (with sequence number), destination-unreachable, or a
+ * generic type/code line. Source/destination IPs and MACs are rendered
+ * into stack buffers for the print macros above.
+ */
+void
+dhd_icmp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+	uint32 *pkthash, uint16 *pktfate)
+{
+	uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN];
+	struct ipv4_hdr *iph = (struct ipv4_hdr *)pkt;
+	struct bcmicmp_hdr *icmph;
+	char *ifname;
+	bool cond;
+	uint16 seqnum, type, code;
+	char sabuf[20]="", dabuf[20]="";
+	char seabuf[ETHER_ADDR_STR_LEN]="";
+	char deabuf[ETHER_ADDR_STR_LEN]="";
+
+	/* check IP header */
+	if ((IPV4_HLEN(iph) < IPV4_HLEN_MIN) ||
+		IP_VER(iph) != IP_VER_4 ||
+		IPV4_PROT(iph) != IP_PROT_ICMP) {
+		return;
+	}
+
+	/* check header length */
+	if (ntohs(iph->tot_len) - IPV4_HLEN(iph) < sizeof(struct bcmicmp_hdr)) {
+		return;
+	}
+
+	ifname = dhd_ifname(dhdp, ifidx);
+	cond = (tx && pktfate) ? FALSE : TRUE;
+	icmph = (struct bcmicmp_hdr *)((uint8 *)pkt + sizeof(struct ipv4_hdr));
+	seqnum = 0;
+	type = icmph->type;
+	code = icmph->code;
+	bcm_ip_ntoa((struct ipv4_addr *)iph->src_ip, sabuf);
+	bcm_ip_ntoa((struct ipv4_addr *)iph->dst_ip, dabuf);
+	/* dest MAC is at pktdata[0], source MAC at pktdata[6] */
+	bcm_ether_ntoa((struct ether_addr *)pktdata, deabuf);
+	bcm_ether_ntoa((struct ether_addr *)(pktdata+6), seabuf);
+	if (type == ICMP_TYPE_ECHO_REQUEST) {
+		seqnum = ntoh16(ICMP_ECHO_SEQ(icmph));
+		DHD_STATLOG_DATA(dhdp, ST(ICMP_PING_REQ), ifidx, tx, cond);
+		ICMP_PING_PRINT("PING REQUEST");
+	} else if (type == ICMP_TYPE_ECHO_REPLY) {
+		seqnum = ntoh16(ICMP_ECHO_SEQ(icmph));
+		DHD_STATLOG_DATA(dhdp, ST(ICMP_PING_RESP), ifidx, tx, cond);
+		ICMP_PING_PRINT("PING REPLY ");
+	} else if (type == ICMP_TYPE_DEST_UNREACH) {
+		DHD_STATLOG_DATA(dhdp, ST(ICMP_DEST_UNREACH), ifidx, tx, cond);
+		ICMP_PRINT("ICMP DEST UNREACH");
+	} else {
+		DHD_STATLOG_DATA(dhdp, ST(ICMP_OTHER), ifidx, tx, cond);
+		ICMP_PRINT("ICMP OTHER");
+	}
+}
+#endif /* DHD_ICMP_DUMP */
+
+#ifdef DHD_ARP_DUMP
+/* log one ARP request/reply; failed-fate TX frames during an active
+ * count window go to the kernel log, everything else to the mem ring.
+ * NOTE(review): the stray '\' after "while (0)" below continues the
+ * macro onto the following blank line — harmless today, but fragile if
+ * code is ever added directly after it.
+ */
+#define ARP_PRINT(str) \
+	do { \
+		if (tx) { \
+			if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \
+				DHD_PKTDUMP(("[dhd-%s] "str " [TX] : %s(%s) %s %s(%s)" TXFATE_FMT "\n", \
+					ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+					tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, \
+					TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+			} else { \
+				DHD_PKTDUMP_MEM(("[dhd-%s] "str " [TX] : %s(%s) %s %s(%s)" TXFATE_FMT "\n", \
+					ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+					tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, \
+					TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+			} \
+		} else { \
+			DHD_PKTDUMP_MEM(("[dhd-%s] "str " [RX] : %s(%s) %s %s(%s)\n", \
+				ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
+				tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf)); \
+		} \
+	} while (0) \
+
+#define ARP_PRINT_OTHER(str) \
+	do { \
+		if (tx) { \
+			if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \
+				DHD_PKTDUMP(("[dhd-%s] "str " [TX] op_code=%d" \
+					TXFATE_FMT "\n", ifname, opcode, \
+					TX_PKTHASH(pkthash), \
+					TX_FATE(pktfate))); \
+			} else { \
+				DHD_PKTDUMP_MEM(("[dhd-%s] "str " [TX] op_code=%d" \
+					TXFATE_FMT "\n", ifname, opcode, \
+					TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+			} \
+		} else { \
+			DHD_PKTDUMP_MEM(("[dhd-%s] "str " [RX] op_code=%d\n", \
+				ifname, opcode)); \
+		} \
+	} while (0)
+
+/*
+ * Identify an Ethernet/IPv4 ARP packet and log its operation (request,
+ * reply, or other). Packets on the primary interface also feed the
+ * roam-window ARP counters via dhd_dump_pkt_cnts_inc().
+ */
+void
+dhd_arp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+	uint32 *pkthash, uint16 *pktfate)
+{
+	uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN];
+	struct bcmarp *arph = (struct bcmarp *)pkt;
+	char *ifname;
+	uint16 opcode;
+	bool cond, dump_enabled;
+	char sabuf[20]="", dabuf[20]="";
+	char seabuf[ETHER_ADDR_STR_LEN]="";
+	char deabuf[ETHER_ADDR_STR_LEN]="";
+
+	/* validation check */
+	if (arph->htype != hton16(HTYPE_ETHERNET) ||
+		arph->hlen != ETHER_ADDR_LEN ||
+		arph->plen != 4) {
+		return;
+	}
+
+	ifname = dhd_ifname(dhdp, ifidx);
+	opcode = ntoh16(arph->oper);
+	cond = (tx && pktfate) ? FALSE : TRUE;
+	dump_enabled = dhd_dump_pkt_enabled(dhdp);
+	bcm_ip_ntoa((struct ipv4_addr *)arph->src_ip, sabuf);
+	bcm_ip_ntoa((struct ipv4_addr *)arph->dst_ip, dabuf);
+	bcm_ether_ntoa((struct ether_addr *)arph->dst_eth, deabuf);
+	bcm_ether_ntoa((struct ether_addr *)arph->src_eth, seabuf);
+	if (opcode == ARP_OPC_REQUEST) {
+		DHD_STATLOG_DATA(dhdp, ST(ARP_REQ), ifidx, tx, cond);
+		ARP_PRINT("ARP REQUEST ");
+	} else if (opcode == ARP_OPC_REPLY) {
+		DHD_STATLOG_DATA(dhdp, ST(ARP_RESP), ifidx, tx, cond);
+		ARP_PRINT("ARP RESPONSE");
+	} else {
+		ARP_PRINT_OTHER("ARP OTHER");
+	}
+
+	if (ifidx == 0) {
+		dhd_dump_pkt_cnts_inc(dhdp, tx, pktfate, PKT_CNT_TYPE_ARP);
+	}
+}
+#endif /* DHD_ARP_DUMP */
+
+#ifdef DHD_DNS_DUMP
+/* DNS message header (RFC 1035) following the IPv4 + UDP headers */
+typedef struct dns_fmt {
+	struct ipv4_hdr iph;
+	struct bcmudp_hdr udph;
+	uint16 id;
+	uint16 flags;
+	uint16 qdcount;
+	uint16 ancount;
+	uint16 nscount;
+	uint16 arcount;
+} PACKED_STRUCT dns_fmt_t;
+
+/* bit positions within the (host-order) DNS flags word */
+#define UDP_PORT_DNS		53
+#define DNS_QR_LOC		15
+#define DNS_OPCODE_LOC		11
+#define DNS_RCODE_LOC		0
+#define DNS_QR_MASK		((0x1) << (DNS_QR_LOC))
+#define DNS_OPCODE_MASK	((0xF) << (DNS_OPCODE_LOC))
+#define DNS_RCODE_MASK		((0xF) << (DNS_RCODE_LOC))
+#define GET_DNS_QR(flags)	(((flags) & (DNS_QR_MASK)) >> (DNS_QR_LOC))
+#define GET_DNS_OPCODE(flags)	(((flags) & (DNS_OPCODE_MASK)) >> (DNS_OPCODE_LOC))
+#define GET_DNS_RCODE(flags)	(((flags) & (DNS_RCODE_MASK)) >> (DNS_RCODE_LOC))
+#define DNS_UNASSIGNED_OPCODE(flags) ((GET_DNS_OPCODE(flags) >= (6)))
+
+/* indexed by DNS opcode 0..5; anything >= 6 is "UNASSIGNED" */
+static const char dns_opcode_types[][11] = {
+	"QUERY", "IQUERY", "STATUS", "UNASSIGNED", "NOTIFY", "UPDATE"
+};
+
+#define DNSOPCODE(op)	\
+	(DNS_UNASSIGNED_OPCODE(flags) ? "UNASSIGNED" : dns_opcode_types[op])
+
+#define DNS_REQ_PRINT(str) \
+	do { \
+		if (tx) { \
+			if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \
+				DHD_PKTDUMP(("[dhd-%s] "str " [TX] ID:0x%04X OPCODE:%s" \
+					TXFATE_FMT "\n", ifname, id, DNSOPCODE(opcode), \
+					TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+			} else { \
+				DHD_PKTDUMP_MEM(("[dhd-%s] "str " [TX] ID:0x%04X OPCODE:%s" \
+					TXFATE_FMT "\n", ifname, id, DNSOPCODE(opcode), \
+					TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
+			} \
+		} else { \
+			DHD_PKTDUMP_MEM(("[dhd-%s] "str " [RX] ID:0x%04X OPCODE:%s\n", \
+				ifname, id, DNSOPCODE(opcode))); \
+		} \
+	} while (0)
+
+#define DNS_RESP_PRINT(str) \
+	do { \
+		if (tx) { \
+			if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \
+				DHD_PKTDUMP(("[dhd-%s] "str " [TX] ID:0x%04X OPCODE:%s RCODE:%d" \
+					TXFATE_FMT "\n", ifname, id, DNSOPCODE(opcode), \
+					GET_DNS_RCODE(flags), TX_PKTHASH(pkthash), \
+					TX_FATE(pktfate))); \
+			} else { \
+				DHD_PKTDUMP_MEM(("[dhd-%s] "str " [TX] ID:0x%04X OPCODE:%s RCODE:%d" \
+					TXFATE_FMT "\n", ifname, id, DNSOPCODE(opcode), \
+					GET_DNS_RCODE(flags), TX_PKTHASH(pkthash), \
+					TX_FATE(pktfate))); \
+			} \
+		} else { \
+			DHD_PKTDUMP_MEM(("[dhd-%s] "str " [RX] ID:0x%04X OPCODE:%s RCODE:%d\n", \
+				ifname, id, DNSOPCODE(opcode), GET_DNS_RCODE(flags))); \
+		} \
+	} while (0)
+
+/*
+ * Identify a DNS packet (UDP port 53) in an Ethernet/IPv4 frame and
+ * log it as a query or response with its transaction ID, opcode and
+ * (for responses) RCODE. Primary-interface packets also feed the
+ * roam-window DNS counters.
+ */
+void
+dhd_dns_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+	uint32 *pkthash, uint16 *pktfate)
+{
+	dns_fmt_t *dnsh = (dns_fmt_t *)&pktdata[ETHER_HDR_LEN];
+	struct ipv4_hdr *iph = &dnsh->iph;
+	uint16 flags, opcode, id;
+	char *ifname;
+	bool cond, dump_enabled;
+
+	/* check IP header */
+	if ((IPV4_HLEN(iph) < IPV4_HLEN_MIN) ||
+		IP_VER(iph) != IP_VER_4 ||
+		IPV4_PROT(iph) != IP_PROT_UDP) {
+		return;
+	}
+
+	/* check UDP port for DNS */
+	if (dnsh->udph.src_port != hton16(UDP_PORT_DNS) &&
+		dnsh->udph.dst_port != hton16(UDP_PORT_DNS)) {
+		return;
+	}
+
+	/* check header length */
+	if (ntoh16(iph->tot_len) < (ntoh16(dnsh->udph.len) +
+		sizeof(struct bcmudp_hdr))) {
+		return;
+	}
+
+	ifname = dhd_ifname(dhdp, ifidx);
+	cond = (tx && pktfate) ? FALSE : TRUE;
+	dump_enabled = dhd_dump_pkt_enabled(dhdp);
+	/* hton16 here is the same 16-bit swap as ntoh16 (parsing wire data) */
+	flags = hton16(dnsh->flags);
+	opcode = GET_DNS_OPCODE(flags);
+	id = hton16(dnsh->id);
+	if (GET_DNS_QR(flags)) {
+		/* Response */
+		DHD_STATLOG_DATA(dhdp, ST(DNS_RESP), ifidx, tx, cond);
+		DNS_RESP_PRINT("DNS RESPONSE");
+	} else {
+		/* Request */
+		DHD_STATLOG_DATA(dhdp, ST(DNS_QUERY), ifidx, tx, cond);
+		DNS_REQ_PRINT("DNS REQUEST");
+	}
+
+	if (ifidx == 0) {
+		dhd_dump_pkt_cnts_inc(dhdp, tx, pktfate, PKT_CNT_TYPE_DNS);
+	}
+}
+#endif /* DHD_DNS_DUMP */
+
+#ifdef DHD_TRX_DUMP
+/*
+ * Generic TX/RX dump: classify the frame by EtherType into a label and
+ * print a one-line broadcast/multicast/unicast summary. Broadcom BRCM
+ * event frames get a plain line without the cast classification.
+ * With DHD_RX_FULL_DUMP the whole frame is hex-dumped as well.
+ * (hton16 on ether_type is the same 16-bit swap as ntoh16.)
+ */
+void
+dhd_trx_pkt_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, uint32 pktlen, bool tx)
+{
+	struct ether_header *eh;
+	uint16 protocol;
+	char *pkttype = "UNKNOWN";
+
+	if (!dhdp) {
+		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (!pktdata) {
+		DHD_ERROR(("%s: pktdata is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	eh = (struct ether_header *)pktdata;
+	protocol = hton16(eh->ether_type);
+	BCM_REFERENCE(pktlen);
+
+	switch (protocol) {
+	case ETHER_TYPE_IP:
+		pkttype = "IP";
+		break;
+	case ETHER_TYPE_ARP:
+		pkttype = "ARP";
+		break;
+	case ETHER_TYPE_BRCM:
+		pkttype = "BRCM";
+		break;
+	case ETHER_TYPE_802_1X:
+		pkttype = "802.1X";
+		break;
+	case ETHER_TYPE_WAI:
+		pkttype = "WAPI";
+		break;
+	default:
+		break;
+	}
+
+	if (protocol != ETHER_TYPE_BRCM) {
+		/* pktdata[0] is the first byte of the destination MAC:
+		 * 0xFF => broadcast, low bit set => multicast
+		 */
+		if (pktdata[0] == 0xFF) {
+			DHD_PKTDUMP(("[dhd-%s] %s BROADCAST DUMP - %s\n",
+				dhd_ifname(dhdp, ifidx), tx?"TX":"RX", pkttype));
+		} else if (pktdata[0] & 1) {
+			DHD_PKTDUMP(("[dhd-%s] %s MULTICAST DUMP " MACDBG " - %s\n",
+				dhd_ifname(dhdp, ifidx), tx?"TX":"RX", MAC2STRDBG(pktdata), pkttype));
+		} else {
+			DHD_PKTDUMP(("[dhd-%s] %s DUMP - %s\n",
+				dhd_ifname(dhdp, ifidx), tx?"TX":"RX", pkttype));
+		}
+#ifdef DHD_RX_FULL_DUMP
+		prhex("Data", pktdata, pktlen);
+#endif /* DHD_RX_FULL_DUMP */
+	}
+	else {
+		DHD_PKTDUMP(("[dhd-%s] %s DUMP - %s\n",
+			dhd_ifname(dhdp, ifidx), tx?"TX":"RX", pkttype));
+	}
+}
+#endif /* DHD_TRX_DUMP */
--- /dev/null
+/*
+ * Header file for the Packet dump helper functions
+ *
+ * Copyright (C) 1999-2019, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_linux_pktdump.h 820929 2019-05-21 14:09:11Z $
+ */
+
+#ifndef __DHD_LINUX_PKTDUMP_H_
+#define __DHD_LINUX_PKTDUMP_H_
+
+#include <typedefs.h>
+#include <dhd.h>
+
+/* EAPOL handshake message classification returned by dhd_is_4way_msg() */
+typedef enum {
+	EAPOL_OTHER = 0,
+	EAPOL_4WAY_M1,
+	EAPOL_4WAY_M2,
+	EAPOL_4WAY_M3,
+	EAPOL_4WAY_M4,
+	EAPOL_GROUPKEY_M1,
+	EAPOL_GROUPKEY_M2
+} msg_eapol_t;
+
+/* reason codes for arming the roam packet-count window */
+typedef enum pkt_cnt_rsn {
+	PKT_CNT_RSN_INVALID = 0,
+	PKT_CNT_RSN_ROAM = 1,
+	PKT_CNT_RSN_GRPKEY_UP = 2,
+	PKT_CNT_RSN_CONNECT = 3,
+	PKT_CNT_RSN_MAX = 4
+} pkt_cnt_rsn_t;
+
+extern msg_eapol_t dhd_is_4way_msg(uint8 *pktdata);
+extern void dhd_dump_pkt(dhd_pub_t *dhd, int ifidx, uint8 *pktdata,
+ uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate);
+
+#ifdef DHD_PKTDUMP_ROAM
+extern void dhd_dump_mod_pkt_timer(dhd_pub_t *dhdp, uint16 rsn);
+extern void dhd_dump_pkt_init(dhd_pub_t *dhdp);
+extern void dhd_dump_pkt_deinit(dhd_pub_t *dhdp);
+extern void dhd_dump_pkt_clear(dhd_pub_t *dhdp);
+#else
+static INLINE void dhd_dump_mod_pkt_timer(dhd_pub_t *dhdp, uint16 rsn) { }
+static INLINE void dhd_dump_pkt_init(dhd_pub_t *dhdp) { }
+static INLINE void dhd_dump_pkt_deinit(dhd_pub_t *dhdp) { }
+static INLINE void dhd_dump_pkt_clear(dhd_pub_t *dhdp) { }
+#endif /* DHD_PKTDUMP_ROAM */
+
+/* Rx packet dump */
+#ifdef DHD_TRX_DUMP
+extern void dhd_trx_pkt_dump(dhd_pub_t *dhdp, int ifidx,
+ uint8 *pktdata, uint32 pktlen, bool tx);
+#else
+static INLINE void dhd_trx_pkt_dump(dhd_pub_t *dhdp, int ifidx,
+ uint8 *pktdata, uint32 pktlen, bool tx) { }
+#endif /* DHD_TRX_DUMP */
+
+/* DHCP packet dump */
+#ifdef DHD_DHCP_DUMP
+extern void dhd_dhcp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+ uint32 *pkthash, uint16 *pktfate);
+#else
+static INLINE void dhd_dhcp_dump(dhd_pub_t *dhdp, int ifidx,
+ uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { }
+#endif /* DHD_DHCP_DUMP */
+
+/* DNS packet dump */
+#ifdef DHD_DNS_DUMP
+extern void dhd_dns_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+ uint32 *pkthash, uint16 *pktfate);
+#else
+static INLINE void dhd_dns_dump(dhd_pub_t *dhdp, int ifidx,
+ uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { }
+#endif /* DHD_DNS_DUMP */
+
+/* ICMP packet dump */
+#ifdef DHD_ICMP_DUMP
+extern void dhd_icmp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+ uint32 *pkthash, uint16 *pktfate);
+#else
+static INLINE void dhd_icmp_dump(dhd_pub_t *dhdp, int ifidx,
+ uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { }
+#endif /* DHD_ICMP_DUMP */
+
+/* ARP packet dump */
+#ifdef DHD_ARP_DUMP
+extern void dhd_arp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
+ uint32 *pkthash, uint16 *pktfate);
+#else
+static INLINE void dhd_arp_dump(dhd_pub_t *dhdp, int ifidx,
+ uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { }
+#endif /* DHD_ARP_DUMP */
+
+/* 802.1X packet dump */
+#ifdef DHD_8021X_DUMP
+extern void dhd_dump_eapol_message(dhd_pub_t *dhd, int ifidx,
+ uint8 *pktdata, uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate);
+#else
+static INLINE void dhd_dump_eapol_message(dhd_pub_t *dhd, int ifidx,
+ uint8 *pktdata, uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate) { }
+#endif /* DHD_8021X_DUMP */
+
+#endif /* __DHD_LINUX_PKTDUMP_H_ */
/*
* Linux platform device for DHD WLAN adapter
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_linux_platdev.c 765775 2018-06-05 10:10:56Z $
+ * $Id: dhd_linux_platdev.c 805835 2019-02-20 12:35:44Z $
*/
#include <typedefs.h>
#include <linux/kernel.h>
}
plat_data = adapter->wifi_plat_data;
- DHD_ERROR(("%s = %d\n", __FUNCTION__, on));
+ DHD_ERROR(("%s = %d, delay: %lu msec\n", __FUNCTION__, on, msec));
if (plat_data->set_power) {
#ifdef ENABLE_4335BT_WAR
if (on) {
DHD_TRACE(("%s\n", __FUNCTION__));
if (plat_data->get_country_code) {
-#ifdef CUSTOM_COUNTRY_CODE
+#ifdef CUSTOM_FORCE_NODFS_FLAG
return plat_data->get_country_code(ccode, flags);
#else
return plat_data->get_country_code(ccode);
-#endif /* CUSTOM_COUNTRY_CODE */
+#endif /* CUSTOM_FORCE_NODFS_FLAG */
}
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */
wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
wifi_platform_bus_enumerate(adapter, FALSE);
}
+#else
+ /* x86 bring-up PC needs no power-up operations */
+ err = dhd_bus_register();
#endif // endif
return err;
#ifdef BCMDBUS
static int dhd_wifi_platform_load_usb(void)
{
+ int err = 0;
+#if !defined(DHD_PRELOAD)
wifi_adapter_info_t *adapter;
s32 timeout = -1;
int i;
- int err = 0;
enum wifi_adapter_status wait_status;
+#endif
err = dhd_bus_register();
if (err) {
goto exit;
}
+#if !defined(DHD_PRELOAD)
/* power up all adapters */
for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
adapter = &dhd_wifi_platdata->adapters[i];
goto fail;
}
}
+#endif
exit:
return err;
+#if !defined(DHD_PRELOAD)
fail:
dhd_bus_unregister();
/* power down all adapters */
}
return err;
+#endif
}
#else /* BCMDBUS */
static int dhd_wifi_platform_load_usb(void)
--- /dev/null
+/*
+ * DHD Linux header file - contains private structure definition of the Linux specific layer
+ *
+ * Copyright (C) 1999-2019, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_linux_priv.h 815919 2019-04-22 09:06:50Z $
+ */
+
+#ifndef __DHD_LINUX_PRIV_H__
+#define __DHD_LINUX_PRIV_H__
+
+#include <osl.h>
+
+#ifdef SHOW_LOGTRACE
+#include <linux/syscalls.h>
+#include <event_log.h>
+#endif /* SHOW_LOGTRACE */
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif /* CONFIG COMPAT */
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_debug.h>
+#include <dhd_linux.h>
+#include <dhd_bus.h>
+
+#ifdef PCIE_FULL_DONGLE
+#include <bcmmsgbuf.h>
+#include <dhd_flowring.h>
+#endif /* PCIE_FULL_DONGLE */
+
+/*
+ * Do not include this header except from dhd_linux.c and dhd_linux_sysfs.c.
+ * Local private structure (extension of pub)
+ */
+typedef struct dhd_info {
+#if defined(WL_WIRELESS_EXT)
+ wl_iw_t iw; /* wireless extensions state (must be first) */
+#endif /* defined(WL_WIRELESS_EXT) */
+ dhd_pub_t pub;
+ /* for supporting multiple interfaces.
+ * static_ifs hold the net ifaces without valid FW IF
+ */
+ dhd_if_t *iflist[DHD_MAX_IFS + DHD_MAX_STATIC_IFS];
+
+ wifi_adapter_info_t *adapter; /* adapter information, interrupt, fw path etc. */
+ char fw_path[PATH_MAX]; /* path to firmware image */
+ char nv_path[PATH_MAX]; /* path to nvram vars file */
+ char clm_path[PATH_MAX]; /* path to clm vars file */
+ char conf_path[PATH_MAX]; /* path to config vars file */
+#ifdef DHD_UCODE_DOWNLOAD
+ char uc_path[PATH_MAX]; /* path to ucode image */
+#endif /* DHD_UCODE_DOWNLOAD */
+
+ /* serialize dhd iovars */
+ struct mutex dhd_iovar_mutex;
+
+ struct semaphore proto_sem;
+#ifdef PROP_TXSTATUS
+ spinlock_t wlfc_spinlock;
+
+#ifdef BCMDBUS
+ ulong wlfc_lock_flags;
+ ulong wlfc_pub_lock_flags;
+#endif /* BCMDBUS */
+#endif /* PROP_TXSTATUS */
+ wait_queue_head_t ioctl_resp_wait;
+ wait_queue_head_t d3ack_wait;
+ wait_queue_head_t dhd_bus_busy_state_wait;
+ wait_queue_head_t dmaxfer_wait;
+ uint32 default_wd_interval;
+
+ timer_list_compat_t timer;
+ bool wd_timer_valid;
+ struct tasklet_struct tasklet;
+ spinlock_t sdlock;
+ spinlock_t txqlock;
+ spinlock_t dhd_lock;
+#ifdef BCMDBUS
+ ulong txqlock_flags;
+#else
+
+ struct semaphore sdsem;
+ tsk_ctl_t thr_dpc_ctl;
+ tsk_ctl_t thr_wdt_ctl;
+#endif /* BCMDBUS */
+
+ tsk_ctl_t thr_rxf_ctl;
+ spinlock_t rxf_lock;
+ bool rxthread_enabled;
+
+ /* Wakelocks */
+#if defined(CONFIG_HAS_WAKELOCK)
+ struct wake_lock wl_wifi; /* Wifi wakelock */
+ struct wake_lock wl_rxwake; /* Wifi rx wakelock */
+ struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
+ struct wake_lock wl_wdwake; /* Wifi wd wakelock */
+ struct wake_lock wl_evtwake; /* Wifi event wakelock */
+ struct wake_lock wl_pmwake; /* Wifi pm handler wakelock */
+ struct wake_lock wl_txflwake; /* Wifi tx flow wakelock */
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ struct wake_lock wl_intrwake; /* Host wakeup wakelock */
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+ struct wake_lock wl_scanwake; /* Wifi scan wakelock */
+#endif /* DHD_USE_SCAN_WAKELOCK */
+#endif /* CONFIG_HAS_WAKELOCK */
+
+ /* net_device interface lock, prevent race conditions among net_dev interface
+ * calls and wifi_on or wifi_off
+ */
+ struct mutex dhd_net_if_mutex;
+ struct mutex dhd_suspend_mutex;
+#if defined(PKT_FILTER_SUPPORT) && defined(APF)
+ struct mutex dhd_apf_mutex;
+#endif /* PKT_FILTER_SUPPORT && APF */
+ spinlock_t wakelock_spinlock;
+ spinlock_t wakelock_evt_spinlock;
+ uint32 wakelock_counter;
+ int wakelock_wd_counter;
+ int wakelock_rx_timeout_enable;
+ int wakelock_ctrl_timeout_enable;
+ bool waive_wakelock;
+ uint32 wakelock_before_waive;
+
+ /* Thread to issue ioctl for multicast */
+ wait_queue_head_t ctrl_wait;
+ atomic_t pend_8021x_cnt;
+ dhd_attach_states_t dhd_state;
+#ifdef SHOW_LOGTRACE
+ dhd_event_log_t event_data;
+#endif /* SHOW_LOGTRACE */
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+ struct early_suspend early_suspend;
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ u32 pend_ipaddr;
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef DHDTCPACK_SUPPRESS
+ spinlock_t tcpack_lock;
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef FIX_CPU_MIN_CLOCK
+ bool cpufreq_fix_status;
+ struct mutex cpufreq_fix;
+ struct pm_qos_request dhd_cpu_qos;
+#ifdef FIX_BUS_MIN_CLOCK
+ struct pm_qos_request dhd_bus_qos;
+#endif /* FIX_BUS_MIN_CLOCK */
+#endif /* FIX_CPU_MIN_CLOCK */
+ void *dhd_deferred_wq;
+#ifdef DEBUG_CPU_FREQ
+ struct notifier_block freq_trans;
+ int __percpu *new_freq;
+#endif // endif
+ unsigned int unit;
+ struct notifier_block pm_notifier;
+#ifdef DHD_PSTA
+ uint32 psta_mode; /* PSTA or PSR */
+#endif /* DHD_PSTA */
+#ifdef DHD_WET
+ uint32 wet_mode;
+#endif /* DHD_WET */
+#ifdef DHD_DEBUG
+ dhd_dump_t *dump;
+ struct timer_list join_timer;
+ u32 join_timeout_val;
+ bool join_timer_active;
+ uint scan_time_count;
+ struct timer_list scan_timer;
+ bool scan_timer_active;
+#endif // endif
+#if defined(DHD_LB)
+ /* CPU Load Balance dynamic CPU selection */
+
+ /* Variable that tracks the current CPUs available for candidacy */
+ cpumask_var_t cpumask_curr_avail;
+
+ /* Primary and secondary CPU mask */
+ cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
+ cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */
+
+ struct notifier_block cpu_notifier;
+
+ /* Tasklet to handle Tx Completion packet freeing */
+ struct tasklet_struct tx_compl_tasklet;
+ atomic_t tx_compl_cpu;
+
+ /* Tasklet to handle RxBuf Post during Rx completion */
+ struct tasklet_struct rx_compl_tasklet;
+ atomic_t rx_compl_cpu;
+
+ /* Napi struct for handling rx packet sendup. Packets are removed from
+ * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
+ * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
+ * to run to rx_napi_cpu.
+ */
+ struct sk_buff_head rx_pend_queue ____cacheline_aligned;
+ struct sk_buff_head rx_napi_queue ____cacheline_aligned;
+ struct napi_struct rx_napi_struct ____cacheline_aligned;
+ atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */
+ struct net_device *rx_napi_netdev; /* netdev of primary interface */
+
+ struct work_struct rx_napi_dispatcher_work;
+ struct work_struct tx_compl_dispatcher_work;
+ struct work_struct tx_dispatcher_work;
+ struct work_struct rx_compl_dispatcher_work;
+
+ /* Number of times DPC Tasklet ran */
+ uint32 dhd_dpc_cnt;
+ /* Number of times NAPI processing got scheduled */
+ uint32 napi_sched_cnt;
+ /* Number of times NAPI processing ran on each available core */
+ uint32 *napi_percpu_run_cnt;
+ /* Number of times RX Completions got scheduled */
+ uint32 rxc_sched_cnt;
+ /* Number of times RX Completion ran on each available core */
+ uint32 *rxc_percpu_run_cnt;
+ /* Number of times TX Completions got scheduled */
+ uint32 txc_sched_cnt;
+ /* Number of times TX Completions ran on each available core */
+ uint32 *txc_percpu_run_cnt;
+ /* CPU status */
+ /* Number of times each CPU came online */
+ uint32 *cpu_online_cnt;
+ /* Number of times each CPU went offline */
+ uint32 *cpu_offline_cnt;
+
+ /* Number of times TX processing run on each core */
+ uint32 *txp_percpu_run_cnt;
+ /* Number of times TX start run on each core */
+ uint32 *tx_start_percpu_run_cnt;
+
+ /* Tx load balancing */
+
+ /* TODO: Need to see if batch processing is really required in case of TX
+ * processing. In case of RX the Dongle can send a bunch of rx completions,
+ * hence we took a 3 queue approach
+ * enque - adds the skbs to rx_pend_queue
+ * dispatch - uses a lock and adds the list of skbs from pend queue to
+ * napi queue
+ * napi processing - copies the pend_queue into a local queue and works
+ * on it.
+ * But for TX its going to be 1 skb at a time, so we are just thinking
+ * of using only one queue and use the lock supported skb queue functions
+ * to add and process it. If it's inefficient we'll revisit the queue
+ * design.
+ */
+
+ /* When the NET_TX tries to send a TX packet put it into tx_pend_queue */
+ /* struct sk_buff_head tx_pend_queue ____cacheline_aligned; */
+ /*
+ * From the Tasklet that actually sends out data
+ * copy the list tx_pend_queue into tx_active_queue. There by we need
+ * to spinlock to only perform the copy the rest of the code ie to
+ * construct the tx_pend_queue and the code to process tx_active_queue
+ * can be lockless. The concept is borrowed as is from RX processing
+ */
+ /* struct sk_buff_head tx_active_queue ____cacheline_aligned; */
+
+ /* Control TXP in runtime, enable by default */
+ atomic_t lb_txp_active;
+
+ /* Control RXP in runtime, enable by default */
+ atomic_t lb_rxp_active;
+
+ /*
+ * When the NET_TX tries to send a TX packet put it into tx_pend_queue
+ * For now, the processing tasklet will also directly operate on this
+ * queue
+ */
+ struct sk_buff_head tx_pend_queue ____cacheline_aligned;
+
+ /* Control RXP in runtime, enable by default */
+ /* cpu on which the DHD Tx is happening */
+ atomic_t tx_cpu;
+
+ /* CPU on which the Network stack is calling the DHD's xmit function */
+ atomic_t net_tx_cpu;
+
+ /* Tasklet context from which the DHD's TX processing happens */
+ struct tasklet_struct tx_tasklet;
+
+ /*
+ * Consumer Histogram - NAPI RX Packet processing
+ * -----------------------------------------------
+ * On Each CPU, when the NAPI RX Packet processing call back was invoked
+ * how many packets were processed is captured in this data structure.
+ * Now it's difficult to capture the "exact" number of packets processed.
+ * So considering the packet counter to be a 32 bit one, we have a
+ * bucket with 8 bins (2^1, 2^2 ... 2^8). The "number" of packets
+ * processed is rounded off to the next power of 2 and put in the
+ * appropriate "bin"; the value in the bin gets incremented.
+ * For example, assume that in CPU 1 if NAPI Rx runs 3 times
+ * and the packet count processed is as follows (assume the bin counters are 0)
+ * iteration 1 - 10 (the bin counter 2^4 increments to 1)
+ * iteration 2 - 30 (the bin counter 2^5 increments to 1)
+ * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
+ */
+ uint32 *napi_rx_hist[HIST_BIN_SIZE];
+ uint32 *txc_hist[HIST_BIN_SIZE];
+ uint32 *rxc_hist[HIST_BIN_SIZE];
+#endif /* DHD_LB */
+#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
+ struct work_struct axi_error_dispatcher_work;
+#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
+#ifdef SHOW_LOGTRACE
+#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
+ tsk_ctl_t thr_logtrace_ctl;
+#else
+ struct delayed_work event_log_dispatcher_work;
+#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
+#endif /* SHOW_LOGTRACE */
+
+#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
+#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
+ struct kobject dhd_kobj;
+ struct kobject dhd_conf_file_kobj;
+ struct timer_list timesync_timer;
+#if defined(BT_OVER_SDIO)
+ char btfw_path[PATH_MAX];
+#endif /* defined (BT_OVER_SDIO) */
+#ifdef WL_MONITOR
+ struct net_device *monitor_dev; /* monitor pseudo device */
+ struct sk_buff *monitor_skb;
+ uint monitor_len;
+ uint monitor_type; /* monitor pseudo device */
+#endif /* WL_MONITOR */
+#if defined(BT_OVER_SDIO)
+ struct mutex bus_user_lock; /* lock for sdio bus apis shared between WLAN & BT */
+ int bus_user_count; /* User counts of sdio bus shared between WLAN & BT */
+#endif /* BT_OVER_SDIO */
+#ifdef SHOW_LOGTRACE
+ struct sk_buff_head evt_trace_queue ____cacheline_aligned;
+#endif // endif
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ struct workqueue_struct *tx_wq;
+ struct workqueue_struct *rx_wq;
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+#ifdef DHD_DEBUG_UART
+ bool duart_execute;
+#endif /* DHD_DEBUG_UART */
+ struct mutex logdump_lock;
+ /* indicates mem_dump was scheduled as work queue or called directly */
+ bool scheduled_memdump;
+ struct work_struct dhd_hang_process_work;
+#ifdef DHD_HP2P
+ spinlock_t hp2p_lock;
+#endif /* DHD_HP2P */
+} dhd_info_t;
+
+extern int dhd_sysfs_init(dhd_info_t *dhd);
+extern void dhd_sysfs_exit(dhd_info_t *dhd);
+extern void dhd_dbg_ring_proc_create(dhd_pub_t *dhdp);
+extern void dhd_dbg_ring_proc_destroy(dhd_pub_t *dhdp);
+
+int __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf);
+
+#if defined(DHD_LB)
+#if defined(DHD_LB_TXP)
+int dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net, int ifidx, void *skb);
+void dhd_tx_dispatcher_work(struct work_struct * work);
+void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp);
+void dhd_lb_tx_dispatch(dhd_pub_t *dhdp);
+void dhd_lb_tx_handler(unsigned long data);
+#endif /* DHD_LB_TXP */
+
+#if defined(DHD_LB_RXP)
+int dhd_napi_poll(struct napi_struct *napi, int budget);
+void dhd_rx_napi_dispatcher_fn(struct work_struct * work);
+void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
+void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
+#endif /* DHD_LB_RXP */
+
+void dhd_lb_set_default_cpus(dhd_info_t *dhd);
+void dhd_cpumasks_deinit(dhd_info_t *dhd);
+int dhd_cpumasks_init(dhd_info_t *dhd);
+
+void dhd_select_cpu_candidacy(dhd_info_t *dhd);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int dhd_cpu_startup_callback(unsigned int cpu);
+int dhd_cpu_teardown_callback(unsigned int cpu);
+#else
+int dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu);
+#endif /* LINUX_VERSION_CODE < 4.10.0 */
+
+int dhd_register_cpuhp_callback(dhd_info_t *dhd);
+int dhd_unregister_cpuhp_callback(dhd_info_t *dhd);
+
+#if defined(DHD_LB_TXC)
+void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
+#endif /* DHD_LB_TXC */
+
+#if defined(DHD_LB_RXC)
+void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
+void dhd_rx_compl_dispatcher_fn(struct work_struct * work);
+#endif /* DHD_LB_RXC */
+
+#endif /* DHD_LB */
+
+#if defined(DHD_LB_IRQSET) || defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
+void dhd_irq_set_affinity(dhd_pub_t *dhdp, const struct cpumask *cpumask);
+#endif /* DHD_LB_IRQSET || DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
+
+#endif /* __DHD_LINUX_PRIV_H__ */
/*
* Expose some of the kernel scheduler routines
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_linux_sched.c 514727 2014-11-12 03:02:48Z $
+ * $Id: dhd_linux_sched.c 815919 2019-04-22 09:06:50Z $
*/
#include <linux/kernel.h>
#include <linux/module.h>
int setScheduler(struct task_struct *p, int policy, struct sched_param *param)
{
int rc = 0;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
rc = sched_setscheduler(p, policy, param);
-#endif /* LinuxVer */
return rc;
}
int get_scheduler_policy(struct task_struct *p)
{
int rc = SCHED_NORMAL;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
rc = p->policy;
-#endif /* LinuxVer */
return rc;
}
* Broadcom Dongle Host Driver (DHD), Generic work queue framework
* Generic interface to handle dhd deferred work events
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_linux_wq.c 675839 2016-12-19 03:07:26Z $
+ * $Id: dhd_linux_wq.c 815919 2019-04-22 09:06:50Z $
*/
#include <linux/init.h>
struct kfifo *fifo;
gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
- fifo = kfifo_init(buf, size, flags, lock);
-#else
fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags);
if (!fifo) {
return NULL;
}
kfifo_init(fifo, buf, size);
-#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
return fifo;
}
dhd_kfifo_free(struct kfifo *fifo)
{
kfifo_free(fifo);
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31))
- /* FC11 releases the fifo memory */
- kfree(fifo);
-#endif // endif
}
/* deferred work functions */
* Broadcom Dongle Host Driver (DHD), Generic work queue framework
* Generic interface to handle dhd deferred work events
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_linux_wq.h 769906 2018-06-28 11:52:31Z $
+ * $Id: dhd_linux_wq.h 814378 2019-04-11 02:21:31Z $
*/
#ifndef _dhd_linux_wq_h_
#define _dhd_linux_wq_h_
DHD_WQ_WORK_SET_MCAST_LIST,
DHD_WQ_WORK_IPV6_NDO,
DHD_WQ_WORK_HANG_MSG,
- DHD_WQ_WORK_SSSR_DUMP,
DHD_WQ_WORK_DHD_LOG_DUMP,
DHD_WQ_WORK_PKTLOG_DUMP,
DHD_WQ_WORK_INFORM_DHD_MON,
DHD_WQ_WORK_ERROR_RECOVERY,
#endif /* DHD_ERPOM */
DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH,
+ DHD_WQ_WORK_AXI_ERROR_DUMP,
+ DHD_WQ_WORK_CTO_RECOVERY,
#ifdef DHD_UPDATE_INTF_MAC
DHD_WQ_WORK_IF_UPDATE,
#endif /* DHD_UPDATE_INTF_MAC */
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
dhd_mschdbg_us_to_sec(p->time_hi, p->time_lo, &s, &ss);
MSCH_EVENT_HEAD(0);
MSCH_EVENT(("%06d.%06d [wl%d]: ", s, ss, p->hdr.tag));
+ bzero(&hdr, sizeof(hdr));
hdr.tag = EVENT_LOG_TAG_MSCHPROFILE;
hdr.count = p->hdr.count + 1;
/* exclude LSB 2 bits which indicate binary/non-binary data */
hdr.fmt_num = ntoh16(p->hdr.fmt_num) >> 2;
+ hdr.fmt_num_raw = ntoh16(p->hdr.fmt_num);
+ if (ntoh16(p->hdr.fmt_num) == DHD_OW_BI_RAW_EVENT_LOG_FMT) {
+ hdr.binary_payload = TRUE;
+ }
dhd_dbg_verboselog_printf(dhdp, &hdr, raw_event_ptr, p->data, 0, 0);
}
lastMessages = TRUE;
dhd_mschdbg_us_to_sec(p->time_hi, p->time_lo, &s, &ss);
MSCH_EVENT_HEAD(0);
MSCH_EVENT(("%06d.%06d [wl%d]: ", s, ss, p->hdr.tag));
+ bzero(&hdr, sizeof(hdr));
hdr.tag = EVENT_LOG_TAG_MSCHPROFILE;
hdr.count = p->hdr.count + 1;
/* exclude LSB 2 bits which indicate binary/non-binary data */
hdr.fmt_num = ntoh16(p->hdr.fmt_num) >> 2;
+ hdr.fmt_num_raw = ntoh16(p->hdr.fmt_num);
+ if (ntoh16(p->hdr.fmt_num) == DHD_OW_BI_RAW_EVENT_LOG_FMT) {
+ hdr.binary_payload = TRUE;
+ }
dhd_dbg_verboselog_printf(dhdp, &hdr, raw_event_ptr, p->data, 0, 0);
} else {
msch_collect_tlv_t *p = (msch_collect_tlv_t *)log_ptr;
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* Provides type definitions and function prototypes used to link the
* DHD OS, bus, and protocol modules.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_msgbuf.c 769906 2018-06-28 11:52:31Z $
+ * $Id: dhd_msgbuf.c 825801 2019-06-17 10:51:10Z $
*/
#include <typedefs.h>
#include <bcmutils.h>
#include <bcmmsgbuf.h>
#include <bcmendian.h>
+#include <bcmstdlib_s.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <hnd_armtrap.h>
#include <dnglevent.h>
+#ifdef DHD_EWPR_VER2
+#include <dhd_bitpack.h>
+#endif /* DHD_EWPR_VER2 */
+
extern char dhd_version[];
extern char fw_version[];
* Dongle advertizes host side sync mechanism requirements.
*/
-#define PCIE_D2H_SYNC_WAIT_TRIES (512UL)
-#define PCIE_D2H_SYNC_NUM_OF_STEPS (5UL)
+#define PCIE_D2H_SYNC_WAIT_TRIES (512U)
+#define PCIE_D2H_SYNC_NUM_OF_STEPS (5U)
#define PCIE_D2H_SYNC_DELAY (100UL) /* in terms of usecs */
+#define HWA_DB_TYPE_RXPOST (0x0050)
+#define HWA_DB_TYPE_TXCPLT (0x0060)
+#define HWA_DB_TYPE_RXCPLT (0x0170)
+#define HWA_DB_INDEX_VALUE(val) ((uint32)(val) << 16)
+
+#define HWA_ENAB_BITMAP_RXPOST (1U << 0) /* 1A */
+#define HWA_ENAB_BITMAP_RXCPLT (1U << 1) /* 2B */
+#define HWA_ENAB_BITMAP_TXCPLT (1U << 2) /* 4B */
+
/**
* Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
*
typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
volatile cmn_msg_hdr_t *msg, int msglen);
+/**
+ * Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
+ * For EDL messages.
+ *
+ * On success: return cmn_msg_hdr_t::msg_type
+ * On failure: return 0 (invalid msg_type)
+ */
+#ifdef EWP_EDL
+typedef int (* d2h_edl_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
+ volatile cmn_msg_hdr_t *msg);
+#endif /* EWP_EDL */
+
/*
* +----------------------------------------------------------------------------
*
#define DHD_DMA_PAD (128)
#endif // endif
+/*
+ * +----------------------------------------------------------------------------
+ * Flowring Pool
+ *
+ * Unlike common rings, which are attached very early on (dhd_prot_attach),
+ * flowrings are dynamically instantiated. Moreover, flowrings may require a
+ * larger DMA-able buffer. To avoid issues with fragmented cache coherent
+ * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
+ * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
+ *
+ * Each DMA-able buffer may be allocated independently, or may be carved out
+ * of a single large contiguous region that is registered with the protocol
+ * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
+ * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
+ *
+ * No flowring pool action is performed in dhd_prot_attach(), as the number
+ * of h2d rings is not yet known.
+ *
+ * In dhd_prot_init(), the dongle advertized number of h2d rings is used to
+ * determine the number of flowrings required, and a pool of msgbuf_rings are
+ * allocated and a DMA-able buffer (carved or allocated) is attached.
+ * See: dhd_prot_flowrings_pool_attach()
+ *
+ * A flowring msgbuf_ring object may be fetched from this pool during flowring
+ * creation, using the flowid. Likewise, flowrings may be freed back into the
+ * pool on flowring deletion.
+ * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
+ *
+ * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
+ * are detached (returned back to the carved region or freed), and the pool of
+ * msgbuf_ring and any objects allocated against it are freed.
+ * See: dhd_prot_flowrings_pool_detach()
+ *
+ * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
+ * state as-if upon an attach. All DMA-able buffers are retained.
+ * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
+ * pool attach will notice that the pool persists and continue to use it. This
+ * will avoid the case of a fragmented DMA-able region.
+ *
+ * +----------------------------------------------------------------------------
+ */
+
+/* Conversion of a flowid to a flowring pool index */
+#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
+ ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
+
+/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
+#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
+ (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
+ DHD_FLOWRINGS_POOL_OFFSET(flowid)
+
+/* Traverse each flowring in the flowring pool, assigning ring and flowid */
+#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \
+ for ((flowid) = DHD_FLOWRING_START_FLOWID, \
+ (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
+ (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \
+ (ring)++, (flowid)++)
+
/* Used in loopback tests */
typedef struct dhd_dmaxfer {
dhd_dma_buf_t srcmem;
uint32 len;
bool in_progress;
uint64 start_usec;
+ uint64 time_taken;
uint32 d11_lpbk;
int status;
} dhd_dmaxfer_t;
#endif /* TXP_FLUSH_NITEMS */
uint8 ring_type;
+ uint16 hwa_db_type; /* hwa type non-zero for Data path rings */
uint8 n_completion_ids;
bool create_pending;
uint16 create_req_id;
/** DHD protocol handle. Is an opaque type to other DHD software layers. */
typedef struct dhd_prot {
osl_t *osh; /* OSL handle */
+ uint16 rxbufpost_sz;
uint16 rxbufpost;
uint16 max_rxbufpost;
uint16 max_eventbufpost;
uint32 flowring_num;
d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
+#ifdef EWP_EDL
+ d2h_edl_sync_cb_t d2h_edl_sync_cb; /* Sync on EDL D2H DMA done: SEQNUM or XORCSUM */
+#endif /* EWP_EDL */
ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
ulong d2h_sync_wait_tot; /* total wait loops */
void *pktid_tx_map; /* pktid map for tx path */
bool metadata_dbg;
void *pktid_map_handle_ioctl;
+#ifdef DHD_MAP_PKTID_LOGGING
+ void *pktid_dma_map; /* pktid map for DMA MAP */
+ void *pktid_dma_unmap; /* pktid map for DMA UNMAP */
+#endif /* DHD_MAP_PKTID_LOGGING */
+ uint32 pktid_depleted_cnt; /* pktid depleted count */
+ /* netif tx queue stop count */
+ uint8 pktid_txq_stop_cnt;
+ /* netif tx queue start count */
+ uint8 pktid_txq_start_cnt;
+ uint64 ioctl_fillup_time; /* timestamp for ioctl fillup */
+ uint64 ioctl_ack_time; /* timestamp for ioctl ack */
+ uint64 ioctl_cmplt_time; /* timestamp for ioctl completion */
/* Applications/utilities can read tx and rx metadata using IOVARs */
uint16 rx_metadata_offset;
bool no_aggr;
bool fixed_rate;
dhd_dma_buf_t host_scb_buf; /* scb host offload buffer */
+#ifdef DHD_HP2P
+ msgbuf_ring_t *d2hring_hp2p_txcpl; /* D2H HPP Tx completion ring */
+ msgbuf_ring_t *d2hring_hp2p_rxcpl; /* D2H HPP Rx completion ring */
+#endif /* DHD_HP2P */
+ bool no_tx_resource;
} dhd_prot_t;
+#ifdef DHD_EWPR_VER2
+#define HANG_INFO_BASE64_BUFFER_SIZE 640
+#endif // endif
+
#ifdef DHD_DUMP_PCIE_RINGS
static
-int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, unsigned long *file_posn);
+int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
+ const void *user_buf, unsigned long *file_posn);
+#ifdef EWP_EDL
+static
+int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
+ unsigned long *file_posn);
+#endif /* EWP_EDL */
#endif /* DHD_DUMP_PCIE_RINGS */
extern bool dhd_timesync_delay_post_bufs(dhd_pub_t *dhdp);
static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf);
static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf);
static void dhd_prot_detach_info_rings(dhd_pub_t *dhd);
+#ifdef DHD_HP2P
+static void dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd);
+#endif /* DHD_HP2P */
#ifdef EWP_EDL
static void dhd_prot_detach_edl_rings(dhd_pub_t *dhd);
#endif // endif
static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf);
static void dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf);
+#ifdef DHD_HP2P
+static void dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus);
+static void dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus);
+static void dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid);
+static void dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc);
+#endif // endif
typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
/** callback functions for messages generated by the dongle */
uint16 ring_type, uint32 id);
static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
uint8 type, uint32 id);
-static uint16 dhd_get_max_flow_rings(dhd_pub_t *dhd);
/**
* dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
{
uint32 ring_seqnum = ring->seqnum;
+ if (dhd_query_bus_erros(dhd)) {
+ return;
+ }
+
DHD_ERROR((
"LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>"
- " tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d>\n",
+ " tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d> rd<%d> wr<%d>\n",
dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum% D2H_EPOCH_MODULO, tries,
dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
- ring->dma_buf.va, msg, ring->curr_rd));
+ ring->dma_buf.va, msg, ring->curr_rd, ring->rd, ring->wr));
dhd_prhex("D2H MsgBuf Failure", msg, msglen, DHD_ERROR_VAL);
- dhd->livelock_occured = TRUE;
+ /* Try to resume if already suspended or suspend in progress */
+
+ /* Skip if still in suspended or suspend in progress */
+ if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
+ DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
+ __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
+ goto exit;
+ }
+
dhd_bus_dump_console_buffer(dhd->bus);
dhd_prot_debug_info_print(dhd);
}
#endif /* DHD_FW_COREDUMP */
+exit:
dhd_schedule_reset(dhd);
+ dhd->livelock_occured = TRUE;
}
/**
msg_seqnum = *marker;
if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */
ring->seqnum++; /* next expected sequence number */
- goto dma_completed;
+ /* Check for LIVELOCK induce flag, which is set by firing
+ * dhd iovar to induce LIVELOCK error. If flag is set,
+ * MSG_TYPE_INVALID is returned, which results in to LIVELOCK error.
+ */
+ if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
+ goto dma_completed;
+ }
}
- total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
+ total_tries = (uint32)(((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries);
if (total_tries > prot->d2h_sync_wait_max)
prot->d2h_sync_wait_max = total_tries;
num_words);
if (prot_checksum == 0U) { /* checksum is OK */
ring->seqnum++; /* next expected sequence number */
- goto dma_completed;
+ /* Check for LIVELOCK induce flag, which is set by firing
+ * dhd iovar to induce LIVELOCK error. If flag is set,
+ * MSG_TYPE_INVALID is returned, which results in to
+ * LIVELOCK error.
+ */
+ if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
+ goto dma_completed;
+ }
}
}
dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
volatile cmn_msg_hdr_t *msg, int msglen)
{
- return msg->msg_type;
+ /* Check for LIVELOCK induce flag, which is set by firing
+ * dhd iovar to induce LIVELOCK error. If flag is set,
+ * MSG_TYPE_INVALID is returned, which results in to LIVELOCK error.
+ */
+ if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
+ DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
+ return MSG_TYPE_INVALID;
+ } else {
+ return msg->msg_type;
+ }
}
#ifdef EWP_EDL
if (valid_msg) {
/* data is OK */
ring->seqnum++; /* next expected sequence number */
- goto dma_completed;
+ if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
+ goto dma_completed;
+ }
}
} else {
DHD_TRACE(("%s: wrong hdr, seqnum expected %u, got %u."
prot->d2h_sync_wait_tot += tries;
return BCME_OK;
}
+
+/**
+ * dhd_prot_d2h_sync_edl_none - the dongle guarantees the DMA of an EDL work
+ * item is complete before the host reads it, so the host does not need to
+ * sync. This noop sync handler will be bound when the dongle
+ * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
+ */
+static int BCMFASTPATH
+dhd_prot_d2h_sync_edl_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+	volatile cmn_msg_hdr_t *msg)
+{
+	/* Check for LIVELOCK induce flag, which is set by firing
+	 * dhd iovar to induce LIVELOCK error. If flag is set,
+	 * BCME_ERROR is returned, which results in a LIVELOCK error.
+	 */
+	if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
+		DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
+		return BCME_ERROR;
+	} else {
+		/* EDL payload items need no further handling here; any other
+		 * msg_type is propagated (non-zero) to the caller.
+		 */
+		if (msg->msg_type == MSG_TYPE_INFO_PYLD)
+			return BCME_OK;
+		else
+			return msg->msg_type;
+	}
+}
#endif /* EWP_EDL */
INLINE void
prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+ if (HWA_ACTIVE(dhd)) {
+ prot->d2hring_tx_cpln.hwa_db_type =
+ (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_TXCPLT) ? HWA_DB_TYPE_TXCPLT : 0;
+ prot->d2hring_rx_cpln.hwa_db_type =
+ (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_RXCPLT) ? HWA_DB_TYPE_RXCPLT : 0;
+ DHD_ERROR(("%s: TXCPLT hwa_db_type:0x%x RXCPLT hwa_db_type:0x%x\n",
+ __FUNCTION__, prot->d2hring_tx_cpln.hwa_db_type,
+ prot->d2hring_rx_cpln.hwa_db_type));
+ }
+
if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
+#ifdef EWP_EDL
+ prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
+#endif /* EWP_EDL */
DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
} else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
+#ifdef EWP_EDL
+ prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
+#endif /* EWP_EDL */
DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
} else {
prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
+#ifdef EWP_EDL
+ prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl_none;
+#endif /* EWP_EDL */
DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__));
}
}
{
dhd_prot_t *prot = dhd->prot;
prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
+
+	if (HWA_ACTIVE(dhd)) {
+		/* Enable the HWA doorbell on the H2D RXPOST submission ring only
+		 * when the dongle advertises HWA RXPOST capability in its bitmap.
+		 */
+		prot->h2dring_rxp_subn.hwa_db_type =
+			(dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_RXPOST) ? HWA_DB_TYPE_RXPOST : 0;
+		/* BUGFIX: log the RXPOST field that was just assigned; the original
+		 * printed prot->d2hring_tx_cpln.hwa_db_type, a copy/paste from the
+		 * D2H completion-ring init, so the message never reflected RXPOST.
+		 */
+		DHD_ERROR(("%s: RXPOST hwa_db_type:0x%x\n",
+			__FUNCTION__, prot->h2dring_rxp_subn.hwa_db_type));
+	}
+
prot->h2dring_rxp_subn.current_phase = 0;
prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
/* +------------------ End of PCIE DHD DMA BUF ADT ------------------------+ */
+/*
+ * +---------------------------------------------------------------------------+
+ * DHD_MAP_PKTID_LOGGING
+ * Logging the PKTID and DMA map/unmap information for the SMMU fault issue
+ * debugging in customer platform.
+ * +---------------------------------------------------------------------------+
+ */
+
+#ifdef DHD_MAP_PKTID_LOGGING
+/* One DMA map/unmap event record. */
+typedef struct dhd_pktid_log_item {
+	dmaaddr_t	pa;		/* DMA bus address */
+	uint64		ts_nsec;	/* Timestamp: nsec */
+	uint32		size;		/* DMA map/unmap size */
+	uint32		pktid;		/* Packet ID */
+	uint8		pkttype;	/* Packet Type */
+	uint8		rsvd[7];	/* Reserved for future use */
+} dhd_pktid_log_item_t;
+
+/* Fixed-capacity ring of log items; 'index' wraps modulo 'items'. */
+typedef struct dhd_pktid_log {
+	uint32		items;		/* number of total items */
+	uint32		index;		/* index of pktid_log_item */
+	dhd_pktid_log_item_t	map[0];	/* metadata storage (flexible array) */
+} dhd_pktid_log_t;
+
+typedef void * dhd_pktid_log_handle_t; /* opaque handle to pktid log */
+
+#define	MAX_PKTID_LOG				(2048)
+/* Byte sizes of one item and of a whole log with 'items' slots (header + array) */
+#define	DHD_PKTID_LOG_ITEM_SZ			(sizeof(dhd_pktid_log_item_t))
+#define	DHD_PKTID_LOG_SZ(items)			(uint32)((sizeof(dhd_pktid_log_t)) + \
+					((DHD_PKTID_LOG_ITEM_SZ) * (items)))
+
+/* Wrappers so call sites compile away cleanly when the feature is disabled */
+#define	DHD_PKTID_LOG_INIT(dhd, hdl)		dhd_pktid_logging_init((dhd), (hdl))
+#define	DHD_PKTID_LOG_FINI(dhd, hdl)		dhd_pktid_logging_fini((dhd), (hdl))
+#define	DHD_PKTID_LOG(dhd, hdl, pa, pktid, len, pkttype)	\
+	dhd_pktid_logging((dhd), (hdl), (pa), (pktid), (len), (pkttype))
+#define	DHD_PKTID_LOG_DUMP(dhd)			dhd_pktid_logging_dump((dhd))
+
+/* Allocate a pktid DMA map/unmap log with 'num_items' slots.
+ * Returns an opaque handle, or NULL when the zeroed allocation fails
+ * (callers only warn; logging is then effectively disabled).
+ */
+static dhd_pktid_log_handle_t *
+dhd_pktid_logging_init(dhd_pub_t *dhd, uint32 num_items)
+{
+	dhd_pktid_log_t *log;
+	uint32 log_size;
+
+	/* header plus num_items fixed-size records */
+	log_size = DHD_PKTID_LOG_SZ(num_items);
+	log = (dhd_pktid_log_t *)MALLOCZ(dhd->osh, log_size);
+	if (log == NULL) {
+		DHD_ERROR(("%s: MALLOC failed for size %d\n",
+			__FUNCTION__, log_size));
+		return (dhd_pktid_log_handle_t *)NULL;
+	}
+
+	log->items = num_items;
+	log->index = 0;
+
+	return (dhd_pktid_log_handle_t *)log; /* opaque handle */
+}
+
+/* Free a pktid log previously allocated by dhd_pktid_logging_init().
+ * A NULL handle is tolerated (logged and ignored).
+ */
+static void
+dhd_pktid_logging_fini(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle)
+{
+	dhd_pktid_log_t *log;
+	uint32 log_size;
+
+	if (handle == NULL) {
+		DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	log = (dhd_pktid_log_t *)handle;
+	/* recompute the original allocation size from the stored item count */
+	log_size = DHD_PKTID_LOG_SZ(log->items);
+	MFREE(dhd->osh, handle, log_size);
+}
+
+/* Record one DMA map/unmap event (bus address, pktid, length, packet type)
+ * with a nanosecond timestamp into the ring log; the oldest entry is
+ * overwritten once 'items' records have been written (index wraps).
+ * NOTE(review): no lock is taken here; presumably call sites are serialized
+ * by the pktid-map lock (DHD_PKTID_LOG is invoked under it) — confirm.
+ */
+static void
+dhd_pktid_logging(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle, dmaaddr_t pa,
+	uint32 pktid, uint32 len, uint8 pkttype)
+{
+	dhd_pktid_log_t *log;
+	uint32 idx;
+
+	if (handle == NULL) {
+		DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	log = (dhd_pktid_log_t *)handle;
+	idx = log->index;
+	log->map[idx].ts_nsec = OSL_LOCALTIME_NS();
+	log->map[idx].pa = pa;
+	log->map[idx].pktid = pktid;
+	log->map[idx].size = len;
+	log->map[idx].pkttype = pkttype;
+	log->index = (idx + 1) % (log->items);	/* update index */
+}
+
+/* Dump the current map/unmap log indices and the physical addresses/sizes of
+ * both log arrays, for SMMU-fault debugging (the physical address lets the
+ * fault handler locate the logs in a ramdump).
+ */
+void
+dhd_pktid_logging_dump(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	dhd_pktid_log_t *map_log, *unmap_log;
+	uint64 ts_sec, ts_usec;
+
+	if (prot == NULL) {
+		DHD_ERROR(("%s: prot is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	map_log = (dhd_pktid_log_t *)(prot->pktid_dma_map);
+	unmap_log = (dhd_pktid_log_t *)(prot->pktid_dma_unmap);
+	OSL_GET_LOCALTIME(&ts_sec, &ts_usec);
+	/* only print when both logs were successfully allocated at init */
+	if (map_log && unmap_log) {
+		DHD_ERROR(("%s: map_idx=%d unmap_idx=%d "
+			"current time=[%5lu.%06lu]\n", __FUNCTION__,
+			map_log->index, unmap_log->index,
+			(unsigned long)ts_sec, (unsigned long)ts_usec));
+		DHD_ERROR(("%s: pktid_map_log(pa)=0x%llx size=%d, "
+			"pktid_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
+			(uint64)__virt_to_phys((ulong)(map_log->map)),
+			(uint32)(DHD_PKTID_LOG_ITEM_SZ * map_log->items),
+			(uint64)__virt_to_phys((ulong)(unmap_log->map)),
+			(uint32)(DHD_PKTID_LOG_ITEM_SZ * unmap_log->items)));
+	}
+}
+#endif /* DHD_MAP_PKTID_LOGGING */
+
+/* +----------------- End of DHD_MAP_PKTID_LOGGING -----------------------+ */
+
/*
* +---------------------------------------------------------------------------+
* PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
#define DHD_PCIE_PKTID
#define MAX_CTRL_PKTID (1024) /* Maximum number of pktids supported */
#define MAX_RX_PKTID (1024)
-#define MAX_TX_PKTID (3072 * 2)
+#define MAX_TX_PKTID (3072 * 12)
/* On Router, the pktptr serves as a pktid. */
PKTTYPE_TSBUF_RX
} dhd_pkttype_t;
+#define DHD_PKTID_MIN_AVAIL_COUNT 512U
+#define DHD_PKTID_DEPLETED_MAX_COUNT (DHD_PKTID_MIN_AVAIL_COUNT * 2U)
#define DHD_PKTID_INVALID (0U)
#define DHD_IOCTL_REQ_PKTID (0xFFFE)
#define DHD_FAKE_PKTID (0xFACE)
#define DHD_H2D_BTLOGRING_REQ_PKTID 0xFFFA
#define DHD_D2H_BTLOGRING_REQ_PKTID 0xFFF9
#define DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID 0xFFF8
+#ifdef DHD_HP2P
+#define DHD_D2H_HPPRING_TXREQ_PKTID 0xFFF7
+#define DHD_D2H_HPPRING_RXREQ_PKTID 0xFFF6
+#endif /* DHD_HP2P */
#define IS_FLOWRING(ring) \
((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0))
static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
void *dmah, void *secdma, dhd_pkttype_t pkttype);
-
/* Return an allocated pktid, retrieving previously saved pkt and metadata */
static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
out:
DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
+
+ if (error != BCME_OK) {
+ dhd->pktid_audit_failed = TRUE;
+ }
+
return error;
}
const int test_for, void *msg, uint32 msg_len, const char *func)
{
int ret = BCME_OK;
+
+ if (dhd_query_bus_erros(dhdp)) {
+ return BCME_ERROR;
+ }
+
ret = __dhd_pktid_audit(dhdp, map, pktid, test_for, func);
if (ret == BCME_ERROR) {
DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
#ifdef DHD_PKTID_AUDIT_RING
DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
#endif /* DHD_PKTID_AUDIT_RING */
+#ifdef DHD_MAP_PKTID_LOGGING
+ DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap,
+ locker->pa, nkey, locker->len,
+ locker->pkttype);
+#endif /* DHD_MAP_PKTID_LOGGING */
{
if (SECURE_DMA_ENAB(dhd->osh))
locker->pkttype = pkttype;
locker->pkt = pkt;
locker->state = LOCKER_IS_BUSY; /* make this locker busy */
+#ifdef DHD_MAP_PKTID_LOGGING
+ DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_map, pa, nkey, len, pkttype);
+#endif /* DHD_MAP_PKTID_LOGGING */
DHD_PKTID_UNLOCK(map->pktid_lock, flags);
}
#if defined(DHD_PKTID_AUDIT_MAP)
DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
#endif /* DHD_PKTID_AUDIT_MAP */
+#ifdef DHD_MAP_PKTID_LOGGING
+ DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, locker->pa, nkey,
+ (uint32)locker->len, pkttype);
+#endif /* DHD_MAP_PKTID_LOGGING */
*pa = locker->pa; /* return contents of locker */
*len = (uint32)locker->len;
dhd->dma_h2d_ring_upd_support = FALSE;
dhd->dma_ring_upd_overwrite = FALSE;
+ dhd->hwa_inited = 0;
dhd->idma_inited = 0;
dhd->ifrm_inited = 0;
dhd->dar_inited = 0;
}
#endif /* IOCTLRESP_USE_CONSTMEM */
+#ifdef DHD_MAP_PKTID_LOGGING
+ prot->pktid_dma_map = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
+ if (prot->pktid_dma_map == NULL) {
+ DHD_ERROR(("%s: failed to allocate pktid_dma_map\n",
+ __FUNCTION__));
+ }
+
+ prot->pktid_dma_unmap = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
+ if (prot->pktid_dma_unmap == NULL) {
+ DHD_ERROR(("%s: failed to allocate pktid_dma_unmap\n",
+ __FUNCTION__));
+ }
+#endif /* DHD_MAP_PKTID_LOGGING */
+
/* Initialize the work queues to be used by the Load Balancing logic */
#if defined(DHD_LB_TXC)
{
data |= HOSTCAP_FAST_DELETE_RING;
}
+ if (dhdpcie_bus_get_pcie_hwa_supported(dhd->bus)) {
+ DHD_ERROR(("HWA inited\n"));
+ /* TODO: Is hostcap needed? */
+ dhd->hwa_inited = TRUE;
+ }
+
if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) {
DHD_ERROR(("IDMA inited\n"));
data |= HOSTCAP_H2D_IDMA;
}
#endif /* EWP_EDL */
+#ifdef DHD_HP2P
+ if (dhd->hp2p_capable) {
+ data |= HOSTCAP_PKT_TIMESTAMP;
+ data |= HOSTCAP_PKT_HP2P;
+ DHD_ERROR(("Enable HP2P in host cap\n"));
+ } else {
+ DHD_ERROR(("HP2P not enabled in host cap\n"));
+ }
+#endif // endif
+
+#ifdef DHD_DB0TS
+ if (dhd->db0ts_capable) {
+ data |= HOSTCAP_DB0_TIMESTAMP;
+ DHD_ERROR(("Enable DB0 TS in host cap\n"));
+ } else {
+ DHD_ERROR(("DB0 TS not enabled in host cap\n"));
+ }
+#endif /* DHD_DB0TS */
+	if (dhd->extdtxs_in_txcpl) {
+		DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
+		data |= HOSTCAP_PKT_TXSTATUS;
+	}
+	else {
+		/* BUGFIX: the disabled path previously printed the identical
+		 * "Enable hostcap" message as the enabled path (copy/paste),
+		 * making the log useless for telling the two apart.
+		 */
+		DHD_ERROR(("Disable hostcap: EXTD TXS in txcpl\n"));
+	}
+
DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n",
__FUNCTION__,
prot->active_ipc_version, prot->host_ipc_version,
dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32), HOST_API_VERSION, 0);
dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa,
sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0);
-
}
}
prot->device_ipc_version = dhd->bus->api.fw_rev;
prot->host_ipc_version = PCIE_SHARED_VERSION;
+ prot->no_tx_resource = FALSE;
/* Init the host API version */
dhd_set_host_cap(dhd);
dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
D2H_DMA_INDX_RD_BUF, 0);
}
-
/* Signal to the dongle that common ring init is complete */
- dhd_bus_hostready(dhd->bus);
+ if (dhd->hostrdy_after_init)
+ dhd_bus_hostready(dhd->bus);
/*
* If the DMA-able buffers for flowring needs to come from a specific
/* Post to dongle host configured soft doorbells */
dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
- /* Post buffers for packet reception and ioctl/event responses */
- dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
dhd_msgbuf_rxbuf_post_event_bufs(dhd);
}
#endif /* EWP_EDL */
+#ifdef DHD_HP2P
+ /* create HPP txcmpl/rxcmpl rings */
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_7 && dhd->hp2p_capable) {
+ if ((ret = dhd_prot_init_hp2p_rings(dhd)) != BCME_OK) {
+ /* For now log and proceed, further clean up action maybe necessary
+ * when we have more clarity.
+ */
+ DHD_ERROR(("%s HP2P rings couldn't be created: Err Code%d",
+ __FUNCTION__, ret));
+ }
+ }
+#endif /* DHD_HP2P */
+
return BCME_OK;
} /* dhd_prot_init */
#ifdef EWP_EDL
dhd_prot_detach_edl_rings(dhd);
#endif // endif
+#ifdef DHD_HP2P
+ /* detach HPP rings */
+ dhd_prot_detach_hp2p_rings(dhd);
+#endif /* DHD_HP2P */
/* if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs use pktid_map_handle_ioctl
* handler and PKT memory is allocated using alloc_ioctl_return_buffer(), Otherwise
#ifdef IOCTLRESP_USE_CONSTMEM
DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
#endif // endif
+#ifdef DHD_MAP_PKTID_LOGGING
+ DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_map);
+ DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_unmap);
+#endif /* DHD_MAP_PKTID_LOGGING */
#if defined(DHD_LB_TXC)
if (prot->tx_compl_prod.buffer)
dhd_flow_rings_deinit(dhd);
}
+#ifdef DHD_HP2P
+ if (prot->d2hring_hp2p_txcpl) {
+ dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_txcpl);
+ }
+ if (prot->d2hring_hp2p_rxcpl) {
+ dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_rxcpl);
+ }
+#endif /* DHD_HP2P */
+
/* Reset PKTID map */
DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map);
DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map);
DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0);
#if defined(BCMPCIE)
dhd_txcomplete(dhd, pkt, true);
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ dhd_eap_txcomplete(dhd, pkt, TRUE, txstatus->cmn_hdr.if_id);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
#endif // endif
PKTFREE(dhd->osh, pkt, TRUE);
BCMPCIE_H2D_COMMON_MSGRINGS;
}
+ if (prot->d2hring_info_cpln) {
+ /* for d2hring re-entry case, clear inited flag */
+ prot->d2hring_info_cpln->inited = FALSE;
+ }
+
if (prot->h2dring_info_subn && prot->d2hring_info_cpln) {
return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */
}
if (ret != BCME_OK)
return ret;
+ prot->h2dring_info_subn->seqnum = H2D_EPOCH_INIT_VAL;
+ prot->h2dring_info_subn->current_phase = 0;
prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_info_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx));
prot->h2dring_info_subn->n_completion_ids = 1;
}
}
-#ifdef EWP_EDL
+#ifdef DHD_HP2P
static int
-dhd_check_create_edl_rings(dhd_pub_t *dhd)
+dhd_check_create_hp2p_rings(dhd_pub_t *dhd)
{
dhd_prot_t *prot = dhd->prot;
int ret = BCME_ERROR;
uint16 ringid;
- {
- /* dongle may increase max_submission_rings so keep
- * ringid at end of dynamic rings (re-use info ring cpl ring id)
- */
- ringid = dhd->bus->max_tx_flowrings +
- (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
- BCMPCIE_H2D_COMMON_MSGRINGS + 1;
- }
+ /* Last 2 dynamic ring indices are used by hp2p rings */
+ ringid = dhd->bus->max_submission_rings + dhd->bus->max_completion_rings - 2;
- if (prot->d2hring_edl) {
- return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */
- }
+ if (prot->d2hring_hp2p_txcpl == NULL) {
+ prot->d2hring_hp2p_txcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
- if (prot->d2hring_edl == NULL) {
- prot->d2hring_edl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
+ if (prot->d2hring_hp2p_txcpl == NULL) {
+ DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_txcpl\n",
+ __FUNCTION__));
+ return BCME_NOMEM;
+ }
- if (prot->d2hring_edl == NULL) {
- DHD_ERROR(("%s: couldn't alloc memory for d2hring_edl\n",
+ DHD_INFO(("%s: about to create hp2p txcpl ring\n", __FUNCTION__));
+ ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_txcpl, "d2hhp2p_txcpl",
+ dhd_bus_get_hp2p_ring_max_size(dhd->bus, TRUE), D2HRING_TXCMPLT_ITEMSIZE,
+ ringid);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: couldn't alloc resources for hp2p txcpl ring\n",
+ __FUNCTION__));
+ goto err2;
+ }
+ } else {
+ /* for re-entry case, clear inited flag */
+ prot->d2hring_hp2p_txcpl->inited = FALSE;
+ }
+ if (prot->d2hring_hp2p_rxcpl == NULL) {
+ prot->d2hring_hp2p_rxcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
+
+ if (prot->d2hring_hp2p_rxcpl == NULL) {
+ DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_rxcpl\n",
__FUNCTION__));
return BCME_NOMEM;
}
- DHD_ERROR(("%s: about to create EDL ring, ringid: %u \n", __FUNCTION__,
- ringid));
- ret = dhd_prot_ring_attach(dhd, prot->d2hring_edl, "d2hring_edl",
- D2HRING_EDL_MAX_ITEM, D2HRING_EDL_ITEMSIZE,
+ /* create the hp2p rx completion ring next to hp2p tx compl ring
+ * ringid = id next to hp2p tx compl ring
+ */
+ ringid = ringid + 1;
+
+ DHD_INFO(("%s: about to create hp2p rxcpl ring\n", __FUNCTION__));
+ ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_rxcpl, "d2hhp2p_rxcpl",
+ dhd_bus_get_hp2p_ring_max_size(dhd->bus, FALSE), D2HRING_RXCMPLT_ITEMSIZE,
ringid);
if (ret != BCME_OK) {
- DHD_ERROR(("%s: couldn't alloc resources for EDL ring\n",
+ DHD_ERROR(("%s: couldn't alloc resources for hp2p rxcpl ring\n",
__FUNCTION__));
- goto err;
+ goto err1;
}
+ } else {
+ /* for re-entry case, clear inited flag */
+ prot->d2hring_hp2p_rxcpl->inited = FALSE;
}
return ret;
-err:
- MFREE(prot->osh, prot->d2hring_edl, sizeof(msgbuf_ring_t));
- prot->d2hring_edl = NULL;
+err1:
+ MFREE(prot->osh, prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
+ prot->d2hring_hp2p_rxcpl = NULL;
+err2:
+ MFREE(prot->osh, prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
+ prot->d2hring_hp2p_txcpl = NULL;
return ret;
-} /* dhd_check_create_btlog_rings */
+} /* dhd_check_create_hp2p_rings */
int
-dhd_prot_init_edl_rings(dhd_pub_t *dhd)
+dhd_prot_init_hp2p_rings(dhd_pub_t *dhd)
{
dhd_prot_t *prot = dhd->prot;
- int ret = BCME_ERROR;
+ int ret = BCME_OK;
- if ((ret = dhd_check_create_edl_rings(dhd)) != BCME_OK) {
+ dhd->hp2p_ring_active = FALSE;
+
+ if ((ret = dhd_check_create_hp2p_rings(dhd)) != BCME_OK) {
+ DHD_ERROR(("%s: hp2p rings aren't created! \n",
+ __FUNCTION__));
+ return ret;
+ }
+
+ if ((prot->d2hring_hp2p_txcpl->inited) || (prot->d2hring_hp2p_txcpl->create_pending)) {
+ DHD_INFO(("hp2p tx completion ring was created!\n"));
+ return ret;
+ }
+
+ DHD_TRACE(("trying to send create d2h hp2p txcpl ring: id %d\n",
+ prot->d2hring_hp2p_txcpl->idx));
+ ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_txcpl,
+ BCMPCIE_D2H_RING_TYPE_HPP_TX_CPL, DHD_D2H_HPPRING_TXREQ_PKTID);
+ if (ret != BCME_OK)
+ return ret;
+
+ prot->d2hring_hp2p_txcpl->seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_hp2p_txcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+ if ((prot->d2hring_hp2p_rxcpl->inited) || (prot->d2hring_hp2p_rxcpl->create_pending)) {
+ DHD_INFO(("hp2p rx completion ring was created!\n"));
+ return ret;
+ }
+
+ DHD_TRACE(("trying to send create d2h hp2p rxcpl ring: id %d\n",
+ prot->d2hring_hp2p_rxcpl->idx));
+ ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_rxcpl,
+ BCMPCIE_D2H_RING_TYPE_HPP_RX_CPL, DHD_D2H_HPPRING_RXREQ_PKTID);
+ if (ret != BCME_OK)
+ return ret;
+
+ prot->d2hring_hp2p_rxcpl->seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_hp2p_rxcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+ /* Note that there is no way to delete d2h or h2d ring deletion incase either fails,
+ * so can not cleanup if one ring was created while the other failed
+ */
+ return BCME_OK;
+} /* dhd_prot_init_hp2p_rings */
+
+/* Detach and free the HP2P tx/rx completion rings, if they were allocated.
+ * Safe to call when either ring (or both) was never created.
+ */
+static void
+dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd)
+{
+	if (dhd->prot->d2hring_hp2p_txcpl) {
+		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_txcpl);
+		MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
+		dhd->prot->d2hring_hp2p_txcpl = NULL;
+	}
+	if (dhd->prot->d2hring_hp2p_rxcpl) {
+		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_rxcpl);
+		MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
+		dhd->prot->d2hring_hp2p_rxcpl = NULL;
+	}
+}
+#endif /* DHD_HP2P */
+
+#ifdef EWP_EDL
+/* Allocate and attach the D2H EDL (Enhanced Debug Lane) ring if it does not
+ * exist yet. On re-entry after a dhd_prot_reset the existing ring is reused
+ * (its inited flag is cleared) and BCME_OK is returned.
+ */
+static int
+dhd_check_create_edl_rings(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	int ret = BCME_ERROR;
+	uint16 ringid;
+
+	{
+		/* dongle may increase max_submission_rings so keep
+		 * ringid at end of dynamic rings (re-use info ring cpl ring id)
+		 */
+		ringid = dhd->bus->max_tx_flowrings +
+			(dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
+			BCMPCIE_H2D_COMMON_MSGRINGS + 1;
+	}
+
+	if (prot->d2hring_edl) {
+		prot->d2hring_edl->inited = FALSE;
+		return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */
+	}
+
+	/* NOTE(review): always true here — the non-NULL case returned above */
+	if (prot->d2hring_edl == NULL) {
+		prot->d2hring_edl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
+
+		if (prot->d2hring_edl == NULL) {
+			DHD_ERROR(("%s: couldn't alloc memory for d2hring_edl\n",
+				__FUNCTION__));
+			return BCME_NOMEM;
+		}
+
+		DHD_ERROR(("%s: about to create EDL ring, ringid: %u \n", __FUNCTION__,
+			ringid));
+		ret = dhd_prot_ring_attach(dhd, prot->d2hring_edl, "d2hring_edl",
+			D2HRING_EDL_MAX_ITEM, D2HRING_EDL_ITEMSIZE,
+			ringid);
+		if (ret != BCME_OK) {
+			DHD_ERROR(("%s: couldn't alloc resources for EDL ring\n",
+				__FUNCTION__));
+			goto err;
+		}
+	}
+
+	return ret;
+err:
+	/* attach failed: free the ring struct so a later retry starts clean */
+	MFREE(prot->osh, prot->d2hring_edl, sizeof(msgbuf_ring_t));
+	prot->d2hring_edl = NULL;
+
+	return ret;
+} /* dhd_check_create_edl_rings */
+
+int
+dhd_prot_init_edl_rings(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = BCME_ERROR;
+
+ if ((ret = dhd_check_create_edl_rings(dhd)) != BCME_OK) {
DHD_ERROR(("%s: EDL rings aren't created! \n",
__FUNCTION__));
return ret;
return ret;
prot->d2hring_edl->seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_edl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
return BCME_OK;
} /* dhd_prot_init_btlog_rings */
{
int ret = 0;
wlc_rev_info_t revinfo;
+ char buf[128];
+ dhd_prot_t *prot = dhd->prot;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
+ /* Get the RxBuf post size */
+ memset(buf, 0, sizeof(buf));
+ bcm_mkiovar("rxbufpost_sz", NULL, 0, buf, sizeof(buf));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
+ if (ret < 0) {
+ DHD_ERROR(("%s: GET RxBuf post FAILED, default to %d\n",
+ __FUNCTION__, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
+ prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
+ } else {
+ memcpy_s(&(prot->rxbufpost_sz), sizeof(prot->rxbufpost_sz), buf, sizeof(uint16));
+ if (prot->rxbufpost_sz > DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX) {
+ DHD_ERROR(("%s: Invalid RxBuf post size : %d, default to %d\n",
+ __FUNCTION__, prot->rxbufpost_sz, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
+ prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
+ } else {
+ DHD_ERROR(("%s: RxBuf Post : %d\n", __FUNCTION__, prot->rxbufpost_sz));
+ }
+ }
+
+ /* Post buffers for packet reception */
+ dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
+
DHD_SSSR_DUMP_INIT(dhd);
dhd_process_cid_mac(dhd, TRUE);
dhd_process_cid_mac(dhd, FALSE);
#if defined(DHD_H2D_LOG_TIME_SYNC)
+#ifdef DHD_HP2P
+ if (FW_SUPPORTED(dhd, h2dlogts) || dhd->hp2p_capable) {
+ if (dhd->hp2p_enable) {
+ dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH / 40;
+ } else {
+ dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
+ }
+#else
if (FW_SUPPORTED(dhd, h2dlogts)) {
dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
+#endif // endif
dhd->bus->dhd_rte_time_sync_count = OSL_SYSUPTIME_US();
/* This is during initialization. */
dhd_h2d_log_time_sync(dhd);
} else {
dhd->dhd_rte_time_sync_ms = 0;
}
-#endif /* DHD_H2D_LOG_TIME_SYNC */
+#endif /* DHD_H2D_LOG_TIME_SYNC || DHD_HP2P */
/* Always assumes wl for now */
dhd->iswl = TRUE;
done:
dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid)
{
void *p, **pktbuf;
- uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
uint8 *rxbuf_post_tmp;
host_rxbuf_post_t *rxbuf_post;
void *msg_start;
msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
void *lcl_buf;
uint16 lcl_buf_size;
+ uint16 pktsz = prot->rxbufpost_sz;
-#ifdef WL_MONITOR
- if (dhd->monitor_enable) {
- pktsz = DHD_MAX_MON_FLOWRING_RX_BUFPOST_PKTSZ;
- }
-#endif /* WL_MONITOR */
/* allocate a local buffer to store pkt buffer va, pa and length */
lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) *
RX_BUF_BURST;
uint8 *msg_addr;
uint32 msg_len;
+ if (dhd_query_bus_erros(dhd)) {
+ more = FALSE;
+ break;
+ }
+
if (dhd->hang_was_sent) {
more = FALSE;
break;
}
-#ifdef DHD_MAP_LOGGING
if (dhd->smmu_fault_occurred) {
more = FALSE;
break;
}
-#endif /* DHD_MAP_LOGGING */
DHD_RING_LOCK(ring->ring_lock, flags);
/* Get the message from ring */
return FALSE;
}
+ if (dhd_query_bus_erros(dhd)) {
+ return FALSE;
+ }
+
if (dhd->hang_was_sent) {
return FALSE;
}
}
if (items > D2HRING_EDL_WATERMARK) {
- DHD_ERROR_RLMT(("%s: WARNING! EDL watermark hit, num items=%u \n",
- __FUNCTION__, items));
+ DHD_ERROR_RLMT(("%s: WARNING! EDL watermark hit, num items=%u;"
+ " rd=%u; wr=%u; depth=%u;\n", __FUNCTION__, items,
+ ring->rd, ring->wr, depth));
}
dhd_schedule_logtrace(dhd->info);
* doorbell is acceptable since EDL only contains debug data
*/
num_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
- DHD_RING_UNLOCK(ring->ring_lock, flags);
if (num_items == 0) {
/* no work items in edl ring */
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
return 0;
}
/* if space is available, calculate address to be read */
msg_addr = (char*)ring->dma_buf.va + (ring->rd * ring->item_len);
- n = max_items_to_process = MIN(num_items, DHD_EVENT_LOGTRACE_BOUND);
+
+ max_items_to_process = MIN(num_items, DHD_EVENT_LOGTRACE_BOUND);
+
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
/* Prefetch data to populate the cache */
OSL_PREFETCH(msg_addr);
+ n = max_items_to_process;
while (n > 0) {
msg = (cmn_msg_hdr_t *)msg_addr;
/* wait for DMA of work item to complete */
- if ((err = dhd_prot_d2h_sync_edl(dhd, ring, msg)) != BCME_OK) {
- DHD_ERROR(("%s: Error waiting for DMA to cmpl in EDL ring; err = %d\n",
- __FUNCTION__, err));
+ if ((err = prot->d2h_edl_sync_cb(dhd, ring, msg)) != BCME_OK) {
+ DHD_ERROR(("%s: Error waiting for DMA to cmpl in EDL "
+ "ring; err = %d\n", __FUNCTION__, err));
}
+
/*
* Update the curr_rd to the current index in the ring, from where
* the work item is fetched. This way if the fetched work item
/* process the edl work item, i.e, the event log */
err = dhd_event_logtrace_process_edl(dhd, msg_addr, evt_decode_data);
- /* memset the read work item space to zero to avoid
- * any accidental matching of seqnum
- */
- memset(msg_addr, 0, ring->item_len);
-
- /* update rd index by 1
- * The TCM rd index is updated only if bus is not
- * in D3. Else, the rd index is updated from resume
- * context in - 'dhdpcie_bus_suspend'
- */
- DHD_RING_LOCK(ring->ring_lock, flags);
- ring->rd = ring->curr_rd;
- ASSERT(ring->rd < ring->max_items);
- DHD_RING_UNLOCK(ring->ring_lock, flags);
-
- DHD_GENERAL_LOCK(dhd, flags);
- if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
- DHD_INFO(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
- __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
- } else {
- DHD_EDL_RING_TCM_RD_UPDATE(dhd);
- }
- DHD_GENERAL_UNLOCK(dhd, flags);
+ /* Dummy sleep so that scheduler kicks in after processing any logprints */
+ OSL_SLEEP(0);
/* Prefetch data to populate the cache */
OSL_PREFETCH(msg_addr + ring->item_len);
--n;
}
+ DHD_RING_LOCK(ring->ring_lock, flags);
+ /* update host ring read pointer */
+ if ((ring->rd + max_items_to_process) >= ring->max_items)
+ ring->rd = 0;
+ else
+ ring->rd += max_items_to_process;
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
+
+ /* Now after processing max_items_to_process update dongle rd index.
+ * The TCM rd index is updated only if bus is not
+ * in D3. Else, the rd index is updated from resume
+ * context in - 'dhdpcie_bus_suspend'
+ */
+ DHD_GENERAL_LOCK(dhd, flags);
+ if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
+ DHD_INFO(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
+ __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ } else {
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_EDL_RING_TCM_RD_UPDATE(dhd);
+ }
+
/* if num_items > bound, then anyway we will reschedule and
* this function runs again, so that if in between the DPC has
* updated the wr index, then the updated wr is read. But if
dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd)
{
dhd_prot_t *prot = NULL;
+ unsigned long flags = 0;
+ msgbuf_ring_t *ring = NULL;
if (!dhd)
return;
if (!prot || !prot->d2hring_edl)
return;
- dhd_prot_upd_read_idx(dhd, prot->d2hring_edl);
+ ring = prot->d2hring_edl;
+ DHD_RING_LOCK(ring->ring_lock, flags);
+ dhd_prot_upd_read_idx(dhd, ring);
+ DHD_RING_UNLOCK(ring->ring_lock, flags);
}
#endif /* EWP_EDL */
/* called when DHD needs to check for 'receive complete' messages from the dongle */
bool BCMFASTPATH
-dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound)
+dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound, int ringtype)
{
bool more = FALSE;
uint n = 0;
dhd_prot_t *prot = dhd->prot;
- msgbuf_ring_t *ring = &prot->d2hring_rx_cpln;
- uint16 item_len = ring->item_len;
+ msgbuf_ring_t *ring;
+ uint16 item_len;
host_rxbuf_cmpl_t *msg = NULL;
uint8 *msg_addr;
uint32 msg_len;
uint32 pktid;
int i;
uint8 sync;
+ ts_timestamp_t *ts;
+ BCM_REFERENCE(ts);
+#ifdef DHD_HP2P
+ if (ringtype == DHD_HP2P_RING && prot->d2hring_hp2p_rxcpl)
+ ring = prot->d2hring_hp2p_rxcpl;
+ else
+#endif /* DHD_HP2P */
+ ring = &prot->d2hring_rx_cpln;
+ item_len = ring->item_len;
while (1) {
if (dhd_is_device_removed(dhd))
break;
+ if (dhd_query_bus_erros(dhd))
+ break;
+
if (dhd->hang_was_sent)
break;
-#ifdef DHD_MAP_LOGGING
if (dhd->smmu_fault_occurred) {
break;
}
-#endif /* DHD_MAP_LOGGING */
pkt_cnt = 0;
pktqhead = pkt_newidx = NULL;
}
/* Actual length of the packet */
PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len));
+
#if defined(WL_MONITOR)
- if (dhd_monitor_enabled(dhd, ifidx) &&
- (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11)) {
- dhd_rx_mon_pkt(dhd, msg, pkt, ifidx);
- continue;
+ if (dhd_monitor_enabled(dhd, ifidx)) {
+ if (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) {
+ dhd_rx_mon_pkt(dhd, msg, pkt, ifidx);
+ continue;
+ } else {
+ DHD_ERROR(("Received non 802.11 packet, "
+ "when monitor mode is enabled\n"));
+ }
}
-#endif // endif
+#endif /* WL_MONITOR */
if (!pktqhead) {
pktqhead = prevpkt = pkt;
}
}
+#ifdef DHD_HP2P
+ if (dhd->hp2p_capable && ring == prot->d2hring_hp2p_rxcpl) {
+#ifdef DHD_HP2P_DEBUG
+ bcm_print_bytes("Rxcpl", (uchar *)msg, sizeof(host_rxbuf_cmpl_t));
+#endif /* DHD_HP2P_DEBUG */
+ dhd_update_hp2p_rxstats(dhd, msg);
+ }
+#endif /* DHD_HP2P */
+
#ifdef DHD_LBUF_AUDIT
PKTAUDIT(dhd->osh, pkt);
#endif // endif
}
/* Call lb_dispatch only if packets are queued */
- if (n) {
+ if (n &&
+#ifdef WL_MONITOR
+ !(dhd_monitor_enabled(dhd, ifidx)) &&
+#endif /* WL_MONITOR */
+ TRUE) {
DHD_LB_DISPATCH_RX_COMPL(dhd);
DHD_LB_DISPATCH_RX_PROCESS(dhd);
}
return more;
+
}
/**
/** called when DHD needs to check for 'transmit complete' messages from the dongle */
bool BCMFASTPATH
-dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound)
+dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound, int ringtype)
{
bool more = TRUE;
uint n = 0;
- msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
+ msgbuf_ring_t *ring;
unsigned long flags;
+#ifdef DHD_HP2P
+ if (ringtype == DHD_HP2P_RING && dhd->prot->d2hring_hp2p_txcpl)
+ ring = dhd->prot->d2hring_hp2p_txcpl;
+ else
+#endif /* DHD_HP2P */
+ ring = &dhd->prot->d2hring_tx_cpln;
+
/* Process all the messages - DTOH direction */
while (!dhd_is_device_removed(dhd)) {
uint8 *msg_addr;
uint32 msg_len;
+ if (dhd_query_bus_erros(dhd)) {
+ more = FALSE;
+ break;
+ }
+
if (dhd->hang_was_sent) {
more = FALSE;
break;
}
-#ifdef DHD_MAP_LOGGING
if (dhd->smmu_fault_occurred) {
more = FALSE;
break;
}
-#endif /* DHD_MAP_LOGGING */
DHD_RING_LOCK(ring->ring_lock, flags);
/* Get the address of the next message to be read from ring */
uint8 *msg_addr;
uint32 msg_len;
+ if (dhd_query_bus_erros(dhd)) {
+ break;
+ }
+
if (dhd->hang_was_sent) {
break;
}
-#ifdef DHD_MAP_LOGGING
if (dhd->smmu_fault_occurred) {
break;
}
-#endif /* DHD_MAP_LOGGING */
DHD_RING_LOCK(ring->ring_lock, flags);
/* Get the address of the next message to be read from ring */
goto done;
}
-#ifdef DHD_MAP_LOGGING
if (dhd->smmu_fault_occurred) {
ret = BCME_ERROR;
goto done;
}
-#endif /* DHD_MAP_LOGGING */
msg = (cmn_msg_hdr_t *)buf;
DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__));
}
}
+#ifdef DHD_HP2P
+ else if (request_id == DHD_D2H_HPPRING_TXREQ_PKTID) {
+ if (dhd->prot->d2hring_hp2p_txcpl != NULL) {
+ if (dhd->prot->d2hring_hp2p_txcpl->create_pending == TRUE) {
+ DHD_ERROR(("H2D ring create failed for hp2p ring\n"));
+ dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
+ }
+ else
+ DHD_ERROR(("ring create ID for a ring, create not pending\n"));
+ } else {
+ DHD_ERROR(("%s hp2p txcmpl ring doesn't exist\n", __FUNCTION__));
+ }
+ }
+ else if (request_id == DHD_D2H_HPPRING_RXREQ_PKTID) {
+ if (dhd->prot->d2hring_hp2p_rxcpl != NULL) {
+ if (dhd->prot->d2hring_hp2p_rxcpl->create_pending == TRUE) {
+ DHD_ERROR(("D2H ring create failed for hp2p rxcmpl ring\n"));
+ dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
+ }
+ else
+ DHD_ERROR(("ring create ID for hp2p rxcmpl ring, not pending\n"));
+ } else {
+ DHD_ERROR(("%s hp2p rxcpl ring doesn't exist\n", __FUNCTION__));
+ }
+ }
+#endif /* DHD_HP2P */
else {
DHD_ERROR(("don;t know how to pair with original request\n"));
}
}
#endif // endif
+ dhd->prot->ioctl_ack_time = OSL_LOCALTIME_NS();
+
DHD_GENERAL_LOCK(dhd, flags);
if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
(dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
unsigned long flags;
dhd_dma_buf_t retbuf;
+ /* Check for the ioctl timeout induce flag, which is set by firing
+ * a dhd iovar to induce an IOCTL timeout. If the flag is set,
+ * return from here, which results in an IOCTL timeout.
+ */
+ if (dhd->dhd_induce_error == DHD_INDUCE_IOCTL_TIMEOUT) {
+ DHD_ERROR(("%s: Inducing resumed on timeout\n", __FUNCTION__));
+ return;
+ }
+
memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
return;
}
+ dhd->prot->ioctl_cmplt_time = OSL_LOCALTIME_NS();
+
/* Clear Response pending bit */
prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
DHD_GENERAL_UNLOCK(dhd, flags);
dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
}
+int
+dhd_prot_check_tx_resource(dhd_pub_t *dhd)
+{
+ return dhd->prot->no_tx_resource;
+}
+
+void
+dhd_prot_update_pktid_txq_stop_cnt(dhd_pub_t *dhd)
+{
+ dhd->prot->pktid_txq_stop_cnt++;
+}
+
+void
+dhd_prot_update_pktid_txq_start_cnt(dhd_pub_t *dhd)
+{
+ dhd->prot->pktid_txq_start_cnt++;
+}
+
/** called on MSG_TYPE_TX_STATUS message received from dongle */
static void BCMFASTPATH
dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg)
void *secdma;
bool pkt_fate;
msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
-#ifdef TX_STATUS_LATENCY_STATS
+#if defined(TX_STATUS_LATENCY_STATS) || defined(DHD_HP2P)
flow_info_t *flow_info;
uint64 tx_status_latency;
-#endif /* TX_STATUS_LATENCY_STATS */
+#endif /* TX_STATUS_LATENCY_STATS || DHD_HP2P */
#if defined(TX_STATUS_LATENCY_STATS)
flow_ring_node_t *flow_ring_node;
uint16 flowid;
#endif // endif
+ ts_timestamp_t *ts;
+ BCM_REFERENCE(ts);
txstatus = (host_txbuf_cmpl_t *)msg;
#if defined(TX_STATUS_LATENCY_STATS)
flowid = txstatus->compl_hdr.flow_ring_id;
return;
}
+ if (DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map) == DHD_PKTID_MIN_AVAIL_COUNT) {
+ dhd->prot->no_tx_resource = FALSE;
+ dhd_bus_start_queue(dhd->bus);
+ }
+
if (SECURE_DMA_ENAB(dhd->osh)) {
int offset = 0;
BCM_REFERENCE(offset);
#endif /* DMAMAP_STATS */
pkt_fate = dhd_dbg_process_tx_status(dhd, pkt, pktid,
ltoh16(txstatus->compl_hdr.status) & WLFC_CTL_PKTFLAG_MASK);
+
#if defined(BCMPCIE)
dhd_txcomplete(dhd, pkt, pkt_fate);
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ dhd_eap_txcomplete(dhd, pkt, pkt_fate, txstatus->cmn_hdr.if_id);
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
#endif // endif
#if DHD_DBG_SHOW_METADATA
}
#endif /* DHD_DBG_SHOW_METADATA */
+#ifdef DHD_HP2P
+ if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
+#ifdef DHD_HP2P_DEBUG
+ bcm_print_bytes("txcpl", (uint8 *)txstatus, sizeof(host_txbuf_cmpl_t));
+#endif /* DHD_HP2P_DEBUG */
+ dhd_update_hp2p_txstats(dhd, txstatus);
+ }
+#endif /* DHD_HP2P */
+
#ifdef DHD_LBUF_AUDIT
PKTAUDIT(dhd->osh, pkt);
#endif // endif
- DHD_RING_UNLOCK(ring->ring_lock, flags);
- PKTFREE(dhd->osh, pkt, TRUE);
- DHD_RING_LOCK(ring->ring_lock, flags);
DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
txstatus->tx_status);
DHD_RING_UNLOCK(ring->ring_lock, flags);
-
+ PKTFREE(dhd->osh, pkt, TRUE);
return;
} /* dhd_prot_txstatus_process */
DHD_ERROR(("dhd flow_ring_table is NULL\n"));
return BCME_NORESOURCE;
}
+#ifdef DHD_PCIE_PKTID
+ if (!DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map)) {
+ if (dhd->prot->pktid_depleted_cnt == DHD_PKTID_DEPLETED_MAX_COUNT) {
+ dhd_bus_stop_queue(dhd->bus);
+ dhd->prot->no_tx_resource = TRUE;
+ }
+ dhd->prot->pktid_depleted_cnt++;
+ goto err_no_res;
+ } else {
+ dhd->prot->pktid_depleted_cnt = 0;
+ }
+#endif /* DHD_PCIE_PKTID */
flowid = DHD_PKT_GET_FLOWID(PKTBUF);
flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
goto err_free_pktid;
}
- DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid);
-
/* Extract the data pointer and length information */
pktdata = PKTDATA(dhd->osh, PKTBUF);
pktlen = PKTLEN(dhd->osh, PKTBUF);
+ DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid);
+
/* Ethernet header: Copy before we cache flush packet using DMA_MAP */
bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
} else {
- txdesc->metadata_buf_len = htol16(0);
- txdesc->metadata_buf_addr.high_addr = 0;
- txdesc->metadata_buf_addr.low_addr = 0;
+#ifdef DHD_HP2P
+ if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
+ dhd_update_hp2p_txdesc(dhd, txdesc);
+ } else
+#endif /* DHD_HP2P */
+ if (1)
+ {
+ txdesc->metadata_buf_len = htol16(0);
+ txdesc->metadata_buf_addr.high_addr = 0;
+ txdesc->metadata_buf_addr.low_addr = 0;
+ }
}
#ifdef DHD_PKTID_AUDIT_RING
}
/* Update the write pointer in TCM & ring bell */
-#ifdef TXP_FLUSH_NITEMS
+#if defined(DHD_HP2P) && defined(TXP_FLUSH_NITEMS)
+ if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
+ dhd_calc_hp2p_burst(dhd, ring, flowid);
+ } else {
+ if ((ring->pend_items_count == prot->txp_threshold) ||
+ ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
+ dhd_prot_txdata_write_flush(dhd, flowid);
+ }
+ }
+#elif defined(TXP_FLUSH_NITEMS)
/* Flush if we have either hit the txp_threshold or if this msg is */
/* occupying the last slot in the flow_ring - before wrap around. */
if ((ring->pend_items_count == prot->txp_threshold) ||
#else
/* update ring's WR index and ring doorbell to dongle */
dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
-#endif // endif
-#ifdef TX_STATUS_LATENCY_STATS
+#endif /* DHD_HP2P && TXP_FLUSH_NITEMS */
+
+#if defined(TX_STATUS_LATENCY_STATS)
/* set the time when pkt is queued to flowring */
DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US());
-#endif /* TX_STATUS_LATENCY_STATS */
+#endif // endif
OSL_ATOMIC_INC(dhd->osh, &prot->active_tx_count);
/*
#endif /* DHD_PCIE_PKTID */
DHD_RING_UNLOCK(ring->ring_lock, flags);
-
+err_no_res:
return BCME_NORESOURCE;
} /* dhd_prot_txdata */
uint8 action;
if (dhd->bus->is_linkdown) {
- DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n", __FUNCTION__));
+ DHD_ERROR_RLMT(("%s : PCIe link is down. we have nothing to do\n", __FUNCTION__));
+ goto done;
+ }
+
+ if (dhd_query_bus_erros(dhd)) {
+ DHD_ERROR_RLMT(("%s : some BUS error. we have nothing to do\n", __FUNCTION__));
goto done;
}
if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
- DHD_ERROR(("%s : bus is down. we have nothing to do -"
+ DHD_ERROR_RLMT(("%s : bus is down. we have nothing to do -"
" bus state: %d, sent hang: %d\n", __FUNCTION__,
dhd->busstate, dhd->hang_was_sent));
goto done;
dhd_dma_buf_free(dhdp, dmmap->dstmem);
MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t));
+
+ dhdp->bus->dmaxfer_complete = TRUE;
+ dhd_os_dmaxfer_wake(dhdp);
+
dmmap = NULL;
} /* dmaxfer_free_prev_dmaaddr */
dhd_prot_t *prot = dhd->prot;
uint64 end_usec;
pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg;
+ int buf_free_scheduled;
BCM_REFERENCE(cmplt);
end_usec = OSL_SYSUPTIME_US();
prot->dmaxfer.dstmem.va, prot->dmaxfer.len) ||
cmplt->compl_hdr.status != BCME_OK) {
DHD_ERROR(("DMA loopback failed\n"));
+ /* It is observed that sometimes the completion
+ * header status is set to OK, but the memcmp fails;
+ * hence always explicitly set the dmaxfer status
+ * to error if this happens.
+ */
+ prot->dmaxfer.status = BCME_ERROR;
prhex("XFER SRC: ",
prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
prhex("XFER DST: ",
prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
- prot->dmaxfer.status = BCME_ERROR;
}
else {
switch (prot->dmaxfer.d11_lpbk) {
}
}
- dhd_prepare_schedule_dmaxfer_free(dhd);
+ buf_free_scheduled = dhd_prepare_schedule_dmaxfer_free(dhd);
end_usec -= prot->dmaxfer.start_usec;
- if (end_usec)
+ if (end_usec) {
+ prot->dmaxfer.time_taken = end_usec;
DHD_ERROR(("DMA loopback %d bytes in %lu usec, %u kBps\n",
prot->dmaxfer.len, (unsigned long)end_usec,
(prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)end_usec)));
+ }
dhd->prot->dmaxfer.in_progress = FALSE;
- dhd->bus->dmaxfer_complete = TRUE;
- dhd_os_dmaxfer_wake(dhd);
+ if (buf_free_scheduled != BCME_OK) {
+ dhd->bus->dmaxfer_complete = TRUE;
+ dhd_os_dmaxfer_wake(dhd);
+ }
}
/** Test functionality.
return BCME_OK;
} /* dhdmsgbuf_dmaxfer_req */
-dma_xfer_status_t
-dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd)
+int
+dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd, dma_xfer_info_t *result)
{
dhd_prot_t *prot = dhd->prot;
if (prot->dmaxfer.in_progress)
- return DMA_XFER_IN_PROGRESS;
- else if (prot->dmaxfer.status == BCME_OK)
- return DMA_XFER_SUCCESS;
+ result->status = DMA_XFER_IN_PROGRESS;
+ else if (prot->dmaxfer.status == 0)
+ result->status = DMA_XFER_SUCCESS;
else
- return DMA_XFER_FAILED;
+ result->status = DMA_XFER_FAILED;
+
+ result->type = prot->dmaxfer.d11_lpbk;
+ result->error_code = prot->dmaxfer.status;
+ result->num_bytes = prot->dmaxfer.len;
+ result->time_taken = prot->dmaxfer.time_taken;
+ if (prot->dmaxfer.time_taken) {
+ /* throughput in kBps */
+ result->tput =
+ (prot->dmaxfer.len * (1000 * 1000 / 1024)) /
+ (uint32)prot->dmaxfer.time_taken;
+ }
+
+ return BCME_OK;
}
/** Called in the process of submitting an ioctl to the dongle */
void
dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd)
{
-
uint32 intstatus;
dhd_prot_t *prot = dhd->prot;
-
dhd->rxcnt_timeout++;
dhd->rx_ctlerrs++;
dhd->iovar_timeout_occured = TRUE;
- DHD_ERROR(("%s: resumed on timeout rxcnt_timeout %d ioctl_cmd %d "
- "trans_id %d state %d busstate=%d ioctl_received=%d\n",
- __FUNCTION__, dhd->rxcnt_timeout, prot->curr_ioctl_cmd,
- prot->ioctl_trans_id, prot->ioctl_state,
- dhd->busstate, prot->ioctl_received));
+ DHD_ERROR(("%s: resumed on timeout rxcnt_timeout%s %d ioctl_cmd %d "
+ "trans_id %d state %d busstate=%d ioctl_received=%d\n", __FUNCTION__,
+ dhd->is_sched_error ? " due to scheduling problem" : "",
+ dhd->rxcnt_timeout, prot->curr_ioctl_cmd, prot->ioctl_trans_id,
+ prot->ioctl_state, dhd->busstate, prot->ioctl_received));
+#if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
+ if (dhd->is_sched_error && dhd->memdump_enabled == DUMP_MEMFILE_BUGON) {
+ /* change g_assert_type to trigger Kernel panic */
+ g_assert_type = 2;
+ /* use ASSERT() to trigger panic */
+ ASSERT(0);
+ }
+#endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
+
if (prot->curr_ioctl_cmd == WLC_SET_VAR ||
prot->curr_ioctl_cmd == WLC_GET_VAR) {
char iovbuf[32];
/* Check the PCIe link status by reading intstatus register */
intstatus = si_corereg(dhd->bus->sih,
- dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
+ dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
if (intstatus == (uint32)-1) {
DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
dhd->bus->is_linkdown = TRUE;
goto out;
}
- timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received, false);
+ timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
#ifdef DHD_RECOVER_TIMEOUT
if (prot->ioctl_received == 0) {
dhd->bus->ipend = TRUE;
dhd->bus->dpc_sched = TRUE;
dhd_sched_dpc(dhd);
- timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received, true);
+ timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received);
}
}
} else {
}
if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
+ /* check if resumed on time out related to scheduling issue */
+ dhd->is_sched_error = FALSE;
+ if (dhd->bus->isr_entry_time > prot->ioctl_fillup_time) {
+ dhd->is_sched_error = dhd_bus_query_dpc_sched_errors(dhd);
+ }
dhd_msgbuf_iovar_timeout_dump(dhd);
dhd_bus_mem_dump(dhd);
}
#endif /* DHD_FW_COREDUMP */
+
ret = -ETIMEDOUT;
goto out;
} else {
}
#ifdef DHD_DUMP_PCIE_RINGS
-int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, unsigned long *file_posn)
+int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, const void *user_buf,
+ unsigned long *file_posn, bool file_write)
{
dhd_prot_t *prot;
msgbuf_ring_t *ring;
int ret = 0;
+ uint16 h2d_flowrings_total;
+ uint16 flowid;
if (!(dhd) || !(dhd->prot)) {
goto exit;
/* Below is the same ring dump sequence followed in parser as well. */
ring = &prot->h2dring_ctrl_subn;
- if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0)
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
goto exit;
ring = &prot->h2dring_rxp_subn;
- if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0)
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
goto exit;
ring = &prot->d2hring_ctrl_cpln;
- if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0)
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
goto exit;
ring = &prot->d2hring_tx_cpln;
- if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0)
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
goto exit;
ring = &prot->d2hring_rx_cpln;
- if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0)
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
goto exit;
- ring = prot->h2d_flowrings_pool;
- if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0)
- goto exit;
+ h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
+ FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) {
+ goto exit;
+ }
+ }
#ifdef EWP_EDL
- if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
+ if (dhd->dongle_edl_support) {
+ ring = prot->d2hring_edl;
+ if ((ret = dhd_edl_ring_hdr_write(dhd, ring, file, user_buf, file_posn)) < 0)
+ goto exit;
+ }
+ else if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
#else
if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
#endif /* EWP_EDL */
{
ring = prot->h2dring_info_subn;
- if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0)
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
goto exit;
ring = prot->d2hring_info_cpln;
- if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0)
+ if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
goto exit;
}
/* Write to file */
static
-int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, unsigned long *file_posn)
+int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
+ const void *user_buf, unsigned long *file_posn)
{
int ret = 0;
__FUNCTION__));
return BCME_ERROR;
}
- ret = dhd_os_write_file_posn(file, file_posn, (char *)(ring->dma_buf.va),
- ((unsigned long)(ring->max_items) * (ring->item_len)));
- if (ret < 0) {
- DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
- return BCME_ERROR;
+ if (file) {
+ ret = dhd_os_write_file_posn(file, file_posn, (char *)(ring->dma_buf.va),
+ ((unsigned long)(ring->max_items) * (ring->item_len)));
+ if (ret < 0) {
+ DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
+ ret = BCME_ERROR;
+ }
+ } else if (user_buf) {
+ ret = dhd_export_debug_data((char *)(ring->dma_buf.va), NULL, user_buf,
+ ((unsigned long)(ring->max_items) * (ring->item_len)), (int *)file_posn);
}
- return BCME_OK;
+ return ret;
}
#endif /* DHD_DUMP_PCIE_RINGS */
+#ifdef EWP_EDL
+/* Write to file */
+static
+int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
+ unsigned long *file_posn)
+{
+ int ret = 0, nitems = 0;
+ char *buf = NULL, *ptr = NULL;
+ uint8 *msg_addr = NULL;
+ uint16 rd = 0;
+
+ if (ring == NULL) {
+ DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
+ __FUNCTION__));
+ ret = BCME_ERROR;
+ goto done;
+ }
+
+ buf = MALLOCZ(dhd->osh, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
+ if (buf == NULL) {
+ DHD_ERROR(("%s: buffer allocation failed\n", __FUNCTION__));
+ ret = BCME_ERROR;
+ goto done;
+ }
+ ptr = buf;
+
+ for (; nitems < D2HRING_EDL_MAX_ITEM; nitems++, rd++) {
+ msg_addr = (uint8 *)ring->dma_buf.va + (rd * ring->item_len);
+ memcpy(ptr, (char *)msg_addr, D2HRING_EDL_HDR_SIZE);
+ ptr += D2HRING_EDL_HDR_SIZE;
+ }
+ if (file) {
+ ret = dhd_os_write_file_posn(file, file_posn, buf,
+ (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM));
+ if (ret < 0) {
+ DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
+ goto done;
+ }
+ }
+ else {
+ ret = dhd_export_debug_data(buf, NULL, user_buf,
+ (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM), file_posn);
+ }
+
+done:
+ if (buf) {
+ MFREE(dhd->osh, buf, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
+ }
+ return ret;
+}
+#endif /* EWP_EDL */
+
/** Add prot dump output to a buffer */
void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
{
dhd->prot->rw_index_sz);
bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
h2d_max_txpost, dhd->prot->h2d_max_txpost);
+ bcm_bprintf(b, "pktid_txq_start_cnt: %d\n", dhd->prot->pktid_txq_start_cnt);
+ bcm_bprintf(b, "pktid_txq_stop_cnt: %d\n", dhd->prot->pktid_txq_stop_cnt);
+ bcm_bprintf(b, "pktid_depleted_cnt: %d\n", dhd->prot->pktid_depleted_cnt);
}
/* Update local copy of dongle statistics */
ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
if (ret_buf == NULL) {
+ /* HWA TODO, need to get RD pointer from different array
+ * which HWA will directly write into host memory
+ */
/* if alloc failed , invalidate cached read ptr */
if (dhd->dma_d2h_ring_upd_support) {
ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
/* copy ioct payload */
ioct_buf = (void *) prot->ioctbuf.va;
+ prot->ioctl_fillup_time = OSL_LOCALTIME_NS();
+
if (buf)
memcpy(ioct_buf, buf, len);
ring->wr = 0;
ring->rd = 0;
ring->curr_rd = 0;
+ /* Reset hwa_db_type for all rings,
+ * for data path rings, it will be assigned separately post init
+ * from dhd_prot_d2h_sync_init and dhd_prot_h2d_sync_init
+ */
+ ring->hwa_db_type = 0;
/* CAUTION: ring::base_addr already in Little Endian */
dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr,
} /* dhd_prot_ring_detach */
-/*
- * +----------------------------------------------------------------------------
- * Flowring Pool
- *
- * Unlike common rings, which are attached very early on (dhd_prot_attach),
- * flowrings are dynamically instantiated. Moreover, flowrings may require a
- * larger DMA-able buffer. To avoid issues with fragmented cache coherent
- * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
- * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
- *
- * Each DMA-able buffer may be allocated independently, or may be carved out
- * of a single large contiguous region that is registered with the protocol
- * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
- * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
- *
- * No flowring pool action is performed in dhd_prot_attach(), as the number
- * of h2d rings is not yet known.
- *
- * In dhd_prot_init(), the dongle advertized number of h2d rings is used to
- * determine the number of flowrings required, and a pool of msgbuf_rings are
- * allocated and a DMA-able buffer (carved or allocated) is attached.
- * See: dhd_prot_flowrings_pool_attach()
- *
- * A flowring msgbuf_ring object may be fetched from this pool during flowring
- * creation, using the flowid. Likewise, flowrings may be freed back into the
- * pool on flowring deletion.
- * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
- *
- * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
- * are detached (returned back to the carved region or freed), and the pool of
- * msgbuf_ring and any objects allocated against it are freed.
- * See: dhd_prot_flowrings_pool_detach()
- *
- * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
- * state as-if upon an attach. All DMA-able buffers are retained.
- * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
- * pool attach will notice that the pool persists and continue to use it. This
- * will avoid the case of a fragmented DMA-able region.
- *
- * +----------------------------------------------------------------------------
- */
-
-/* Conversion of a flowid to a flowring pool index */
-#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
- ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
-
-/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
-#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
- (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
- DHD_FLOWRINGS_POOL_OFFSET(flowid)
-
-/* Traverse each flowring in the flowring pool, assigning ring and flowid */
-#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \
- for ((flowid) = DHD_FLOWRING_START_FLOWID, \
- (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
- (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \
- (ring)++, (flowid)++)
-
/* Fetch number of H2D flowrings given the total number of h2d rings */
-static uint16
+uint16
dhd_get_max_flow_rings(dhd_pub_t *dhd)
{
if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
goto attach_fail;
}
+ /*
+ * TODO - Currently flowrings hwa is disabled and can be enabled like below
+ * (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_TXPOSTS) ? HWA_DB_TYPE_TXPOSTS : 0;
+ */
+ ring->hwa_db_type = 0;
}
return BCME_OK;
/* cache flush */
OSL_CACHE_FLUSH(p, ring->item_len * nitems);
+ /* For HWA, update db_index and ring mb2 DB and return */
+ if (HWA_ACTIVE(dhd) && ring->hwa_db_type) {
+ db_index = HWA_DB_INDEX_VALUE(ring->wr) | ring->hwa_db_type;
+ DHD_TRACE(("%s: ring(%s) wr(%d) hwa_db_type(0x%x) db_index(0x%x)\n",
+ __FUNCTION__, ring->name, ring->wr, ring->hwa_db_type, db_index));
+ prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
+ return;
+ }
+
if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
dhd_prot_dma_indx_set(dhd, ring->wr,
H2D_DMA_INDX_WR_UPD, ring->idx);
uint32 db_index;
uint corerev;
+ /* For HWA, update db_index and ring mb2 DB and return */
+ if (HWA_ACTIVE(dhd) && ring->hwa_db_type) {
+ db_index = HWA_DB_INDEX_VALUE(ring->rd) | ring->hwa_db_type;
+ DHD_TRACE(("%s: ring(%s) rd(0x%x) hwa_db_type(0x%x) db_index(0x%x)\n",
+ __FUNCTION__, ring->name, ring->rd, ring->hwa_db_type, db_index));
+ prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
+ return;
+ }
+
/* update read index */
/* If dma'ing h2d indices supported
* update the r -indices in the
*/
if (IDMA_ACTIVE(dhd)) {
dhd_prot_dma_indx_set(dhd, ring->rd,
- D2H_DMA_INDX_RD_UPD, ring->idx);
+ D2H_DMA_INDX_RD_UPD, ring->idx);
db_index = IDMA_IDX1;
if (dhd->bus->sih) {
corerev = dhd->bus->sih->buscorerev;
dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n",
__FUNCTION__, flow_ring_node->flowid));
- DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
+ DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
return BCME_NOMEM;
}
flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
flow_create_rqst->max_items = htol16(prot->h2d_max_txpost);
flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
+ flow_create_rqst->if_flags = 0;
+
+#ifdef DHD_HP2P
+ /* Create HPP flow ring if HP2P is enabled and TID=7 and AWDL interface */
+ /* and traffic is not multicast */
+ /* Allow infra interface only if user enabled hp2p_infra_enable thru iovar */
+ /* Allow only one HP2P Flow active at a time */
+ if (dhd->hp2p_capable && !dhd->hp2p_ring_active &&
+ flow_ring_node->flow_info.tid == HP2P_PRIO &&
+ (dhd->hp2p_infra_enable || flow_create_rqst->msg.if_id) &&
+ !ETHER_ISMULTI(flow_create_rqst->da)) {
+ flow_create_rqst->if_flags |= BCMPCIE_FLOW_RING_INTF_HP2P;
+ flow_ring_node->hp2p_ring = TRUE;
+ dhd->hp2p_ring_active = TRUE;
+
+ DHD_ERROR(("%s: flow ring for HP2P tid = %d flowid = %d\n",
+ __FUNCTION__, flow_ring_node->flow_info.tid,
+ flow_ring_node->flowid));
+ }
+#endif /* DHD_HP2P */
/* definition for ifrm mask : bit0:d11ac core, bit1:d11ad core
* currently it is not used for priority. so uses solely for ifrm mask
ltoh16(resp->cmplt.ring_id),
ltoh32(resp->cmn_hdr.request_id)));
if ((ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) &&
- (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_BTLOGRING_REQ_PKTID)) {
+ (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_BTLOGRING_REQ_PKTID) &&
+#ifdef DHD_HP2P
+ (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_TXREQ_PKTID) &&
+ (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_RXREQ_PKTID) &&
+#endif /* DHD_HP2P */
+ TRUE) {
DHD_ERROR(("invalid request ID with d2h ring create complete\n"));
return;
}
#endif /* EWP_EDL */
}
+#ifdef DHD_HP2P
+ if (dhd->prot->d2hring_hp2p_txcpl &&
+ ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_TXREQ_PKTID) {
+ if (!dhd->prot->d2hring_hp2p_txcpl->create_pending) {
+ DHD_ERROR(("HPP tx ring create status for not pending cpl ring\n"));
+ return;
+ }
+
+ if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
+ DHD_ERROR(("HPP tx cpl ring create failed with status %d\n",
+ ltoh16(resp->cmplt.status)));
+ return;
+ }
+ dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
+ dhd->prot->d2hring_hp2p_txcpl->inited = TRUE;
+ }
+ if (dhd->prot->d2hring_hp2p_rxcpl &&
+ ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_RXREQ_PKTID) {
+ if (!dhd->prot->d2hring_hp2p_rxcpl->create_pending) {
+ DHD_ERROR(("HPP rx ring create status for not pending cpl ring\n"));
+ return;
+ }
+
+ if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
+ DHD_ERROR(("HPP rx cpl ring create failed with status %d\n",
+ ltoh16(resp->cmplt.status)));
+ return;
+ }
+ dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
+ dhd->prot->d2hring_hp2p_rxcpl->inited = TRUE;
+ }
+#endif /* DHD_HP2P */
}
static void
if (fmt == NULL) {
fmt = default_fmt;
}
+
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("%s: Skip dumping flowring due to Link down\n", __FUNCTION__));
+ return;
+ }
+
dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va,
msgbuf_ring_t *ring;
uint16 rd, wr;
uint32 dma_buf_len;
+ uint64 current_time;
+ ulong ring_tcm_rd_addr; /* dongle address */
+ ulong ring_tcm_wr_addr; /* dongle address */
DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n"));
DHD_ERROR(("DHD: %s\n", dhd_version));
DHD_ERROR(("Firmware: %s\n", fw_version));
+#ifdef DHD_FW_COREDUMP
+ DHD_ERROR(("\n ------- DUMPING CONFIGURATION INFORMATION ------ \r\n"));
+ DHD_ERROR(("memdump mode: %d\n", dhd->memdump_enabled));
+#endif /* DHD_FW_COREDUMP */
+
DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n"));
DHD_ERROR(("ICPrevs: Dev %d, Host %d, active %d\n",
prot->device_ipc_version,
DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
h2d_max_txpost, prot->h2d_max_txpost));
+ current_time = OSL_LOCALTIME_NS();
+ DHD_ERROR(("current_time="SEC_USEC_FMT"\n", GET_SEC_USEC(current_time)));
+ DHD_ERROR(("ioctl_fillup_time="SEC_USEC_FMT
+ " ioctl_ack_time="SEC_USEC_FMT
+ " ioctl_cmplt_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(prot->ioctl_fillup_time),
+ GET_SEC_USEC(prot->ioctl_ack_time),
+ GET_SEC_USEC(prot->ioctl_cmplt_time)));
+
+ /* Check PCIe INT registers */
+ if (!dhd_pcie_dump_int_regs(dhd)) {
+ DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
+ dhd->bus->is_linkdown = TRUE;
+ }
+
DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));
ring = &prot->h2dring_ctrl_subn;
dma_buf_len = ring->max_items * ring->item_len;
- DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
- ltoh32(ring->base_addr.low_addr), dma_buf_len));
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
if (dhd->bus->is_linkdown) {
DHD_ERROR(("CtrlPost: From Shared Mem: RD and WR are invalid"
ring = &prot->d2hring_ctrl_cpln;
dma_buf_len = ring->max_items * ring->item_len;
- DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
- ltoh32(ring->base_addr.low_addr), dma_buf_len));
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
if (dhd->bus->is_linkdown) {
DHD_ERROR(("CtrlCpl: From Shared Mem: RD and WR are invalid"
ring = prot->h2dring_info_subn;
if (ring) {
dma_buf_len = ring->max_items * ring->item_len;
- DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
- ltoh32(ring->base_addr.low_addr), dma_buf_len));
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
+ dma_buf_len));
DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
if (dhd->bus->is_linkdown) {
DHD_ERROR(("InfoSub: From Shared Mem: RD and WR are invalid"
ring = prot->d2hring_info_cpln;
if (ring) {
dma_buf_len = ring->max_items * ring->item_len;
- DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
+ DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
- ltoh32(ring->base_addr.low_addr), dma_buf_len));
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
+ dma_buf_len));
DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
if (dhd->bus->is_linkdown) {
DHD_ERROR(("InfoCpl: From Shared Mem: RD and WR are invalid"
ring = &prot->d2hring_tx_cpln;
if (ring) {
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
dma_buf_len = ring->max_items * ring->item_len;
- DHD_ERROR(("TxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
+ DHD_ERROR(("TxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
- ltoh32(ring->base_addr.low_addr), dma_buf_len));
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
+ dma_buf_len));
DHD_ERROR(("TxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
if (dhd->bus->is_linkdown) {
DHD_ERROR(("TxCpl: From Shared Mem: RD and WR are invalid"
ring = &prot->d2hring_rx_cpln;
if (ring) {
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
dma_buf_len = ring->max_items * ring->item_len;
- DHD_ERROR(("RxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
+ DHD_ERROR(("RxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
- ltoh32(ring->base_addr.low_addr), dma_buf_len));
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
+ dma_buf_len));
DHD_ERROR(("RxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
if (dhd->bus->is_linkdown) {
DHD_ERROR(("RxCpl: From Shared Mem: RD and WR are invalid"
#ifdef EWP_EDL
ring = prot->d2hring_edl;
if (ring) {
+ ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
+ ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
dma_buf_len = ring->max_items * ring->item_len;
- DHD_ERROR(("EdlRing: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
+ DHD_ERROR(("EdlRing: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
+ "SIZE %d \r\n",
ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
- ltoh32(ring->base_addr.low_addr), dma_buf_len));
+ ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
+ dma_buf_len));
DHD_ERROR(("EdlRing: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
if (dhd->bus->is_linkdown) {
DHD_ERROR(("EdlRing: From Shared Mem: RD and WR are invalid"
DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n",
__FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted));
+#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
+ DHD_ERROR(("%s: multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
+ __FUNCTION__, dhd->multi_client_flow_rings, dhd->max_multi_client_flow_rings));
+#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
+
+ DHD_ERROR(("pktid_txq_start_cnt: %d\n", prot->pktid_txq_start_cnt));
+ DHD_ERROR(("pktid_txq_stop_cnt: %d\n", prot->pktid_txq_stop_cnt));
+ DHD_ERROR(("pktid_depleted_cnt: %d\n", prot->pktid_depleted_cnt));
dhd_pcie_debug_info_dump(dhd);
return dhdp->prot->ioctl_trans_id;
}
-void dhd_get_hscb_info(struct dhd_prot *prot, void ** va, uint32 *len)
+int dhd_get_hscb_info(dhd_pub_t *dhd, void ** va, uint32 *len)
{
+ if (!dhd->hscb_enable) {
+ if (len) {
+ /* prevent "Operation not supported" dhd message */
+ *len = 0;
+ return BCME_OK;
+ }
+ return BCME_UNSUPPORTED;
+ }
+
if (va) {
- *va = prot->host_scb_buf.va;
+ *va = dhd->prot->host_scb_buf.va;
}
if (len) {
- *len = prot->host_scb_buf.len;
+ *len = dhd->prot->host_scb_buf.len;
+ }
+
+ return BCME_OK;
+}
+
+#ifdef DHD_HP2P
+uint32
+dhd_prot_pkt_threshold(dhd_pub_t *dhd, bool set, uint32 val)
+{
+ if (set)
+ dhd->pkt_thresh = (uint16)val;
+
+ val = dhd->pkt_thresh;
+
+ return val;
+}
+
+uint32
+dhd_prot_time_threshold(dhd_pub_t *dhd, bool set, uint32 val)
+{
+ if (set)
+ dhd->time_thresh = (uint16)val;
+
+ val = dhd->time_thresh;
+
+ return val;
+}
+
+uint32
+dhd_prot_pkt_expiry(dhd_pub_t *dhd, bool set, uint32 val)
+{
+ if (set)
+ dhd->pkt_expiry = (uint16)val;
+
+ val = dhd->pkt_expiry;
+
+ return val;
+}
+
+uint8
+dhd_prot_hp2p_enable(dhd_pub_t *dhd, bool set, int enable)
+{
+ uint8 ret = 0;
+ if (set) {
+ dhd->hp2p_enable = (enable & 0xf) ? TRUE : FALSE;
+ dhd->hp2p_infra_enable = ((enable >> 4) & 0xf) ? TRUE : FALSE;
+
+ if (enable) {
+ dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_TID_MAP);
+ } else {
+ dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_AC_MAP);
+ }
+ }
+ ret = dhd->hp2p_infra_enable ? 0x1:0x0;
+ ret <<= 4;
+ ret |= dhd->hp2p_enable ? 0x1:0x0;
+
+ return ret;
+}
+
+static void
+dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus)
+{
+ ts_timestamp_t *ts = (ts_timestamp_t *)&rxstatus->ts;
+ hp2p_info_t *hp2p_info;
+ uint32 dur1;
+
+ hp2p_info = &dhd->hp2p_info[0];
+ dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 100;
+
+ if (dur1 > (MAX_RX_HIST_BIN - 1)) {
+ dur1 = MAX_RX_HIST_BIN - 1;
+ DHD_ERROR(("%s: 0x%x 0x%x\n",
+ __FUNCTION__, ts->low, ts->high));
+ }
+
+ hp2p_info->rx_t0[dur1 % MAX_RX_HIST_BIN]++;
+ return;
+}
+
+static void
+dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus)
+{
+ ts_timestamp_t *ts = (ts_timestamp_t *)&txstatus->ts;
+ uint16 flowid = txstatus->compl_hdr.flow_ring_id;
+ uint32 hp2p_flowid, dur1, dur2;
+ hp2p_info_t *hp2p_info;
+
+ hp2p_flowid = dhd->bus->max_submission_rings -
+ dhd->bus->max_cmn_rings - flowid + 1;
+ hp2p_info = &dhd->hp2p_info[hp2p_flowid];
+ ts = (ts_timestamp_t *)&(txstatus->ts);
+
+ dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 1000;
+ if (dur1 > (MAX_TX_HIST_BIN - 1)) {
+ dur1 = MAX_TX_HIST_BIN - 1;
+ DHD_ERROR(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
+ }
+ hp2p_info->tx_t0[dur1 % MAX_TX_HIST_BIN]++;
+
+ dur2 = (((ts->high >> 10) & 0x3FF) * HP2P_TIME_SCALE) / 1000;
+ if (dur2 > (MAX_TX_HIST_BIN - 1)) {
+ dur2 = MAX_TX_HIST_BIN - 1;
+ DHD_ERROR(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
+ }
+
+ hp2p_info->tx_t1[dur2 % MAX_TX_HIST_BIN]++;
+ return;
+}
+
+enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer)
+{
+ hp2p_info_t *hp2p_info;
+ unsigned long flags;
+ dhd_pub_t *dhdp;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ hp2p_info = container_of(timer, hp2p_info_t, timer.timer);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+ dhdp = hp2p_info->dhd_pub;
+ if (!dhdp) {
+ goto done;
+ }
+
+ DHD_INFO(("%s: pend_item = %d flowid = %d\n",
+ __FUNCTION__, ((msgbuf_ring_t *)hp2p_info->ring)->pend_items_count,
+ hp2p_info->flowid));
+
+ flags = dhd_os_hp2plock(dhdp);
+
+ dhd_prot_txdata_write_flush(dhdp, hp2p_info->flowid);
+ hp2p_info->hrtimer_init = FALSE;
+ hp2p_info->num_timer_limit++;
+
+ dhd_os_hp2punlock(dhdp, flags);
+done:
+ return HRTIMER_NORESTART;
+}
+
+static void
+dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid)
+{
+ hp2p_info_t *hp2p_info;
+ uint16 hp2p_flowid;
+
+ hp2p_flowid = dhd->bus->max_submission_rings -
+ dhd->bus->max_cmn_rings - flowid + 1;
+ hp2p_info = &dhd->hp2p_info[hp2p_flowid];
+
+ if (ring->pend_items_count == dhd->pkt_thresh) {
+ dhd_prot_txdata_write_flush(dhd, flowid);
+
+ hp2p_info->hrtimer_init = FALSE;
+ hp2p_info->ring = NULL;
+ hp2p_info->num_pkt_limit++;
+ hrtimer_cancel(&hp2p_info->timer.timer);
+
+ DHD_INFO(("%s: cancel hrtimer for flowid = %d \n"
+ "hp2p_flowid = %d pkt_thresh = %d\n",
+ __FUNCTION__, flowid, hp2p_flowid, dhd->pkt_thresh));
+ } else {
+ if (hp2p_info->hrtimer_init == FALSE) {
+ hp2p_info->hrtimer_init = TRUE;
+ hp2p_info->flowid = flowid;
+ hp2p_info->dhd_pub = dhd;
+ hp2p_info->ring = ring;
+ hp2p_info->num_timer_start++;
+
+ tasklet_hrtimer_start(&hp2p_info->timer,
+ ktime_set(0, dhd->time_thresh * 1000), HRTIMER_MODE_REL);
+
+ DHD_INFO(("%s: start hrtimer for flowid = %d hp2_flowid = %d\n",
+ __FUNCTION__, flowid, hp2p_flowid));
+ }
}
+ return;
+}
+
+static void
+dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc)
+{
+ uint64 ts;
+
+ ts = local_clock();
+ do_div(ts, 1000);
+
+ txdesc->metadata_buf_len = 0;
+ txdesc->metadata_buf_addr.high_addr = htol32((ts >> 32) & 0xFFFFFFFF);
+ txdesc->metadata_buf_addr.low_addr = htol32(ts & 0xFFFFFFFF);
+ txdesc->exp_time = dhd->pkt_expiry;
+
+ DHD_INFO(("%s: metadata_high = 0x%x metadata_low = 0x%x exp_time = %x\n",
+ __FUNCTION__, txdesc->metadata_buf_addr.high_addr,
+ txdesc->metadata_buf_addr.low_addr,
+ txdesc->exp_time));
+
+ return;
}
+#endif /* DHD_HP2P */
-void dhd_get_hscb_buff(struct dhd_prot *prot, uint32 offset, uint32 length, void * buff)
+#ifdef DHD_MAP_LOGGING
+void
+dhd_prot_smmu_fault_dump(dhd_pub_t *dhdp)
{
- memcpy(buff, (char*)prot->host_scb_buf.va + offset, length);
+ dhd_prot_debug_info_print(dhdp);
+ OSL_DMA_MAP_DUMP(dhdp->osh);
+#ifdef DHD_MAP_PKTID_LOGGING
+ dhd_pktid_logging_dump(dhdp);
+#endif /* DHD_MAP_PKTID_LOGGING */
+#ifdef DHD_FW_COREDUMP
+ dhdp->memdump_type = DUMP_TYPE_SMMU_FAULT;
+#ifdef DNGL_AXI_ERROR_LOGGING
+ dhdp->memdump_enabled = DUMP_MEMFILE;
+ dhd_bus_get_mem_dump(dhdp);
+#else
+ dhdp->memdump_enabled = DUMP_MEMONLY;
+ dhd_bus_mem_dump(dhdp);
+#endif /* DNGL_AXI_ERROR_LOGGING */
+#endif /* DHD_FW_COREDUMP */
}
+#endif /* DHD_MAP_LOGGING */
/*
* DHD Bus Module for PCIE
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_pcie.c 771178 2018-07-09 08:34:50Z $
+ * $Id: dhd_pcie.c 825481 2019-06-14 10:06:03Z $
*/
/* include files */
#include <bcmutils.h>
#include <bcmdevs.h>
#include <siutils.h>
+#include <hndoobr.h>
#include <hndsoc.h>
#include <hndpmu.h>
#include <etd.h>
#include <hnd_debug.h>
#include <sbchipc.h>
+#include <sbhndarm.h>
#include <hnd_armtrap.h>
#if defined(DHD_DEBUG)
#include <hnd_cons.h>
#include <dhd_pcie.h>
#include <bcmpcie.h>
#include <bcmendian.h>
+#include <bcmstdlib_s.h>
#ifdef DHDTCPACK_SUPPRESS
#include <dhd_ip.h>
#endif /* DHDTCPACK_SUPPRESS */
#include <debugger.h>
#endif /* DEBUGGER || DHD_DSCOPE */
+#ifdef DNGL_AXI_ERROR_LOGGING
+#include <dhd_linux_wq.h>
+#include <dhd_linux.h>
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
+#include <dhd_linux_priv.h>
+#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
+
+#include <otpdefs.h>
#define EXTENDED_PCIE_DEBUG_DUMP 1 /* Enable Extended pcie registers dump */
#define MEMBLOCK 2048 /* Block size used for downloading of dongle image */
#define MAX_WKLK_IDLE_CHECK 3 /* times wake_lock checked before deciding not to suspend */
+#define DHD_MAX_ITEMS_HPP_TXCPL_RING 512
+#define DHD_MAX_ITEMS_HPP_RXCPL_RING 512
+
+#define ARMCR4REG_CORECAP (0x4/sizeof(uint32))
+#define ARMCR4REG_MPUCTRL (0x90/sizeof(uint32))
+#define ACC_MPU_SHIFT 25
+#define ACC_MPU_MASK (0x1u << ACC_MPU_SHIFT)
+
+#define REG_WORK_AROUND (0x1e4/sizeof(uint32))
+
#define ARMCR4REG_BANKIDX (0x40/sizeof(uint32))
#define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32))
/* Temporary war to fix precommit till sync issue between trunk & precommit branch is resolved */
int dhd_dongle_memsize;
int dhd_dongle_ramsize;
+struct dhd_bus *g_dhd_bus = NULL;
+#ifdef DNGL_AXI_ERROR_LOGGING
+static void dhd_log_dump_axi_error(uint8 *axi_err);
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
#if defined(DHD_FW_COREDUMP)
-struct dhd_bus *g_dhd_bus = NULL;
static int dhdpcie_mem_dump(dhd_bus_t *bus);
+static int dhdpcie_get_mem_dump(dhd_bus_t *bus);
#endif /* DHD_FW_COREDUMP */
static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size);
bool dongle_isolation, bool reset_flag);
static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh);
static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len);
+static void dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr);
static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset);
static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
static void dhdpcie_fw_trap(dhd_bus_t *bus);
static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info);
+static void dhdpcie_handle_mb_data(dhd_bus_t *bus);
extern void dhd_dpc_enable(dhd_pub_t *dhdp);
extern void dhd_dpc_kill(dhd_pub_t *dhdp);
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-static void dhdpcie_handle_mb_data(dhd_bus_t *bus);
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
#ifdef IDLE_TX_FLOW_MGMT
static void dhd_bus_check_idle_scan(dhd_bus_t *bus);
static void dhd_bus_idle_scan(dhd_bus_t *bus);
#define DHD_DEFAULT_DOORBELL_TIMEOUT 200 /* ms */
static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version);
-static void dhdpcie_cto_error_recovery(struct dhd_bus *bus);
+static int dhdpcie_cto_error_recovery(struct dhd_bus *bus);
static int dhdpcie_init_d11status(struct dhd_bus *bus);
extern uint16 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd);
extern void dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost);
-#ifdef DHD_SSSR_DUMP
-static int dhdpcie_sssr_dump(dhd_pub_t *dhd);
-#endif /* DHD_SSSR_DUMP */
+#ifdef DHD_HP2P
+extern enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer);
+static uint16 dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val);
+#endif // endif
+#define NUM_PATTERNS 2
+static bool dhd_bus_tcm_test(struct dhd_bus *bus);
/* IOVar table */
enum {
IOV_CTO_PREVENTION,
IOV_PCIE_WD_RESET,
IOV_DUMP_DONGLE,
+ IOV_HWA_ENAB_BMAP,
IOV_IDMA_ENABLE,
IOV_IFRM_ENABLE,
IOV_CLEAR_RING,
IOV_INB_DW_ENABLE,
IOV_CTO_THRESHOLD,
IOV_HSCBSIZE, /* get HSCB buffer size */
- IOV_HSCBBYTES, /* copy HSCB buffer */
+ IOV_HP2P_ENABLE,
+ IOV_HP2P_PKT_THRESHOLD,
+ IOV_HP2P_TIME_THRESHOLD,
+ IOV_HP2P_PKT_EXPIRY,
+ IOV_HP2P_TXCPL_MAXITEMS,
+ IOV_HP2P_RXCPL_MAXITEMS,
+ IOV_EXTDTXS_IN_TXCPL,
+ IOV_HOSTRDY_AFTER_INIT,
IOV_PCIE_LAST /**< unused IOVAR */
};
{"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, 0, IOVT_BUFFER, 0 },
{"ramsize", IOV_RAMSIZE, 0, 0, IOVT_UINT32, 0 },
{"ramstart", IOV_RAMSTART, 0, 0, IOVT_UINT32, 0 },
- {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER, 3 * sizeof(int32) },
- {"pcie_suspend", IOV_PCIE_SUSPEND, 0, 0, IOVT_UINT32, 0 },
+ {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER, sizeof(dma_xfer_info_t)},
+ {"pcie_suspend", IOV_PCIE_SUSPEND, DHD_IOVF_PWRREQ_BYPASS, 0, IOVT_UINT32, 0 },
{"sleep_allowed", IOV_SLEEP_ALLOWED, 0, 0, IOVT_BOOL, 0 },
{"dngl_isolation", IOV_DONGLEISOLATION, 0, 0, IOVT_UINT32, 0 },
{"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 0, 0, IOVT_UINT32, 0 },
{"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t))},
{"clear_ring", IOV_CLEAR_RING, 0, 0, IOVT_UINT32, 0 },
+ {"hwa_enab_bmap", IOV_HWA_ENAB_BMAP, 0, 0, IOVT_UINT32, 0 },
{"idma_enable", IOV_IDMA_ENABLE, 0, 0, IOVT_UINT32, 0 },
{"ifrm_enable", IOV_IFRM_ENABLE, 0, 0, IOVT_UINT32, 0 },
{"dar_enable", IOV_DAR_ENABLE, 0, 0, IOVT_UINT32, 0 },
{"inb_dw_enable", IOV_INB_DW_ENABLE, 0, 0, IOVT_UINT32, 0 },
{"cto_threshold", IOV_CTO_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
{"hscbsize", IOV_HSCBSIZE, 0, 0, IOVT_UINT32, 0 },
- {"hscbbytes", IOV_HSCBBYTES, 0, 0, IOVT_BUFFER, 2 * sizeof(int32) },
+#ifdef DHD_HP2P
+ {"hp2p_enable", IOV_HP2P_ENABLE, 0, 0, IOVT_UINT32, 0 },
+ {"hp2p_pkt_thresh", IOV_HP2P_PKT_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
+ {"hp2p_time_thresh", IOV_HP2P_TIME_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
+ {"hp2p_pkt_expiry", IOV_HP2P_PKT_EXPIRY, 0, 0, IOVT_UINT32, 0 },
+ {"hp2p_txcpl_maxitems", IOV_HP2P_TXCPL_MAXITEMS, 0, 0, IOVT_UINT32, 0 },
+ {"hp2p_rxcpl_maxitems", IOV_HP2P_RXCPL_MAXITEMS, 0, 0, IOVT_UINT32, 0 },
+#endif // endif
+ {"extdtxs_in_txcpl", IOV_EXTDTXS_IN_TXCPL, 0, 0, IOVT_UINT32, 0 },
+ {"hostrdy_after_init", IOV_HOSTRDY_AFTER_INIT, 0, 0, IOVT_UINT32, 0 },
{NULL, 0, 0, 0, 0, 0 }
};
-#define MAX_READ_TIMEOUT 5 * 1000 * 1000
+#define MAX_READ_TIMEOUT 2 * 1000 * 1000
#ifndef DHD_RXBOUND
#define DHD_RXBOUND 64
return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB1_1(bus->sih->buscorerev) : PCIH2D_DB1_1);
}
+/*
+ * WAR for SWWLAN-215055 - [4378B0] ARM fails to boot without DAR WL domain request
+ */
+static INLINE void
+dhd_bus_pcie_pwr_req_wl_domain(struct dhd_bus *bus, uint offset, bool enable)
+{
+ if (enable) {
+ si_corereg(bus->sih, bus->sih->buscoreidx, offset,
+ SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT,
+ SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT);
+ } else {
+ si_corereg(bus->sih, bus->sih->buscoreidx, offset,
+ SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT, 0);
+ }
+}
+
static INLINE void
_dhd_bus_pcie_pwr_req_clear_cmn(struct dhd_bus *bus)
{
{
uint mask, val;
- mask = SRPWR_DMN_ALL_MASK;
- val = SRPWR_DMN_ALL_MASK;
+ mask = SRPWR_DMN_ALL_MASK(bus->sih);
+ val = SRPWR_DMN_ALL_MASK(bus->sih);
si_srpwr_request(bus->sih, mask, val);
}
}
static INLINE void
-_dhd_bus_pcie_pwr_req_clear_pd23_cmn(struct dhd_bus *bus)
+_dhd_bus_pcie_pwr_req_clear_pd0123_cmn(struct dhd_bus *bus)
{
uint mask;
- mask = SRPWR_DMN3_MACMAIN_MASK | SRPWR_DMN2_MACAUX_MASK;
+ mask = SRPWR_DMN_ALL_MASK(bus->sih);
si_srpwr_request(bus->sih, mask, 0);
}
unsigned long flags = 0;
DHD_GENERAL_LOCK(bus->dhd, flags);
- _dhd_bus_pcie_pwr_req_clear_pd23_cmn(bus);
+ _dhd_bus_pcie_pwr_req_clear_pd0123_cmn(bus);
DHD_GENERAL_UNLOCK(bus->dhd, flags);
}
si_chipid(bus->sih) == BCM4375_CHIP_ID ||
si_chipid(bus->sih) == BCM4362_CHIP_ID ||
si_chipid(bus->sih) == BCM43751_CHIP_ID ||
+ si_chipid(bus->sih) == BCM43752_CHIP_ID ||
si_chipid(bus->sih) == BCM4361_CHIP_ID ||
si_chipid(bus->sih) == BCM4359_CHIP_ID) {
return FALSE;
ret = BCME_NORESOURCE;
break;
}
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
+ bus->dhd->hostrdy_after_init = TRUE;
bus->db1_for_mb = TRUE;
bus->dhd->hang_report = TRUE;
bus->use_mailbox = FALSE;
bus->use_d0_inform = FALSE;
bus->intr_enabled = FALSE;
bus->flr_force_fail = FALSE;
+ /* By default disable HWA and enable it via iovar */
+ bus->hwa_enab_bmap = 0;
/* update the dma indices if set through module parameter. */
if (dma_ring_indices != 0) {
dhdpcie_set_dma_ring_indices(bus->dhd, dma_ring_indices);
bus->d2h_intr_method = PCIE_INTX;
#endif /* DHD_MSI_SUPPORT */
+#ifdef DHD_HP2P
+ bus->hp2p_txcpl_max_items = DHD_MAX_ITEMS_HPP_TXCPL_RING;
+ bus->hp2p_rxcpl_max_items = DHD_MAX_ITEMS_HPP_RXCPL_RING;
+#endif /* DHD_HP2P */
+
DHD_TRACE(("%s: EXIT SUCCESS\n",
__FUNCTION__));
-#ifdef DHD_FW_COREDUMP
g_dhd_bus = bus;
-#endif // endif
*bus_ptr = bus;
return ret;
} while (0);
return bus->sih->chippkg;
}
+/** Conduct Loopback test */
+int
+dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type)
+{
+ dma_xfer_info_t dmaxfer_lpbk;
+ int ret = BCME_OK;
+
+#define PCIE_DMAXFER_LPBK_LENGTH 4096
+ memset(&dmaxfer_lpbk, 0, sizeof(dma_xfer_info_t));
+ dmaxfer_lpbk.version = DHD_DMAXFER_VERSION;
+ dmaxfer_lpbk.length = (uint16)sizeof(dma_xfer_info_t);
+ dmaxfer_lpbk.num_bytes = PCIE_DMAXFER_LPBK_LENGTH;
+ dmaxfer_lpbk.type = type;
+ dmaxfer_lpbk.should_wait = TRUE;
+
+ ret = dhd_bus_iovar_op(dhdp, "pcie_dmaxfer", NULL, 0,
+ (char *)&dmaxfer_lpbk, sizeof(dma_xfer_info_t), IOV_SET);
+ if (ret < 0) {
+ DHD_ERROR(("failed to start PCIe Loopback Test!!! "
+ "Type:%d Reason:%d\n", type, ret));
+ return ret;
+ }
+
+ if (dmaxfer_lpbk.status != DMA_XFER_SUCCESS) {
+ DHD_ERROR(("failed to check PCIe Loopback Test!!! "
+ "Type:%d Status:%d Error code:%d\n", type,
+ dmaxfer_lpbk.status, dmaxfer_lpbk.error_code));
+ ret = BCME_ERROR;
+ } else {
+ DHD_ERROR(("successful to check PCIe Loopback Test"
+ " Type:%d\n", type));
+ }
+#undef PCIE_DMAXFER_LPBK_LENGTH
+
+ return ret;
+}
+
+/* Log the latest DPC schedule time */
+void
+dhd_bus_set_dpc_sched_time(dhd_pub_t *dhdp)
+{
+ dhdp->bus->dpc_sched_time = OSL_LOCALTIME_NS();
+}
+
+/* Check if there is DPC scheduling errors */
+bool
+dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ bool sched_err;
+
+ if (bus->dpc_entry_time < bus->isr_exit_time) {
+ /* Kernel doesn't schedule the DPC after processing PCIe IRQ */
+ sched_err = TRUE;
+ } else if (bus->dpc_entry_time < bus->resched_dpc_time) {
+ /* Kernel doesn't schedule the DPC after DHD tries to reschedule
+ * the DPC due to pending work items to be processed.
+ */
+ sched_err = TRUE;
+ } else {
+ sched_err = FALSE;
+ }
+
+ if (sched_err) {
+ /* print out minimum timestamp info */
+ DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
+ " isr_exit_time="SEC_USEC_FMT
+ " dpc_entry_time="SEC_USEC_FMT
+ "\ndpc_exit_time="SEC_USEC_FMT
+ " dpc_sched_time="SEC_USEC_FMT
+ " resched_dpc_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(bus->isr_entry_time),
+ GET_SEC_USEC(bus->isr_exit_time),
+ GET_SEC_USEC(bus->dpc_entry_time),
+ GET_SEC_USEC(bus->dpc_exit_time),
+ GET_SEC_USEC(bus->dpc_sched_time),
+ GET_SEC_USEC(bus->resched_dpc_time)));
+ }
+
+ return sched_err;
+}
+
/** Read and clear intstatus. This should be called with interrupts disabled or inside isr */
uint32
dhdpcie_bus_intstatus(dhd_bus_t *bus)
{
uint32 intstatus = 0;
-#ifndef DHD_READ_INTSTATUS_IN_DPC
uint32 intmask = 0;
-#endif /* DHD_READ_INTSTATUS_IN_DPC */
if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
DHD_ERROR(("%s: trying to clear intstatus after D3 Ack\n", __FUNCTION__));
/* this is a PCIE core register..not a config register... */
intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
-#ifndef DHD_READ_INTSTATUS_IN_DPC
/* this is a PCIE core register..not a config register... */
intmask = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask, 0, 0);
- intstatus &= intmask;
-#endif /* DHD_READ_INTSTATUS_IN_DPC */
/* Is device removed. intstatus & intmask read 0xffffffff */
- if (intstatus == (uint32)-1) {
+ if (intstatus == (uint32)-1 || intmask == (uint32)-1) {
DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__));
+ DHD_ERROR(("%s: INTSTAT : 0x%x INTMASK : 0x%x.\n",
+ __FUNCTION__, intstatus, intmask));
bus->is_linkdown = TRUE;
dhd_pcie_debug_info_dump(bus->dhd);
return intstatus;
}
+#ifndef DHD_READ_INTSTATUS_IN_DPC
+ intstatus &= intmask;
+#endif /* DHD_READ_INTSTATUS_IN_DPC */
+
/*
* The fourth argument to si_corereg is the "mask" fields of the register to update
* and the fifth field is the "value" to update. Now if we are interested in only
return intstatus;
}
+void
+dhdpcie_cto_recovery_handler(dhd_pub_t *dhd)
+{
+ dhd_bus_t *bus = dhd->bus;
+ int ret;
+
+ /* Disable PCIe Runtime PM to avoid D3_ACK timeout.
+ */
+ DHD_DISABLE_RUNTIME_PM(dhd);
+
+	/* Sleep for 1 second so that any AXI timeout
+ * if running on ALP clock also will be captured
+ */
+ OSL_SLEEP(1000);
+
+ /* reset backplane and cto,
+ * then access through pcie is recovered.
+ */
+ ret = dhdpcie_cto_error_recovery(bus);
+ if (!ret) {
+ /* Waiting for backplane reset */
+ OSL_SLEEP(10);
+ /* Dump debug Info */
+ dhd_prot_debug_info_print(bus->dhd);
+ /* Dump console buffer */
+ dhd_bus_dump_console_buffer(bus);
+#if defined(DHD_FW_COREDUMP)
+ /* save core dump or write to a file */
+ if (!bus->is_linkdown && bus->dhd->memdump_enabled) {
+#ifdef DHD_SSSR_DUMP
+ bus->dhd->collect_sssr = TRUE;
+#endif /* DHD_SSSR_DUMP */
+ bus->dhd->memdump_type = DUMP_TYPE_CTO_RECOVERY;
+ dhdpcie_mem_dump(bus);
+ }
+#endif /* DHD_FW_COREDUMP */
+ }
+ bus->is_linkdown = TRUE;
+ bus->dhd->hang_reason = HANG_REASON_PCIE_CTO_DETECT;
+ /* Send HANG event */
+ dhd_os_send_hang_message(bus->dhd);
+}
+
/**
* Name: dhdpcie_bus_isr
* Parameters:
/* read pci_intstatus */
intstatus = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4);
+ if (intstatus == (uint32)-1) {
+ DHD_ERROR(("%s : Invalid intstatus for cto recovery\n",
+ __FUNCTION__));
+ dhdpcie_disable_irq_nosync(bus);
+ break;
+ }
+
if (intstatus & PCI_CTO_INT_MASK) {
- /* reset backplane and cto,
- * then access through pcie is recovered.
+ DHD_ERROR(("%s: ##### CTO RECOVERY REPORTED BY DONGLE "
+ "intstat=0x%x enab=%d\n", __FUNCTION__,
+ intstatus, bus->cto_enable));
+ bus->cto_triggered = 1;
+ /*
+ * DAR still accessible
*/
- dhdpcie_cto_error_recovery(bus);
+ dhd_bus_dump_dar_registers(bus);
+
+ /* Disable further PCIe interrupts */
+ dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
+ /* Stop Tx flow */
+ dhd_bus_stop_queue(bus);
+
+ /* Schedule CTO recovery */
+ dhd_schedule_cto_recovery(bus->dhd);
+
return TRUE;
}
}
* so disable this for EFI
*/
DHD_LOG_MEM(("%s : this interrupt is not ours\n", __FUNCTION__));
+ bus->non_ours_irq_count++;
+ bus->last_non_ours_irq_time = OSL_LOCALTIME_NS();
break;
}
OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, sizeof(uint32),
bus->saved_config.bar0_win);
- OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR1_WIN, sizeof(uint32),
- bus->saved_config.bar1_win);
+ dhdpcie_setbar1win(bus, bus->saved_config.bar1_win);
return BCME_OK;
}
void
dhdpcie_dongle_reset(dhd_bus_t *bus)
{
+ /* if the pcie link is down, watchdog reset
+ * should not be done, as it may hang
+ */
+ if (bus->is_linkdown) {
+ return;
+ }
+
/* dhd_bus_perform_flr will return BCME_UNSUPPORTED if chip is not FLR capable */
if (dhd_bus_perform_flr(bus, FALSE) == BCME_UNSUPPORTED) {
#ifdef DHD_USE_BP_RESET
goto fail;
}
- if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) {
+ /* Configure CTO Prevention functionality */
#if defined(BCMFPGA_HW)
- DHD_ERROR(("Disable CTO\n"));
- bus->cto_enable = FALSE;
+ DHD_ERROR(("Disable CTO\n"));
+ bus->cto_enable = FALSE;
#else
+#if defined(BCMPCIE_CTO_PREVENTION)
+ if (bus->sih->buscorerev >= 24) {
DHD_ERROR(("Enable CTO\n"));
bus->cto_enable = TRUE;
-#endif // endif
- dhdpcie_cto_init(bus, bus->cto_enable);
+ } else
+#endif /* BCMPCIE_CTO_PREVENTION */
+ {
+ DHD_ERROR(("Disable CTO\n"));
+ bus->cto_enable = FALSE;
+ }
+#endif /* BCMFPGA_HW */
+
+ if (PCIECTO_ENAB(bus)) {
+ dhdpcie_cto_init(bus, TRUE);
+ }
+
+ if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) {
/*
* HW JIRA - CRWLPCIEGEN2-672
* Producer Index Feature which is used by F1 gets reset on F0 FLR
* fixed in REV68
*/
- if (bus->sih->buscorerev == 66) {
+ if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
dhdpcie_ssreset_dis_enum_rst(bus);
}
dhd_bus_pcie_pwr_req_nolock(bus);
}
+ /* Get info on the ARM and SOCRAM cores... */
+ /* Should really be qualified by device id */
+ if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
+ (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
+ (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
+ (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
+ bus->armrev = si_corerev(bus->sih);
+ bus->coreid = si_coreid(bus->sih);
+ } else {
+ DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* CA7 requires coherent bits on */
+ if (bus->coreid == ARMCA7_CORE_ID) {
+ val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
+ dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
+ (val | PCIE_BARCOHERENTACCEN_MASK));
+ }
+
/* Olympic EFI requirement - stop driver load if FW is already running
* need to do this here before pcie_watchdog_reset, because
* pcie_watchdog_reset will put the ARM back into halt state
}
#endif /* !DHD_SKIP_DONGLE_RESET_IN_ATTACH */
+ /* need to set the force_bt_quiesce flag here
+ * before calling dhdpcie_dongle_flr_or_pwr_toggle
+ */
+ bus->force_bt_quiesce = TRUE;
+ /*
+ * For buscorerev = 66 and after, F0 FLR should be done independent from F1.
+ * So don't need BT quiesce.
+ */
+ if (bus->sih->buscorerev >= 66) {
+ bus->force_bt_quiesce = FALSE;
+ }
+
+ dhdpcie_dongle_flr_or_pwr_toggle(bus);
+
si_setcore(bus->sih, PCIE2_CORE_ID, 0);
sbpcieregs = (sbpcieregs_t*)(bus->regs);
val = R_REG(osh, &sbpcieregs->configdata);
W_REG(osh, &sbpcieregs->configdata, val);
- /* Get info on the ARM and SOCRAM cores... */
- /* Should really be qualified by device id */
- if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
- (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
- (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
- (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
- bus->armrev = si_corerev(bus->sih);
- } else {
- DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
- goto fail;
- }
-
if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
/* Only set dongle RAMSIZE to default value when BMC vs ARM usage of SYSMEM is not
* adjusted.
case BCM43751_CHIP_ID:
bus->dongle_ram_base = CR4_43751_RAM_BASE;
break;
+ case BCM43752_CHIP_ID:
+ bus->dongle_ram_base = CR4_43752_RAM_BASE;
+ break;
case BCM4375_CHIP_ID:
case BCM4369_CHIP_ID:
bus->dongle_ram_base = CR4_4369_RAM_BASE;
if (MULTIBP_ENAB(bus->sih)) {
dhd_bus_pcie_pwr_req_clear_nolock(bus);
- }
- bus->force_bt_quiesce = TRUE;
+ /*
+ * One time clearing of Common Power Domain since HW default is set
+ * Needs to be after FLR because FLR resets PCIe enum back to HW defaults
+ * for 4378B0 (rev 68).
+ * On 4378A0 (rev 66), PCIe enum reset is disabled due to CRWLPCIEGEN2-672
+ */
+ si_srpwr_request(bus->sih, SRPWR_DMN0_PCIE_MASK, 0);
+
+ /*
+ * WAR to fix ARM cold boot;
+ * Assert WL domain in DAR helps but not enum
+ */
+ if (bus->sih->buscorerev >= 68) {
+ dhd_bus_pcie_pwr_req_wl_domain(bus,
+ DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev), TRUE);
+ }
+ }
return 0;
dhdpcie_bus_intr_enable(dhd_bus_t *bus)
{
DHD_TRACE(("%s Enter\n", __FUNCTION__));
- if (bus && bus->sih && !bus->is_linkdown) {
- /* Skip after recieving D3 ACK */
- if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
- return;
- }
- if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
- (bus->sih->buscorerev == 4)) {
- dhpcie_bus_unmask_interrupt(bus);
- } else {
- si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
- bus->def_intmask, bus->def_intmask);
+ if (bus) {
+ if (bus->sih && !bus->is_linkdown) {
+		/* Skip after receiving D3 ACK */
+ if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
+ return;
+ }
+ if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+ (bus->sih->buscorerev == 4)) {
+ dhpcie_bus_unmask_interrupt(bus);
+ } else {
+ #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
+ dhd_bus_mmio_trace(bus, bus->pcie_mailbox_mask,
+ bus->def_intmask, TRUE);
+ #endif
+ si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
+ bus->def_intmask, bus->def_intmask);
+ }
}
+
}
+
DHD_TRACE(("%s Exit\n", __FUNCTION__));
}
bus->def_intmask, 0);
}
}
+
DHD_TRACE(("%s Exit\n", __FUNCTION__));
}
DHD_TRACE(("%s Enter\n", __FUNCTION__));
DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
DHD_GENERAL_UNLOCK(bus->dhd, flags);
dhd_os_sdlock(bus->dhd);
if (bus->sih && !bus->dhd->dongle_isolation) {
- if (bus->sih->buscorerev == 66) {
+ if (PCIE_RELOAD_WAR_ENAB(bus->sih->buscorerev)) {
dhd_bus_pcie_pwr_req_reload_war(bus);
}
*/
if (!bus->is_linkdown) {
+#ifndef DHD_SKIP_DONGLE_RESET_IN_ATTACH
+ /* for efi, depending on bt over pcie mode
+ * we either power toggle or do F0 FLR
+ * from dhdpcie_bus_release dongle. So no need to
+ * do dongle reset from here
+ */
dhdpcie_dongle_reset(bus);
+#endif /* !DHD_SKIP_DONGLE_RESET_IN_ATTACH */
}
bus->dhd->is_pcie_watchdog_reset = TRUE;
}
}
+void
+dhd_init_backplane_access_lock(dhd_bus_t *bus)
+{
+ if (!bus->backplane_access_lock) {
+ bus->backplane_access_lock = dhd_os_spin_lock_init(bus->dhd->osh);
+ }
+}
+
+void
+dhd_deinit_backplane_access_lock(dhd_bus_t *bus)
+{
+ if (bus->backplane_access_lock) {
+ dhd_os_spin_lock_deinit(bus->dhd->osh, bus->backplane_access_lock);
+ bus->backplane_access_lock = NULL;
+ }
+}
+
/** Detach and free everything */
void
dhdpcie_bus_release(dhd_bus_t *bus)
dhdpcie_free_irq(bus);
}
dhd_deinit_bus_lock(bus);
+ dhd_deinit_backplane_access_lock(bus);
/**
* dhdpcie_bus_release_dongle free bus->sih handle, which is needed to
* access Dongle registers.
/* Finally free bus info */
MFREE(osh, bus, sizeof(dhd_bus_t));
+ g_dhd_bus = NULL;
}
DHD_TRACE(("%s: Exit\n", __FUNCTION__));
return;
}
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s : Skip release dongle due to linkdown \n", __FUNCTION__));
+ return;
+ }
+
if (bus->sih) {
if (!dongle_isolation &&
dhdpcie_dongle_reset(bus);
}
+ dhdpcie_dongle_flr_or_pwr_toggle(bus);
+
if (bus->ltrsleep_on_unload) {
si_corereg(bus->sih, bus->sih->buscoreidx,
OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
/** Stop bus module: clear pending frames, disable data flow */
void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
{
- uint32 status;
unsigned long flags, flags_bus;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
DHD_DISABLE_RUNTIME_PM(bus->dhd);
DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
DHD_GENERAL_UNLOCK(bus->dhd, flags);
dhdpcie_bus_intr_disable(bus);
DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
- status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
- dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
+ if (!bus->is_linkdown) {
+ uint32 status;
+ status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
+ dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
+ }
if (!dhd_download_fw_on_driverload) {
dhd_dpc_kill(bus->dhd);
#define DEFAULT_CIDINFO_FOR_A1 "r01a_e30a_a1"
#define DEFAULT_CIDINFO_FOR_B0 "r01i_e32_b0"
#define MAX_VID_LEN 8
-#define MAX_VNAME_LEN 30
-#define CIS_TUPLE_HDR_LEN 2
+#define CIS_TUPLE_HDR_LEN 2
#if defined(BCM4361_CHIP)
#define CIS_TUPLE_START_ADDRESS 0x18011110
#define CIS_TUPLE_END_ADDRESS 0x18011167
#define CIS_TUPLE_START_ADDRESS 0x18011120
#define CIS_TUPLE_END_ADDRESS 0x18011177
#endif /* defined(BCM4361_CHIP) */
-#define CIS_TUPLE_MAX_COUNT (CIS_TUPLE_END_ADDRESS - CIS_TUPLE_START_ADDRESS\
- + 1) / sizeof(uint32)
+#define CIS_TUPLE_MAX_COUNT (uint32)((CIS_TUPLE_END_ADDRESS - CIS_TUPLE_START_ADDRESS\
+ + 1) / sizeof(uint32))
#define CIS_TUPLE_TAG_START 0x80
#define CIS_TUPLE_TAG_VENDOR 0x81
#define CIS_TUPLE_TAG_BOARDTYPE 0x1b
{ {"e43_es33"}, {"_ES01_semco_b0"}, {"_b0"} },
{ {"e43_es34"}, {"_ES02_semco_b0"}, {"_b0"} },
{ {"e43_es35"}, {"_ES02_semco_b0"}, {"_b0"} },
+ { {"e43_es36"}, {"_ES03_semco_b0"}, {"_b0"} },
{ {"e43_cs41"}, {"_CS00_semco_b1"}, {"_b1"} },
- { {"e43_cs51"}, {"_CS00_semco_b1"}, {"_b1"} },
+ { {"e43_cs51"}, {"_CS01_semco_b1"}, {"_b1"} },
+ { {"e43_cs53"}, {"_CS01_semco_b1"}, {"_b1"} },
{ {"e43_cs61"}, {"_CS00_skyworks_b1"}, {"_b1"} },
{ {"1rh_es10"}, {"_1rh_es10_b0"}, {"_b0"} },
{ {"1rh_es11"}, {"_1rh_es11_b0"}, {"_b0"} },
{ {"1rh_es12"}, {"_1rh_es12_b0"}, {"_b0"} },
{ {"1rh_es13"}, {"_1rh_es13_b0"}, {"_b0"} },
- { {"1rh_es20"}, {"_1rh_es20_b0"}, {"_b0"} }
+ { {"1rh_es20"}, {"_1rh_es20_b0"}, {"_b0"} },
+ { {"1rh_es32"}, {"_1rh_es32_b0"}, {"_b0"} },
+ { {"1rh_es41"}, {"_1rh_es41_b1"}, {"_b1"} },
+ { {"1rh_es42"}, {"_1rh_es42_b1"}, {"_b1"} },
+ { {"1rh_es43"}, {"_1rh_es43_b1"}, {"_b1"} },
+ { {"1rh_es44"}, {"_1rh_es44_b1"}, {"_b1"} }
};
static naming_info_t *
return ret;
}
-void
-dhd_set_path_params(struct dhd_bus *bus)
-{
- /* External conf takes precedence if specified */
- dhd_conf_preinit(bus->dhd);
-
- if (bus->dhd->clm_path[0] == '\0') {
- dhd_conf_set_path(bus->dhd, "clm.blob", bus->dhd->clm_path, bus->fw_path);
- }
- dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path);
- if (bus->dhd->conf_path[0] == '\0') {
- dhd_conf_set_path(bus->dhd, "config.txt", bus->dhd->conf_path, bus->nv_path);
- }
-#ifdef CONFIG_PATH_AUTO_SELECT
- dhd_conf_set_conf_name_by_chip(bus->dhd, bus->dhd->conf_path);
-#endif
-
- dhd_conf_read_config(bus->dhd, bus->dhd->conf_path);
-
- dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path);
- dhd_conf_set_nv_name_by_chip(bus->dhd, bus->nv_path);
- dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path);
-
- printf("Final fw_path=%s\n", bus->fw_path);
- printf("Final nv_path=%s\n", bus->nv_path);
- printf("Final clm_path=%s\n", bus->dhd->clm_path);
- printf("Final conf_path=%s\n", bus->dhd->conf_path);
-
-}
-
void
dhd_set_bus_params(struct dhd_bus *bus)
{
DHD_OS_WAKE_LOCK(bus->dhd);
- dhd_set_path_params(bus);
+ dhd_conf_set_path_params(bus->dhd, NULL, NULL, bus->fw_path, bus->nv_path);
dhd_set_bus_params(bus);
ret = _dhdpcie_download_firmware(bus);
{
int bcmerror = BCME_ERROR;
int offset = 0;
-#if defined(DHD_FW_MEM_CORRUPTION)
- uint8 *p_org_fw = NULL;
- uint32 org_fw_size = 0;
- uint32 fw_write_offset = 0;
-#endif /* DHD_FW_MEM_CORRUPTION */
int len = 0;
bool store_reset;
char *imgbuf = NULL;
int offset_end = bus->ramsize;
uint32 file_size = 0, read_len = 0;
+#if defined(DHD_FW_MEM_CORRUPTION)
+ if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
+ dhd_tcm_test_enable = TRUE;
+ } else {
+ dhd_tcm_test_enable = FALSE;
+ }
+#endif /* DHD_FW_MEM_CORRUPTION */
+ DHD_ERROR(("%s: dhd_tcm_test_enable %u\n", __FUNCTION__, dhd_tcm_test_enable));
+ /* TCM check */
+ if (dhd_tcm_test_enable && !dhd_bus_tcm_test(bus)) {
+ DHD_ERROR(("dhd_bus_tcm_test failed\n"));
+ bcmerror = BCME_ERROR;
+ goto err;
+ }
DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
/* Should succeed in opening image if it is actually given through registry
memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
}
-#if defined(DHD_FW_MEM_CORRUPTION)
- if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
- org_fw_size = file_size;
-#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
- p_org_fw = (uint8*)DHD_OS_PREALLOC(bus->dhd,
- DHD_PREALLOC_MEMDUMP_RAM, org_fw_size);
-#else
- p_org_fw = (uint8*)VMALLOC(bus->dhd->osh, org_fw_size);
-#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
- if (p_org_fw == NULL) {
- DHD_ERROR(("%s: Failed to allocate memory %d bytes for download check\n",
- __FUNCTION__, org_fw_size));
- bcmerror = BCME_NOMEM;
- goto err;
- } else {
- memset(p_org_fw, 0, org_fw_size);
- }
- }
-#endif /* DHD_FW_MEM_CORRUPTION */
-
/* check if CR4/CA7 */
store_reset = (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
si_setcore(bus->sih, ARMCA7_CORE_ID, 0));
bcmerror = BCME_ERROR;
goto err;
}
-
read_len += len;
if (read_len > file_size) {
DHD_ERROR(("%s: WARNING! reading beyond EOF, len=%d; read_len=%u;"
DHD_INFO(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__));
}
offset += MEMBLOCK;
-#if defined(DHD_FW_MEM_CORRUPTION)
- if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
- memcpy((p_org_fw + fw_write_offset), memptr, len);
- fw_write_offset += len;
- }
-#endif /* DHD_FW_MEM_CORRUPTION */
if (offset >= offset_end) {
DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n",
break;
}
}
-#ifdef DHD_FW_MEM_CORRUPTION
- /* Read and compare the downloaded code */
- if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
- unsigned char *p_readback_buf = NULL;
- uint32 compared_len;
- uint32 remaining_len = 0;
-
- compared_len = 0;
- p_readback_buf = MALLOC(bus->dhd->osh, MEMBLOCK);
- if (p_readback_buf == NULL) {
- DHD_ERROR(("%s: Failed to allocate memory %d bytes for readback buffer\n",
- __FUNCTION__, MEMBLOCK));
- bcmerror = BCME_NOMEM;
- goto compare_err;
- }
- /* Read image to verify downloaded contents. */
- offset = bus->dongle_ram_base;
-
- while (compared_len < org_fw_size) {
- memset(p_readback_buf, DHD_MEMORY_SET_PATTERN, MEMBLOCK);
- remaining_len = org_fw_size - compared_len;
-
- if (remaining_len >= MEMBLOCK) {
- len = MEMBLOCK;
- } else {
- len = remaining_len;
- }
- bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset,
- (uint8 *)p_readback_buf, len);
- if (bcmerror) {
- DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
- __FUNCTION__, bcmerror, MEMBLOCK, offset));
- goto compare_err;
- }
-
- if (memcmp((p_org_fw + compared_len), p_readback_buf, len) != 0) {
- DHD_ERROR(("%s: Downloaded image is corrupted. offset %d\n",
- __FUNCTION__, compared_len));
- bcmerror = BCME_ERROR;
- goto compare_err;
- }
-
- compared_len += len;
- offset += len;
- }
- DHD_ERROR(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__));
-
-compare_err:
- if (p_readback_buf) {
- MFREE(bus->dhd->osh, p_readback_buf, MEMBLOCK);
- }
- }
-#endif /* DHD_FW_MEM_CORRUPTION */
-
err:
-#if defined(DHD_FW_MEM_CORRUPTION)
- if (p_org_fw) {
-#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
- DHD_OS_PREFREE(bus->dhd, p_org_fw, org_fw_size);
-#else
- VMFREE(bus->dhd->osh, p_org_fw, org_fw_size);
-#endif // endif
- }
-#endif /* DHD_FW_MEM_CORRUPTION */
if (memblock) {
MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
if (dhd_msg_level & DHD_TRACE_VAL) {
if (mbuffer == NULL) {
DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
bcmerror = BCME_NOMEM;
- goto done;
+ goto done2;
}
}
if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
bcmerror = BCME_NOMEM;
- goto done;
+ goto done2;
}
DHD_GENERAL_LOCK(bus->dhd, flags);
DHD_BUS_BUSY_SET_IN_CHECKDIED(bus->dhd);
dhd_bus_pcie_pwr_req(bus);
}
if ((bcmerror = dhdpcie_readshared(bus)) < 0) {
- goto done;
+ goto done1;
}
bcm_binit(&strbuf, data, size);
if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
bus->pcie_sh->assert_exp_addr,
(uint8 *)str, maxstrlen)) < 0) {
- goto done;
+ goto done1;
}
str[maxstrlen - 1] = '\0';
if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
bus->pcie_sh->assert_file_addr,
(uint8 *)str, maxstrlen)) < 0) {
- goto done;
+ goto done1;
}
str[maxstrlen - 1] = '\0';
if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
bus->pcie_sh->trap_addr, (uint8*)tr, sizeof(trap_t))) < 0) {
bus->dhd->dongle_trap_occured = TRUE;
- goto done;
+ goto done1;
}
dhd_bus_dump_trap_info(bus, &strbuf);
}
if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
DHD_FWLOG(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
- /* wake up IOCTL wait event */
- dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP);
-
dhd_bus_dump_console_buffer(bus);
dhd_prot_debug_info_print(bus->dhd);
/* save core dump or write to a file */
if (bus->dhd->memdump_enabled) {
#ifdef DHD_SSSR_DUMP
- if (bus->dhd->sssr_inited) {
- dhdpcie_sssr_dump(bus->dhd);
- }
+ bus->dhd->collect_sssr = TRUE;
#endif /* DHD_SSSR_DUMP */
bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP;
dhdpcie_mem_dump(bus);
bus->dhd->dongle_trap_occured = TRUE;
}
+ /* wake up IOCTL wait event */
+ dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP);
+
dhd_schedule_reset(bus->dhd);
}
+done1:
+ if (MULTIBP_ENAB(bus->sih)) {
+ dhd_bus_pcie_pwr_req_clear(bus);
+ }
+
DHD_GENERAL_LOCK(bus->dhd, flags);
DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus->dhd);
dhd_os_busbusy_wake(bus->dhd);
DHD_GENERAL_UNLOCK(bus->dhd, flags);
-
-done:
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req_clear(bus);
- }
+done2:
if (mbuffer)
MFREE(bus->dhd->osh, mbuffer, msize);
if (str)
#if defined(DHD_FW_COREDUMP)
static int
-dhdpcie_mem_dump(dhd_bus_t *bus)
+dhdpcie_get_mem_dump(dhd_bus_t *bus)
{
- int ret = 0;
- int size; /* Full mem size */
- int start = bus->dongle_ram_base; /* Start address */
+ int ret = BCME_OK;
+ int size = 0;
+ int start = 0;
int read_size = 0; /* Read size of each iteration */
- uint8 *buf = NULL, *databuf = NULL;
+ uint8 *p_buf = NULL, *databuf = NULL;
-#ifdef EXYNOS_PCIE_DEBUG
- exynos_pcie_register_dump(1);
-#endif /* EXYNOS_PCIE_DEBUG */
+ if (!bus) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) < 0)
+ if (!bus->dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
return BCME_ERROR;
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+ }
+
+ size = bus->ramsize; /* Full mem size */
+ start = bus->dongle_ram_base; /* Start address */
/* Get full mem size */
- size = bus->ramsize;
- buf = dhd_get_fwdump_buf(bus->dhd, size);
- if (!buf) {
- DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size));
+ p_buf = dhd_get_fwdump_buf(bus->dhd, size);
+ if (!p_buf) {
+ DHD_ERROR(("%s: Out of memory (%d bytes)\n",
+ __FUNCTION__, size));
return BCME_ERROR;
}
/* Read mem content */
DHD_TRACE_HW4(("Dump dongle memory\n"));
- databuf = buf;
- while (size)
- {
+ databuf = p_buf;
+ while (size > 0) {
read_size = MIN(MEMBLOCK, size);
- if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size)))
- {
+ ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size);
+ if (ret) {
DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
#ifdef DHD_DEBUG_UART
bus->dhd->memdump_success = FALSE;
#endif /* DHD_DEBUG_UART */
- return BCME_ERROR;
+ break;
}
DHD_TRACE(("."));
start += read_size;
databuf += read_size;
}
-#ifdef DHD_DEBUG_UART
- bus->dhd->memdump_success = TRUE;
-#endif /* DHD_DEBUG_UART */
- dhd_schedule_memdump(bus->dhd, buf, bus->ramsize);
+ return ret;
+}
+
+static int
+dhdpcie_mem_dump(dhd_bus_t *bus)
+{
+ dhd_pub_t *dhdp;
+ int ret;
+
+#ifdef EXYNOS_PCIE_DEBUG
+ exynos_pcie_register_dump(1);
+#endif /* EXYNOS_PCIE_DEBUG */
+
+ dhdp = bus->dhd;
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
+ DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+ if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) < 0)
+ return BCME_ERROR;
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+
+ ret = dhdpcie_get_mem_dump(bus);
+ if (ret) {
+ DHD_ERROR(("%s: failed to get mem dump, err=%d\n",
+ __FUNCTION__, ret));
+ return ret;
+ }
+
+ dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
/* buf, actually soc_ram free handled in dhd_{free,clear} */
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
return ret;
}
+int
+dhd_bus_get_mem_dump(dhd_pub_t *dhdp)
+{
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ return dhdpcie_get_mem_dump(dhdp->bus);
+}
+
int
dhd_bus_mem_dump(dhd_pub_t *dhdp)
{
return BCME_ERROR;
}
- if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
+ /* Try to resume if already suspended or suspend in progress */
+
+	/* Skip if still suspended or if a suspend is in progress */
+ if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhdp)) {
DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
__FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
return BCME_ERROR;
DHD_OS_WAKE_UNLOCK(dhdp);
return ret;
}
-
-int
-dhd_dongle_mem_dump(void)
-{
- if (!g_dhd_bus) {
- DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__));
- return -ENODEV;
- }
-
- dhd_bus_dump_console_buffer(g_dhd_bus);
- dhd_prot_debug_info_print(g_dhd_bus->dhd);
-
- g_dhd_bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
- g_dhd_bus->dhd->memdump_type = DUMP_TYPE_AP_ABNORMAL_ACCESS;
-
- dhd_bus_mem_dump(g_dhd_bus->dhd);
- return 0;
-}
-EXPORT_SYMBOL(dhd_dongle_mem_dump);
#endif /* DHD_FW_COREDUMP */
int
}
while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
- PKTORPHAN(txp, bus->dhd->conf->tsq);
+ if (bus->dhd->conf->orphan_move <= 1)
+ PKTORPHAN(txp, bus->dhd->conf->tsq);
/*
* Modifying the packet length caused P2P cert failures.
}
}
+#ifdef DHD_HP2P
+ if (!flow_ring_node->hp2p_ring) {
+ dhd_prot_txdata_write_flush(bus->dhd, flow_id);
+ }
+#else
dhd_prot_txdata_write_flush(bus->dhd, flow_id);
-
+#endif // endif
DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
}
void
dhd_bus_start_queue(struct dhd_bus *bus)
{
+	/*
+	 * The Tx queue has been stopped due to a resource shortage (or
+	 * the bus is not in a state where it can be turned on).
+	 *
+	 * Note that we try to re-start the network interface only
+	 * when we have enough resources; whoever frees the resources
+	 * must first clear the flag indicating the shortage.
+	 */
+ if (dhd_prot_check_tx_resource(bus->dhd)) {
+ DHD_ERROR(("%s: Interface NOT started, previously stopped "
+ "due to resource shortage\n", __FUNCTION__));
+ return;
+ }
dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
}
dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
}
+void
+dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr)
+{
+ dhdpcie_os_setbar1win(bus, addr);
+}
+
/** 'offset' is a backplane address */
void
dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
{
- W_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset), data);
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ } else {
+ dhdpcie_os_wtcm8(bus, offset, data);
+ }
}
uint8
dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
{
volatile uint8 data;
- data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset));
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ data = (uint8)-1;
+ } else {
+ data = dhdpcie_os_rtcm8(bus, offset);
+ }
return data;
}
void
dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
{
- W_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset), data);
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ } else {
+ dhdpcie_os_wtcm32(bus, offset, data);
+ }
}
void
dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
{
- W_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset), data);
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ } else {
+ dhdpcie_os_wtcm16(bus, offset, data);
+ }
}
#ifdef DHD_SUPPORT_64BIT
void
dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
{
- W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data);
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ } else {
+ dhdpcie_os_wtcm64(bus, offset, data);
+ }
}
#endif /* DHD_SUPPORT_64BIT */
dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
{
volatile uint16 data;
- data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset));
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ data = (uint16)-1;
+ } else {
+ data = dhdpcie_os_rtcm16(bus, offset);
+ }
return data;
}
dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
{
volatile uint32 data;
- data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset));
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ data = (uint32)-1;
+ } else {
+ data = dhdpcie_os_rtcm32(bus, offset);
+ }
return data;
}
dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
{
volatile uint64 data;
- data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset));
+ if (bus->is_linkdown) {
+ DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
+ data = (uint64)-1;
+ } else {
+ data = dhdpcie_os_rtcm64(bus, offset);
+ }
return data;
}
#endif /* DHD_SUPPORT_64BIT */
DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
name, (set ? "set" : "get"), len, plen));
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req(bus);
- }
-
/* Look up var locally; if not found pass to host driver */
if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) {
goto exit;
}
+ if (MULTIBP_ENAB(bus->sih)) {
+ if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) {
+ DHD_ERROR(("%s: Bypass pwr request\n", __FUNCTION__));
+ } else {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+ }
+
/* set up 'params' pointer in case this is a set command so that
* the convenience int and bool code can be common to set and get
*/
bcmerror = BCME_OK;
} else {
if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req_clear(bus);
+ if (vi && (vi->flags & DHD_IOVF_PWRREQ_BYPASS)) {
+ DHD_ERROR(("%s: Bypass pwr request clear\n", __FUNCTION__));
+ } else {
+ dhd_bus_pcie_pwr_req_clear(bus);
+ }
}
}
return bcmerror;
#define DHD_FUNCTION_LEVEL_RESET_DELAY 300000u
#define DHD_SSRESET_STATUS_RETRY_DELAY 10000u
#else
-#define DHD_FUNCTION_LEVEL_RESET_DELAY 55u /* 55 msec delay */
+#define DHD_FUNCTION_LEVEL_RESET_DELAY 70u /* 70 msec delay */
#define DHD_SSRESET_STATUS_RETRY_DELAY 40u
#endif // endif
-#define DHD_SSRESET_STATUS_RETRIES 50u
+/*
+ * Increase the SSReset de-assert wait time to 8 ms, since
+ * re-scanning takes a longer time on 4378B0.
+ */
+#define DHD_SSRESET_STATUS_RETRIES 200u
+
+static void
+dhdpcie_enum_reg_init(dhd_bus_t *bus)
+{
+ /* initialize Function control register (clear bit 4) to HW init value */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.control), ~0,
+ PCIE_CPLCA_ENABLE | PCIE_DLY_PERST_TO_COE);
+
+ /* clear IntMask */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.intmask), ~0, 0);
+ /* clear IntStatus */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), ~0,
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), 0, 0));
+
+ /* clear MSIVector */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_vector), ~0, 0);
+ /* clear MSIIntMask */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intmask), ~0, 0);
+ /* clear MSIIntStatus */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), ~0,
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), 0, 0));
+
+ /* clear PowerIntMask */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intmask), ~0, 0);
+ /* clear PowerIntStatus */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), ~0,
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), 0, 0));
+
+ /* clear MailboxIntMask */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intmask), ~0, 0);
+ /* clear MailboxInt */
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), ~0,
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), 0, 0));
+}
int
dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail)
{
- bool flr_capab;
+ uint flr_capab;
uint val;
int retry = 0;
DHD_ERROR(("******** Perform FLR ********\n"));
+ if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
+ if (bus->pcie_mailbox_mask != 0) {
+ dhdpcie_bus_intr_disable(bus);
+ }
+ /* initialize F0 enum registers before FLR for rev66/67 */
+ dhdpcie_enum_reg_init(bus);
+ }
+
/* Read PCIE_CFG_DEVICE_CAPABILITY bit 28 to check FLR capability */
val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CAPABILITY, sizeof(val));
flr_capab = val & (1 << PCIE_FLR_CAPAB_BIT);
- DHD_ERROR(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n",
+ DHD_INFO(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n",
PCIE_CFG_DEVICE_CAPABILITY, val, flr_capab));
if (!flr_capab) {
DHD_ERROR(("Chip does not support FLR\n"));
}
/* Save pcie config space */
- DHD_ERROR(("Save Pcie Config Space\n"));
+ DHD_INFO(("Save Pcie Config Space\n"));
DHD_PCIE_CONFIG_SAVE(bus);
/* Set bit 15 of PCIE_CFG_DEVICE_CONTROL */
- DHD_ERROR(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
+ DHD_INFO(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
- DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
+ DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
val = val | (1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
- DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
+ DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
/* wait for DHD_FUNCTION_LEVEL_RESET_DELAY msec */
- DHD_ERROR(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY));
+ DHD_INFO(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY));
OSL_DELAY(DHD_FUNCTION_LEVEL_RESET_DELAY * 1000u);
if (force_fail) {
}
/* Clear bit 15 of PCIE_CFG_DEVICE_CONTROL */
- DHD_ERROR(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
+ DHD_INFO(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
- DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
+ DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
val = val & ~(1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
- DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
+ DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
/* Wait till bit 13 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */
- DHD_ERROR(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)"
+ DHD_INFO(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)"
"is cleared\n", PCIE_SSRESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
do {
val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
bus->flr_force_fail = FALSE;
DHD_ERROR(("%s cleared flr_force_fail flag\n", __FUNCTION__));
}
- return BCME_ERROR;
+ return BCME_DONGLE_DOWN;
}
/* Restore pcie config space */
- DHD_ERROR(("Restore Pcie Config Space\n"));
+ DHD_INFO(("Restore Pcie Config Space\n"));
DHD_PCIE_CONFIG_RESTORE(bus);
DHD_ERROR(("******** FLR Succedeed ********\n"));
if (flag == TRUE) { /* Turn off WLAN */
/* Removing Power */
DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
-
+ DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
bus->dhd->up = FALSE;
/* wait for other contexts to finish -- if required a call
dhdpcie_free_irq(bus);
}
dhd_deinit_bus_lock(bus);
+ dhd_deinit_backplane_access_lock(bus);
dhd_bus_release_dongle(bus);
dhdpcie_bus_free_resource(bus);
bcmerror = dhdpcie_bus_disable_device(bus);
}
#endif /* CONFIG_ARCH_MSM */
DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
DHD_GENERAL_UNLOCK(bus->dhd, flags);
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
__FUNCTION__, bcmerror));
goto done;
}
+#if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON)
+ dhd_bus_aspm_enable_rc_ep(bus, FALSE);
+#endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */
#endif /* CONFIG_ARCH_MSM */
bus->is_linkdown = 0;
+ bus->cto_triggered = 0;
bcmerror = dhdpcie_bus_enable_device(bus);
if (bcmerror) {
DHD_ERROR(("%s: host configuration restore failed: %d\n",
bus->dhd->dongle_reset = FALSE;
+#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
+ dhd_irq_set_affinity(bus->dhd, cpumask_of(1));
+#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
+
bcmerror = dhd_bus_start(dhdp);
if (bcmerror) {
DHD_ERROR(("%s: dhd_bus_start: %d\n",
done:
if (bcmerror) {
DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
DHD_GENERAL_UNLOCK(bus->dhd, flags);
}
serialized_backplane_access(dhd_bus_t *bus, uint addr, uint size, uint *val, bool read)
{
uint ret;
- dhd_pcie_backplane_access_lock(bus->dhd);
+ unsigned long flags;
+ DHD_BACKPLANE_ACCESS_LOCK(bus->backplane_access_lock, flags);
ret = si_backplane_access(bus->sih, addr, size, val, read);
- dhd_pcie_backplane_access_unlock(bus->dhd);
+ DHD_BACKPLANE_ACCESS_UNLOCK(bus->backplane_access_lock, flags);
return ret;
}
break;
case IOV_SVAL(IOV_PCIE_DMAXFER): {
- int int_val4 = 0;
- int wait = 0;
- int core_num = 0;
- if (plen >= (int)sizeof(int_val) * 4) {
- bcopy((void*)((uintptr)params + 3 * sizeof(int_val)),
- &int_val4, sizeof(int_val4));
- }
- if (plen >= (int)sizeof(int_val) * 5) {
- bcopy((void*)((uintptr)params + 4 * sizeof(int_val)),
- &wait, sizeof(wait));
- }
- if (plen >= (int)sizeof(core_num) * 6) {
- bcopy((void*)((uintptr)params + 5 * sizeof(core_num)),
- &core_num, sizeof(core_num));
- }
- bcmerror = dhdpcie_bus_dmaxfer_req(bus, int_val, int_val2, int_val3,
- int_val4, core_num, wait);
- if (wait && bcmerror >= 0) {
- /* get the status of the dma transfer */
- int_val4 = dhdmsgbuf_dmaxfer_status(bus->dhd);
- bcopy(&int_val4, params, sizeof(int_val));
+ dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)arg;
+
+ if (!dmaxfer)
+ return BCME_BADARG;
+ if (dmaxfer->version != DHD_DMAXFER_VERSION)
+ return BCME_VERSION;
+ if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
+ return BCME_BADLEN;
+ }
+
+ bcmerror = dhdpcie_bus_dmaxfer_req(bus, dmaxfer->num_bytes,
+ dmaxfer->src_delay, dmaxfer->dest_delay,
+ dmaxfer->type, dmaxfer->core_num,
+ dmaxfer->should_wait);
+
+ if (dmaxfer->should_wait && bcmerror >= 0) {
+ bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
}
break;
}
case IOV_GVAL(IOV_PCIE_DMAXFER): {
- int dma_status = 0;
- dma_status = dhdmsgbuf_dmaxfer_status(bus->dhd);
- bcopy(&dma_status, arg, val_size);
- bcmerror = BCME_OK;
+ dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)params;
+ if (!dmaxfer)
+ return BCME_BADARG;
+ if (dmaxfer->version != DHD_DMAXFER_VERSION)
+ return BCME_VERSION;
+ if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
+ return BCME_BADLEN;
+ }
+ bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
break;
}
break;
case IOV_SVAL(IOV_CTO_PREVENTION):
- {
- uint32 pcie_lnkst;
-
- if (bus->sih->buscorerev < 19) {
- bcmerror = BCME_UNSUPPORTED;
- break;
- }
- si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_LINK_STATUS);
-
- pcie_lnkst = si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, configdata), 0, 0);
-
- if ((bus->sih->buscorerev == 19) &&
- (((pcie_lnkst >> PCI_LINK_SPEED_SHIFT) &
- PCI_LINK_SPEED_MASK) == PCIE_LNK_SPEED_GEN1)) {
- bcmerror = BCME_UNSUPPORTED;
- break;
- }
- bus->cto_enable = bool_val;
- dhdpcie_cto_init(bus, bus->cto_enable);
- DHD_ERROR(("%s: set CTO prevention and recovery enable/disable %d\n",
- __FUNCTION__, bus->cto_enable));
- }
+ bcmerror = dhdpcie_cto_init(bus, bool_val);
break;
case IOV_GVAL(IOV_CTO_PREVENTION):
}
break;
+ case IOV_GVAL(IOV_HWA_ENAB_BMAP):
+ int_val = bus->hwa_enab_bmap;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_HWA_ENAB_BMAP):
+ bus->hwa_enab_bmap = (uint8)int_val;
+ break;
case IOV_GVAL(IOV_IDMA_ENABLE):
int_val = bus->idma_enabled;
bcopy(&int_val, arg, val_size);
bus->dar_enabled = (bool)int_val;
break;
case IOV_GVAL(IOV_HSCBSIZE):
- dhd_get_hscb_info(bus->dhd->prot, NULL, (uint32 *)arg);
+ bcmerror = dhd_get_hscb_info(bus->dhd, NULL, (uint32 *)arg);
+ break;
+
+#ifdef DHD_HP2P
+ case IOV_SVAL(IOV_HP2P_ENABLE):
+ dhd_prot_hp2p_enable(bus->dhd, TRUE, int_val);
break;
- case IOV_GVAL(IOV_HSCBBYTES):
- dhd_get_hscb_buff(bus->dhd->prot, int_val, int_val2, (void*)arg);
+
+ case IOV_GVAL(IOV_HP2P_ENABLE):
+ int_val = dhd_prot_hp2p_enable(bus->dhd, FALSE, int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HP2P_PKT_THRESHOLD):
+ dhd_prot_pkt_threshold(bus->dhd, TRUE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_HP2P_PKT_THRESHOLD):
+ int_val = dhd_prot_pkt_threshold(bus->dhd, FALSE, int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HP2P_TIME_THRESHOLD):
+ dhd_prot_time_threshold(bus->dhd, TRUE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_HP2P_TIME_THRESHOLD):
+ int_val = dhd_prot_time_threshold(bus->dhd, FALSE, int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HP2P_PKT_EXPIRY):
+ dhd_prot_pkt_expiry(bus->dhd, TRUE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_HP2P_PKT_EXPIRY):
+ int_val = dhd_prot_pkt_expiry(bus->dhd, FALSE, int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_HP2P_TXCPL_MAXITEMS):
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ return BCME_NOTDOWN;
+ }
+ dhd_bus_set_hp2p_ring_max_size(bus, TRUE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_HP2P_TXCPL_MAXITEMS):
+ int_val = dhd_bus_get_hp2p_ring_max_size(bus, TRUE);
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_HP2P_RXCPL_MAXITEMS):
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ return BCME_NOTDOWN;
+ }
+ dhd_bus_set_hp2p_ring_max_size(bus, FALSE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_HP2P_RXCPL_MAXITEMS):
+ int_val = dhd_bus_get_hp2p_ring_max_size(bus, FALSE);
+ bcopy(&int_val, arg, val_size);
+ break;
+#endif /* DHD_HP2P */
+ case IOV_SVAL(IOV_EXTDTXS_IN_TXCPL):
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ return BCME_NOTDOWN;
+ }
+ if (int_val)
+ bus->dhd->extdtxs_in_txcpl = TRUE;
+ else
+ bus->dhd->extdtxs_in_txcpl = FALSE;
+ break;
+
+ case IOV_GVAL(IOV_EXTDTXS_IN_TXCPL):
+ int_val = bus->dhd->extdtxs_in_txcpl;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HOSTRDY_AFTER_INIT):
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ return BCME_NOTDOWN;
+ }
+ if (int_val)
+ bus->dhd->hostrdy_after_init = TRUE;
+ else
+ bus->dhd->hostrdy_after_init = FALSE;
+ break;
+
+ case IOV_GVAL(IOV_HOSTRDY_AFTER_INIT):
+ int_val = bus->dhd->hostrdy_after_init;
+ bcopy(&int_val, arg, val_size);
break;
default:
return 0;
}
+void
+dhd_bus_dump_dar_registers(struct dhd_bus *bus)
+{
+ uint32 dar_clk_ctrl_val, dar_pwr_ctrl_val, dar_intstat_val,
+ dar_errlog_val, dar_erraddr_val, dar_pcie_mbint_val;
+ uint32 dar_clk_ctrl_reg, dar_pwr_ctrl_reg, dar_intstat_reg,
+ dar_errlog_reg, dar_erraddr_reg, dar_pcie_mbint_reg;
+
+ if (bus->is_linkdown && !bus->cto_triggered) {
+ DHD_ERROR(("%s: link is down\n", __FUNCTION__));
+ return;
+ }
+
+ dar_clk_ctrl_reg = (uint32)DAR_CLK_CTRL(bus->sih->buscorerev);
+ dar_pwr_ctrl_reg = (uint32)DAR_PCIE_PWR_CTRL(bus->sih->buscorerev);
+ dar_intstat_reg = (uint32)DAR_INTSTAT(bus->sih->buscorerev);
+ dar_errlog_reg = (uint32)DAR_ERRLOG(bus->sih->buscorerev);
+ dar_erraddr_reg = (uint32)DAR_ERRADDR(bus->sih->buscorerev);
+ dar_pcie_mbint_reg = (uint32)DAR_PCIMailBoxInt(bus->sih->buscorerev);
+
+ if (bus->sih->buscorerev < 24) {
+ DHD_ERROR(("%s: DAR not supported for corerev(%d) < 24\n",
+ __FUNCTION__, bus->sih->buscorerev));
+ return;
+ }
+
+ dar_clk_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_clk_ctrl_reg, 0, 0);
+ dar_pwr_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pwr_ctrl_reg, 0, 0);
+ dar_intstat_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_intstat_reg, 0, 0);
+ dar_errlog_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_errlog_reg, 0, 0);
+ dar_erraddr_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_erraddr_reg, 0, 0);
+ dar_pcie_mbint_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pcie_mbint_reg, 0, 0);
+
+ DHD_ERROR(("%s: dar_clk_ctrl(0x%x:0x%x) dar_pwr_ctrl(0x%x:0x%x) dar_intstat(0x%x:0x%x)\n",
+ __FUNCTION__, dar_clk_ctrl_reg, dar_clk_ctrl_val,
+ dar_pwr_ctrl_reg, dar_pwr_ctrl_val, dar_intstat_reg, dar_intstat_val));
+
+ DHD_ERROR(("%s: dar_errlog(0x%x:0x%x) dar_erraddr(0x%x:0x%x) dar_pcie_mbint(0x%x:0x%x)\n",
+ __FUNCTION__, dar_errlog_reg, dar_errlog_val,
+ dar_erraddr_reg, dar_erraddr_val, dar_pcie_mbint_reg, dar_pcie_mbint_val));
+}
+
/* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */
void
dhd_bus_hostready(struct dhd_bus *bus)
return;
}
- DHD_INFO_HW4(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__,
+ DHD_ERROR(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__,
dhd_pcie_config_read(bus->osh, PCI_CFG_CMD, sizeof(uint32))));
+
if (DAR_PWRREQ(bus)) {
dhd_bus_pcie_pwr_req(bus);
}
+
+ dhd_bus_dump_dar_registers(bus);
+
si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus), ~0, 0x12345678);
bus->hostready_count ++;
- DHD_INFO_HW4(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count));
+ DHD_ERROR(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count));
}
/* Clear INTSTATUS */
return -EBUSY;
}
- bus->last_suspend_start_time = OSL_SYSUPTIME_US();
+ bus->last_suspend_start_time = OSL_LOCALTIME_NS();
/* stop all interface network queue. */
dhd_bus_stop_queue(bus);
}
#else
DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
-
/* Clear wait_for_d3_ack before sending D3_INFORM */
bus->wait_for_d3_ack = 0;
/*
__FUNCTION__, intstatus, host_irq_disabled));
dhd_pcie_intr_count_dump(bus->dhd);
dhd_print_tasklet_status(bus->dhd);
- dhd_prot_process_ctrlbuf(bus->dhd);
+ if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 &&
+ !bus->use_mailbox) {
+ dhd_prot_process_ctrlbuf(bus->dhd);
+ } else {
+ dhdpcie_handle_mb_data(bus);
+ }
timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
/* Clear Interrupts */
dhdpcie_bus_clear_intstatus(bus);
if (bus->wait_for_d3_ack) {
DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__));
-
/* Got D3 Ack. Suspend the bus */
if (active) {
DHD_ERROR(("%s():Suspend failed because of wakelock"
}
#if defined(BCMPCIE_OOB_HOST_WAKE)
- dhdpcie_oob_intr_set(bus, TRUE);
+ if (bus->dhd->dhd_induce_error == DHD_INDUCE_DROP_OOB_IRQ) {
+ DHD_ERROR(("%s: Inducing DROP OOB IRQ\n", __FUNCTION__));
+ } else {
+ dhdpcie_oob_intr_set(bus, TRUE);
+ }
#endif /* BCMPCIE_OOB_HOST_WAKE */
DHD_GENERAL_LOCK(bus->dhd, flags);
bus->dhd->d3ackcnt_timeout = 0;
bus->dhd->busstate = DHD_BUS_SUSPEND;
DHD_GENERAL_UNLOCK(bus->dhd, flags);
- DHD_ERROR(("%s: BaseAddress0(0x%x)=0x%x, "
- "BaseAddress1(0x%x)=0x%x\n", __FUNCTION__,
- PCIECFGREG_BASEADDR0,
- dhd_pcie_config_read(bus->osh,
- PCIECFGREG_BASEADDR0, sizeof(uint32)),
- PCIECFGREG_BASEADDR1,
- dhd_pcie_config_read(bus->osh,
- PCIECFGREG_BASEADDR1, sizeof(uint32))));
dhdpcie_dump_resource(bus);
/* Handle Host Suspend */
rc = dhdpcie_pci_suspend_resume(bus, state);
if (!rc) {
- bus->last_suspend_end_time = OSL_SYSUPTIME_US();
+ bus->last_suspend_end_time = OSL_LOCALTIME_NS();
}
}
} else if (timeleft == 0) { /* D3 ACK Timeout */
+#ifdef DHD_FW_COREDUMP
+ uint32 cur_memdump_mode = bus->dhd->memdump_enabled;
+#endif /* DHD_FW_COREDUMP */
+
+ /* check if the D3 ACK timeout due to scheduling issue */
+ bus->dhd->is_sched_error = !dhd_query_bus_erros(bus->dhd) &&
+ bus->isr_entry_time > bus->last_d3_inform_time &&
+ dhd_bus_query_dpc_sched_errors(bus->dhd);
bus->dhd->d3ack_timeout_occured = TRUE;
/* If the D3 Ack has timeout */
bus->dhd->d3ackcnt_timeout++;
- DHD_ERROR(("%s: resumed on timeout for D3 ACK d3_inform_cnt %d \n",
- __FUNCTION__, bus->dhd->d3ackcnt_timeout));
+ DHD_ERROR(("%s: resumed on timeout for D3 ACK%s d3_inform_cnt %d\n",
+ __FUNCTION__, bus->dhd->is_sched_error ?
+ " due to scheduling problem" : "", bus->dhd->d3ackcnt_timeout));
+#if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
+ if (bus->dhd->is_sched_error && cur_memdump_mode == DUMP_MEMFILE_BUGON) {
+ /* change g_assert_type to trigger Kernel panic */
+ g_assert_type = 2;
+ /* use ASSERT() to trigger panic */
+ ASSERT(0);
+ }
+#endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
DHD_BUS_LOCK(bus->bus_lock, flags_bus);
bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
/* resume all interface network queue. */
dhd_bus_start_queue(bus);
DHD_GENERAL_UNLOCK(bus->dhd, flags);
- if (!bus->dhd->dongle_trap_occured) {
+ if (!bus->dhd->dongle_trap_occured &&
+ !bus->is_linkdown &&
+ !bus->cto_triggered) {
uint32 intstatus = 0;
/* Check if PCIe bus status is valid */
dhd_bus_dump_console_buffer(bus);
dhd_prot_debug_info_print(bus->dhd);
#ifdef DHD_FW_COREDUMP
- if (bus->dhd->memdump_enabled) {
+ if (cur_memdump_mode) {
/* write core dump to file */
bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT;
dhdpcie_mem_dump(bus);
} else {
/* Resume */
DHD_ERROR(("%s: Entering resume state\n", __FUNCTION__));
- bus->last_resume_start_time = OSL_SYSUPTIME_US();
+ bus->last_resume_start_time = OSL_LOCALTIME_NS();
/**
* PCIE2_BAR0_CORE2_WIN gets reset after D3 cold.
DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
#endif /* BCMPCIE_OOB_HOST_WAKE */
rc = dhdpcie_pci_suspend_resume(bus, state);
- DHD_ERROR(("%s: BaseAddress0(0x%x)=0x%x, BaseAddress1(0x%x)=0x%x\n",
- __FUNCTION__, PCIECFGREG_BASEADDR0,
- dhd_pcie_config_read(bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)),
- PCIECFGREG_BASEADDR1,
- dhd_pcie_config_read(bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32))));
dhdpcie_dump_resource(bus);
DHD_BUS_LOCK(bus->bus_lock, flags_bus);
dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
}
- bus->last_resume_end_time = OSL_SYSUPTIME_US();
- /* pro-actively update TCM rd index for EDL ring */
+ bus->last_resume_end_time = OSL_LOCALTIME_NS();
+ /* Update TCM rd index for EDL ring */
DHD_EDL_RING_TCM_RD_UPDATE(bus->dhd);
}
return rc;
if ((chipid == BCM4375_CHIP_ID ||
chipid == BCM4362_CHIP_ID ||
chipid == BCM43751_CHIP_ID ||
+ chipid == BCM43752_CHIP_ID ||
chipid == BCM4377_CHIP_ID) &&
(d11_lpbk != M2M_DMA_LPBK && d11_lpbk != M2M_NON_DMA_LPBK)) {
len += 8;
}
+/* Return TRUE when the attached chip advertises multiple-backplane
+ * (MULTIBP) power-domain support, as reported by the MULTIBP_CAP()
+ * query on the silicon interconnect handle. Thin predicate wrapper so
+ * bus-agnostic callers need not dereference bus->sih directly.
+ */
+bool
+dhd_bus_is_multibp_capable(struct dhd_bus *bus)
+{
+	return MULTIBP_CAP(bus->sih);
+}
+
+#define PCIE_REV_FOR_4378A0 66 /* dhd_bus_perform_flr_with_quiesce() causes problems */
+#define PCIE_REV_FOR_4378B0 68
+
static int
dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
{
int bcmerror = 0;
volatile uint32 *cr4_regs;
+ bool do_flr;
if (!bus->sih) {
DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__));
return BCME_ERROR;
}
+
+ do_flr = ((bus->sih->buscorerev != PCIE_REV_FOR_4378A0) &&
+ (bus->sih->buscorerev != PCIE_REV_FOR_4378B0));
+
+ if (MULTIBP_ENAB(bus->sih) && !do_flr) {
+ dhd_bus_pcie_pwr_req(bus);
+ }
+
/* To enter download state, disable ARM and reset SOCRAM.
* To exit download state, simply reset ARM (default is RAM boot).
*/
if (enter) {
+
/* Make sure BAR1 maps to backplane address 0 */
- dhdpcie_bus_cfg_write_dword(bus, PCI_BAR1_WIN, 4, 0x00000000);
+ dhdpcie_setbar1win(bus, 0x00000000);
bus->alp_only = TRUE;
/* some chips (e.g. 43602) have two ARM cores, the CR4 is receives the firmware. */
/* Always return to PCIE core */
si_setcore(bus->sih, PCIE2_CORE_ID, 0);
+ if (MULTIBP_ENAB(bus->sih) && !do_flr) {
+ dhd_bus_pcie_pwr_req_clear(bus);
+ }
+
return bcmerror;
} /* dhdpcie_bus_download_state */
void dhd_dump_intr_counters(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
{
+ dhd_bus_t *bus;
+ uint64 current_time = OSL_LOCALTIME_NS();
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ bus = dhd->bus;
+ if (!bus) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return;
+ }
+
bcm_bprintf(strbuf, "\n ------- DUMPING INTR enable/disable counters-------\n");
bcm_bprintf(strbuf, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
"isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
- "dpc_return_busdown_count=%lu\n",
- dhd->bus->resume_intr_enable_count, dhd->bus->dpc_intr_enable_count,
- dhd->bus->isr_intr_disable_count, dhd->bus->suspend_intr_disable_count,
- dhd->bus->dpc_return_busdown_count);
+ "dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
+ bus->resume_intr_enable_count, bus->dpc_intr_enable_count,
+ bus->isr_intr_disable_count, bus->suspend_intr_disable_count,
+ bus->dpc_return_busdown_count, bus->non_ours_irq_count);
#ifdef BCMPCIE_OOB_HOST_WAKE
bcm_bprintf(strbuf, "oob_intr_count=%lu oob_intr_enable_count=%lu"
- " oob_intr_disable_count=%lu\n oob_irq_num=%d last_oob_irq_time=%llu\n",
- dhd->bus->oob_intr_count, dhd->bus->oob_intr_enable_count,
- dhd->bus->oob_intr_disable_count, dhdpcie_get_oob_irq_num(dhd->bus),
- dhd->bus->last_oob_irq_time);
+ " oob_intr_disable_count=%lu\noob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT
+ " last_oob_irq_enable_time="SEC_USEC_FMT"\nlast_oob_irq_disable_time="SEC_USEC_FMT
+ " oob_irq_enabled=%d oob_gpio_level=%d\n",
+ bus->oob_intr_count, bus->oob_intr_enable_count,
+ bus->oob_intr_disable_count, dhdpcie_get_oob_irq_num(bus),
+ GET_SEC_USEC(bus->last_oob_irq_time), GET_SEC_USEC(bus->last_oob_irq_enable_time),
+ GET_SEC_USEC(bus->last_oob_irq_disable_time), dhdpcie_get_oob_irq_status(bus),
+ dhdpcie_get_oob_irq_level());
#endif /* BCMPCIE_OOB_HOST_WAKE */
- bcm_bprintf(strbuf, "\ncurrent_time=%llu isr_entry_time=%llu isr_exit_time=%llu\n"
- "dpc_entry_time=%llu last_process_ctrlbuf_time=%llu "
- "last_process_flowring_time=%llu last_process_txcpl_time=%llu\n"
- "last_process_rxcpl_time=%llu last_process_infocpl_time=%llu "
- "dpc_exit_time=%llu resched_dpc_time=%llu\n",
- OSL_SYSUPTIME_US(), dhd->bus->isr_entry_time, dhd->bus->isr_exit_time,
- dhd->bus->dpc_entry_time, dhd->bus->last_process_ctrlbuf_time,
- dhd->bus->last_process_flowring_time, dhd->bus->last_process_txcpl_time,
- dhd->bus->last_process_rxcpl_time, dhd->bus->last_process_infocpl_time,
- dhd->bus->dpc_exit_time, dhd->bus->resched_dpc_time);
-
- bcm_bprintf(strbuf, "\nlast_suspend_start_time=%llu last_suspend_end_time=%llu"
- " last_resume_start_time=%llu last_resume_end_time=%llu\n",
- dhd->bus->last_suspend_start_time, dhd->bus->last_suspend_end_time,
- dhd->bus->last_resume_start_time, dhd->bus->last_resume_end_time);
+ bcm_bprintf(strbuf, "\ncurrent_time="SEC_USEC_FMT" isr_entry_time="SEC_USEC_FMT
+ " isr_exit_time="SEC_USEC_FMT"\ndpc_sched_time="SEC_USEC_FMT
+ " last_non_ours_irq_time="SEC_USEC_FMT" dpc_entry_time="SEC_USEC_FMT"\n"
+ "last_process_ctrlbuf_time="SEC_USEC_FMT " last_process_flowring_time="SEC_USEC_FMT
+ " last_process_txcpl_time="SEC_USEC_FMT"\nlast_process_rxcpl_time="SEC_USEC_FMT
+ " last_process_infocpl_time="SEC_USEC_FMT" last_process_edl_time="SEC_USEC_FMT
+ "\ndpc_exit_time="SEC_USEC_FMT" resched_dpc_time="SEC_USEC_FMT"\n"
+ "last_d3_inform_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(current_time), GET_SEC_USEC(bus->isr_entry_time),
+ GET_SEC_USEC(bus->isr_exit_time), GET_SEC_USEC(bus->dpc_sched_time),
+ GET_SEC_USEC(bus->last_non_ours_irq_time), GET_SEC_USEC(bus->dpc_entry_time),
+ GET_SEC_USEC(bus->last_process_ctrlbuf_time),
+ GET_SEC_USEC(bus->last_process_flowring_time),
+ GET_SEC_USEC(bus->last_process_txcpl_time),
+ GET_SEC_USEC(bus->last_process_rxcpl_time),
+ GET_SEC_USEC(bus->last_process_infocpl_time),
+ GET_SEC_USEC(bus->last_process_edl_time),
+ GET_SEC_USEC(bus->dpc_exit_time), GET_SEC_USEC(bus->resched_dpc_time),
+ GET_SEC_USEC(bus->last_d3_inform_time));
+
+ bcm_bprintf(strbuf, "\nlast_suspend_start_time="SEC_USEC_FMT" last_suspend_end_time="
+ SEC_USEC_FMT" last_resume_start_time="SEC_USEC_FMT" last_resume_end_time="
+ SEC_USEC_FMT"\n", GET_SEC_USEC(bus->last_suspend_start_time),
+ GET_SEC_USEC(bus->last_suspend_end_time),
+ GET_SEC_USEC(bus->last_resume_start_time),
+ GET_SEC_USEC(bus->last_resume_end_time));
#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
- bcm_bprintf(strbuf, "logtrace_thread_entry_time=%llu "
- "logtrace_thread_sem_down_time=%llu "
- "logtrace_thread_flush_time=%llu "
- "logtrace_thread_unexpected_break_time=%llu "
- " logtrace_thread_complete_time=%llu\n",
- dhd->logtrace_thr_ts.entry_time, dhd->logtrace_thr_ts.sem_down_time,
- dhd->logtrace_thr_ts.flush_time, dhd->logtrace_thr_ts.unexpected_break_time,
- dhd->logtrace_thr_ts.complete_time);
+ bcm_bprintf(strbuf, "logtrace_thread_entry_time="SEC_USEC_FMT
+ " logtrace_thread_sem_down_time="SEC_USEC_FMT
+ "\nlogtrace_thread_flush_time="SEC_USEC_FMT
+ " logtrace_thread_unexpected_break_time="SEC_USEC_FMT
+ "\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
+ GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
+ GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
+ GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time),
+ GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time));
#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
}
bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n",
dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr);
bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr));
+#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
+ bcm_bprintf(strbuf, "multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
+ dhdp->multi_client_flow_rings, dhdp->max_multi_client_flow_rings);
+#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
bcm_bprintf(strbuf,
"%4s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ",
"Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen",
bcm_bprintf(strbuf, "\n");
}
- bcm_bprintf(strbuf, "\n");
- ix = 0;
- bcm_bprintf(strbuf, "%4s %4s %2s %10s %7s %6s %5s %5s %10s %7s %7s %7s \n",
- "Num:", "Flow", "If", " ACKED", "D11SPRS", "WLSPRS", "TSDWL",
- "NOACK", "SPRS_ACKED", "EXPIRED", "DROPPED", "FWFREED");
- for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
- flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
- if (!flow_ring_node->active)
- continue;
-
- flow_info = &flow_ring_node->flow_info;
- bcm_bprintf(strbuf, "%4d %4d %2d ",
- ix++, flow_ring_node->flowid, flow_info->ifindex);
- bcm_bprintf(strbuf,
- "%5d %7d %6d %5d %5d %10d %7d %7d %7d\n",
- "NA", "NA", "NA", "NA", "NA", "NA", "NA", "NA", "NA");
- }
-
#ifdef TX_STATUS_LATENCY_STATS
- bcm_bprintf(strbuf, "%s %16s %16s\n", "If", "AvgTxCmpL_Us", "NumTxStatus");
+ bcm_bprintf(strbuf, "\n%s %16s %16s\n", "If", "AvgTxCmpL_Us", "NumTxStatus");
if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
for (ix = 0; ix < DHD_MAX_IFS; ix++) {
if (!if_flow_lkup[ix].status) {
if_tx_status_latency[ix].num_tx_status);
}
#endif /* TX_STATUS_LATENCY_STATS */
+
+#ifdef DHD_HP2P
+ if (dhdp->hp2p_capable) {
+ bcm_bprintf(strbuf, "\n%s %16s %16s", "Flowid", "Tx_t0", "Tx_t1");
+
+ for (flowid = 0; flowid < MAX_HP2P_FLOWS; flowid++) {
+ hp2p_info_t *hp2p_info;
+ int bin;
+
+ hp2p_info = &dhdp->hp2p_info[flowid];
+ if (hp2p_info->num_timer_start == 0)
+ continue;
+
+ bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid);
+ bcm_bprintf(strbuf, "\n%s", "Bin");
+
+ for (bin = 0; bin < MAX_TX_HIST_BIN; bin++) {
+ bcm_bprintf(strbuf, "\n%2d %20d %16d", bin,
+ hp2p_info->tx_t0[bin], hp2p_info->tx_t1[bin]);
+ }
+
+ bcm_bprintf(strbuf, "\n%s %16s", "Flowid", "Rx_t0");
+ bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid);
+ bcm_bprintf(strbuf, "\n%s", "Bin");
+
+ for (bin = 0; bin < MAX_RX_HIST_BIN; bin++) {
+ bcm_bprintf(strbuf, "\n%d %20d", bin,
+ hp2p_info->rx_t0[bin]);
+ }
+
+ bcm_bprintf(strbuf, "\n%s %16s %16s",
+ "Packet limit", "Timer limit", "Timer start");
+ bcm_bprintf(strbuf, "\n%d %24d %16d", hp2p_info->num_pkt_limit,
+ hp2p_info->num_timer_limit, hp2p_info->num_timer_start);
+ }
+
+ bcm_bprintf(strbuf, "\n");
+ }
+#endif /* DHD_HP2P */
+
bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt);
bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt);
bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt);
dhdp->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX");
}
+#ifdef DNGL_AXI_ERROR_LOGGING
+/* Validate the AXI-error record the dongle claims to have written to TCM.
+ *
+ * Reads the TCM pointer stored at dhdp->axierror_logbuf_addr, checks that
+ * it falls inside dongle RAM [dongle_ram_base, dongle_ram_base + ramsize),
+ * then reads the record's signature word and returns TRUE only when it
+ * equals HND_EXT_TRAP_AXIERROR_SIGNATURE. Returns FALSE on any mismatch,
+ * and also when the DHD_INDUCE_DROP_AXI_SIG error-injection knob is set.
+ */
+bool
+dhd_axi_sig_match(dhd_pub_t *dhdp)
+{
+	uint32 axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, dhdp->axierror_logbuf_addr);
+
+	/* Error-injection test hook: behave as if the signature were absent. */
+	if (dhdp->dhd_induce_error == DHD_INDUCE_DROP_AXI_SIG) {
+		DHD_ERROR(("%s: Induce AXI signature drop\n", __FUNCTION__));
+		return FALSE;
+	}
+
+	DHD_ERROR(("%s: axi_tcm_addr: 0x%x, tcm range: 0x%x ~ 0x%x\n",
+		__FUNCTION__, axi_tcm_addr, dhdp->bus->dongle_ram_base,
+		dhdp->bus->dongle_ram_base + dhdp->bus->ramsize));
+	/* Only trust the pointer when it lies within dongle RAM; a bogus
+	 * address would make the follow-up signature read meaningless.
+	 */
+	if (axi_tcm_addr >= dhdp->bus->dongle_ram_base &&
+		axi_tcm_addr < dhdp->bus->dongle_ram_base + dhdp->bus->ramsize) {
+		uint32 axi_signature = dhdpcie_bus_rtcm32(dhdp->bus, (axi_tcm_addr +
+			OFFSETOF(hnd_ext_trap_axi_error_v1_t, signature)));
+		if (axi_signature == HND_EXT_TRAP_AXIERROR_SIGNATURE) {
+			return TRUE;
+		} else {
+			DHD_ERROR(("%s: No AXI signature: 0x%x\n",
+				__FUNCTION__, axi_signature));
+			return FALSE;
+		}
+	} else {
+		DHD_ERROR(("%s: No AXI shared tcm address debug info.\n", __FUNCTION__));
+		return FALSE;
+	}
+}
+
+/* Handle a dongle AXI error: read the error record out of dongle TCM,
+ * dump it to the kernel log, copy it into the extended-trap-data dump
+ * area, and finally schedule the deferred AXI-error dump work.
+ *
+ * Every early-exit path still falls through to sched_axi so the dump
+ * work is scheduled regardless of how much of the record was recovered.
+ * MFREE at sched_axi is safe on all paths: axi_err_buf is non-NULL only
+ * after 'size' has been initialized.
+ */
+void
+dhd_axi_error(dhd_pub_t *dhdp)
+{
+	dhd_axi_error_dump_t *axi_err_dump;
+	uint8 *axi_err_buf = NULL;
+	uint8 *p_axi_err = NULL;
+	uint32 axi_logbuf_addr;
+	uint32 axi_tcm_addr;
+	int err, size;
+
+	/* 75 ms settle delay — presumably lets the dongle finish writing the
+	 * AXI error record to TCM before the host reads it. TODO confirm.
+	 */
+	OSL_DELAY(75000);
+
+	axi_logbuf_addr = dhdp->axierror_logbuf_addr;
+	if (!axi_logbuf_addr) {
+		DHD_ERROR(("%s: No AXI TCM address debug info.\n", __FUNCTION__));
+		goto sched_axi;
+	}
+
+	axi_err_dump = dhdp->axi_err_dump;
+	if (!axi_err_dump) {
+		goto sched_axi;
+	}
+
+	/* Bail out unless the record carries the expected AXI signature. */
+	if (!dhd_axi_sig_match(dhdp)) {
+		goto sched_axi;
+	}
+
+	/* Reading AXI error data for SMMU fault */
+	DHD_ERROR(("%s: Read AXI data from TCM address\n", __FUNCTION__));
+	axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, axi_logbuf_addr);
+	size = sizeof(hnd_ext_trap_axi_error_v1_t);
+	axi_err_buf = MALLOCZ(dhdp->osh, size);
+	if (axi_err_buf == NULL) {
+		DHD_ERROR(("%s: out of memory !\n", __FUNCTION__));
+		goto sched_axi;
+	}
+
+	p_axi_err = axi_err_buf;
+	err = dhdpcie_bus_membytes(dhdp->bus, FALSE, axi_tcm_addr, p_axi_err, size);
+	if (err) {
+		DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+			__FUNCTION__, err, size, axi_tcm_addr));
+		goto sched_axi;
+	}
+
+	/* Dump data to Dmesg */
+	dhd_log_dump_axi_error(axi_err_buf);
+	/* Preserve the record in the extended-trap-data dump area; a copy
+	 * failure is logged but does not abort scheduling the dump work.
+	 */
+	err = memcpy_s(&axi_err_dump->etd_axi_error_v1, size, axi_err_buf, size);
+	if (err) {
+		DHD_ERROR(("%s: failed to copy etd axi error info, err=%d\n",
+			__FUNCTION__, err));
+	}
+
+sched_axi:
+	if (axi_err_buf) {
+		MFREE(dhdp->osh, axi_err_buf, size);
+	}
+	dhd_schedule_axi_error_dump(dhdp, NULL);
+}
+
+static void
+dhd_log_dump_axi_error(uint8 *axi_err)
+{
+ dma_dentry_v1_t dma_dentry;
+ dma_fifo_v1_t dma_fifo;
+ int i = 0, j = 0;
+
+ if (*(uint8 *)axi_err == HND_EXT_TRAP_AXIERROR_VERSION_1) {
+ hnd_ext_trap_axi_error_v1_t *axi_err_v1 = (hnd_ext_trap_axi_error_v1_t *)axi_err;
+ DHD_ERROR(("%s: signature : 0x%x\n", __FUNCTION__, axi_err_v1->signature));
+ DHD_ERROR(("%s: version : 0x%x\n", __FUNCTION__, axi_err_v1->version));
+ DHD_ERROR(("%s: length : 0x%x\n", __FUNCTION__, axi_err_v1->length));
+ DHD_ERROR(("%s: dma_fifo_valid_count : 0x%x\n",
+ __FUNCTION__, axi_err_v1->dma_fifo_valid_count));
+ DHD_ERROR(("%s: axi_errorlog_status : 0x%x\n",
+ __FUNCTION__, axi_err_v1->axi_errorlog_status));
+ DHD_ERROR(("%s: axi_errorlog_core : 0x%x\n",
+ __FUNCTION__, axi_err_v1->axi_errorlog_core));
+ DHD_ERROR(("%s: axi_errorlog_hi : 0x%x\n",
+ __FUNCTION__, axi_err_v1->axi_errorlog_hi));
+ DHD_ERROR(("%s: axi_errorlog_lo : 0x%x\n",
+ __FUNCTION__, axi_err_v1->axi_errorlog_lo));
+ DHD_ERROR(("%s: axi_errorlog_id : 0x%x\n",
+ __FUNCTION__, axi_err_v1->axi_errorlog_id));
+
+ for (i = 0; i < MAX_DMAFIFO_ENTRIES_V1; i++) {
+ dma_fifo = axi_err_v1->dma_fifo[i];
+ DHD_ERROR(("%s: valid:%d : 0x%x\n", __FUNCTION__, i, dma_fifo.valid));
+ DHD_ERROR(("%s: direction:%d : 0x%x\n",
+ __FUNCTION__, i, dma_fifo.direction));
+ DHD_ERROR(("%s: index:%d : 0x%x\n",
+ __FUNCTION__, i, dma_fifo.index));
+ DHD_ERROR(("%s: dpa:%d : 0x%x\n",
+ __FUNCTION__, i, dma_fifo.dpa));
+ DHD_ERROR(("%s: desc_lo:%d : 0x%x\n",
+ __FUNCTION__, i, dma_fifo.desc_lo));
+ DHD_ERROR(("%s: desc_hi:%d : 0x%x\n",
+ __FUNCTION__, i, dma_fifo.desc_hi));
+ DHD_ERROR(("%s: din:%d : 0x%x\n",
+ __FUNCTION__, i, dma_fifo.din));
+ DHD_ERROR(("%s: dout:%d : 0x%x\n",
+ __FUNCTION__, i, dma_fifo.dout));
+ for (j = 0; j < MAX_DMAFIFO_DESC_ENTRIES_V1; j++) {
+ dma_dentry = axi_err_v1->dma_fifo[i].dentry[j];
+ DHD_ERROR(("%s: ctrl1:%d : 0x%x\n",
+ __FUNCTION__, i, dma_dentry.ctrl1));
+ DHD_ERROR(("%s: ctrl2:%d : 0x%x\n",
+ __FUNCTION__, i, dma_dentry.ctrl2));
+ DHD_ERROR(("%s: addrlo:%d : 0x%x\n",
+ __FUNCTION__, i, dma_dentry.addrlo));
+ DHD_ERROR(("%s: addrhi:%d : 0x%x\n",
+ __FUNCTION__, i, dma_dentry.addrhi));
+ }
+ }
+ }
+ else {
+ DHD_ERROR(("%s: Invalid AXI version: 0x%x\n", __FUNCTION__, (*(uint8 *)axi_err)));
+ }
+}
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
/**
* Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their
* flow queue to their flow ring.
flow_ring_node_t *flow_ring_node;
struct dhd_bus *bus = dhd->bus;
+ if (dhd_query_bus_erros(dhd)) {
+ return;
+ }
+
/* Hold flowring_list_lock to ensure no race condition while accessing the List */
DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
for (item = dll_head_p(&bus->flowring_active_list);
__FUNCTION__, bus->bus_low_power_state));
return;
}
+
+ /* Skip in the case of link down */
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ }
+
if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
(bus->sih->buscorerev == 4)) {
si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int,
__FUNCTION__, bus->bus_low_power_state));
return;
}
+
+ /* Skip in the case of link down */
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ }
+
DHD_INFO(("writing a door bell 2 to the device\n"));
if (DAR_PWRREQ(bus)) {
dhd_bus_pcie_pwr_req(bus);
return;
}
+ /* Skip in the case of link down */
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ }
+
if (DAR_PWRREQ(bus)) {
dhd_bus_pcie_pwr_req(bus);
}
+
+#ifdef DHD_DB0TS
+ if (bus->dhd->db0ts_capable) {
+ uint64 ts;
+
+ ts = local_clock();
+ do_div(ts, 1000);
+
+ value = htol32(ts & 0xFFFFFFFF);
+ DHD_INFO(("%s: usec timer = 0x%x\n", __FUNCTION__, value));
+ }
+#endif /* DHD_DB0TS */
W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
}
return;
}
+ /* Skip in the case of link down */
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ }
+
if (DAR_PWRREQ(bus)) {
dhd_bus_pcie_pwr_req(bus);
}
__FUNCTION__, bus->bus_low_power_state));
return;
}
+
+ /* Skip in the case of link down */
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ }
+
w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
}
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
- bus->dpc_entry_time = OSL_SYSUPTIME_US();
+ bus->dpc_entry_time = OSL_LOCALTIME_NS();
DHD_GENERAL_LOCK(bus->dhd, flags);
/* Check for only DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS
* which has been disabled in the dhdpcie_bus_isr()
*/
dhdpcie_enable_irq(bus); /* Enable back interrupt!! */
- bus->dpc_exit_time = OSL_SYSUPTIME_US();
+ bus->dpc_exit_time = OSL_LOCALTIME_NS();
} else {
- bus->resched_dpc_time = OSL_SYSUPTIME_US();
+ bus->resched_dpc_time = OSL_LOCALTIME_NS();
}
bus->dpc_sched = resched;
done:
if (h2d_mb_data == H2D_HOST_D3_INFORM) {
DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
+ bus->last_d3_inform_time = OSL_LOCALTIME_NS();
bus->d3_inform_cnt++;
}
if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) {
dhdpcie_bus_clear_intstatus(bus);
dhdpcie_disable_irq_nosync(bus); /* Disable host interrupt!! */
- /* Set bus_low_power_state to DHD_BUS_D3_ACK_RECIEVED */
- bus->bus_low_power_state = DHD_BUS_D3_ACK_RECIEVED;
+ if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) {
+ /* Set bus_low_power_state to DHD_BUS_D3_ACK_RECIEVED */
+ bus->bus_low_power_state = DHD_BUS_D3_ACK_RECIEVED;
+ DHD_ERROR(("%s: D3_ACK Recieved\n", __FUNCTION__));
+ }
DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
- bus->wait_for_d3_ack = 1;
- dhd_os_d3ack_wake(bus->dhd);
+ /* Check for D3 ACK induce flag, which is set by firing dhd iovar to induce D3 Ack timeout.
+ * If flag is set, D3 wake is skipped, which results in to D3 Ack timeout.
+ */
+ if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) {
+ bus->wait_for_d3_ack = 1;
+ dhd_os_d3ack_wake(bus->dhd);
+ } else {
+ DHD_ERROR(("%s: Inducing D3 ACK timeout\n", __FUNCTION__));
+ }
}
void
dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data)
DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
- if (d2h_mb_data & D2H_DEV_FWHALT) {
+ if (d2h_mb_data & D2H_DEV_FWHALT) {
DHD_ERROR(("FW trap has happened\n"));
dhdpcie_checkdied(bus, NULL, 0);
dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO);
BCM_REFERENCE(ds_acked);
if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! QUITING\n"));
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
goto exit;
}
dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
{
bool resched = FALSE;
+ unsigned long flags_bus;
if (MULTIBP_ENAB(bus->sih)) {
dhd_bus_pcie_pwr_req(bus);
if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
bus->api.handle_mb_data(bus);
- if ((bus->dhd->busstate == DHD_BUS_SUSPEND) || (bus->use_mailbox &&
- (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE))) {
- DHD_ERROR(("%s: Bus is in power save state. "
+ /* Do no process any rings after recieving D3_ACK */
+ DHD_BUS_LOCK(bus->bus_lock, flags_bus);
+ if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
+ DHD_ERROR(("%s: D3 Ack Recieved. "
"Skip processing rest of ring buffers.\n", __FUNCTION__));
+ DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
goto exit;
}
+ DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
/* Validate intstatus only for INTX case */
if ((bus->d2h_intr_method == PCIE_MSI) ||
dhdpci_bus_read_frames(dhd_bus_t *bus)
{
bool more = FALSE;
+ unsigned long flags_bus;
/* First check if there a FW trap */
if ((bus->api.fw_rev >= PCIE_SHARED_VERSION_6) &&
(bus->dhd->dongle_trap_data = dhd_prot_process_trapbuf(bus->dhd))) {
+#ifdef DNGL_AXI_ERROR_LOGGING
+ if (bus->dhd->axi_error) {
+ DHD_ERROR(("AXI Error happened\n"));
+ return FALSE;
+ }
+#endif /* DNGL_AXI_ERROR_LOGGING */
dhd_bus_handle_mb_data(bus, D2H_DEV_FWHALT);
return FALSE;
}
DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
dhd_prot_process_ctrlbuf(bus->dhd);
- bus->last_process_ctrlbuf_time = OSL_SYSUPTIME_US();
+ bus->last_process_ctrlbuf_time = OSL_LOCALTIME_NS();
/* Unlock to give chance for resp to be handled */
DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
- /* Do not process rest of ring buf once bus enters low power state */
- if (!bus->use_mailbox && (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE)) {
- DHD_ERROR(("%s: Bus is in power save state. "
- "Skip processing rest of ring buffers.\n", __FUNCTION__));
+ /* Do not process rest of ring buf once bus enters low power state (D3_INFORM/D3_ACK) */
+ DHD_BUS_LOCK(bus->bus_lock, flags_bus);
+ if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
+ DHD_ERROR(("%s: Bus is in power save state (%d). "
+ "Skip processing rest of ring buffers.\n",
+ __FUNCTION__, bus->bus_low_power_state));
+ DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
return FALSE;
}
+ DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
/* update the flow ring cpls */
dhd_update_txflowrings(bus->dhd);
- bus->last_process_flowring_time = OSL_SYSUPTIME_US();
+ bus->last_process_flowring_time = OSL_LOCALTIME_NS();
/* With heavy TX traffic, we could get a lot of TxStatus
* so add bound
*/
- more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound);
- bus->last_process_txcpl_time = OSL_SYSUPTIME_US();
+#ifdef DHD_HP2P
+ more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_HP2P_RING);
+#endif /* DHD_HP2P */
+ more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_REGULAR_RING);
+ bus->last_process_txcpl_time = OSL_LOCALTIME_NS();
/* With heavy RX traffic, this routine potentially could spend some time
* processing RX frames without RX bound
*/
- more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound);
- bus->last_process_rxcpl_time = OSL_SYSUPTIME_US();
+#ifdef DHD_HP2P
+ more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_HP2P_RING);
+#endif /* DHD_HP2P */
+ more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_REGULAR_RING);
+ bus->last_process_rxcpl_time = OSL_LOCALTIME_NS();
/* Process info ring completion messages */
#ifdef EWP_EDL
#endif // endif
{
more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND);
- bus->last_process_infocpl_time = OSL_SYSUPTIME_US();
+ bus->last_process_infocpl_time = OSL_LOCALTIME_NS();
}
#ifdef EWP_EDL
else {
more |= dhd_prot_process_msgbuf_edl(bus->dhd);
- bus->last_process_edl_time = OSL_SYSUPTIME_US();
+ bus->last_process_edl_time = OSL_LOCALTIME_NS();
}
#endif /* EWP_EDL */
addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
}
+ if (addr == (uint32)-1) {
+ DHD_ERROR(("%s: PCIe link might be down\n", __FUNCTION__));
+ bus->is_linkdown = 1;
+ return BCME_ERROR;
+ }
+
if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
(addr > shaddr)) {
DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
__FUNCTION__, addr));
DHD_ERROR(("%s: Waited %u usec, dongle is not ready\n", __FUNCTION__, tmo.elapsed));
#ifdef DEBUG_DNGL_INIT_FAIL
- bus->dhd->memdump_type = DUMP_TYPE_DONGLE_INIT_FAILURE;
- dhdpcie_mem_dump(bus);
+ if (addr != (uint32)-1) { /* skip further PCIE reads if read this addr */
+ if (bus->dhd->memdump_enabled) {
+ bus->dhd->memdump_type = DUMP_TYPE_DONGLE_INIT_FAILURE;
+ dhdpcie_mem_dump(bus);
+ }
+ }
#endif /* DEBUG_DNGL_INIT_FAIL */
return BCME_ERROR;
} else {
}
}
+ /* TODO: This need to be selected based on IPC instead of compile time */
+ bus->dhd->hwa_enable = TRUE;
+
if (idma_en) {
bus->dhd->idma_enable = (sh->flags & PCIE_SHARED_IDMA) ? TRUE : FALSE;
bus->dhd->ifrm_enable = (sh->flags & PCIE_SHARED_IFRM) ? TRUE : FALSE;
{
ring_info_t ring_info;
+ /* boundary check */
+ if (sh->rings_info_ptr > shaddr) {
+ DHD_ERROR(("%s: rings_info_ptr is invalid 0x%x, skip reading ring info\n",
+ __FUNCTION__, sh->rings_info_ptr));
+ return BCME_ERROR;
+ }
+
if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr,
(uint8 *)&ring_info, sizeof(ring_info_t))) < 0)
return rv;
DHD_ERROR(("FW supports debug buf dest ? %s \n",
bus->dhd->debug_buf_dest_support ? "Y" : "N"));
+#ifdef DHD_HP2P
+ if (bus->dhd->hp2p_enable) {
+ bus->dhd->hp2p_ts_capable =
+ (sh->flags2 & PCIE_SHARED2_PKT_TIMESTAMP) == PCIE_SHARED2_PKT_TIMESTAMP;
+ bus->dhd->hp2p_capable =
+ (sh->flags2 & PCIE_SHARED2_HP2P) == PCIE_SHARED2_HP2P;
+ bus->dhd->hp2p_capable &= bus->dhd->hp2p_ts_capable;
+
+ DHD_ERROR(("FW supports HP2P ? %s \n",
+ bus->dhd->hp2p_capable ? "Y" : "N"));
+
+ if (bus->dhd->hp2p_capable) {
+ bus->dhd->pkt_thresh = HP2P_PKT_THRESH;
+ bus->dhd->pkt_expiry = HP2P_PKT_EXPIRY;
+ bus->dhd->time_thresh = HP2P_TIME_THRESH;
+ for (addr = 0; addr < MAX_HP2P_FLOWS; addr++) {
+ hp2p_info_t *hp2p_info = &bus->dhd->hp2p_info[addr];
+
+ hp2p_info->hrtimer_init = FALSE;
+ tasklet_hrtimer_init(&hp2p_info->timer,
+ dhd_hp2p_write, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ }
+ }
+ }
+#endif /* DHD_HP2P */
+
+#ifdef DHD_DB0TS
+ bus->dhd->db0ts_capable =
+ (sh->flags & PCIE_SHARED_TIMESTAMP_DB0) == PCIE_SHARED_TIMESTAMP_DB0;
+#endif /* DHD_DB0TS */
+
if (MULTIBP_ENAB(bus->sih)) {
dhd_bus_pcie_pwr_req_clear(bus);
+
+ /*
+ * WAR to fix ARM cold boot;
+ * De-assert WL domain in DAR
+ */
+ if (bus->sih->buscorerev >= 68) {
+ dhd_bus_pcie_pwr_req_wl_domain(bus,
+ DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev), FALSE);
+ }
}
return BCME_OK;
} /* dhdpcie_readshared */
if (!bus->dhd)
return 0;
- if (bus->sih->buscorerev == 66) {
+ if (PCIE_RELOAD_WAR_ENAB(bus->sih->buscorerev)) {
dhd_bus_pcie_pwr_req_clear_reload_war(bus);
}
dhd_bus_pcie_pwr_req(bus);
}
+ /* Configure AER registers to log the TLP header */
+ dhd_bus_aer_config(bus);
+
/* Make sure we're talking to the core. */
bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
ASSERT(bus->reg != NULL);
dhd_init_bus_lock(bus);
+ dhd_init_backplane_access_lock(bus);
+
/* Set bus state according to enable result */
dhdp->busstate = DHD_BUS_DATA;
bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
{
uint32 addr = 0;
uint32 val = 0;
+
addr = bus->dongle_ram_base + bus->ramsize - 4;
dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val));
}
(device == BCM43751_D11AX5G_ID) || (device == BCM43751_CHIP_ID)) {
return 0;
}
+ if ((device == BCM43752_D11AX_ID) || (device == BCM43752_D11AX2G_ID) ||
+ (device == BCM43752_D11AX5G_ID) || (device == BCM43752_CHIP_ID)) {
+ return 0;
+ }
if ((device == BCM4347_D11AC_ID) || (device == BCM4347_D11AC2G_ID) ||
(device == BCM4347_D11AC5G_ID) || (device == BCM4347_CHIP_ID)) {
return 0;
dhd_tcpack_info_tbl_clean(bus->dhd);
#endif /* DHDTCPACK_SUPPRESS */
+#ifdef DHD_HP2P
+ if (flow_ring_node->hp2p_ring) {
+ bus->dhd->hp2p_ring_active = FALSE;
+ flow_ring_node->hp2p_ring = FALSE;
+ }
+#endif /* DHD_HP2P */
+
/* clean up BUS level info */
DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));
+ /* Boundary check of the flowid */
+ if (flowid >= bus->dhd->num_flow_rings) {
+ DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
+ flowid, bus->dhd->num_flow_rings));
+ return;
+ }
+
flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+ if (!flow_ring_node) {
+ DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
+ return;
+ }
+
ASSERT(flow_ring_node->flowid == flowid);
+ if (flow_ring_node->flowid != flowid) {
+ DHD_ERROR(("%s: flowid %d is different from the flowid "
+ "of the flow_ring_node %d\n", __FUNCTION__, flowid,
+ flow_ring_node->flowid));
+ return;
+ }
if (status != BCME_OK) {
DHD_ERROR(("%s Flow create Response failure error status = %d \n",
DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));
+ /* Boundary check of the flowid */
+ if (flowid >= bus->dhd->num_flow_rings) {
+ DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
+ flowid, bus->dhd->num_flow_rings));
+ return;
+ }
+
flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+ if (!flow_ring_node) {
+ DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
+ return;
+ }
+
ASSERT(flow_ring_node->flowid == flowid);
+ if (flow_ring_node->flowid != flowid) {
+ DHD_ERROR(("%s: flowid %d is different from the flowid "
+ "of the flow_ring_node %d\n", __FUNCTION__, flowid,
+ flow_ring_node->flowid));
+ return;
+ }
if (status != BCME_OK) {
DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
return;
}
+ /* Boundary check of the flowid */
+ if (flowid >= bus->dhd->num_flow_rings) {
+ DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
+ flowid, bus->dhd->num_flow_rings));
+ return;
+ }
+
flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+ if (!flow_ring_node) {
+ DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
+ return;
+ }
+
ASSERT(flow_ring_node->flowid == flowid);
+ if (flow_ring_node->flowid != flowid) {
+ DHD_ERROR(("%s: flowid %d is different from the flowid "
+ "of the flow_ring_node %d\n", __FUNCTION__, flowid,
+ flow_ring_node->flowid));
+ return;
+ }
flow_ring_node->status = FLOW_RING_STATUS_OPEN;
return;
dhdp->bus->is_linkdown = val;
}
+int
+dhd_bus_get_linkdown(dhd_pub_t *dhdp)
+{
+ return dhdp->bus->is_linkdown;
+}
+
+int
+dhd_bus_get_cto(dhd_pub_t *dhdp)
+{
+ return dhdp->bus->cto_triggered;
+}
+
#ifdef IDLE_TX_FLOW_MGMT
/* resume request */
int
return 0;
}
-void
-dhdpcie_cto_init(struct dhd_bus *bus, bool enable)
+int
+dhdpcie_cto_cfg_init(struct dhd_bus *bus, bool enable)
{
uint32 val;
-
if (enable) {
dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4,
PCI_CTO_INT_MASK | PCI_SBIM_MASK_SERR);
val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_BACKPLANE_EN);
- if (bus->cto_threshold == 0) {
- bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT;
- }
-
- si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, ctoctrl), ~0,
- ((bus->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) &
- PCIE_CTO_TO_THRESHHOLD_MASK) |
- ((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) &
- PCIE_CTO_CLKCHKCNT_MASK) |
- PCIE_CTO_ENAB_MASK);
} else {
dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, 0);
val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_BACKPLANE_EN);
+ }
+ return 0;
+}
+int
+dhdpcie_cto_init(struct dhd_bus *bus, bool enable)
+{
+ if (bus->sih->buscorerev < 19) {
+ DHD_INFO(("%s: Unsupported CTO, buscorerev=%d\n",
+ __FUNCTION__, bus->sih->buscorerev));
+ return BCME_UNSUPPORTED;
+ }
+
+ if (bus->sih->buscorerev == 19) {
+ uint32 pcie_lnkst;
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_LINK_STATUS);
+
+ pcie_lnkst = si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, configdata), 0, 0);
+
+ if (((pcie_lnkst >> PCI_LINK_SPEED_SHIFT) &
+ PCI_LINK_SPEED_MASK) == PCIE_LNK_SPEED_GEN1) {
+ return BCME_UNSUPPORTED;
+ }
+ }
+
+ bus->cto_enable = enable;
+
+ dhdpcie_cto_cfg_init(bus, enable);
+
+ if (enable) {
+ if (bus->cto_threshold == 0) {
+ bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT;
+ }
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, ctoctrl), ~0,
+ ((bus->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) &
+ PCIE_CTO_TO_THRESHHOLD_MASK) |
+ ((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) &
+ PCIE_CTO_CLKCHKCNT_MASK) |
+ PCIE_CTO_ENAB_MASK);
+ } else {
si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0);
+ OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0);
}
+
+ DHD_ERROR(("%s: set CTO prevention and recovery enable/disable %d\n",
+ __FUNCTION__, bus->cto_enable));
+
+ return 0;
}
-static void
+static int
dhdpcie_cto_error_recovery(struct dhd_bus *bus)
{
- uint32 pci_intmask, err_status, dar_val;
+ uint32 pci_intmask, err_status;
uint8 i = 0;
uint32 val;
/*
* DAR still accessible
*/
- dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
- DAR_CLK_CTRL(bus->sih->buscorerev), 0, 0);
- DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_CLK_CTRL(bus->sih->buscorerev), dar_val));
-
- dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
- DAR_PCIE_PWR_CTRL(bus->sih->buscorerev), 0, 0);
- DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_PCIE_PWR_CTRL(bus->sih->buscorerev), dar_val));
-
- dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
- DAR_INTSTAT(bus->sih->buscorerev), 0, 0);
- DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_INTSTAT(bus->sih->buscorerev), dar_val));
-
- dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
- DAR_ERRLOG(bus->sih->buscorerev), 0, 0);
- DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_ERRLOG(bus->sih->buscorerev), dar_val));
-
- dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
- DAR_ERRADDR(bus->sih->buscorerev), 0, 0);
- DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_ERRADDR(bus->sih->buscorerev), dar_val));
-
- dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
- DAR_PCIMailBoxInt(bus->sih->buscorerev), 0, 0);
- DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_PCIMailBoxInt(bus->sih->buscorerev), dar_val));
+ dhd_bus_dump_dar_registers(bus);
/* reset backplane */
val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
DHD_ERROR(("cto recovery fail\n"));
DHD_OS_WAKE_UNLOCK(bus->dhd);
- return;
+ return BCME_ERROR;
}
}
PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
DHD_OS_WAKE_UNLOCK(bus->dhd);
+
+ return BCME_OK;
}
void
}
}
+bool
+dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t *bus)
+{
+ if (!bus->dhd)
+ return FALSE;
+ else if (bus->hwa_enab_bmap) {
+ return bus->dhd->hwa_enable;
+ } else {
+ return FALSE;
+ }
+}
+
bool
dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus)
{
return dhd->bus->idletime;
}
-#ifdef DHD_SSSR_DUMP
-
static INLINE void
dhd_sbreg_op(dhd_pub_t *dhd, uint addr, uint *val, bool read)
{
OSL_DELAY(1);
- serialized_backplane_access(dhd->bus, addr, sizeof(uint), val, read);
- DHD_ERROR(("%s: addr:0x%x val:0x%x read:%d\n", __FUNCTION__, addr, *val, read));
+ if (serialized_backplane_access(dhd->bus, addr, sizeof(uint), val, read) != BCME_OK) {
+ DHD_ERROR(("sbreg: Invalid uint addr: 0x%x \n", addr));
+ } else {
+ DHD_ERROR(("sbreg: addr:0x%x val:0x%x read:%d\n", addr, *val, read));
+ }
return;
}
+#ifdef DHD_SSSR_DUMP
static int
dhdpcie_get_sssr_fifo_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
uint addr_reg, uint data_reg)
addr = data_reg;
/* Read 4 bytes at once and loop for fifo_size / 4 */
for (i = 0; i < fifo_size / 4; i++) {
- serialized_backplane_access(dhd->bus, addr, sizeof(uint), &val, TRUE);
+ if (serialized_backplane_access(dhd->bus, addr,
+ sizeof(uint), &val, TRUE) != BCME_OK) {
+ DHD_ERROR(("%s: error in serialized_backplane_access\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
buf[i] = val;
OSL_DELAY(1);
}
if ((!dhd->sssr_reg_info.vasip_regs.vasip_sr_size) &&
dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) {
- dhdpcie_bus_membytes(dhd->bus, FALSE, addr_reg, (uint8 *)buf, fifo_size);
+ int err = dhdpcie_bus_membytes(dhd->bus, FALSE, addr_reg, (uint8 *)buf,
+ fifo_size);
+ if (err != BCME_OK) {
+ DHD_ERROR(("%s: Error reading dig dump from dongle !\n",
+ __FUNCTION__));
+ }
} else {
/* Check if vasip clk is disabled, if yes enable it */
addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
addr = addr_reg;
/* Read 4 bytes at once and loop for fifo_size / 4 */
for (i = 0; i < fifo_size / 4; i++, addr += 4) {
- serialized_backplane_access(dhd->bus, addr, sizeof(uint), &val,
- TRUE);
+ if (serialized_backplane_access(dhd->bus, addr, sizeof(uint),
+ &val, TRUE) != BCME_OK) {
+ DHD_ERROR(("%s: Invalid uint addr: 0x%x \n", __FUNCTION__,
+ addr));
+ return BCME_ERROR;
+ }
buf[i] = val;
OSL_DELAY(1);
}
hdr = (hnd_ext_trap_hdr_t *)ext_trap_data;
tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_LOG_DATA);
if (tlv) {
+ uint32 baseaddr = 0;
+ uint32 endaddr = dhd->bus->dongle_ram_base + dhd->bus->ramsize - 4;
+
etd_evtlog = (eventlog_trapdata_info_t *)tlv->data;
DHD_ERROR(("%s: etd_evtlog tlv found, num_elements=%x; "
"seq_num=%x; log_arr_addr=%x\n", __FUNCTION__,
DHD_ERROR(("%s: out of memory !\n", __FUNCTION__));
return;
}
+
+ /* boundary check */
+ baseaddr = etd_evtlog->log_arr_addr;
+ if ((baseaddr < dhd->bus->dongle_ram_base) ||
+ ((baseaddr + arr_size) > endaddr)) {
+ DHD_ERROR(("%s: Error reading invalid address\n",
+ __FUNCTION__));
+ goto err;
+ }
+
/* read the eventlog_trap_buf_info_t array from dongle memory */
err = dhdpcie_bus_membytes(dhd->bus, FALSE,
(ulong)(etd_evtlog->log_arr_addr),
seqnum = ntoh32(etd_evtlog->seq_num);
memset(dhd->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);
for (i = 0; i < (etd_evtlog->num_elements); ++i) {
+ /* boundary check */
+ baseaddr = evtlog_buf_arr[i].buf_addr;
+ if ((baseaddr < dhd->bus->dongle_ram_base) ||
+ ((baseaddr + evtlog_buf_arr[i].len) > endaddr)) {
+ DHD_ERROR(("%s: Error reading invalid address\n",
+ __FUNCTION__));
+ goto err;
+ }
/* read each individual event log buf from dongle memory */
err = dhdpcie_bus_membytes(dhd->bus, FALSE,
((ulong)evtlog_buf_arr[i].buf_addr),
}
#endif /* BCMPCIE && DHD_LOG_DUMP */
-static int
-dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd)
+static uint32
+dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd, uint32 reg_val)
{
uint addr;
- uint val;
+ uint val = 0;
DHD_ERROR(("%s\n", __FUNCTION__));
dhd_sbreg_op(dhd, addr, &val, TRUE);
if (!(val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask)) {
addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
- val = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask;
- dhd_sbreg_op(dhd, addr, &val, FALSE);
+ dhd_sbreg_op(dhd, addr, ®_val, FALSE);
}
return BCME_OK;
}
-static int
+static uint32
dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t *dhd)
{
uint addr;
- uint val;
+ uint val = 0, reg_val = 0;
DHD_ERROR(("%s\n", __FUNCTION__));
/* conditionally clear bits [11:8] of PowerCtrl */
addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
- dhd_sbreg_op(dhd, addr, &val, TRUE);
- if (val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask) {
+ dhd_sbreg_op(dhd, addr, ®_val, TRUE);
+ if (reg_val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask) {
addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
val = 0;
dhd_sbreg_op(dhd, addr, &val, FALSE);
}
- return BCME_OK;
+ return reg_val;
}
static int
val = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus_val;
dhd_sbreg_op(dhd, addr, &val, FALSE);
}
+
+ if (MULTIBP_ENAB(dhd->bus->sih)) {
+ uint32 resetctrl = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl;
+
+ /* Just halt ARM but do not reset the core */
+ resetctrl &= ~(SI_CORE_SIZE - 1);
+ resetctrl += OFFSETOF(aidmp_t, ioctrl);
+
+ dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
+ val |= SICF_CPUHALT;
+ dhd_sbreg_op(dhd, resetctrl, &val, FALSE);
+ }
}
return BCME_OK;
}
+static int
+dhdpcie_arm_resume_clk_req(dhd_pub_t *dhd)
+{
+ uint addr;
+ uint val = 0;
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+
+ /* Check if bit 0 of resetctrl is cleared */
+ addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl;
+ dhd_sbreg_op(dhd, addr, &val, TRUE);
+ if (!(val & 1)) {
+ if (MULTIBP_ENAB(dhd->bus->sih)) {
+ uint32 resetctrl = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl;
+
+ /* Take ARM out of halt but do not reset core */
+ resetctrl &= ~(SI_CORE_SIZE - 1);
+ resetctrl += OFFSETOF(aidmp_t, ioctrl);
+
+ dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
+ val &= ~SICF_CPUHALT;
+ dhd_sbreg_op(dhd, resetctrl, &val, FALSE);
+ dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
+ }
+ }
+
+ return BCME_OK;
+}
+
static int
dhdpcie_pcie_clear_clk_req(dhd_pub_t *dhd)
{
return BCME_OK;
}
-static int
+int
dhdpcie_sssr_dump(dhd_pub_t *dhd)
{
+ uint32 powerctrl_val;
+
if (!dhd->sssr_inited) {
DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
return BCME_ERROR;
return BCME_ERROR;
}
+ DHD_ERROR(("%s: Before WL down (powerctl: pcie:0x%x chipc:0x%x) "
+ "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__,
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ OFFSETOF(chipcregs_t, powerctl), 0, 0),
+ si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0),
+ PMU_REG(dhd->bus->sih, retention_ctl, 0, 0),
+ PMU_REG(dhd->bus->sih, res_state, 0, 0)));
+
dhdpcie_d11_check_outofreset(dhd);
DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__));
}
dhdpcie_clear_intmask_and_timer(dhd);
- dhdpcie_suspend_chipcommon_powerctrl(dhd);
+ powerctrl_val = dhdpcie_suspend_chipcommon_powerctrl(dhd);
dhdpcie_clear_clk_req(dhd);
dhdpcie_pcie_send_ltrsleep(dhd);
+ if (MULTIBP_ENAB(dhd->bus->sih)) {
+ dhd_bus_pcie_pwr_req_wl_domain(dhd->bus, OFFSETOF(chipcregs_t, powerctl), FALSE);
+ }
+
/* Wait for some time before Restore */
OSL_DELAY(6000);
- dhdpcie_resume_chipcommon_powerctrl(dhd);
+ DHD_ERROR(("%s: After WL down (powerctl: pcie:0x%x chipc:0x%x) "
+ "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__,
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ OFFSETOF(chipcregs_t, powerctl), 0, 0),
+ si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0),
+ PMU_REG(dhd->bus->sih, retention_ctl, 0, 0),
+ PMU_REG(dhd->bus->sih, res_state, 0, 0)));
+
+ if (MULTIBP_ENAB(dhd->bus->sih)) {
+ dhd_bus_pcie_pwr_req_wl_domain(dhd->bus, OFFSETOF(chipcregs_t, powerctl), TRUE);
+ /* Add delay for WL domain to power up */
+ OSL_DELAY(15000);
+
+ DHD_ERROR(("%s: After WL up again (powerctl: pcie:0x%x chipc:0x%x) "
+ "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__,
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ OFFSETOF(chipcregs_t, powerctl), 0, 0),
+ si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0),
+ PMU_REG(dhd->bus->sih, retention_ctl, 0, 0),
+ PMU_REG(dhd->bus->sih, res_state, 0, 0)));
+ }
+
+ dhdpcie_arm_resume_clk_req(dhd);
+ dhdpcie_resume_chipcommon_powerctrl(dhd, powerctrl_val);
dhdpcie_bring_d11_outofreset(dhd);
DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
return BCME_ERROR;
}
-
- dhd_schedule_sssr_dump(dhd, SSSR_DUMP_MODE_SSSR);
+ dhd->sssr_dump_collected = TRUE;
+ dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_SSSR);
return BCME_OK;
}
-int
-dhd_bus_sssr_dump(dhd_pub_t *dhd)
-{
- return dhdpcie_sssr_dump(dhd);
-}
-
static int
dhdpcie_fis_trigger(dhd_pub_t *dhd)
{
return BCME_ERROR;
}
- dhd_schedule_sssr_dump(dhd, SSSR_DUMP_MODE_FIS);
+ dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_FIS);
return BCME_OK;
}
}
#endif /* DHD_WAKE_STATUS */
-#define OTP_ADDRESS (SI_ENUM_BASE_DEFAULT + CC_SROM_OTP)
-#define OTP_USER_AREA_OFFSET 0x80
-#define OTP_USER_AREA_ADDR (OTP_ADDRESS + OTP_USER_AREA_OFFSET)
-#define OTP_VERSION_TUPLE_ID 0x15
-#define OTP_VENDOR_TUPLE_ID 0x80
-#define OTP_CIS_REGION_END_TUPLE_ID 0XFF
-#define PMU_RES_STATE_REG_ADDR (SI_ENUM_BASE_DEFAULT + PMU_RES_STATE)
-#define PMU_MINRESMASK_REG_ADDR (SI_ENUM_BASE_DEFAULT + MINRESMASKREG)
-#define OTP_CTRL1_REG_ADDR (SI_ENUM_BASE_DEFAULT + 0xF4)
-#define SPROM_CTRL_REG_ADDR (SI_ENUM_BASE_DEFAULT + CC_SROM_CTRL)
-#define CHIP_COMMON_STATUS_REG_ADDR (SI_ENUM_BASE_DEFAULT + 0x2C)
-#define PMU_OTP_PWR_ON_MASK 0xC47
-
-int
-dhdpcie_get_nvpath_otp(dhd_bus_t *bus, char* program, char *nv_path)
-{
- uint32 val = 0;
- uint16 chip_id = 0;
- uint8 otp_data[2];
- char stepping[3];
- char module_name[5];
- char module_vendor = 0;
- char module_rev[4];
- uint8 tuple_id = 0;
- uint8 tuple_len = 0;
- uint32 cur_offset = 0;
- uint32 version_tuple_offset = 0;
- char module_info[64];
- char progname[32];
- bool srom_present = 0, otp_present = 0;
- uint32 sprom_ctrl = 0;
- uint32 otp_ctrl = 0, minres_mask = 0;
- int i = 0, j = 0, status = BCME_ERROR;
-
- if (!nv_path || !bus) {
- return BCME_ERROR;
- }
-
- /* read chip id first */
- if (serialized_backplane_access(bus, SI_ENUM_BASE_DEFAULT, 4, &val, TRUE) != BCME_OK) {
- DHD_ERROR(("%s: bkplane access error ! \n", __FUNCTION__));
- }
- else {
- chip_id = val & 0xffff;
- }
-
- /* read SpromCtrl register */
- serialized_backplane_access(bus, SPROM_CTRL_REG_ADDR, 4, &sprom_ctrl, TRUE);
- val = sprom_ctrl;
-
- /* proceed only if OTP is present - i.e, the 5th bit OtpPresent is set
- * and chip is 4355 or 4364
- */
- if ((val & 0x20) && (chip_id == 0x4355 || chip_id == 0x4364)) {
- otp_present = 1;
-
- /* Check if the 4th bit (sprom_present) in CC Status REG is set */
- serialized_backplane_access(bus, CHIP_COMMON_STATUS_REG_ADDR, 4, &val, TRUE);
- if (val & 0x10) {
- srom_present = 1;
- }
-
- /* OTP power up sequence */
- /* 1. cache otp ctrl and enable OTP clock through OTPCtrl1 register */
- serialized_backplane_access(bus, OTP_CTRL1_REG_ADDR, 4, &otp_ctrl, TRUE);
- val = 0x1A0000;
- serialized_backplane_access(bus, OTP_CTRL1_REG_ADDR, 4, &val, FALSE);
-
- /* 2. enable OTP power through min res mask register in PMU */
- serialized_backplane_access(bus, PMU_MINRESMASK_REG_ADDR, 4, &minres_mask, TRUE);
- val = minres_mask | PMU_OTP_PWR_ON_MASK;
- serialized_backplane_access(bus, PMU_MINRESMASK_REG_ADDR, 4, &val, FALSE);
-
- /* 3. if srom is present, need to set OtpSelect 4th bit
- * in SpromCtrl register to read otp
- */
- if (srom_present) {
-
- val = sprom_ctrl | 0x10;
- serialized_backplane_access(bus, SPROM_CTRL_REG_ADDR, 4, &val, FALSE);
-
- }
- /* Wait for PMU to power up. */
- OSL_DELAY(500);
- serialized_backplane_access(bus, PMU_RES_STATE_REG_ADDR, 4, &val, TRUE);
- DHD_INFO(("%s: PMU_RES_STATE_REG_ADDR %x \n", __FUNCTION__, val));
-
- serialized_backplane_access(bus, SI_ENUM_BASE_DEFAULT, 4, &val, TRUE);
- DHD_INFO(("%s: _SI_ENUM_BASE %x \n", __FUNCTION__, val));
-
- serialized_backplane_access(bus, OTP_ADDRESS, 2, &val, TRUE);
- DHD_INFO(("%s: OTP_ADDRESS %x \n", __FUNCTION__, val));
-
- cur_offset = OTP_USER_AREA_ADDR + 0x40;
- /* read required data from otp to construct FW string name
- * data like - chip info, module info. This is present in the
- * form of a Vendor CIS Tuple whose format is provided by Olympic.
- * The data is in the form of ASCII character strings.
- * The Vendor tuple along with other CIS tuples are present
- * in the OTP user area. A CIS tuple is a TLV format.
- * (T = 1-byte, L = 1-byte, V = n-bytes)
- */
-
- /* Find the version tuple */
- while (tuple_id != OTP_CIS_REGION_END_TUPLE_ID) {
- serialized_backplane_access(bus, cur_offset,
- 2, (uint *)otp_data, TRUE);
-
- tuple_id = otp_data[0];
- tuple_len = otp_data[1];
- if (tuple_id == OTP_VERSION_TUPLE_ID) {
- version_tuple_offset = cur_offset;
- break;
- }
- /* if its NULL tuple, skip */
- if (tuple_id == 0)
- cur_offset += 1;
- else
- cur_offset += tuple_len + 2;
- }
-
- /* skip the major, minor ver. numbers, manufacturer and product names */
- cur_offset = version_tuple_offset + 6;
-
- /* read the chip info */
- serialized_backplane_access(bus, cur_offset,
- 2, (uint *)otp_data, TRUE);
- if (otp_data[0] == 's' && otp_data[1] == '=') {
- /* read the stepping */
- cur_offset += 2;
- stepping[2] = 0;
- serialized_backplane_access(bus, cur_offset,
- 2, (uint *)stepping, TRUE);
- /* read module info */
- memset(module_info, 0, 64);
- cur_offset += 2;
- serialized_backplane_access(bus, cur_offset,
- 2, (uint *)otp_data, TRUE);
- while (otp_data[0] != OTP_CIS_REGION_END_TUPLE_ID &&
- otp_data[1] != OTP_CIS_REGION_END_TUPLE_ID) {
- memcpy(&module_info[i], otp_data, 2);
- i += 2;
- cur_offset += 2;
- serialized_backplane_access(bus, cur_offset,
- 2, (uint *)otp_data, TRUE);
- }
- /* replace any null characters found at the beginning
- * and middle of the string
- */
- for (j = 0; j < i; ++j) {
- if (module_info[j] == 0)
- module_info[j] = ' ';
- }
- DHD_ERROR(("OTP chip_info: s=%c%c; module info: %s \n",
- stepping[0], stepping[1], module_info));
- /* extract the module name, revision and vendor
- * information from the module info string
- */
- for (i = 0; module_info[i]; i++) {
- if (module_info[i] == 'M' && module_info[i + 1] == '=') {
- memcpy(module_name, &module_info[i + 2], 4);
- module_name[4] = 0;
- i += 5;
- }
- else if (module_info[i] == 'm' && module_info[i + 1] == '=') {
- memcpy(module_rev, &module_info[i + 2], 3);
- module_rev[3] = 0;
- i += 4;
- }
- else if (module_info[i] == 'V' && module_info[i + 1] == '=') {
- module_vendor = module_info[i + 2];
- i += 2;
- }
- }
-
- /* construct the complete file path to nvram as per
- * olympic conventions
- */
- strncpy(progname, program, sizeof(progname));
- sprintf(nv_path, "P-%s_M-%s_V-%c__m-%s.txt", progname, module_name,
- module_vendor, module_rev);
- DHD_ERROR(("%s NVRAM path = %s\n", __FUNCTION__, nv_path));
- status = BCME_OK;
- }
-
- /* restore back the registers to their previous values */
- if (srom_present) {
- serialized_backplane_access(bus, SPROM_CTRL_REG_ADDR, 4, &sprom_ctrl,
- FALSE);
- }
-
- if (otp_present) {
- serialized_backplane_access(bus, PMU_MINRESMASK_REG_ADDR, 4,
- &minres_mask, FALSE);
- serialized_backplane_access(bus, OTP_CTRL1_REG_ADDR, 4, &otp_ctrl, FALSE);
- }
-
- }
- return status;
-}
-
/* Writes random number(s) to the TCM. FW upon initialization reads this register
* to fetch the random number, and uses it to randomize heap address space layout.
*/
DHD_ERROR(("oob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT"\n",
dhdpcie_get_oob_irq_num(bus),
GET_SEC_USEC(bus->last_oob_irq_time)));
+ DHD_ERROR(("last_oob_irq_enable_time="SEC_USEC_FMT
+ " last_oob_irq_disable_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(bus->last_oob_irq_enable_time),
+ GET_SEC_USEC(bus->last_oob_irq_disable_time)));
+ DHD_ERROR(("oob_irq_enabled=%d oob_gpio_level=%d\n",
+ dhdpcie_get_oob_irq_status(bus),
+ dhdpcie_get_oob_irq_level()));
#endif /* BCMPCIE_OOB_HOST_WAKE */
- DHD_ERROR(("dpc_return_busdown_count=%lu\n",
- dhd->bus->dpc_return_busdown_count));
+ DHD_ERROR(("dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
+ bus->dpc_return_busdown_count, bus->non_ours_irq_count));
- current_time = OSL_SYSUPTIME_US();
+ current_time = OSL_LOCALTIME_NS();
DHD_ERROR(("\ncurrent_time="SEC_USEC_FMT"\n",
GET_SEC_USEC(current_time)));
DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
" isr_exit_time="SEC_USEC_FMT"\n",
GET_SEC_USEC(bus->isr_entry_time),
GET_SEC_USEC(bus->isr_exit_time)));
+ DHD_ERROR(("dpc_sched_time="SEC_USEC_FMT
+ " last_non_ours_irq_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(bus->dpc_sched_time),
+ GET_SEC_USEC(bus->last_non_ours_irq_time)));
DHD_ERROR(("dpc_entry_time="SEC_USEC_FMT
" last_process_ctrlbuf_time="SEC_USEC_FMT"\n",
GET_SEC_USEC(bus->dpc_entry_time),
GET_SEC_USEC(bus->last_process_flowring_time),
GET_SEC_USEC(bus->last_process_txcpl_time)));
DHD_ERROR(("last_process_rxcpl_time="SEC_USEC_FMT
- " last_process_infocpl_time="SEC_USEC_FMT"\n",
+ " last_process_infocpl_time="SEC_USEC_FMT
+ " last_process_edl_time="SEC_USEC_FMT"\n",
GET_SEC_USEC(bus->last_process_rxcpl_time),
- GET_SEC_USEC(bus->last_process_infocpl_time)));
+ GET_SEC_USEC(bus->last_process_infocpl_time),
+ GET_SEC_USEC(bus->last_process_edl_time)));
DHD_ERROR(("dpc_exit_time="SEC_USEC_FMT
" resched_dpc_time="SEC_USEC_FMT"\n",
GET_SEC_USEC(bus->dpc_exit_time),
GET_SEC_USEC(bus->resched_dpc_time)));
+ DHD_ERROR(("last_d3_inform_time="SEC_USEC_FMT"\n",
+ GET_SEC_USEC(bus->last_d3_inform_time)));
DHD_ERROR(("\nlast_suspend_start_time="SEC_USEC_FMT
" last_suspend_end_time="SEC_USEC_FMT"\n",
#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
DHD_ERROR(("logtrace_thread_entry_time="SEC_USEC_FMT
- "logtrace_thread_sem_down_time="SEC_USEC_FMT
- "logtrace_thread_flush_time="SEC_USEC_FMT
- "logtrace_thread_unexpected_break_time="SEC_USEC_FMT
- " logtrace_thread_complete_time="SEC_USEC_FMT"\n",
+ " logtrace_thread_sem_down_time="SEC_USEC_FMT
+ "\nlogtrace_thread_flush_time="SEC_USEC_FMT
+ " logtrace_thread_unexpected_break_time="SEC_USEC_FMT
+ "\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n",
GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
dhd_pcie_intr_count_dump(dhd);
}
+int
+dhd_pcie_dump_wrapper_regs(dhd_pub_t *dhd)
+{
+ uint32 save_idx, val;
+ si_t *sih = dhd->bus->sih;
+ uint32 oob_base, oob_base1;
+ uint32 wrapper_dump_list[] = {
+ AI_OOBSELOUTA30, AI_OOBSELOUTA74, AI_OOBSELOUTB30, AI_OOBSELOUTB74,
+ AI_OOBSELOUTC30, AI_OOBSELOUTC74, AI_OOBSELOUTD30, AI_OOBSELOUTD74,
+ AI_RESETSTATUS, AI_RESETCTRL,
+ AI_ITIPOOBA, AI_ITIPOOBB, AI_ITIPOOBC, AI_ITIPOOBD,
+ AI_ITIPOOBAOUT, AI_ITIPOOBBOUT, AI_ITIPOOBCOUT, AI_ITIPOOBDOUT
+ };
+ uint32 i;
+ hndoobr_reg_t *reg;
+ cr4regs_t *cr4regs;
+ ca7regs_t *ca7regs;
+
+ save_idx = si_coreidx(sih);
+
+ DHD_ERROR(("%s: Master wrapper Reg\n", __FUNCTION__));
+
+ if (si_setcore(sih, PCIE2_CORE_ID, 0) != NULL) {
+ for (i = 0; i < sizeof(wrapper_dump_list) / 4; i++) {
+ val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
+ DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
+ }
+ }
+
+ if ((cr4regs = si_setcore(sih, ARMCR4_CORE_ID, 0)) != NULL) {
+ DHD_ERROR(("%s: ARM CR4 wrapper Reg\n", __FUNCTION__));
+ for (i = 0; i < sizeof(wrapper_dump_list) / 4; i++) {
+ val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
+ DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
+ }
+ DHD_ERROR(("%s: ARM CR4 core Reg\n", __FUNCTION__));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecontrol));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corecontrol), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecapabilities));
+ DHD_ERROR(("reg:0x%x val:0x%x\n",
+ (uint)OFFSETOF(cr4regs_t, corecapabilities), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corestatus));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corestatus), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmiisrst));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmiisrst), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmimask));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmimask), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, isrmask));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, isrmask), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, swintreg));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, swintreg), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, intstatus));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, intstatus), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, cyclecnt));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, cyclecnt), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, inttimer));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, inttimer), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, clk_ctl_st));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, clk_ctl_st), val));
+ val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, powerctl));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, powerctl), val));
+ }
+
+ if ((ca7regs = si_setcore(sih, ARMCA7_CORE_ID, 0)) != NULL) {
+ DHD_ERROR(("%s: ARM CA7 core Reg\n", __FUNCTION__));
+ val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecontrol));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corecontrol), val));
+ val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecapabilities));
+ DHD_ERROR(("reg:0x%x val:0x%x\n",
+ (uint)OFFSETOF(ca7regs_t, corecapabilities), val));
+ val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corestatus));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corestatus), val));
+ val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, tracecontrol));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, tracecontrol), val));
+ val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, clk_ctl_st));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, clk_ctl_st), val));
+ val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, powerctl));
+ DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, powerctl), val));
+ }
+
+ DHD_ERROR(("%s: OOBR Reg\n", __FUNCTION__));
+
+ oob_base = si_oobr_baseaddr(sih, FALSE);
+ oob_base1 = si_oobr_baseaddr(sih, TRUE);
+ if (oob_base) {
+ dhd_sbreg_op(dhd, oob_base + OOB_STATUSA, &val, TRUE);
+ dhd_sbreg_op(dhd, oob_base + OOB_STATUSB, &val, TRUE);
+ dhd_sbreg_op(dhd, oob_base + OOB_STATUSC, &val, TRUE);
+ dhd_sbreg_op(dhd, oob_base + OOB_STATUSD, &val, TRUE);
+ } else if ((reg = si_setcore(sih, HND_OOBR_CORE_ID, 0)) != NULL) {
+ val = R_REG(dhd->osh, ®->intstatus[0]);
+ DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
+ val = R_REG(dhd->osh, ®->intstatus[1]);
+ DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
+ val = R_REG(dhd->osh, ®->intstatus[2]);
+ DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
+ val = R_REG(dhd->osh, ®->intstatus[3]);
+ DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
+ }
+
+ if (oob_base1) {
+ DHD_ERROR(("%s: Second OOBR Reg\n", __FUNCTION__));
+
+ dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSA, &val, TRUE);
+ dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSB, &val, TRUE);
+ dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSC, &val, TRUE);
+ dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSD, &val, TRUE);
+ }
+
+ si_setcoreidx(dhd->bus->sih, save_idx);
+
+ return 0;
+}
+
+static void
+dhdpcie_hw_war_regdump(dhd_bus_t *bus)
+{
+ uint32 save_idx, val;
+ volatile uint32 *reg;
+
+ save_idx = si_coreidx(bus->sih);
+ if ((reg = si_setcore(bus->sih, CC_CORE_ID, 0)) != NULL) {
+ val = R_REG(bus->osh, reg + REG_WORK_AROUND);
+ DHD_ERROR(("CC HW_WAR :0x%x\n", val));
+ }
+
+ if ((reg = si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) != NULL) {
+ val = R_REG(bus->osh, reg + REG_WORK_AROUND);
+ DHD_ERROR(("ARM HW_WAR:0x%x\n", val));
+ }
+
+ if ((reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0)) != NULL) {
+ val = R_REG(bus->osh, reg + REG_WORK_AROUND);
+ DHD_ERROR(("PCIE HW_WAR :0x%x\n", val));
+ }
+ si_setcoreidx(bus->sih, save_idx);
+
+ val = PMU_REG_NEW(bus->sih, min_res_mask, 0, 0);
+ DHD_ERROR(("MINRESMASK :0x%x\n", val));
+}
+
int
dhd_pcie_dma_info_dump(dhd_pub_t *dhd)
{
return 0;
}
-int
-dhd_pcie_debug_info_dump(dhd_pub_t *dhd)
+bool
+dhd_pcie_dump_int_regs(dhd_pub_t *dhd)
{
uint32 intstatus = 0;
uint32 intmask = 0;
uint32 d2h_db0 = 0;
uint32 d2h_mb_data = 0;
- int host_irq_disabled;
- DHD_ERROR(("bus->bus_low_power_state = %d\n", dhd->bus->bus_low_power_state));
- host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
- DHD_ERROR(("host pcie_irq disabled = %d\n", host_irq_disabled));
- dhd_print_tasklet_status(dhd);
- dhd_pcie_intr_count_dump(dhd);
+ DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
+ intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ dhd->bus->pcie_mailbox_int, 0, 0);
+ if (intstatus == (uint32)-1) {
+ DHD_ERROR(("intstatus=0x%x \n", intstatus));
+ return FALSE;
+ }
- DHD_ERROR(("\n ------- DUMPING PCIE EP Resouce Info ------- \r\n"));
- dhdpcie_dump_resource(dhd->bus);
+ intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ dhd->bus->pcie_mailbox_mask, 0, 0);
+ if (intmask == (uint32) -1) {
+ DHD_ERROR(("intstatus=0x%x intmask=0x%x \n", intstatus, intmask));
+ return FALSE;
+ }
+
+ d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCID2H_MailBox, 0, 0);
+ if (d2h_db0 == (uint32)-1) {
+ DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
+ intstatus, intmask, d2h_db0));
+ return FALSE;
+ }
+ DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
+ intstatus, intmask, d2h_db0));
+ dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
+ DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data,
+ dhd->bus->def_intmask));
+
+ return TRUE;
+}
+
+void
+dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd)
+{
DHD_ERROR(("\n ------- DUMPING PCIE RC config space Registers ------- \r\n"));
- DHD_ERROR(("Pcie RC Error Status Val=0x%x\n",
+ DHD_ERROR(("Pcie RC Uncorrectable Error Status Val=0x%x\n",
dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
#ifdef EXTENDED_PCIE_DEBUG_DUMP
- DHD_ERROR(("hdrlog0 =0x%x hdrlog1 =0x%x hdrlog2 =0x%x hdrlog3 =0x%x\n",
+ DHD_ERROR(("hdrlog0 =0x%08x hdrlog1 =0x%08x hdrlog2 =0x%08x hdrlog3 =0x%08x\n",
dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
PCIE_EXTCAP_ERR_HEADER_LOG_0, TRUE, FALSE, 0),
dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
PCIE_EXTCAP_ERR_HEADER_LOG_3, TRUE, FALSE, 0)));
#endif /* EXTENDED_PCIE_DEBUG_DUMP */
+}
+
+int
+dhd_pcie_debug_info_dump(dhd_pub_t *dhd)
+{
+ int host_irq_disabled;
+
+ DHD_ERROR(("bus->bus_low_power_state = %d\n", dhd->bus->bus_low_power_state));
+ host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
+ DHD_ERROR(("host pcie_irq disabled = %d\n", host_irq_disabled));
+ dhd_print_tasklet_status(dhd);
+ dhd_pcie_intr_count_dump(dhd);
+
+ DHD_ERROR(("\n ------- DUMPING PCIE EP Resouce Info ------- \r\n"));
+ dhdpcie_dump_resource(dhd->bus);
+
+ dhd_pcie_dump_rc_conf_space_cap(dhd);
DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n",
dhd_debug_get_rc_linkcap(dhd->bus)));
-
DHD_ERROR(("\n ------- DUMPING PCIE EP config space Registers ------- \r\n"));
- DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x BaseAddress1(0x%x)=0x%x\n",
+ DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x BaseAddress1(0x%x)=0x%x "
+ "PCIE_CFG_PMCSR(0x%x)=0x%x\n",
PCIECFGREG_STATUS_CMD,
dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_STATUS_CMD, sizeof(uint32)),
PCIECFGREG_BASEADDR0,
dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)),
PCIECFGREG_BASEADDR1,
- dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32))));
+ dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32)),
+ PCIE_CFG_PMCSR,
+ dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_PMCSR, sizeof(uint32))));
DHD_ERROR(("LinkCtl(0x%x)=0x%x DeviceStatusControl2(0x%x)=0x%x "
"L1SSControl(0x%x)=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL,
dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_LINK_STATUS_CTRL,
dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL1,
sizeof(uint32))));
#ifdef EXTENDED_PCIE_DEBUG_DUMP
- DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x "
- "L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL,
- dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_DEV_STATUS_CTRL,
- sizeof(uint32)), PCIE_CFG_SUBSYSTEM_CONTROL,
- dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL,
- sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL2,
- dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL2,
- sizeof(uint32))));
+ DHD_ERROR(("Pcie EP Uncorrectable Error Status Val=0x%x\n",
+ dhdpcie_ep_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+ PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
+ DHD_ERROR(("hdrlog0(0x%x)=0x%08x hdrlog1(0x%x)=0x%08x hdrlog2(0x%x)=0x%08x "
+ "hdrlog3(0x%x)=0x%08x\n", PCI_TLP_HDR_LOG1,
+ dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG1, sizeof(uint32)),
+ PCI_TLP_HDR_LOG2,
+ dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG2, sizeof(uint32)),
+ PCI_TLP_HDR_LOG3,
+ dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG3, sizeof(uint32)),
+ PCI_TLP_HDR_LOG4,
+ dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG4, sizeof(uint32))));
+ if (dhd->bus->sih->buscorerev >= 24) {
+ DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x "
+ "L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL,
+ dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_DEV_STATUS_CTRL,
+ sizeof(uint32)), PCIE_CFG_SUBSYSTEM_CONTROL,
+ dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL,
+ sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL2,
+ dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL2,
+ sizeof(uint32))));
+ dhd_bus_dump_dar_registers(dhd->bus);
+ }
#endif /* EXTENDED_PCIE_DEBUG_DUMP */
- intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- dhd->bus->pcie_mailbox_int, 0, 0);
-
- intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- dhd->bus->pcie_mailbox_mask, 0, 0);
- d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- PCID2H_MailBox, 0, 0);
- DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
- DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
- intstatus, intmask, d2h_db0));
- dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
- DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data,
- dhd->bus->def_intmask));
-#ifdef EXTENDED_PCIE_DEBUG_DUMP
- DHD_ERROR(("\n ------- DUMPING PCIE DAR Registers ------- \r\n"));
- DHD_ERROR(("clkctl(0x%x)=0x%x pwrctl(0x%x)=0x%x H2D_DB0(0x%x)=0x%x\n",
- PCIDARClkCtl(dhd->bus->sih->buscorerev),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- PCIDARClkCtl(dhd->bus->sih->buscorerev), 0, 0),
- PCIDARPwrCtl(dhd->bus->sih->buscorerev),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- PCIDARPwrCtl(dhd->bus->sih->buscorerev), 0, 0),
- PCIDARH2D_DB0(dhd->bus->sih->buscorerev),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- PCIDARH2D_DB0(dhd->bus->sih->buscorerev), 0, 0)));
-#endif /* EXTENDED_PCIE_DEBUG_DUMP */
- if (intstatus == (uint32)-1) {
- DHD_ERROR(("Skip dumping the PCIe Core registers due to invalid intstatus\n"));
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("Skip dumping the PCIe Core registers. link may be DOWN\n"));
return 0;
}
DHD_ERROR(("\n ------- DUMPING PCIE core Registers ------- \r\n"));
-#ifdef EXTENDED_PCIE_DEBUG_DUMP
- DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x\n",
- PCIDARErrlog(dhd->bus->sih->buscorerev),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- PCIDARErrlog(dhd->bus->sih->buscorerev), 0, 0),
- PCIDARErrlog_Addr(dhd->bus->sih->buscorerev),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- PCIDARErrlog_Addr(dhd->bus->sih->buscorerev), 0, 0)));
- DHD_ERROR(("FunctionINtstatus(0x%x)=0x%x, Mailboxint(0x%x)=0x%x\n",
- PCIDARFunctionIntstatus(dhd->bus->sih->buscorerev),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- PCIDARFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0),
- PCIDARMailboxint(dhd->bus->sih->buscorerev),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- PCIDARMailboxint(dhd->bus->sih->buscorerev), 0, 0)));
-#endif /* EXTENDED_PCIE_DEBUG_DUMP */
-
DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x "
"ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0,
dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0),
dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3)));
#ifdef EXTENDED_PCIE_DEBUG_DUMP
- DHD_ERROR(("ltssm_hist_0(0x%x)=0x%x ltssm_hist_1(0x%x)=0x%x ltssm_hist_2(0x%x)=0x%x "
- "ltssm_hist_3(0x%x)=0x%x\n", PCIECFGREG_PHY_LTSSM_HIST_0,
- dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_0),
- PCIECFGREG_PHY_LTSSM_HIST_1,
- dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_1),
- PCIECFGREG_PHY_LTSSM_HIST_2,
- dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_2),
- PCIECFGREG_PHY_LTSSM_HIST_3,
- dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_3)));
- DHD_ERROR(("clkctl(0x%x)=0x%x pwrctl(0x%x)=0x%x H2D_DB0(0x%x)=0x%x\n",
- PCIE_CLK_CTRL,
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIE_CLK_CTRL, 0, 0),
- PCIE_PWR_CTRL,
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIE_PWR_CTRL, 0, 0),
- PCIH2D_MailBox,
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIH2D_MailBox, 0, 0)));
- DHD_ERROR(("trefup(0x%x)=0x%x trefup_ext(0x%x)=0x%x\n",
- PCIECFGREG_TREFUP,
- dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP),
- PCIECFGREG_TREFUP_EXT,
- dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP_EXT)));
- DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x Function_Intstatus(0x%x)=0x%x\n",
- PCIE_CORE_REG_ERRLOG,
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIE_CORE_REG_ERRLOG, 0, 0),
- PCIE_CORE_REG_ERR_ADDR,
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- PCIE_CORE_REG_ERR_ADDR, 0, 0),
- PCIFunctionIntstatus(dhd->bus->sih->buscorerev),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- PCIFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0)));
- DHD_ERROR(("err_hdrlog1(0x%x)=0x%x err_hdrlog2(0x%x)=0x%x err_hdrlog3(0x%x)=0x%x "
- "err_hdrlog4(0x%x)=0x%x\n",
- (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), 0, 0),
- (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), 0, 0),
- (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), 0, 0),
- (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), 0, 0)));
- DHD_ERROR(("err_code(0x%x)=0x%x\n",
- (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), 0, 0)));
+ if (dhd->bus->sih->buscorerev >= 24) {
+
+ DHD_ERROR(("ltssm_hist_0(0x%x)=0x%x ltssm_hist_1(0x%x)=0x%x "
+ "ltssm_hist_2(0x%x)=0x%x "
+ "ltssm_hist_3(0x%x)=0x%x\n", PCIECFGREG_PHY_LTSSM_HIST_0,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_0),
+ PCIECFGREG_PHY_LTSSM_HIST_1,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_1),
+ PCIECFGREG_PHY_LTSSM_HIST_2,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_2),
+ PCIECFGREG_PHY_LTSSM_HIST_3,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_3)));
+
+ DHD_ERROR(("trefup(0x%x)=0x%x trefup_ext(0x%x)=0x%x\n",
+ PCIECFGREG_TREFUP,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP),
+ PCIECFGREG_TREFUP_EXT,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP_EXT)));
+ DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x "
+ "Function_Intstatus(0x%x)=0x%x "
+ "Function_Intmask(0x%x)=0x%x Power_Intstatus(0x%x)=0x%x "
+ "Power_Intmask(0x%x)=0x%x\n",
+ PCIE_CORE_REG_ERRLOG,
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCIE_CORE_REG_ERRLOG, 0, 0),
+ PCIE_CORE_REG_ERR_ADDR,
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCIE_CORE_REG_ERR_ADDR, 0, 0),
+ PCIFunctionIntstatus(dhd->bus->sih->buscorerev),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCIFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0),
+ PCIFunctionIntmask(dhd->bus->sih->buscorerev),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCIFunctionIntmask(dhd->bus->sih->buscorerev), 0, 0),
+ PCIPowerIntstatus(dhd->bus->sih->buscorerev),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCIPowerIntstatus(dhd->bus->sih->buscorerev), 0, 0),
+ PCIPowerIntmask(dhd->bus->sih->buscorerev),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCIPowerIntmask(dhd->bus->sih->buscorerev), 0, 0)));
+ DHD_ERROR(("err_hdrlog1(0x%x)=0x%x err_hdrlog2(0x%x)=0x%x "
+ "err_hdrlog3(0x%x)=0x%x err_hdrlog4(0x%x)=0x%x\n",
+ (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), 0, 0),
+ (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), 0, 0),
+ (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), 0, 0),
+ (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), 0, 0)));
+ DHD_ERROR(("err_code(0x%x)=0x%x\n",
+ (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), 0, 0)));
+
+ dhd_pcie_dump_wrapper_regs(dhd);
+ dhdpcie_hw_war_regdump(dhd->bus);
+ }
#endif /* EXTENDED_PCIE_DEBUG_DUMP */
dhd_pcie_dma_info_dump(dhd);
{
return bus->force_bt_quiesce;
}
+
+#ifdef DHD_HP2P
+uint16
+dhd_bus_get_hp2p_ring_max_size(struct dhd_bus *bus, bool tx)
+{
+ if (tx)
+ return bus->hp2p_txcpl_max_items;
+ else
+ return bus->hp2p_rxcpl_max_items;
+}
+
+static uint16
+dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val)
+{
+ if (tx)
+ bus->hp2p_txcpl_max_items = val;
+ else
+ bus->hp2p_rxcpl_max_items = val;
+ return val;
+}
+#endif /* DHD_HP2P */
+
+static bool
+dhd_bus_tcm_test(struct dhd_bus *bus)
+{
+ int ret = 0;
+ int size; /* Full mem size */
+ int start; /* Start address */
+ int read_size = 0; /* Read size of each iteration */
+ int num = 0;
+ uint8 *read_buf, *write_buf;
+ uint8 init_val[NUM_PATTERNS] = {
+ 0xFFu, /* 11111111 */
+ 0x00u, /* 00000000 */
+ };
+
+ if (!bus) {
+ DHD_ERROR(("%s: bus is NULL !\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ read_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
+
+ if (!read_buf) {
+ DHD_ERROR(("%s: MALLOC of read_buf failed\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ write_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
+
+ if (!write_buf) {
+ MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+ DHD_ERROR(("%s: MALLOC of write_buf failed\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ DHD_ERROR(("%s: start %x, size: %x\n", __FUNCTION__, bus->dongle_ram_base, bus->ramsize));
+ DHD_ERROR(("%s: memblock size %d, #pattern %d\n", __FUNCTION__, MEMBLOCK, NUM_PATTERNS));
+
+ while (num < NUM_PATTERNS) {
+ start = bus->dongle_ram_base;
+ /* Get full mem size */
+ size = bus->ramsize;
+
+ memset(write_buf, init_val[num], MEMBLOCK);
+ while (size > 0) {
+ read_size = MIN(MEMBLOCK, size);
+ memset(read_buf, 0, read_size);
+
+ /* Write */
+ if ((ret = dhdpcie_bus_membytes(bus, TRUE, start, write_buf, read_size))) {
+ DHD_ERROR(("%s: Write Error membytes %d\n", __FUNCTION__, ret));
+ MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+ MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
+ return FALSE;
+ }
+
+ /* Read */
+ if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, read_buf, read_size))) {
+ DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__, ret));
+ MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+ MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
+ return FALSE;
+ }
+
+ /* Compare */
+ if (memcmp(read_buf, write_buf, read_size)) {
+ DHD_ERROR(("%s: Mismatch at %x, iter : %d\n",
+ __FUNCTION__, start, num));
+ prhex("Readbuf", read_buf, read_size);
+ prhex("Writebuf", write_buf, read_size);
+ MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+ MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
+ return FALSE;
+ }
+
+ /* Decrement size and increment start address */
+ size -= read_size;
+ start += read_size;
+ }
+ num++;
+ }
+
+ MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
+ MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
+
+ DHD_ERROR(("%s: Success iter : %d\n", __FUNCTION__, num));
+ return TRUE;
+}
/*
* Linux DHD Bus Module for PCIE
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_pcie.h 771838 2018-07-12 03:46:37Z $
+ * $Id: dhd_pcie.h 816392 2019-04-24 14:39:02Z $
*/
#ifndef dhd_pcie_h
#define IDLE_FLOW_RING_TIMEOUT 5000
#endif /* IDLE_TX_FLOW_MGMT */
+/* HWA enabled and inited */
+#define HWA_ACTIVE(dhd) (((dhd)->hwa_enable) && ((dhd)->hwa_inited))
+
/* implicit DMA for h2d wr and d2h rd indice from Host memory to TCM */
#define IDMA_ENAB(dhd) ((dhd)->idma_enable)
#define IDMA_ACTIVE(dhd) (((dhd)->idma_enable) && ((dhd)->idma_inited))
#define OOB_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_OOB)
#define NO_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_NONE)
+#define PCIE_RELOAD_WAR_ENAB(buscorerev) \
+ ((buscorerev == 66) || (buscorerev == 67) || (buscorerev == 68) || (buscorerev == 70))
+
+/*
+ * HW JIRA - CRWLPCIEGEN2-672
+ * Producer Index Feature which is used by F1 gets reset on F0 FLR
+ * fixed in REV68
+ */
+#define PCIE_ENUM_RESET_WAR_ENAB(buscorerev) \
+ ((buscorerev == 66) || (buscorerev == 67))
+
struct dhd_bus;
struct dhd_pcie_rev {
sbpcieregs_t *reg; /* Registers for PCIE core */
uint armrev; /* CPU core revision */
+ uint coreid; /* CPU core id */
uint ramrev; /* SOCRAM core revision */
uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */
uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */
bool use_mailbox;
bool use_d0_inform;
void *bus_lock;
+ void *backplane_access_lock;
enum dhd_bus_low_power_state bus_low_power_state;
uint32 hostready_count; /* Number of hostready issued */
#if defined(BCMPCIE_OOB_HOST_WAKE)
ulong isr_intr_disable_count;
ulong suspend_intr_disable_count;
ulong dpc_return_busdown_count;
+ ulong non_ours_irq_count;
#ifdef BCMPCIE_OOB_HOST_WAKE
ulong oob_intr_count;
ulong oob_intr_enable_count;
ulong oob_intr_disable_count;
uint64 last_oob_irq_time;
+ uint64 last_oob_irq_enable_time;
+ uint64 last_oob_irq_disable_time;
#endif /* BCMPCIE_OOB_HOST_WAKE */
uint64 isr_entry_time;
uint64 isr_exit_time;
+ uint64 dpc_sched_time;
uint64 dpc_entry_time;
uint64 dpc_exit_time;
uint64 resched_dpc_time;
+ uint64 last_d3_inform_time;
uint64 last_process_ctrlbuf_time;
uint64 last_process_flowring_time;
uint64 last_process_txcpl_time;
uint64 last_suspend_end_time;
uint64 last_resume_start_time;
uint64 last_resume_end_time;
+ uint64 last_non_ours_irq_time;
+ uint8 hwa_enab_bmap;
bool idma_enabled;
bool ifrm_enabled;
bool dar_enabled;
uint8 dma_chan;
bool cto_enable; /* enable PCIE CTO Prevention and recovery */
uint32 cto_threshold; /* PCIE CTO timeout threshold */
+ bool cto_triggered; /* CTO is triggered */
int pwr_req_ref;
bool flr_force_fail; /* user intends to simulate flr force fail */
bool intr_enabled; /* ready to receive interrupts from dongle */
#endif /* DHD_H2D_LOG_TIME_SYNC */
bool rc_ep_aspm_cap; /* RC and EP ASPM capable */
bool rc_ep_l1ss_cap; /* EC and EP L1SS capable */
+ uint16 hp2p_txcpl_max_items;
+ uint16 hp2p_rxcpl_max_items;
+ /* PCIE coherent status */
+ uint32 coherent_state;
} dhd_bus_t;
#ifdef DHD_MSI_SUPPORT
extern void dhdpcie_free_irq(dhd_bus_t *bus);
extern void dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value);
extern void dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake);
+extern void dhdpcie_dongle_reset(dhd_bus_t *bus);
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint);
#else
extern int dhdpcie_disable_irq(dhd_bus_t *bus);
extern int dhdpcie_disable_irq_nosync(dhd_bus_t *bus);
extern int dhdpcie_enable_irq(dhd_bus_t *bus);
+
+extern void dhd_bus_dump_dar_registers(struct dhd_bus *bus);
+
extern uint32 dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset);
extern uint32 dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext,
bool is_write, uint32 writeval);
extern void dhdpcie_free_resource(dhd_bus_t *bus);
extern void dhdpcie_dump_resource(dhd_bus_t *bus);
extern int dhdpcie_bus_request_irq(struct dhd_bus *bus);
+void dhdpcie_os_setbar1win(dhd_bus_t *bus, uint32 addr);
+void dhdpcie_os_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
+uint8 dhdpcie_os_rtcm8(dhd_bus_t *bus, ulong offset);
+void dhdpcie_os_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
+uint16 dhdpcie_os_rtcm16(dhd_bus_t *bus, ulong offset);
+void dhdpcie_os_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
+uint32 dhdpcie_os_rtcm32(dhd_bus_t *bus, ulong offset);
+#ifdef DHD_SUPPORT_64BIT
+void dhdpcie_os_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data);
+uint64 dhdpcie_os_rtcm64(dhd_bus_t *bus, ulong offset);
+#endif // endif
extern int dhdpcie_enable_device(dhd_bus_t *bus);
extern int dhdpcie_oob_intr_register(dhd_bus_t *bus);
extern void dhdpcie_oob_intr_unregister(dhd_bus_t *bus);
extern void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable);
-extern int dhdpcie_get_oob_irq_num(dhd_bus_t *bus);
+extern int dhdpcie_get_oob_irq_num(struct dhd_bus *bus);
+extern int dhdpcie_get_oob_irq_status(struct dhd_bus *bus);
+extern int dhdpcie_get_oob_irq_level(void);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#if defined(CONFIG_ARCH_EXYNOS)
#define SAMSUNG_PCIE_VENDOR_ID 0x144d
-#if defined(CONFIG_MACH_UNIVERSAL5433)
-#define SAMSUNG_PCIE_DEVICE_ID 0xa5e3
-#define SAMSUNG_PCIE_CH_NUM
-#elif defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS7420)
+#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS7420)
#define SAMSUNG_PCIE_DEVICE_ID 0xa575
#define SAMSUNG_PCIE_CH_NUM 1
#elif defined(CONFIG_SOC_EXYNOS8890)
#define PCIE_RC_DEVICE_ID HIKEY_PCIE_DEVICE_ID
#endif /* CONFIG_ARCH_EXYNOS */
+#define DHD_REGULAR_RING 0
+#define DHD_HP2P_RING 1
+
#ifdef USE_EXYNOS_PCIE_RC_PMPATCH
-#ifdef CONFIG_MACH_UNIVERSAL5433
-extern int exynos_pcie_pm_suspend(void);
-extern int exynos_pcie_pm_resume(void);
-#else
extern int exynos_pcie_pm_suspend(int ch_num);
extern int exynos_pcie_pm_resume(int ch_num);
-#endif /* CONFIG_MACH_UNIVERSAL5433 */
#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */
#ifdef CONFIG_ARCH_TEGRA
static INLINE bool dhdpcie_is_arm_halted(struct dhd_bus *bus) {return TRUE;}
static INLINE int dhd_os_wifi_platform_set_power(uint32 value) {return BCME_OK; }
+static INLINE void
+dhdpcie_dongle_flr_or_pwr_toggle(dhd_bus_t *bus)
+{ return; }
int dhdpcie_config_check(dhd_bus_t *bus);
int dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr);
int dhdpcie_config_save(dhd_bus_t *bus);
int dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state);
+extern bool dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t *bus);
extern bool dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus);
extern bool dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus);
extern bool dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus);
return si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), 0, 0);
}
-extern int dhdpcie_get_nvpath_otp(dhd_bus_t *bus, char *program, char *nv_path);
+extern int dhdpcie_get_fwpath_otp(dhd_bus_t *bus, char *fw_path, char *nv_path,
+ char *clm_path, char *txcap_path);
extern int dhd_pcie_debug_info_dump(dhd_pub_t *dhd);
extern void dhd_pcie_intr_count_dump(dhd_pub_t *dhd);
extern void dhdpcie_bus_clear_intstatus(dhd_bus_t *bus);
+#ifdef DHD_HP2P
+extern uint16 dhd_bus_get_hp2p_ring_max_size(dhd_bus_t *bus, bool tx);
+#endif // endif
+
#endif /* dhd_pcie_h */
/*
* Linux DHD Bus Module for PCIE
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_pcie_linux.c 771838 2018-07-12 03:46:37Z $
+ * $Id: dhd_pcie_linux.c 821650 2019-05-24 10:41:54Z $
*/
/* include files */
#endif /* FORCE_TPOWERON */
/* user defined data structures */
-typedef struct dhd_pc_res {
- uint32 bar0_size;
- void* bar0_addr;
- uint32 bar1_size;
- void* bar1_addr;
-} pci_config_res, *pPci_config_res;
-
typedef bool (*dhdpcie_cb_fn_t)(void *);
typedef struct dhdpcie_info
{
dhd_bus_t *bus;
- osl_t *osh;
+ osl_t *osh;
struct pci_dev *dev; /* pci device handle */
- volatile char *regs; /* pci device memory va */
- volatile char *tcm; /* pci device memory va */
- uint32 tcm_size; /* pci device memory size */
+ volatile char *regs; /* pci device memory va */
+ volatile char *tcm; /* pci device memory va */
+ uint32 bar1_size; /* pci device memory size */
+ uint32 curr_bar1_win; /* current PCIEBar1Window setting */
struct pcos_info *pcos_info;
uint16 last_intrstatus; /* to cache intrstatus */
int irq;
static int dhdpcie_pm_system_resume_noirq(struct device * dev);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+static void dhdpcie_config_save_restore_coherent(dhd_bus_t *bus, bool state);
+
uint32
dhdpcie_access_cap(struct pci_dev *pdev, int cap, uint offset, bool is_ext, bool is_write,
uint32 writeval);
id_table: dhdpcie_pci_devid,
probe: dhdpcie_pci_probe,
remove: dhdpcie_pci_remove,
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
- save_state: NULL,
-#endif // endif
#if defined(DHD_PCIE_NATIVE_RUNTIMEPM)
.driver.pm = &dhd_pcie_pm_ops,
#else
}
}
+void
+dhd_bus_aer_config(dhd_bus_t *bus)
+{
+ uint32 val;
+
+ DHD_ERROR(("%s: Configure AER registers for EP\n", __FUNCTION__));
+ val = dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID,
+ PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0);
+ if (val != (uint32)-1) {
+ val &= ~CORR_ERR_AE;
+ dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID,
+ PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE, val);
+ } else {
+ DHD_ERROR(("%s: Invalid EP's PCIE_ADV_CORR_ERR_MASK: 0x%x\n",
+ __FUNCTION__, val));
+ }
+
+ DHD_ERROR(("%s: Configure AER registers for RC\n", __FUNCTION__));
+ val = dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID,
+ PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0);
+ if (val != (uint32)-1) {
+ val &= ~CORR_ERR_AE;
+ dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID,
+ PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE, val);
+ } else {
+ DHD_ERROR(("%s: Invalid RC's PCIE_ADV_CORR_ERR_MASK: 0x%x\n",
+ __FUNCTION__, val));
+ }
+}
+
static int dhdpcie_pci_suspend(struct pci_dev * pdev, pm_message_t state)
{
int ret = 0;
dhdpcie_info_t *pch = pci_get_drvdata(pdev);
dhd_bus_t *bus = NULL;
unsigned long flags;
+ uint32 i = 0;
if (pch) {
bus = pch->bus;
BCM_REFERENCE(state);
- DHD_GENERAL_LOCK(bus->dhd, flags);
if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n",
__FUNCTION__, bus->dhd->dhd_bus_busy_state));
- DHD_GENERAL_UNLOCK(bus->dhd, flags);
- return -EBUSY;
+
+ OSL_DELAY(1000);
+ /* retry till the transaction is complete */
+ while (i < 100) {
+ OSL_DELAY(1000);
+ i++;
+ if (DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
+ DHD_ERROR(("%s: Bus enter IDLE!! after %d ms\n",
+ __FUNCTION__, i));
+ break;
+ }
+ }
+ if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
+ DHD_ERROR(("%s: Bus not IDLE!! Failed after %d ms, "
+ "dhd_bus_busy_state = 0x%x\n",
+ __FUNCTION__, i, bus->dhd->dhd_bus_busy_state));
+ return -EBUSY;
+ }
}
+ DHD_GENERAL_LOCK(bus->dhd, flags);
DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
DHD_GENERAL_UNLOCK(bus->dhd, flags);
extern void dhd_dpc_tasklet_kill(dhd_pub_t *dhdp);
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+static void
+dhdpcie_suspend_dump_cfgregs(struct dhd_bus *bus, char *suspend_state)
+{
+ DHD_ERROR(("%s: BaseAddress0(0x%x)=0x%x, "
+ "BaseAddress1(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n",
+ suspend_state,
+ PCIECFGREG_BASEADDR0,
+ dhd_pcie_config_read(bus->osh,
+ PCIECFGREG_BASEADDR0, sizeof(uint32)),
+ PCIECFGREG_BASEADDR1,
+ dhd_pcie_config_read(bus->osh,
+ PCIECFGREG_BASEADDR1, sizeof(uint32)),
+ PCIE_CFG_PMCSR,
+ dhd_pcie_config_read(bus->osh,
+ PCIE_CFG_PMCSR, sizeof(uint32))));
+}
+
static int dhdpcie_suspend_dev(struct pci_dev *dev)
{
int ret;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
dhdpcie_info_t *pch = pci_get_drvdata(dev);
dhd_bus_t *bus = pch->bus;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
if (bus->is_linkdown) {
DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
return BCME_ERROR;
}
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
- DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__));
+ DHD_ERROR(("%s: Enter\n", __FUNCTION__));
+ dhdpcie_suspend_dump_cfgregs(bus, "BEFORE_EP_SUSPEND");
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
dhd_dpc_tasklet_kill(bus->dhd);
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
pch->state = pci_store_saved_state(dev);
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
pci_enable_wake(dev, PCI_D0, TRUE);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
if (pci_is_enabled(dev))
-#endif // endif
pci_disable_device(dev);
ret = pci_set_power_state(dev, PCI_D3hot);
DHD_ERROR(("%s: pci_set_power_state error %d\n",
__FUNCTION__, ret));
}
- dev->state_saved = FALSE;
+// dev->state_saved = FALSE;
+ dhdpcie_suspend_dump_cfgregs(bus, "AFTER_EP_SUSPEND");
return ret;
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
pci_load_and_free_saved_state(dev, &pch->state);
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
- DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__));
- dev->state_saved = TRUE;
+ DHD_ERROR(("%s: Enter\n", __FUNCTION__));
+// dev->state_saved = TRUE;
pci_restore_state(dev);
#ifdef FORCE_TPOWERON
if (dhdpcie_chip_req_forced_tpoweron(pch->bus)) {
goto out;
}
BCM_REFERENCE(pch);
+ dhdpcie_suspend_dump_cfgregs(pch->bus, "AFTER_EP_RESUME");
out:
return err;
}
return bcmerror;
}
+/**
+ * dhdpcie_os_setbar1win
+ *
+ * Interface function for setting bar1 window in order to allow
+ * os layer to be aware of current window positon.
+ *
+ * @bus: dhd bus context
+ * @addr: new backplane windows address for BAR1
+ */
+void
+dhdpcie_os_setbar1win(dhd_bus_t *bus, uint32 addr)
+{
+ dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+
+ osl_pci_write_config(bus->osh, PCI_BAR1_WIN, 4, addr);
+ pch->curr_bar1_win = addr;
+}
+
+/**
+ * dhdpcie_os_chkbpoffset
+ *
+ * Check the provided address is within the current BAR1 window,
+ * if not, shift the window
+ *
+ * @bus: dhd bus context
+ * @offset: back plane address that the caller wants to access
+ *
+ * Return: new offset for access
+ */
+static ulong
+dhdpcie_os_chkbpoffset(dhdpcie_info_t *pch, ulong offset)
+{
+ /* Determine BAR1 backplane window using window size
+ * Window address mask should be ~(size - 1)
+ */
+ uint32 bpwin = (uint32)(offset & ~(pch->bar1_size - 1));
+
+ if (bpwin != pch->curr_bar1_win) {
+ /* Move BAR1 window */
+ dhdpcie_os_setbar1win(pch->bus, bpwin);
+ }
+
+ return offset - bpwin;
+}
+
+/**
+ * dhdpcie os layer tcm read/write interface
+ */
+void
+dhdpcie_os_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
+{
+ dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+
+ offset = dhdpcie_os_chkbpoffset(pch, offset);
+ W_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset), data);
+}
+
+uint8
+dhdpcie_os_rtcm8(dhd_bus_t *bus, ulong offset)
+{
+ volatile uint8 data;
+ dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+
+ offset = dhdpcie_os_chkbpoffset(pch, offset);
+ data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset));
+ return data;
+}
+
+void
+dhdpcie_os_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
+{
+ dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+
+ offset = dhdpcie_os_chkbpoffset(pch, offset);
+ W_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset), data);
+}
+
+uint16
+dhdpcie_os_rtcm16(dhd_bus_t *bus, ulong offset)
+{
+ volatile uint16 data;
+ dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+
+ offset = dhdpcie_os_chkbpoffset(pch, offset);
+ data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset));
+ return data;
+}
+
+void
+dhdpcie_os_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
+{
+ dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+
+ offset = dhdpcie_os_chkbpoffset(pch, offset);
+ W_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset), data);
+}
+
+uint32
+dhdpcie_os_rtcm32(dhd_bus_t *bus, ulong offset)
+{
+ volatile uint32 data;
+ dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+
+ offset = dhdpcie_os_chkbpoffset(pch, offset);
+ data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset));
+ return data;
+}
+
+#ifdef DHD_SUPPORT_64BIT
+void
+dhdpcie_os_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
+{
+ dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+
+ offset = dhdpcie_os_chkbpoffset(pch, offset);
+ W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data);
+}
+
+uint64
+dhdpcie_os_rtcm64(dhd_bus_t *bus, ulong offset)
+{
+ volatile uint64 data;
+ dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+
+ offset = dhdpcie_os_chkbpoffset(pch, offset);
+ data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset));
+ return data;
+}
+#endif /* DHD_SUPPORT_64BIT */
+
uint32
dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset)
{
return linkcap;
}
+static void dhdpcie_config_save_restore_coherent(dhd_bus_t *bus, bool state)
+{
+ if (bus->coreid == ARMCA7_CORE_ID) {
+ if (state) {
+ /* Sleep */
+ bus->coherent_state = dhdpcie_bus_cfg_read_dword(bus,
+ PCIE_CFG_SUBSYSTEM_CONTROL, 4) & PCIE_BARCOHERENTACCEN_MASK;
+ } else {
+ uint32 val = (dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL,
+ 4) & ~PCIE_BARCOHERENTACCEN_MASK) | bus->coherent_state;
+ dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4, val);
+ }
+ }
+}
+
int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state)
{
int rc;
struct pci_dev *dev = bus->dev;
if (state) {
+ dhdpcie_config_save_restore_coherent(bus, state);
#if !defined(BCMPCIE_OOB_HOST_WAKE)
dhdpcie_pme_active(bus->osh, state);
#endif // endif
rc = dhdpcie_resume_host_dev(bus);
if (!rc) {
rc = dhdpcie_resume_dev(dev);
- if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) {
+ if (PCIECTO_ENAB(bus)) {
/* reinit CTO configuration
* because cfg space got reset at D3 (PERST)
*/
- dhdpcie_cto_init(bus, bus->cto_enable);
+ dhdpcie_cto_cfg_init(bus, TRUE);
}
- if (bus->sih->buscorerev == 66) {
+ if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
dhdpcie_ssreset_dis_enum_rst(bus);
}
#if !defined(BCMPCIE_OOB_HOST_WAKE)
dhdpcie_pme_active(bus->osh, state);
#endif // endif
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ dhdpcie_config_save_restore_coherent(bus, state);
if (bus->is_linkdown) {
bus->dhd->hang_reason = HANG_REASON_PCIE_RC_LINK_UP_FAIL;
dhd_os_send_hang_message(bus->dhd);
}
-#endif // endif
}
return rc;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
static int dhdpcie_device_scan(struct device *dev, void *data)
{
struct pci_dev *pcidev;
return 0;
}
-#endif /* LINUX_VERSION >= 2.6.0 */
int
dhdpcie_bus_register(void)
{
int error = 0;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
- if (!(error = pci_module_init(&dhdpcie_driver)))
- return 0;
-
- DHD_ERROR(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error));
-#else
if (!(error = pci_register_driver(&dhdpcie_driver))) {
bus_for_each_dev(dhdpcie_driver.driver.bus, NULL, &error, dhdpcie_device_scan);
if (!error) {
pci_unregister_driver(&dhdpcie_driver);
error = BCME_ERROR;
}
-#endif /* LINUX_VERSION < 2.6.0 */
return error;
}
int __devinit
dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ int err = 0;
DHD_MUTEX_LOCK();
if (dhdpcie_chipmatch (pdev->vendor, pdev->device)) {
DHD_ERROR(("%s: chipmatch failed!!\n", __FUNCTION__));
- DHD_MUTEX_UNLOCK();
- return -ENODEV;
+ err = -ENODEV;
+ goto exit;
}
+
printf("PCI_PROBE: bus %X, slot %X,vendor %X, device %X"
"(good PCI location)\n", pdev->bus->number,
PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device);
+ if (dhdpcie_init_succeeded == TRUE) {
+ DHD_ERROR(("%s(): === Driver Already attached to a BRCM device === \r\n",
+ __FUNCTION__));
+ err = -ENODEV;
+ goto exit;
+ }
+
if (dhdpcie_init (pdev)) {
DHD_ERROR(("%s: PCIe Enumeration failed\n", __FUNCTION__));
- DHD_MUTEX_UNLOCK();
- return -ENODEV;
+ err = -ENODEV;
+ goto exit;
}
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
#endif /* BCMPCIE_DISABLE_ASYNC_SUSPEND */
DHD_TRACE(("%s: PCIe Enumeration done!!\n", __FUNCTION__));
+exit:
DHD_MUTEX_UNLOCK();
- return 0;
+ return err;
}
int
dhdpcie_bus_release(bus);
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+
if (pci_is_enabled(pdev))
-#endif // endif
pci_disable_device(pdev);
#ifdef BCMPCIE_OOB_HOST_WAKE
/* pcie os info detach */
}
dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
- dhdpcie_info->tcm_size =
+ dhdpcie_info->bar1_size =
(bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
- dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size);
+ dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->bar1_size);
if (!dhdpcie_info->regs || !dhdpcie_info->tcm) {
DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__));
}
/* BAR0 */
- DHD_ERROR(("%s: BAR0(VA): 0x%p, BAR0(PA): "PRINTF_RESOURCE", SIZE: %d\n",
+ DHD_ERROR(("%s: BAR0(VA): 0x%pK, BAR0(PA): "PRINTF_RESOURCE", SIZE: %d\n",
__FUNCTION__, pch->regs, pci_resource_start(bus->dev, 0),
DONGLE_REG_MAP_SIZE));
/* BAR1 */
- DHD_ERROR(("%s: BAR1(VA): 0x%p, BAR1(PA): "PRINTF_RESOURCE", SIZE: %d\n",
+ DHD_ERROR(("%s: BAR1(VA): 0x%pK, BAR1(PA): "PRINTF_RESOURCE", SIZE: %d\n",
__FUNCTION__, pch->tcm, pci_resource_start(bus->dev, 2),
- pch->tcm_size));
+ pch->bar1_size));
}
int dhdpcie_init(struct pci_dev *pdev)
dhdpcie_info->bus = bus;
bus->is_linkdown = 0;
bus->no_bus_init = FALSE;
+ bus->cto_triggered = 0;
bus->rc_dev = NULL;
if (bus->dev->bus) {
/* self member of structure pci_bus is bridge device as seen by parent */
bus->rc_dev = bus->dev->bus->self;
- DHD_ERROR(("%s: rc_dev from dev->bus->self (%x:%x) is %p\n", __FUNCTION__,
+ DHD_ERROR(("%s: rc_dev from dev->bus->self (%x:%x) is %pK\n", __FUNCTION__,
bus->rc_dev->vendor, bus->rc_dev->device, bus->rc_dev));
} else {
DHD_ERROR(("%s: unable to get rc_dev as dev->bus is NULL\n", __FUNCTION__));
/* Attach to the OS network interface */
DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__));
- if (dhd_register_if(bus->dhd, 0, TRUE)) {
+ if (dhd_attach_net(bus->dhd, TRUE)) {
DHD_ERROR(("%s(): ERROR.. dhd_register_if() failed\n", __FUNCTION__));
break;
}
if (bus) {
pdev = bus->dev;
if (bus->irq_registered) {
+#if defined(SET_PCIE_IRQ_CPU_CORE) && defined(CONFIG_ARCH_SM8150)
+ /* clean up the affinity_hint before
+ * the unregistration of PCIe irq
+ */
+ (void)irq_set_affinity_hint(pdev->irq, NULL);
+#endif /* SET_PCIE_IRQ_CPU_CORE && CONFIG_ARCH_SM8150 */
free_irq(pdev->irq, bus);
bus->irq_registered = FALSE;
if (bus->d2h_intr_method == PCIE_MSI) {
dhdpcie_isr(int irq, void *arg)
{
dhd_bus_t *bus = (dhd_bus_t*)arg;
- bus->isr_entry_time = OSL_SYSUPTIME_US();
+ bus->isr_entry_time = OSL_LOCALTIME_NS();
if (!dhdpcie_bus_isr(bus)) {
DHD_LOG_MEM(("%s: dhdpcie_bus_isr returns with FALSE\n", __FUNCTION__));
}
- bus->isr_exit_time = OSL_SYSUPTIME_US();
+ bus->isr_exit_time = OSL_LOCALTIME_NS();
return IRQ_HANDLED;
}
return BCME_ERROR;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
if (pci_is_enabled(bus->dev))
-#endif // endif
pci_disable_device(bus->dev);
return 0;
}
bus->regs = dhdpcie_info->regs;
- dhdpcie_info->tcm_size =
+ dhdpcie_info->bar1_size =
(bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
- dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size);
+ dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->bar1_size);
if (!dhdpcie_info->tcm) {
DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__));
REG_UNMAP(dhdpcie_info->regs);
}
#ifdef BCMPCIE_OOB_HOST_WAKE
-int dhdpcie_get_oob_irq_num(dhd_bus_t *bus)
+#ifdef CONFIG_BCMDHD_GET_OOB_STATE
+extern int dhd_get_wlan_oob_gpio(void);
+#endif /* CONFIG_BCMDHD_GET_OOB_STATE */
+
+/*
+ * Return the current level of the WLAN out-of-band host-wake GPIO via the
+ * platform hook dhd_get_wlan_oob_gpio(), or BCME_UNSUPPORTED when the
+ * driver is built without CONFIG_BCMDHD_GET_OOB_STATE.
+ */
+int dhdpcie_get_oob_irq_level(void)
+{
+ int gpio_level;
+
+#ifdef CONFIG_BCMDHD_GET_OOB_STATE
+ gpio_level = dhd_get_wlan_oob_gpio();
+#else
+ gpio_level = BCME_UNSUPPORTED;
+#endif /* CONFIG_BCMDHD_GET_OOB_STATE */
+ return gpio_level;
+}
+
+/*
+ * Report whether the OOB host-wake interrupt is currently enabled
+ * (dhdpcie_osinfo->oob_irq_enabled). Returns 0 both when the IRQ is
+ * disabled and when any of bus / bus->dev / drvdata / os_cxt is missing,
+ * so callers cannot distinguish "disabled" from "not initialized".
+ */
+int dhdpcie_get_oob_irq_status(struct dhd_bus *bus)
+{
+ dhdpcie_info_t *pch;
+ dhdpcie_os_info_t *dhdpcie_osinfo;
+
+ if (bus == NULL) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return 0;
+ }
+
+ if (bus->dev == NULL) {
+ DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+ return 0;
+ }
+
+ pch = pci_get_drvdata(bus->dev);
+ if (pch == NULL) {
+ DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
+ return 0;
+ }
+
+ dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
+
+ return dhdpcie_osinfo ? dhdpcie_osinfo->oob_irq_enabled : 0;
+}
+
+int dhdpcie_get_oob_irq_num(struct dhd_bus *bus)
{
dhdpcie_info_t *pch;
dhdpcie_os_info_t *dhdpcie_osinfo;
if (enable) {
enable_irq(dhdpcie_osinfo->oob_irq_num);
bus->oob_intr_enable_count++;
+ bus->last_oob_irq_enable_time = OSL_LOCALTIME_NS();
} else {
disable_irq_nosync(dhdpcie_osinfo->oob_irq_num);
bus->oob_intr_disable_count++;
+ bus->last_oob_irq_disable_time = OSL_LOCALTIME_NS();
}
dhdpcie_osinfo->oob_irq_enabled = enable;
}
DHD_TRACE(("%s: IRQ Triggered\n", __FUNCTION__));
bus = (dhd_bus_t *)data;
dhdpcie_oob_intr_set(bus, FALSE);
- bus->last_oob_irq_time = OSL_SYSUPTIME_US();
+ bus->last_oob_irq_time = OSL_LOCALTIME_NS();
bus->oob_intr_count++;
#ifdef DHD_WAKE_STATUS
{
else
return NULL;
}
+
+#define KIRQ_PRINT_BUF_LEN 256
+
+/*
+ * Dump kernel IRQ statistics for irq_num in a /proc/interrupts-like
+ * single line: per-online-CPU counts, irqchip name, hwirq number,
+ * Level/Edge type (kernel >= 3.1 with CONFIG_GENERIC_IRQ_SHOW_LEVEL),
+ * and the descriptor name. Output is built in a KIRQ_PRINT_BUF_LEN-byte
+ * stack buffer and emitted through DHD_ERROR while holding the
+ * descriptor's raw spinlock with interrupts disabled.
+ */
+void
+dhd_print_kirqstats(dhd_pub_t *dhd, unsigned int irq_num)
+{
+ unsigned long flags = 0;
+ struct irq_desc *desc;
+ int i; /* cpu iterator */
+ struct bcmstrbuf strbuf;
+ char tmp_buf[KIRQ_PRINT_BUF_LEN];
+
+ desc = irq_to_desc(irq_num);
+ if (!desc) {
+ DHD_ERROR(("%s : irqdesc is not found \n", __FUNCTION__));
+ return;
+ }
+ bcm_binit(&strbuf, tmp_buf, KIRQ_PRINT_BUF_LEN);
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ bcm_bprintf(&strbuf, "dhd irq %u:", irq_num);
+ /* kstat_irqs may be NULL before the irq is set up; print 0 then. */
+ for_each_online_cpu(i)
+ bcm_bprintf(&strbuf, "%10u ",
+ desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, i) : 0);
+ if (desc->irq_data.chip) {
+ if (desc->irq_data.chip->name)
+ bcm_bprintf(&strbuf, " %8s", desc->irq_data.chip->name);
+ else
+ bcm_bprintf(&strbuf, " %8s", "-");
+ } else {
+ bcm_bprintf(&strbuf, " %8s", "None");
+ }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
+ if (desc->irq_data.domain)
+ bcm_bprintf(&strbuf, " %d", (int)desc->irq_data.hwirq)
;
+#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
+ bcm_bprintf(&strbuf, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
+#endif // endif
+#endif /* LINUX VERSION > 3.1.0 */
+
+ if (desc->name)
+ bcm_bprintf(&strbuf, "-%-8s", desc->name);
+
+ DHD_ERROR(("%s\n", strbuf.origbuf));
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+/*
+ * Print kernel IRQ stats for the bus data interrupt (PCIe or SDIO,
+ * depending on build) and, for PCIe with BCMPCIE_OOB_HOST_WAKE, for the
+ * OOB host-wake interrupt as well. Delegates formatting to
+ * dhd_print_kirqstats().
+ */
+void
+dhd_show_kirqstats(dhd_pub_t *dhd)
+{
+ /* -1 wraps to UINT_MAX; the (irq != -1) test below compares against
+ * the same wrapped value, so it correctly detects "not assigned".
+ */
+ unsigned int irq = -1;
+#ifdef BCMPCIE
+ dhdpcie_get_pcieirq(dhd->bus, &irq);
+#endif /* BCMPCIE */
+#ifdef BCMSDIO
+ irq = ((wifi_adapter_info_t *)dhd->info->adapter)->irq_num;
+#endif /* BCMSDIO */
+ if (irq != -1) {
+#ifdef BCMPCIE
+ DHD_ERROR(("DUMP data kernel irq stats : \n"));
+#endif /* BCMPCIE */
+#ifdef BCMSDIO
+ DHD_ERROR(("DUMP data/host wakeup kernel irq stats : \n"));
+#endif /* BCMSDIO */
+ dhd_print_kirqstats(dhd, irq);
+ }
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ irq = dhdpcie_get_oob_irq_num(dhd->bus);
+ if (irq) {
+ DHD_ERROR(("DUMP PCIE host wakeup kernel irq stats : \n"));
+ dhd_print_kirqstats(dhd, irq);
+ }
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+}
+
+#ifdef DHD_FW_COREDUMP
+/*
+ * Exported hook (see EXPORT_SYMBOL below) allowing other kernel modules
+ * to force a dongle memory dump on the global bus handle g_dhd_bus.
+ * Dumps the console buffer and protocol debug info first, then forces
+ * memdump_enabled to DUMP_MEMFILE_BUGON with type
+ * DUMP_TYPE_AP_ABNORMAL_ACCESS before triggering dhd_bus_mem_dump().
+ * Returns -ENODEV if the bus has not been attached yet, 0 otherwise.
+ */
+int
+dhd_dongle_mem_dump(void)
+{
+ if (!g_dhd_bus) {
+ DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ dhd_bus_dump_console_buffer(g_dhd_bus);
+ dhd_prot_debug_info_print(g_dhd_bus->dhd);
+
+ /* NOTE(review): overwrites any user-configured memdump mode. */
+ g_dhd_bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
+ g_dhd_bus->dhd->memdump_type = DUMP_TYPE_AP_ABNORMAL_ACCESS;
+
+ dhd_bus_mem_dump(g_dhd_bus->dhd);
+ return 0;
+}
+EXPORT_SYMBOL(dhd_dongle_mem_dump);
+#endif /* DHD_FW_COREDUMP */
+
+/*
+ * Exported helper (see EXPORT_SYMBOL below): report whether the DHD
+ * driver is up, i.e. g_dhd_bus exists and its dhd_pub's 'up' flag is
+ * set. Returns FALSE when the bus is not attached.
+ */
+bool
+dhd_bus_check_driver_up(void)
+{
+ dhd_bus_t *bus;
+ dhd_pub_t *dhdp;
+ bool isup = FALSE;
+
+ bus = (dhd_bus_t *)g_dhd_bus;
+ if (!bus) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return isup;
+ }
+
+ dhdp = bus->dhd;
+ if (dhdp) {
+ isup = dhdp->up;
+ }
+
+ return isup;
+}
+EXPORT_SYMBOL(dhd_bus_check_driver_up);
* Broadcom Dongle Host Driver (DHD)
* Prefered Network Offload and Wi-Fi Location Service(WLS) code.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_pno.c 736010 2017-12-13 08:45:59Z $
+ * $Id: dhd_pno.c 812762 2019-04-02 09:36:26Z $
*/
#if defined(GSCAN_SUPPORT) && !defined(PNO_SUPPORT)
#ifdef GSCAN_SUPPORT
static uint64
-convert_fw_rel_time_to_systime(struct timespec *ts, uint32 fw_ts_ms)
+convert_fw_rel_time_to_systime(struct osl_timespec *ts, uint32 fw_ts_ms)
{
return ((uint64)(TIMESPEC_TO_US(*ts)) - (uint64)(fw_ts_ms * 1000));
}
if (gscan_params->epno_cfg.num_epno_ssid > 0) {
i = 0;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(iter, next,
&gscan_params->epno_cfg.epno_ssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (i++ == idx) {
memcpy(res->ssid, iter->SSID, iter->SSID_len);
res->ssid_len = iter->SSID_len;
return BCME_NOMEM;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(iter, next, ssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
pfn_elem_buf[i].infra = htod32(1);
pfn_elem_buf[i].auth = htod32(DOT11_OPEN_SYSTEM);
pfn_elem_buf[i].wpa_auth = htod32(iter->wpa_auth);
break;
}
}
+
err = dhd_iovar(dhd, 0, "pfn_add", (char *)pfn_elem_buf, mem_needed, NULL, 0, TRUE);
if (err < 0) {
DHD_ERROR(("%s : failed to execute pfn_add\n", __FUNCTION__));
}
DHD_PNO(("%s scancount %d\n", __FUNCTION__, params_batch->get_batch.expired_tot_scan_cnt));
/* preestimate scan count until which scan result this report is going to end */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(siter, snext,
¶ms_batch->get_batch.expired_scan_results_list, list) {
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
phead = siter->bestnetheader;
while (phead != NULL) {
/* if left_size is less than bestheader total size , stop this */
bp += nreadsize = snprintf(bp, nleftsize, "trunc\n");
nleftsize -= nreadsize;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(iter, next,
&phead->entry_list, list) {
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
t_delta = jiffies_to_msecs(jiffies - iter->recorded_time);
#ifdef PNO_DEBUG
_base_bp = bp;
}
params_batch->get_batch.expired_tot_scan_cnt -= cnt;
/* set FALSE only if the link list is empty after returning the data */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
if (list_empty(¶ms_batch->get_batch.expired_scan_results_list)) {
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
params_batch->get_batch.batch_started = FALSE;
bp += snprintf(bp, nleftsize, "%s", RESULTS_END_MARKER);
DHD_PNO(("%s", RESULTS_END_MARKER));
NULL_CHECK(head, "head is NULL", err);
NULL_CHECK(head->next, "head->next is NULL", err);
DHD_PNO(("%s enter\n", __FUNCTION__));
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(siter, snext,
head, list) {
if (only_last) {
MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE);
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
return removed_scan_cnt;
}
case DHD_PNO_LEGACY_MODE: {
struct dhd_pno_ssid *iter, *next;
if (params->params_legacy.nssid > 0) {
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(iter, next,
¶ms->params_legacy.ssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
list_del(&iter->list);
MFREE(dhd->osh, iter, sizeof(struct dhd_pno_ssid));
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+
params->params_legacy.nssid = 0;
params->params_legacy.scan_fr = 0;
params->params_legacy.pno_freq_expo_max = 0;
case DHD_PNO_HOTLIST_MODE: {
struct dhd_pno_bssid *iter, *next;
if (params->params_hotlist.nbssid > 0) {
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(iter, next,
¶ms->params_hotlist.bssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
list_del(&iter->list);
MFREE(dhd->osh, iter, sizeof(struct dhd_pno_ssid));
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
}
params->params_hotlist.scan_fr = 0;
params->params_hotlist.nbssid = 0;
goto exit;
}
/* convert dhd_pno_bssid to wl_pfn_bssid */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
cnt = 0;
tmp_bssid = p_pfn_bssid;
list_for_each_entry_safe(iter, next,
&_params->params_hotlist.bssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
memcpy(&tmp_bssid->macaddr,
&iter->macaddr, ETHER_ADDR_LEN);
tmp_bssid->flags = iter->flags;
break;
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist);
if (err < 0) {
_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
if (flags & GSCAN_FLUSH_HOTLIST_CFG) {
struct dhd_pno_bssid *iter, *next;
if (_params->params_gscan.nbssid_hotlist > 0) {
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(iter, next,
&_params->params_gscan.hotlist_bssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
list_del(&iter->list);
MFREE(dhd->osh, iter, sizeof(struct dhd_pno_bssid));
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
}
_params->params_gscan.nbssid_hotlist = 0;
DHD_PNO(("Flush Hotlist Config\n"));
dhd_epno_ssid_cfg_t *epno_cfg = &_params->params_gscan.epno_cfg;
if (epno_cfg->num_epno_ssid > 0) {
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(iter, next,
&epno_cfg->epno_ssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
list_del(&iter->list);
MFREE(dhd->osh, iter, sizeof(struct dhd_pno_bssid));
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
epno_cfg->num_epno_ssid = 0;
}
memset(&epno_cfg->params, 0, sizeof(wl_ssid_ext_params_t));
return err;
}
-static void *
-dhd_get_gscan_batch_results(dhd_pub_t *dhd, uint32 *len)
-{
- gscan_results_cache_t *iter, *results;
- dhd_pno_status_info_t *_pno_state;
- dhd_pno_params_t *_params;
- uint16 num_scan_ids = 0, num_results = 0;
-
- _pno_state = PNO_GET_PNOSTATE(dhd);
- _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
-
- iter = results = _params->params_gscan.gscan_batch_cache;
- while (iter) {
- num_results += iter->tot_count - iter->tot_consumed;
- num_scan_ids++;
- iter = iter->next;
- }
-
- *len = ((num_results << 16) | (num_scan_ids));
- return results;
-}
-
int
dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type,
void *buf, bool flush)
INIT_LIST_HEAD(&_params->params_gscan.hotlist_bssid_list);
}
+ if ((_params->params_gscan.nbssid_hotlist +
+ ptr->nbssid) > PFN_SWC_MAX_NUM_APS) {
+ DHD_ERROR(("Excessive number of hotlist APs programmed %d\n",
+ (_params->params_gscan.nbssid_hotlist +
+ ptr->nbssid)));
+ err = BCME_RANGE;
+ goto exit;
+ }
+
for (i = 0, bssid_ptr = ptr->bssid; i < ptr->nbssid; i++, bssid_ptr++) {
_pno_bssid = (struct dhd_pno_bssid *)MALLOCZ(dhd->osh,
sizeof(struct dhd_pno_bssid));
ptr = p_pfn_bssid;
/* convert dhd_pno_bssid to wl_pfn_bssid */
DHD_PNO(("nhotlist %d\n", gscan_params->nbssid_hotlist));
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(iter, next,
&gscan_params->hotlist_bssid_list, list) {
char buffer_hotlist[64];
+ GCC_DIAGNOSTIC_POP();
memcpy(&ptr->macaddr,
&iter->macaddr, ETHER_ADDR_LEN);
BCM_REFERENCE(buffer_hotlist);
ptr->flags = iter->flags;
ptr++;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, gscan_params->nbssid_hotlist);
if (err < 0) {
uint16 count;
uint16 fwcount;
uint16 fwstatus = PFN_INCOMPLETE;
- struct timespec tm_spec;
+ struct osl_timespec tm_spec;
/* Static asserts in _dhd_pno_get_for_batch() below guarantee the v1 and v2
* net_info and subnet_info structures are compatible in size and SSID offset,
__FUNCTION__, err));
goto exit_mutex_unlock;
}
- get_monotonic_boottime(&tm_spec);
+ osl_get_monotonic_boottime(&tm_spec);
if (plbestnet_v1->version == PFN_LBEST_SCAN_RESULT_VERSION_V1) {
fwstatus = plbestnet_v1->status;
#endif /* GSCAN_SUPPORT */
#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
+/*
+ * Walk the cached gscan batch results list and report how much data is
+ * pending. *len is packed as (unconsumed_result_count << 16) |
+ * scan_id_count; the return value is the head of the cache list itself
+ * (not a copy). Assumes PNO state is already initialized — no NULL
+ * check on _pno_state here; presumably guaranteed by the caller
+ * (dhd_pno_get_gscan), confirm at the call site.
+ */
+static void *
+dhd_get_gscan_batch_results(dhd_pub_t *dhd, uint32 *len)
+{
+ gscan_results_cache_t *iter, *results;
+ dhd_pno_status_info_t *_pno_state;
+ dhd_pno_params_t *_params;
+ uint16 num_scan_ids = 0, num_results = 0;
+
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+
+ iter = results = _params->params_gscan.gscan_batch_cache;
+ while (iter) {
+ /* tot_count - tot_consumed = results not yet handed to the caller */
+ num_results += iter->tot_count - iter->tot_consumed;
+ num_scan_ids++;
+ iter = iter->next;
+ }
+
+ *len = ((num_results << 16) | (num_scan_ids));
+ return results;
+}
+
void *
dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type,
void *info, uint32 *len)
ptr->max_scan_cache_size = GSCAN_MAX_AP_CACHE;
ptr->max_scan_buckets = GSCAN_MAX_CH_BUCKETS;
ptr->max_ap_cache_per_scan = GSCAN_MAX_AP_CACHE_PER_SCAN;
+ ptr->max_rssi_sample_size = PFN_SWC_RSSI_WINDOW_MAX;
ptr->max_scan_reporting_threshold = 100;
- ptr->max_hotlist_aps = PFN_HOTLIST_MAX_NUM_APS;
+ ptr->max_hotlist_bssids = PFN_HOTLIST_MAX_NUM_APS;
+ ptr->max_hotlist_ssids = 0;
+ ptr->max_significant_wifi_change_aps = 0;
+ ptr->max_bssid_history_entries = 0;
ptr->max_epno_ssid_crc32 = MAX_EPNO_SSID_NUM;
ptr->max_epno_hidden_ssid = MAX_EPNO_HIDDEN_SSID;
ptr->max_white_list_ssid = MAX_WHITELIST_SSID;
ret = (void *)ptr;
*len = sizeof(dhd_pno_gscan_capabilities_t);
break;
-#ifdef GSCAN_SUPPORT
+
case DHD_PNO_GET_BATCH_RESULTS:
ret = dhd_get_gscan_batch_results(dhd, len);
break;
-#endif /* GSCAN_SUPPORT */
case DHD_PNO_GET_CHANNEL_LIST:
if (info) {
uint16 ch_list[WL_NUMCHANNELS];
*len = 0;
} else {
mem_needed = sizeof(uint32) * nchan;
- p = (uint32 *)MALLOCZ(dhd->osh, mem_needed);
+ p = (uint32 *)MALLOC(dhd->osh, mem_needed);
if (!p) {
DHD_ERROR(("%s: Unable to malloc %d bytes\n",
__FUNCTION__, mem_needed));
epno_cfg->num_epno_ssid++;
list_add_tail(&ssid_elem->list, &epno_cfg->epno_ssid_list);
ret = ssid_elem;
- *len = sizeof(dhd_pno_ssid_t);
break;
default:
DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type));
/* this is a first try to get batching results */
if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) {
/* move the scan_results_list to expired_scan_results_lists */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(siter, snext,
&_params->params_batch.get_batch.scan_results_list, list) {
+ GCC_DIAGNOSTIC_POP();
list_move_tail(&siter->list,
&_params->params_batch.get_batch.expired_scan_results_list);
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
_params->params_batch.get_batch.top_node_cnt = 0;
_params->params_batch.get_batch.expired_tot_scan_cnt =
_params->params_batch.get_batch.tot_scan_cnt;
/* This is a first try to get batching results */
if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) {
/* move the scan_results_list to expired_scan_results_lists */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(siter, snext,
&_params->params_batch.get_batch.scan_results_list, list) {
+ GCC_DIAGNOSTIC_POP();
list_move_tail(&siter->list,
&_params->params_batch.get_batch.expired_scan_results_list);
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
/* reset gloval values after moving to expired list */
_params->params_batch.get_batch.top_node_cnt = 0;
_params->params_batch.get_batch.expired_tot_scan_cnt =
dhd_pub_t *dhd;
struct dhd_pno_batch_params *params_batch;
DHD_PNO(("%s enter\n", __FUNCTION__));
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
_pno_state = container_of(work, struct dhd_pno_status_info, work);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
+
dhd = _pno_state->dhd;
if (dhd == NULL) {
DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
}
i = 0;
/* convert dhd_pno_bssid to wl_pfn_bssid */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(iter, next,
&_params->params_hotlist.bssid_list, list) {
+ GCC_DIAGNOSTIC_POP();
memcpy(&p_pfn_bssid[i].macaddr, &iter->macaddr, ETHER_ADDR_LEN);
p_pfn_bssid[i].flags = iter->flags;
i++;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist);
if (err < 0) {
_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
u32 bi_length = 0;
uint8 channel;
uint32 mem_needed;
- struct timespec ts;
+ struct osl_timespec ts;
u32 bi_ie_length = 0;
u32 bi_ie_offset = 0;
*size = 0;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
gscan_result = (wl_gscan_result_t *)data;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
if (!gscan_result) {
DHD_ERROR(("Invalid gscan result (NULL pointer)\n"));
goto exit;
result->fixed.rssi = (int32) bi->RSSI;
result->fixed.rtt = 0;
result->fixed.rtt_sd = 0;
- get_monotonic_boottime(&ts);
+ osl_get_monotonic_boottime(&ts);
result->fixed.ts = (uint64) TIMESPEC_TO_US(ts);
result->fixed.beacon_period = dtoh16(bi->beacon_period);
result->fixed.capability = dtoh16(bi->capability);
gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan);
if (event == WLC_E_PFN_NET_FOUND || event == WLC_E_PFN_NET_LOST) {
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
wl_pfn_scanresults_v1_t *pfn_result = (wl_pfn_scanresults_v1_t *)data;
wl_pfn_scanresults_v2_t *pfn_result_v2 = (wl_pfn_scanresults_v2_t *)data;
wl_pfn_net_info_v1_t *net;
wl_pfn_net_info_v2_t *net_v2;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+
if (pfn_result->version == PFN_SCANRESULT_VERSION_V1) {
- /* Check if count of pfn results is corrupted */
- if (pfn_result->count > EVENT_MAX_NETCNT_V1) {
- DHD_ERROR(("%s event %d: pfn results count %d"
- "exceeds the max limit\n",
- __FUNCTION__, event, pfn_result->count));
+ if ((pfn_result->count == 0) || (pfn_result->count > EVENT_MAX_NETCNT_V1)) {
+ DHD_ERROR(("%s event %d: wrong pfn v1 results count %d\n",
+ __FUNCTION__, event, pfn_result->count));
return NULL;
}
count = pfn_result->count;
results[i].rssi, results[i].flags));
}
} else if (pfn_result_v2->version == PFN_SCANRESULT_VERSION_V2) {
- /* Check if count of pfn results is corrupted */
- if (pfn_result_v2->count > EVENT_MAX_NETCNT_V2) {
- DHD_ERROR(("%s event %d: pfn results count %d"
- "exceeds the max limit\n",
- __FUNCTION__, event, pfn_result_v2->count));
+ if ((pfn_result->count == 0) || (pfn_result->count > EVENT_MAX_NETCNT_V2)) {
+ DHD_ERROR(("%s event %d: wrong pfn v2 results count %d\n",
+ __FUNCTION__, event, pfn_result->count));
return NULL;
}
count = pfn_result_v2->count;
void *ptr = NULL;
dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
struct dhd_pno_gscan_params *gscan_params;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
wl_pfn_scanresults_v1_t *results_v1 = (wl_pfn_scanresults_v1_t *)event_data;
wl_pfn_scanresults_v2_t *results_v2 = (wl_pfn_scanresults_v2_t *)event_data;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
wifi_gscan_result_t *hotlist_found_array;
wl_pfn_net_info_v1_t *pnetinfo;
wl_pfn_net_info_v2_t *pnetinfo_v2;
gscan_results_cache_t *gscan_hotlist_cache;
- uint32 malloc_size = 0, i, total = 0;
- struct timespec tm_spec;
+ u32 malloc_size = 0, i, total = 0;
+ struct osl_timespec tm_spec;
uint16 fwstatus;
uint16 fwcount;
return ptr;
}
- get_monotonic_boottime(&tm_spec);
+ osl_get_monotonic_boottime(&tm_spec);
malloc_size = sizeof(gscan_results_cache_t) +
((fwcount - 1) * sizeof(wifi_gscan_result_t));
gscan_hotlist_cache = (gscan_results_cache_t *)MALLOC(dhd->osh, malloc_size);
return ptr;
}
- get_monotonic_boottime(&tm_spec);
+ osl_get_monotonic_boottime(&tm_spec);
malloc_size = sizeof(gscan_results_cache_t) +
((fwcount - 1) * sizeof(wifi_gscan_result_t));
gscan_hotlist_cache =
* Header file of Broadcom Dongle Host Driver (DHD)
* Prefered Network Offload code and Wi-Fi Location Service(WLS) code.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_pno.h 722186 2017-09-19 07:03:42Z $
+ * $Id: dhd_pno.h 805174 2019-02-15 17:26:01Z $
*/
#ifndef __DHD_PNO_H__
struct ether_addr bssid;
} dhd_epno_results_t;
+typedef struct dhd_pno_swc_evt_param {
+ uint16 results_rxed_so_far;
+ wl_pfn_significant_net_t *change_array;
+} dhd_pno_swc_evt_param_t;
+
typedef struct wifi_gscan_result {
uint64 ts; /* Time of discovery */
char ssid[DOT11_MAX_SSID_LEN+1]; /* null terminated */
int max_ap_cache_per_scan;
int max_rssi_sample_size;
int max_scan_reporting_threshold;
- int max_hotlist_aps;
+ int max_hotlist_bssids;
+ int max_hotlist_ssids;
int max_significant_wifi_change_aps;
+ int max_bssid_history_entries;
int max_epno_ssid_crc32;
int max_epno_hidden_ssid;
int max_white_list_ssid;
uint8 bestn;
uint8 mscan;
uint8 buffer_threshold;
+ uint8 swc_nbssid_threshold;
+ uint8 swc_rssi_window_size;
uint8 lost_ap_window;
uint8 nchannel_buckets;
uint8 reason;
gscan_results_cache_t *gscan_batch_cache;
gscan_results_cache_t *gscan_hotlist_found;
gscan_results_cache_t*gscan_hotlist_lost;
+ uint16 nbssid_significant_change;
uint16 nbssid_hotlist;
+ struct dhd_pno_swc_evt_param param_significant;
struct dhd_pno_gscan_channel_bucket channel_bucket[GSCAN_MAX_CH_BUCKETS];
struct list_head hotlist_bssid_list;
+ struct list_head significant_bssid_list;
dhd_epno_ssid_cfg_t epno_cfg;
uint32 scan_id;
};
* Provides type definitions and function prototypes used to link the
* DHD OS, bus, and protocol modules.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_proto.h 769450 2018-06-26 10:16:46Z $
+ * $Id: dhd_proto.h 814912 2019-04-15 10:38:59Z $
*/
#ifndef _dhd_proto_h_
#define IOCTL_RESP_TIMEOUT DEFAULT_IOCTL_RESP_TIMEOUT
#endif /* IOCTL_RESP_TIMEOUT */
+/* In milli second default value for Production FW */
+#define IOCTL_DMAXFER_TIMEOUT 10000
+
#ifndef MFG_IOCTL_RESP_TIMEOUT
#define MFG_IOCTL_RESP_TIMEOUT 20000 /* In milli second default value for MFG FW */
#endif /* MFG_IOCTL_RESP_TIMEOUT */
uint reorder_info_len, void **pkt, uint32 *free_buf_count);
#ifdef BCMPCIE
-extern bool dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound);
-extern bool dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound);
+extern bool dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound, int ringtype);
+extern bool dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound, int ringtype);
extern bool dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound);
extern int dhd_prot_process_ctrlbuf(dhd_pub_t * dhd);
extern int dhd_prot_process_trapbuf(dhd_pub_t * dhd);
extern int dhd_prot_txdata(dhd_pub_t *dhd, void *p, uint8 ifidx);
extern int dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd,
uint len, uint srcdelay, uint destdelay, uint d11_lpbk, uint core_num);
-extern dma_xfer_status_t dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd);
+extern int dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd, dma_xfer_info_t *result);
extern void dhd_dma_buf_init(dhd_pub_t *dhd, void *dma_buf,
void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma);
extern void dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flow_id);
extern uint32 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val);
extern void dhd_prot_reset(dhd_pub_t *dhd);
+extern uint16 dhd_get_max_flow_rings(dhd_pub_t *dhd);
#ifdef IDLE_TX_FLOW_MGMT
extern int dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count);
extern int dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node);
#endif /* IDLE_TX_FLOW_MGMT */
extern int dhd_prot_init_info_rings(dhd_pub_t *dhd);
+#ifdef DHD_HP2P
+extern int dhd_prot_init_hp2p_rings(dhd_pub_t *dhd);
+#endif /* DHD_HP2P */
+
+extern int dhd_prot_check_tx_resource(dhd_pub_t *dhd);
+extern void dhd_prot_update_pktid_txq_stop_cnt(dhd_pub_t *dhd);
+extern void dhd_prot_update_pktid_txq_start_cnt(dhd_pub_t *dhd);
+#else
+static INLINE void dhd_prot_update_pktid_txq_stop_cnt(dhd_pub_t *dhd) { return; }
+static INLINE void dhd_prot_update_pktid_txq_start_cnt(dhd_pub_t *dhd) { return; }
#endif /* BCMPCIE */
#ifdef DHD_LB
#define DHD_PROTOCOL "unknown"
#endif /* proto */
-void dhd_get_hscb_info(struct dhd_prot *prot, void ** va, uint32 *len);
-void dhd_get_hscb_buff(struct dhd_prot *prot, uint32 offset, uint32 length, void * buff);
+int dhd_get_hscb_info(dhd_pub_t *dhd, void ** va, uint32 *len);
+int dhd_get_hscb_buff(dhd_pub_t *dhd, uint32 offset, uint32 length, void * buff);
+
+#ifdef DHD_HP2P
+extern uint8 dhd_prot_hp2p_enable(dhd_pub_t *dhd, bool set, int enable);
+extern uint32 dhd_prot_pkt_threshold(dhd_pub_t *dhd, bool set, uint32 val);
+extern uint32 dhd_prot_time_threshold(dhd_pub_t *dhd, bool set, uint32 val);
+extern uint32 dhd_prot_pkt_expiry(dhd_pub_t *dhd, bool set, uint32 val);
+#endif // endif
+#ifdef DHD_MAP_LOGGING
+extern void dhd_prot_smmu_fault_dump(dhd_pub_t *dhdp);
+#endif /* DHD_MAP_LOGGING */
#endif /* _dhd_proto_h_ */
/*
* Broadcom Dongle Host Driver (DHD), RTT
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
#include <linux/sort.h>
#include <dngl_stats.h>
#include <wlioctl.h>
+#include <bcmwifi_rspec.h>
#include <bcmevent.h>
#include <dhd.h>
#include <dhd_rtt.h>
#include <dhd_dbg.h>
+#include <dhd_bus.h>
#include <wldev_common.h>
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
} \
} while (0)
-#define RTT_IS_ENABLED(rtt_status) (rtt_status->status == RTT_ENABLED)
-#define RTT_IS_STOPPED(rtt_status) (rtt_status->status == RTT_STOPPED)
#define TIMESPEC_TO_US(ts) (((uint64)(ts).tv_sec * USEC_PER_SEC) + \
(ts).tv_nsec / NSEC_PER_USEC)
+#undef DHD_RTT_MEM
+#undef DHD_RTT_ERR
+#define DHD_RTT_MEM DHD_LOG_MEM
+#define DHD_RTT_ERR DHD_ERROR
+
#define FTM_IOC_BUFSZ 2048 /* ioc buffsize for our module (> BCM_XTLV_HDR_SIZE) */
#define FTM_AVAIL_MAX_SLOTS 32
#define FTM_MAX_CONFIGS 10
#define FTM_DEFAULT_SESSION 1
#define FTM_BURST_TIMEOUT_UNIT 250 /* 250 ns */
#define FTM_INVALID -1
-#define FTM_DEFAULT_CNT_20M 12
-#define FTM_DEFAULT_CNT_40M 10
-#define FTM_DEFAULT_CNT_80M 5
+#define FTM_DEFAULT_CNT_20M 24u
+#define FTM_DEFAULT_CNT_40M 16u
+#define FTM_DEFAULT_CNT_80M 11u
+/* To handle congestion env, set max dur/timeout */
+#define FTM_MAX_BURST_DUR_TMO_MS 128u
/* convenience macros */
#define FTM_TU2MICRO(_tu) ((uint64)(_tu) << 10)
/* broadcom specific set to have more accurate data */
#define ENABLE_VHT_ACK
#define CH_MIN_5G_CHANNEL 34
-#define CH_MIN_2G_CHANNEL 1
+
+/* CUR ETH became obsolete with this major version onwards */
+#define RTT_IOV_CUR_ETH_OBSOLETE 12
+
+/* PROXD TIMEOUT */
+#define DHD_RTT_TIMER_INTERVAL_MS 5000u
+#define DHD_NAN_RTT_TIMER_INTERVAL_MS 20000u
struct rtt_noti_callback {
struct list_head list;
uint32 data32;
uint16 data16;
uint8 data8;
+ uint32 event_mask;
};
} ftm_config_param_info_t;
rtt_result_ver(uint16 tlvid, const uint8 *p_data);
static int
-dhd_rtt_convert_results_to_host_v1(rtt_report_t *rtt_report, const uint8 *p_data,
+dhd_rtt_convert_results_to_host_v1(rtt_result_t *rtt_result, const uint8 *p_data,
uint16 tlvid, uint16 len);
static int
-dhd_rtt_convert_results_to_host_v2(rtt_report_t *rtt_report, const uint8 *p_data,
+dhd_rtt_convert_results_to_host_v2(rtt_result_t *rtt_result, const uint8 *p_data,
uint16 tlvid, uint16 len);
static wifi_rate_t
dhd_rtt_convert_rate_to_host(uint32 ratespec);
+#if defined(WL_CFG80211) && defined(RTT_DEBUG)
+const char *
+ftm_cmdid_to_str(uint16 cmdid);
+#endif /* WL_CFG80211 && RTT_DEBUG */
+
#ifdef WL_CFG80211
static int
dhd_rtt_start(dhd_pub_t *dhd);
+static int dhd_rtt_create_failure_result(rtt_status_info_t *rtt_status,
+ struct ether_addr *addr);
+static void dhd_rtt_handle_rtt_session_end(dhd_pub_t *dhd);
+static void dhd_rtt_timeout_work(struct work_struct *work);
#endif /* WL_CFG80211 */
static const int burst_duration_idx[] = {0, 0, 1, 2, 4, 8, 16, 32, 64, 128, 0, 0};
{WL_PROXD_E_OK, RTT_STATUS_SUCCESS}
};
-/* ftm tlv-id mapping */
-static const ftm_strmap_entry_t ftm_tlvid_loginfo[] = {
- /* { WL_PROXD_TLV_ID_xxx, "text for WL_PROXD_TLV_ID_xxx" }, */
- { WL_PROXD_TLV_ID_NONE, "none" },
- { WL_PROXD_TLV_ID_METHOD, "method" },
- { WL_PROXD_TLV_ID_FLAGS, "flags" },
- { WL_PROXD_TLV_ID_CHANSPEC, "chanspec" },
- { WL_PROXD_TLV_ID_TX_POWER, "tx power" },
- { WL_PROXD_TLV_ID_RATESPEC, "ratespec" },
- { WL_PROXD_TLV_ID_BURST_DURATION, "burst duration" },
- { WL_PROXD_TLV_ID_BURST_PERIOD, "burst period" },
- { WL_PROXD_TLV_ID_BURST_FTM_SEP, "burst ftm sep" },
- { WL_PROXD_TLV_ID_BURST_NUM_FTM, "burst num ftm" },
- { WL_PROXD_TLV_ID_NUM_BURST, "num burst" },
- { WL_PROXD_TLV_ID_FTM_RETRIES, "ftm retries" },
- { WL_PROXD_TLV_ID_BSS_INDEX, "BSS index" },
- { WL_PROXD_TLV_ID_BSSID, "bssid" },
- { WL_PROXD_TLV_ID_INIT_DELAY, "burst init delay" },
- { WL_PROXD_TLV_ID_BURST_TIMEOUT, "burst timeout" },
- { WL_PROXD_TLV_ID_EVENT_MASK, "event mask" },
- { WL_PROXD_TLV_ID_FLAGS_MASK, "flags mask" },
- { WL_PROXD_TLV_ID_PEER_MAC, "peer addr" },
- { WL_PROXD_TLV_ID_FTM_REQ, "ftm req" },
- { WL_PROXD_TLV_ID_LCI_REQ, "lci req" },
- { WL_PROXD_TLV_ID_LCI, "lci" },
- { WL_PROXD_TLV_ID_CIVIC_REQ, "civic req" },
- { WL_PROXD_TLV_ID_CIVIC, "civic" },
- { WL_PROXD_TLV_ID_AVAIL, "availability" },
- { WL_PROXD_TLV_ID_SESSION_FLAGS, "session flags" },
- { WL_PROXD_TLV_ID_SESSION_FLAGS_MASK, "session flags mask" },
- { WL_PROXD_TLV_ID_RX_MAX_BURST, "rx max bursts" },
- { WL_PROXD_TLV_ID_RANGING_INFO, "ranging info" },
- { WL_PROXD_TLV_ID_RANGING_FLAGS, "ranging flags" },
- { WL_PROXD_TLV_ID_RANGING_FLAGS_MASK, "ranging flags mask" },
- /* output - 512 + x */
- { WL_PROXD_TLV_ID_STATUS, "status" },
- { WL_PROXD_TLV_ID_COUNTERS, "counters" },
- { WL_PROXD_TLV_ID_INFO, "info" },
- { WL_PROXD_TLV_ID_RTT_RESULT, "rtt result" },
- { WL_PROXD_TLV_ID_AOA_RESULT, "aoa result" },
- { WL_PROXD_TLV_ID_SESSION_INFO, "session info" },
- { WL_PROXD_TLV_ID_SESSION_STATUS, "session status" },
- { WL_PROXD_TLV_ID_SESSION_ID_LIST, "session ids" },
- /* debug tlvs can be added starting 1024 */
- { WL_PROXD_TLV_ID_DEBUG_MASK, "debug mask" },
- { WL_PROXD_TLV_ID_COLLECT, "collect" },
- { WL_PROXD_TLV_ID_STRBUF, "result" },
- { WL_PROXD_TLV_ID_COLLECT_DATA, "collect-data" },
- { WL_PROXD_TLV_ID_RI_RR, "ri_rr" },
- { WL_PROXD_TLV_ID_COLLECT_CHAN_DATA, "chan est"},
- { WL_PROXD_TLV_ID_MF_STATS_DATA, "mf_stats_data"}
-};
-
static const ftm_strmap_entry_t ftm_event_type_loginfo[] = {
/* wl_proxd_event_type_t, text-string */
{ WL_PROXD_EVENT_NONE, "none" },
{ WL_PROXD_SESSION_STATE_NONE, "none" }
};
-/*
-* ranging-state --> text string mapping
-*/
-static const ftm_strmap_entry_t ftm_ranging_state_value_loginfo [] = {
- /* wl_proxd_ranging_state_t, text string */
- { WL_PROXD_RANGING_STATE_NONE, "none" },
- { WL_PROXD_RANGING_STATE_NOTSTARTED, "nonstarted" },
- { WL_PROXD_RANGING_STATE_INPROGRESS, "inprogress" },
- { WL_PROXD_RANGING_STATE_DONE, "done" },
-};
-
/*
* status --> text string mapping
*/
{ WL_PROXD_TMU_PICO_SEC, "ps" }
};
-#define RSPEC_BW(rspec) ((rspec) & WL_RSPEC_BW_MASK)
-#define RSPEC_IS20MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_20MHZ)
-#define RSPEC_IS40MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_40MHZ)
-#define RSPEC_IS80MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_80MHZ)
-#define RSPEC_IS160MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_160MHZ)
-
-#define IS_MCS(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) != WL_RSPEC_ENCODE_RATE)
-#define IS_STBC(rspec) (((((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HT) || \
- (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT)) && \
- (((rspec) & WL_RSPEC_STBC) == WL_RSPEC_STBC))
-#define RSPEC_ISSGI(rspec) (((rspec) & WL_RSPEC_SGI) != 0)
-#define RSPEC_ISLDPC(rspec) (((rspec) & WL_RSPEC_LDPC) != 0)
-#define RSPEC_ISSTBC(rspec) (((rspec) & WL_RSPEC_STBC) != 0)
-#define RSPEC_ISTXBF(rspec) (((rspec) & WL_RSPEC_TXBF) != 0)
-#define RSPEC_ISVHT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT)
-#define RSPEC_ISHT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HT)
-#define RSPEC_ISLEGACY(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_RATE)
-#define RSPEC2RATE(rspec) (RSPEC_ISLEGACY(rspec) ? \
- ((rspec) & RSPEC_RATE_MASK) : rate_rspec2rate(rspec))
-/* return rate in unit of 500Kbps -- for internal use in wlc_rate_sel.c */
-#define RSPEC2KBPS(rspec) rate_rspec2rate(rspec)
-
struct ieee_80211_mcs_rate_info {
uint8 constellation_bits;
uint8 coding_q;
return "invalid";
}
-#ifdef RTT_DEBUG
-
+#if defined(WL_CFG80211) && defined(RTT_DEBUG)
/* define entry, e.g. { WL_PROXD_CMD_xxx, "WL_PROXD_CMD_xxx" } */
#define DEF_STRMAP_ENTRY(id) { (id), #id }
{
return ftm_map_id_to_str((int32) cmdid, &ftm_cmdid_map[0], ARRAYSIZE(ftm_cmdid_map));
}
-#endif /* RTT_DEBUG */
+#endif /* WL_CFG80211 && RTT_DEBUG */
/*
* convert BCME_xxx error codes into related error strings
status = dhd_getiovar(dhd, 0, "proxd", (char *)p_proxd_iov,
proxd_iovsize, (char **)&p_iovresp, WLC_IOCTL_SMLEN);
if (status != BCME_OK) {
- DHD_ERROR(("%s: failed to send getbuf proxd iovar (CMD ID : %d), status=%d\n",
+ DHD_RTT_ERR(("%s: failed to send getbuf proxd iovar (CMD ID : %d), status=%d\n",
__FUNCTION__, p_subcmd_info->cmdid, status));
return status;
}
tlvs_len = ltoh16(p_iovresp->len) - WL_PROXD_IOV_HDR_SIZE;
if (tlvs_len < 0) {
- DHD_ERROR(("%s: alert, p_iovresp->len(%d) should not be smaller than %d\n",
+ DHD_RTT_ERR(("%s: alert, p_iovresp->len(%d) should not be smaller than %d\n",
__FUNCTION__, ltoh16(p_iovresp->len), (int) WL_PROXD_IOV_HDR_SIZE));
tlvs_len = 0;
}
wl_proxd_cmd_t cmdid, uint16 tlvs_bufsize, uint16 *p_out_bufsize)
{
uint16 proxd_iovsize;
- uint16 kflags;
+ uint32 kflags;
wl_proxd_tlv_t *p_tlv;
wl_proxd_iov_t *p_proxd_iov = (wl_proxd_iov_t *) NULL;
p_proxd_iov = kzalloc(proxd_iovsize, kflags);
if (p_proxd_iov == NULL) {
- DHD_ERROR(("error: failed to allocate %d bytes of memory\n", proxd_iovsize));
+ DHD_RTT_ERR(("error: failed to allocate %d bytes of memory\n", proxd_iovsize));
return NULL;
}
}
#endif /* WL_CFG80211 */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
-
/* gets the length and returns the version
* of the wl_proxd_collect_event_t version
*/
}
break;
default:
- DHD_ERROR(("%s: > Unsupported TLV ID %d\n",
+ DHD_RTT_ERR(("%s: > Unsupported TLV ID %d\n",
__FUNCTION__, tlvid));
break;
}
expected_rtt_result_ver = rtt_result_ver(tlvid, p_data);
switch (expected_rtt_result_ver) {
case WL_PROXD_RTT_RESULT_VERSION_1:
- ret = dhd_rtt_convert_results_to_host_v1((rtt_report_t *)ctx,
+ ret = dhd_rtt_convert_results_to_host_v1((rtt_result_t *)ctx,
p_data, tlvid, len);
break;
case WL_PROXD_RTT_RESULT_VERSION_2:
- ret = dhd_rtt_convert_results_to_host_v2((rtt_report_t *)ctx,
+ ret = dhd_rtt_convert_results_to_host_v2((rtt_result_t *)ctx,
p_data, tlvid, len);
break;
default:
- DHD_ERROR((" > Unsupported RTT_RESULT version\n"));
+ DHD_RTT_ERR((" > Unsupported RTT_RESULT version\n"));
ret = BCME_UNSUPPORTED;
break;
}
ctx, p_data, len);
break;
case WL_PROXD_TLV_ID_COLLECT_CHAN_DATA:
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
DHD_RTT(("WL_PROXD_TLV_ID_COLLECT_CHAN_DATA\n"));
DHD_RTT(("\tchan est %u\n", (uint32) (len / sizeof(uint32))));
- for (i = 0; i < (len/sizeof(chan_data_entry)); i++) {
+ for (i = 0; (uint16)i < (len/sizeof(chan_data_entry)); i++) {
uint32 *p = (uint32*)p_data;
chan_data_entry = ltoh32_ua(p + i);
DHD_RTT(("\t%u\n", chan_data_entry));
}
+ GCC_DIAGNOSTIC_POP();
break;
case WL_PROXD_TLV_ID_MF_STATS_DATA:
DHD_RTT(("WL_PROXD_TLV_ID_MF_STATS_DATA\n"));
rtt_prhex("", p_data, len);
break;
default:
- DHD_ERROR(("> Unsupported TLV ID %d\n", tlvid));
+ DHD_RTT_ERR(("> Unsupported TLV ID %d\n", tlvid));
ret = BCME_ERROR;
break;
}
ret = bcm_pack_xtlv_entry((uint8 **)p_tlv, p_buf_space_left,
type, sizeof(uint32), (uint8 *)&flags_mask, BCM_XTLV_OPTION_ALIGN32);
if (ret != BCME_OK) {
- DHD_ERROR(("%s : bcm_pack_xltv_entry() for mask flags failed, status=%d\n",
+ DHD_RTT_ERR(("%s : bcm_pack_xltv_entry() for mask flags failed, status=%d\n",
__FUNCTION__, ret));
goto exit;
}
break;
}
if (ret != BCME_OK) {
- DHD_ERROR(("%s bad TLV ID : %d\n",
+ DHD_RTT_ERR(("%s bad TLV ID : %d\n",
__FUNCTION__, p_config_param_info->tlvid));
break;
}
p_config_param_info->tlvid, src_data_size, (uint8 *)p_src_data,
BCM_XTLV_OPTION_ALIGN32);
if (ret != BCME_OK) {
- DHD_ERROR(("%s: bcm_pack_xltv_entry() failed,"
+ DHD_RTT_ERR(("%s: bcm_pack_xltv_entry() failed,"
" status=%d\n", __FUNCTION__, ret));
break;
}
return dhd_rtt_common_set_handler(dhd, &subcmd_info,
WL_PROXD_METHOD_FTM, session_id);
}
+#ifdef WL_NAN
+int
+dhd_rtt_delete_nan_session(dhd_pub_t *dhd)
+{
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+ struct wireless_dev *wdev = ndev_to_wdev(dev);
+ struct wiphy *wiphy = wdev->wiphy;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ wl_cfgnan_terminate_directed_rtt_sessions(dev, cfg);
+ return BCME_OK;
+}
+#endif /* WL_NAN */
+/* API to find out if the given Peer Mac from FTM events
+* is nan-peer. Based on this we will handle the SESSION_END
+* event. For nan-peer FTM_SESSION_END event is ignored and handled in
+* nan-ranging-cancel or nan-ranging-end event.
+*/
+static bool
+dhd_rtt_is_nan_peer(dhd_pub_t *dhd, struct ether_addr *peer_mac)
+{
+#ifdef WL_NAN
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+ struct wireless_dev *wdev = ndev_to_wdev(dev);
+ struct wiphy *wiphy = wdev->wiphy;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ nan_ranging_inst_t *ranging_inst = NULL;
+ bool ret = FALSE;
+
+ if (cfg->nan_enable == FALSE || ETHER_ISNULLADDR(peer_mac)) {
+ goto exit;
+ }
+
+ ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer_mac);
+ if (ranging_inst) {
+ DHD_RTT((" RTT peer is of type NAN\n"));
+ ret = TRUE;
+ goto exit;
+ }
+exit:
+ return ret;
+#else
+ return FALSE;
+#endif /* WL_NAN */
+}
#ifdef WL_NAN
static int
struct wireless_dev *wdev = ndev_to_wdev(dev);
struct wiphy *wiphy = wdev->wiphy;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ wl_nan_ev_rng_rpt_ind_t range_res;
+ nan_ranging_inst_t *ranging_inst = NULL;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ NAN_MUTEX_LOCK();
+
+ bzero(&range_res, sizeof(range_res));
+
+ if (!rtt_status) {
+ err = BCME_NOTENABLED;
+ goto done;
+ }
+
+ if (!cfg->nan_enable) { /* If nan is not enabled report error */
+ err = BCME_NOTENABLED;
+ goto done;
+ }
+
+ /* check if new ranging session allowed */
+ if (!wl_cfgnan_ranging_allowed(cfg)) {
+ /* responder should be in progress because initiator requests are
+		 * queued in DHD. Since initiator has more preference, cancel responder
+ * sessions
+ */
+ wl_cfgnan_cancel_rng_responders(dev, cfg);
+ }
- nan_ranging_inst_t *ranging_inst;
ranging_inst = wl_cfgnan_get_ranging_inst(cfg,
- &rtt_target->addr, 0, TRUE);
- if (ranging_inst->range_status != NAN_RANGING_IN_PROGRESS) {
- WL_DBG(("Trigger nan based range request\n"));
- err = wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg),
- cfg, ranging_inst, NULL, NAN_RANGE_REQ_CMD);
- if (unlikely(err)) {
- WL_ERR(("Failed to trigger ranging, ret = (%d)\n", err));
- /* Send an event reporting the failure */
- err = wl_cfgvendor_send_as_rtt_legacy_event(cfg->wdev->wiphy,
- bcmcfg_to_prmry_ndev(cfg), NULL, RTT_STATUS_FAILURE);
- memset(ranging_inst, 0, sizeof(*ranging_inst));
- }
- cfg->nancfg.range_type = LEGACY_NAN_RTT;
+ &rtt_target->addr, NAN_RANGING_ROLE_INITIATOR);
+ if (!ranging_inst) {
+ err = BCME_NORESOURCE;
+ goto done;
+ }
+
+ DHD_RTT(("Trigger nan based range request\n"));
+ err = wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg),
+ cfg, ranging_inst, NULL, NAN_RANGE_REQ_CMD, TRUE);
+ if (unlikely(err)) {
+ goto done;
+ }
+ ranging_inst->range_type = RTT_TYPE_NAN_DIRECTED;
+ ranging_inst->range_role = NAN_RANGING_ROLE_INITIATOR;
+ /* schedule proxd timeout */
+ schedule_delayed_work(&rtt_status->proxd_timeout,
+ msecs_to_jiffies(DHD_NAN_RTT_TIMER_INTERVAL_MS));
+done:
+ if (err) { /* notify failure RTT event to host */
+ DHD_RTT_ERR(("Failed to issue Nan Ranging Request err %d\n", err));
+ dhd_rtt_handle_nan_rtt_session_end(dhd, &rtt_target->addr);
+ /* try to reset geofence */
+ if (ranging_inst) {
+ wl_cfgnan_reset_geofence_ranging(cfg, ranging_inst,
+ RTT_SCHED_DIR_TRIGGER_FAIL);
+ }
}
+ NAN_MUTEX_UNLOCK();
return err;
}
#endif /* WL_NAN */
FTM_IOC_BUFSZ, &proxd_iovsize);
if (p_proxd_iov == NULL) {
- DHD_ERROR(("%s : failed to allocate the iovar (size :%d)\n",
+ DHD_RTT_ERR(("%s : failed to allocate the iovar (size :%d)\n",
__FUNCTION__, FTM_IOC_BUFSZ));
return BCME_NOMEM;
}
ret = dhd_iovar(dhd, 0, "proxd", (char *)p_proxd_iov,
all_tlvsize + WL_PROXD_IOV_HDR_SIZE, NULL, 0, TRUE);
if (ret != BCME_OK) {
- DHD_ERROR(("%s : failed to set config\n", __FUNCTION__));
+ DHD_RTT_ERR(("%s : failed to set config\n", __FUNCTION__));
}
}
/* clean up */
chanspec = wf_chspec_80(center_chan, primary_chan);
break;
default:
- DHD_ERROR(("doesn't support this bandwith : %d", channel.width));
+ DHD_RTT_ERR(("doesn't support this bandwith : %d", channel.width));
bw = -1;
break;
}
{
int err = BCME_OK;
int idx;
- rtt_status_info_t *rtt_status;
- NULL_CHECK(params, "params is NULL", err);
+ rtt_status_info_t *rtt_status = NULL;
+ struct net_device *dev = NULL;
+ NULL_CHECK(params, "params is NULL", err);
NULL_CHECK(dhd, "dhd is NULL", err);
+
+ dev = dhd_linux_get_primary_netdev(dhd);
rtt_status = GET_RTTSTATE(dhd);
NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+ NULL_CHECK(dev, "dev is NULL", err);
+
+ mutex_lock(&rtt_status->rtt_work_mutex);
if (!HAS_11MC_CAP(rtt_status->rtt_capa.proto)) {
- DHD_ERROR(("doesn't support RTT \n"));
- return BCME_ERROR;
- }
- if (rtt_status->status != RTT_STOPPED) {
- DHD_ERROR(("rtt is already started\n"));
- return BCME_BUSY;
+ DHD_RTT_ERR(("doesn't support RTT \n"));
+ err = BCME_ERROR;
+ goto exit;
}
+
DHD_RTT(("%s enter\n", __FUNCTION__));
+ if (params->rtt_target_cnt > 0) {
+#ifdef WL_NAN
+ /* cancel ongoing geofence RTT if there */
+ if ((err = wl_cfgnan_suspend_geofence_rng_session(dev,
+ NULL, RTT_GEO_SUSPN_HOST_DIR_RTT_TRIG, 0)) != BCME_OK) {
+ goto exit;
+ }
+#endif /* WL_NAN */
+ } else {
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ mutex_lock(&rtt_status->rtt_mutex);
+ if (rtt_status->status != RTT_STOPPED) {
+ DHD_RTT_ERR(("rtt is already started\n"));
+ err = BCME_BUSY;
+ goto exit;
+ }
memset(rtt_status->rtt_config.target_info, 0, TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
rtt_status->rtt_config.rtt_target_cnt = params->rtt_target_cnt;
memcpy(rtt_status->rtt_config.target_info,
params->target_info, TARGET_INFO_SIZE(params->rtt_target_cnt));
rtt_status->status = RTT_STARTED;
+ DHD_RTT_MEM(("dhd_rtt_set_cfg: RTT Started, target_cnt = %d\n", params->rtt_target_cnt));
/* start to measure RTT from first device */
/* find next target to trigger RTT */
for (idx = rtt_status->cur_idx; idx < rtt_status->rtt_config.rtt_target_cnt; idx++) {
}
if (idx < rtt_status->rtt_config.rtt_target_cnt) {
DHD_RTT(("rtt_status->cur_idx : %d\n", rtt_status->cur_idx));
+ rtt_status->rtt_sched_reason = RTT_SCHED_HOST_TRIGGER;
+ /* Cancel pending retry timer if any */
+ if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
+ cancel_delayed_work(&rtt_status->rtt_retry_timer);
+ }
schedule_work(&rtt_status->work);
}
+exit:
+ mutex_unlock(&rtt_status->rtt_mutex);
+ mutex_unlock(&rtt_status->rtt_work_mutex);
return err;
}
-int
-dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt)
+#ifdef WL_NAN
+void
+dhd_rtt_initialize_geofence_cfg(dhd_pub_t *dhd)
{
- int err = BCME_OK;
-#ifdef WL_CFG8011
- int i = 0, j = 0;
- rtt_status_info_t *rtt_status;
- rtt_results_header_t *entry, *next;
- rtt_result_t *rtt_result, *next2;
- struct rtt_noti_callback *iter;
-
- NULL_CHECK(dhd, "dhd is NULL", err);
- rtt_status = GET_RTTSTATE(dhd);
- NULL_CHECK(rtt_status, "rtt_status is NULL", err);
- if (rtt_status->status == RTT_STOPPED) {
- DHD_ERROR(("rtt is not started\n"));
- return BCME_OK;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ if (!rtt_status) {
+ return;
}
- DHD_RTT(("%s enter\n", __FUNCTION__));
- mutex_lock(&rtt_status->rtt_mutex);
- for (i = 0; i < mac_cnt; i++) {
- for (j = 0; j < rtt_status->rtt_config.rtt_target_cnt; j++) {
- if (!bcmp(&mac_list[i], &rtt_status->rtt_config.target_info[j].addr,
- ETHER_ADDR_LEN)) {
- rtt_status->rtt_config.target_info[j].disable = TRUE;
- }
- }
+
+ GEOFENCE_RTT_LOCK(rtt_status);
+ memset_s(&rtt_status->geofence_cfg, sizeof(rtt_status->geofence_cfg),
+ 0, sizeof(rtt_status->geofence_cfg));
+
+ /* initialize non zero params of geofence cfg */
+ rtt_status->geofence_cfg.cur_target_idx = DHD_RTT_INVALID_TARGET_INDEX;
+ rtt_status->geofence_cfg.geofence_rtt_interval = DHD_RTT_RETRY_TIMER_INTERVAL_MS;
+ GEOFENCE_RTT_UNLOCK(rtt_status);
+ return;
+}
+
+#ifdef RTT_GEOFENCE_CONT
+void
+dhd_rtt_get_geofence_cont_ind(dhd_pub_t *dhd, bool* geofence_cont)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ if (!rtt_status) {
+ return;
}
- if (rtt_status->all_cancel) {
- /* cancel all of request */
- rtt_status->status = RTT_STOPPED;
- DHD_RTT(("current RTT process is cancelled\n"));
- /* remove the rtt results in cache */
- if (!list_empty(&rtt_status->rtt_results_cache)) {
- /* Iterate rtt_results_header list */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- list_for_each_entry_safe(entry, next,
- &rtt_status->rtt_results_cache, list) {
- list_del(&entry->list);
- /* Iterate rtt_result list */
- list_for_each_entry_safe(rtt_result, next2,
- &entry->result_list, list) {
- list_del(&rtt_result->list);
- kfree(rtt_result);
- }
- kfree(entry);
- }
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- }
- /* send the rtt complete event to wake up the user process */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
- iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache);
- }
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- /* reinitialize the HEAD */
- INIT_LIST_HEAD(&rtt_status->rtt_results_cache);
- /* clear information for rtt_config */
- rtt_status->rtt_config.rtt_target_cnt = 0;
- memset(rtt_status->rtt_config.target_info, 0,
- TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
- rtt_status->cur_idx = 0;
- dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION);
- dhd_rtt_ftm_enable(dhd, FALSE);
+ GEOFENCE_RTT_LOCK(rtt_status);
+ *geofence_cont = rtt_status->geofence_cfg.geofence_cont;
+ GEOFENCE_RTT_UNLOCK(rtt_status);
+}
+
+void
+dhd_rtt_set_geofence_cont_ind(dhd_pub_t *dhd, bool geofence_cont)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ if (!rtt_status) {
+ return;
}
- mutex_unlock(&rtt_status->rtt_mutex);
-#endif /* WL_CFG80211 */
- return err;
+ GEOFENCE_RTT_LOCK(rtt_status);
+ rtt_status->geofence_cfg.geofence_cont = geofence_cont;
+ DHD_RTT(("dhd_rtt_set_geofence_cont_override, geofence_cont = %d\n",
+ rtt_status->geofence_cfg.geofence_cont));
+ GEOFENCE_RTT_UNLOCK(rtt_status);
}
+#endif /* RTT_GEOFENCE_CONT */
-#ifdef WL_CFG80211
-static int
-dhd_rtt_start(dhd_pub_t *dhd)
+#ifdef RTT_GEOFENCE_INTERVAL
+void
+dhd_rtt_set_geofence_rtt_interval(dhd_pub_t *dhd, int interval)
{
- int err = BCME_OK;
- char eabuf[ETHER_ADDR_STR_LEN];
- char chanbuf[CHANSPEC_STR_LEN];
- int pm = PM_OFF;
- int ftm_cfg_cnt = 0;
- int ftm_param_cnt = 0;
- uint32 rspec = 0;
- ftm_config_options_info_t ftm_configs[FTM_MAX_CONFIGS];
- ftm_config_param_info_t ftm_params[FTM_MAX_PARAMS];
- rtt_target_info_t *rtt_target;
- rtt_status_info_t *rtt_status;
- struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
- u8 ioctl_buf[WLC_IOCTL_SMLEN];
- NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ if (!rtt_status) {
+ return;
+ }
+ GEOFENCE_RTT_LOCK(rtt_status);
+ rtt_status->geofence_cfg.geofence_rtt_interval = interval;
+ DHD_RTT(("dhd_rtt_set_geofence_rtt_interval: geofence interval = %d\n",
+ rtt_status->geofence_cfg.geofence_rtt_interval));
+ GEOFENCE_RTT_UNLOCK(rtt_status);
+}
+#endif /* RTT_GEOFENCE_INTERVAL */
- rtt_status = GET_RTTSTATE(dhd);
- NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+/* sets geofence role concurrency state TRUE/FALSE */
+void
+dhd_rtt_set_role_concurrency_state(dhd_pub_t *dhd, bool state)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ if (!rtt_status) {
+ return;
+ }
+ GEOFENCE_RTT_LOCK(rtt_status);
+ rtt_status->geofence_cfg.role_concurr_state = state;
+ GEOFENCE_RTT_UNLOCK(rtt_status);
+}
- DHD_RTT(("Enter %s\n", __FUNCTION__));
- if (rtt_status->cur_idx >= rtt_status->rtt_config.rtt_target_cnt) {
- err = BCME_RANGE;
- DHD_RTT(("%s : idx %d is out of range\n", __FUNCTION__, rtt_status->cur_idx));
- if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) {
- DHD_ERROR(("STA is set as Target/Responder \n"));
- return BCME_ERROR;
- }
- goto exit;
+/* returns TRUE if geofence role concurrency constraint exists */
+bool
+dhd_rtt_get_role_concurrency_state(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ if (!rtt_status) {
+ return FALSE;
}
- if (RTT_IS_STOPPED(rtt_status)) {
- DHD_RTT(("RTT is stopped\n"));
- goto exit;
+ return rtt_status->geofence_cfg.role_concurr_state;
+}
+
+int8
+dhd_rtt_get_geofence_target_cnt(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ if (!rtt_status) {
+ return 0;
}
- rtt_status->pm = PM_OFF;
- err = wldev_ioctl_get(dev, WLC_GET_PM, &rtt_status->pm, sizeof(rtt_status->pm));
- if (err) {
- DHD_ERROR(("Failed to get the PM value\n"));
- } else {
- err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
- if (err) {
- DHD_ERROR(("Failed to set the PM\n"));
- rtt_status->pm_restore = FALSE;
- } else {
- rtt_status->pm_restore = TRUE;
- }
+ return rtt_status->geofence_cfg.geofence_target_cnt;
+}
+
+/* sets geofence rtt state TRUE/FALSE */
+void
+dhd_rtt_set_geofence_rtt_state(dhd_pub_t *dhd, bool state)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ if (!rtt_status) {
+ return;
}
+ GEOFENCE_RTT_LOCK(rtt_status);
+ rtt_status->geofence_cfg.rtt_in_progress = state;
+ GEOFENCE_RTT_UNLOCK(rtt_status);
+}
- mutex_lock(&rtt_status->rtt_mutex);
- /* Get a target information */
- rtt_target = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
- mutex_unlock(&rtt_status->rtt_mutex);
- DHD_RTT(("%s enter\n", __FUNCTION__));
+/* returns TRUE if geofence rtt is in progress */
+bool
+dhd_rtt_get_geofence_rtt_state(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
-#ifdef WL_NAN
- if (rtt_target->peer == RTT_PEER_NAN &&
- (!ETHER_ISNULLADDR(rtt_target->addr.octet))) {
- err = dhd_rtt_nan_start_session(dhd, rtt_target);
- /* Irrespectivily go to exit */
- goto exit;
+ if (!rtt_status) {
+ return FALSE;
}
-#endif /* WL_NAN */
- if (!RTT_IS_ENABLED(rtt_status)) {
- /* enable ftm */
- err = dhd_rtt_ftm_enable(dhd, TRUE);
- if (err) {
- DHD_ERROR(("failed to enable FTM (%d)\n", err));
- goto exit;
- }
+ return rtt_status->geofence_cfg.rtt_in_progress;
+}
+
+/* returns geofence RTT target list Head */
+rtt_geofence_target_info_t*
+dhd_rtt_get_geofence_target_head(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ rtt_geofence_target_info_t* head = NULL;
+
+ if (!rtt_status) {
+ return NULL;
}
- /* delete session of index default sesession */
- err = dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION);
- if (err < 0 && err != BCME_NOTFOUND) {
- DHD_ERROR(("failed to delete session of FTM (%d)\n", err));
- goto exit;
+ if (rtt_status->geofence_cfg.geofence_target_cnt) {
+ head = &rtt_status->geofence_cfg.geofence_target_info[0];
}
- rtt_status->status = RTT_ENABLED;
- memset(ftm_configs, 0, sizeof(ftm_configs));
- memset(ftm_params, 0, sizeof(ftm_params));
- /* configure the session 1 as initiator */
- ftm_configs[ftm_cfg_cnt].enable = TRUE;
- ftm_configs[ftm_cfg_cnt++].flags = WL_PROXD_SESSION_FLAG_INITIATOR;
- dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_OPTIONS,
- ftm_configs, ftm_cfg_cnt);
+ return head;
+}
- memset(ioctl_buf, 0, WLC_IOCTL_SMLEN);
- err = wldev_iovar_getbuf(dev, "cur_etheraddr", NULL, 0,
- ioctl_buf, WLC_IOCTL_SMLEN, NULL);
- if (err) {
- WL_ERR(("WLC_GET_CUR_ETHERADDR failed, error %d\n", err));
+int8
+dhd_rtt_get_geofence_cur_target_idx(dhd_pub_t *dhd)
+{
+ int8 target_cnt = 0, cur_idx = DHD_RTT_INVALID_TARGET_INDEX;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ if (!rtt_status) {
goto exit;
}
- memcpy(rtt_target->local_addr.octet, ioctl_buf, ETHER_ADDR_LEN);
- /* local mac address */
- if (!ETHER_ISNULLADDR(rtt_target->local_addr.octet)) {
- ftm_params[ftm_param_cnt].mac_addr = rtt_target->local_addr;
- ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_CUR_ETHER_ADDR;
- bcm_ether_ntoa(&rtt_target->local_addr, eabuf);
- DHD_RTT((">\t local %s\n", eabuf));
+ target_cnt = rtt_status->geofence_cfg.geofence_target_cnt;
+ if (target_cnt == 0) {
+ goto exit;
+ }
+
+ cur_idx = rtt_status->geofence_cfg.cur_target_idx;
+ ASSERT(cur_idx <= target_cnt);
+
+exit:
+ return cur_idx;
+}
+
+void
+dhd_rtt_move_geofence_cur_target_idx_to_next(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ if (!rtt_status) {
+ return;
+ }
+
+ if (rtt_status->geofence_cfg.geofence_target_cnt == 0) {
+ /* Invalidate current idx if no targets */
+ rtt_status->geofence_cfg.cur_target_idx =
+ DHD_RTT_INVALID_TARGET_INDEX;
+ /* Cancel pending retry timer if any */
+ if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
+ cancel_delayed_work(&rtt_status->rtt_retry_timer);
+ }
+ return;
+ }
+ rtt_status->geofence_cfg.cur_target_idx++;
+
+ if (rtt_status->geofence_cfg.cur_target_idx >=
+ rtt_status->geofence_cfg.geofence_target_cnt) {
+ /* Reset once all targets done */
+ rtt_status->geofence_cfg.cur_target_idx = 0;
+ }
+}
+
+/* returns geofence current RTT target */
+rtt_geofence_target_info_t*
+dhd_rtt_get_geofence_current_target(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ rtt_geofence_target_info_t* cur_target = NULL;
+ int cur_idx = 0;
+
+ if (!rtt_status) {
+ return NULL;
+ }
+
+ cur_idx = dhd_rtt_get_geofence_cur_target_idx(dhd);
+ if (cur_idx >= 0) {
+ cur_target = &rtt_status->geofence_cfg.geofence_target_info[cur_idx];
+ }
+
+ return cur_target;
+}
+
+/* returns geofence target from list for the peer */
+rtt_geofence_target_info_t*
+dhd_rtt_get_geofence_target(dhd_pub_t *dhd, struct ether_addr* peer_addr, int8 *index)
+{
+ int8 i;
+ rtt_status_info_t *rtt_status;
+ int target_cnt;
+ rtt_geofence_target_info_t *geofence_target_info, *tgt = NULL;
+
+ rtt_status = GET_RTTSTATE(dhd);
+
+ if (!rtt_status) {
+ return NULL;
+ }
+
+ target_cnt = rtt_status->geofence_cfg.geofence_target_cnt;
+ geofence_target_info = rtt_status->geofence_cfg.geofence_target_info;
+
+ /* Loop through to find target */
+ for (i = 0; i < target_cnt; i++) {
+ if (geofence_target_info[i].valid == FALSE) {
+ break;
+ }
+ if (!memcmp(peer_addr, &geofence_target_info[i].peer_addr,
+ ETHER_ADDR_LEN)) {
+ *index = i;
+ tgt = &geofence_target_info[i];
+ }
+ }
+ if (!tgt) {
+ DHD_RTT(("dhd_rtt_get_geofence_target: Target not found in list,"
+ " MAC ADDR: "MACDBG" \n", MAC2STRDBG(peer_addr)));
+ }
+ return tgt;
+}
+
+/* add geofence target to the target list */
+int
+dhd_rtt_add_geofence_target(dhd_pub_t *dhd, rtt_geofence_target_info_t *target)
+{
+ int err = BCME_OK;
+ rtt_status_info_t *rtt_status;
+ rtt_geofence_target_info_t *geofence_target_info;
+ int8 geofence_target_cnt, index;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+
+ GEOFENCE_RTT_LOCK(rtt_status);
+
+	/* Get the geofence_target via peer addr; the index param is a dummy here */
+ geofence_target_info = dhd_rtt_get_geofence_target(dhd, &target->peer_addr, &index);
+ if (geofence_target_info) {
+ DHD_RTT(("Duplicate geofencing RTT add request dropped\n"));
+ err = BCME_OK;
+ goto exit;
+ }
+
+ geofence_target_cnt = rtt_status->geofence_cfg.geofence_target_cnt;
+ if (geofence_target_cnt >= RTT_MAX_GEOFENCE_TARGET_CNT) {
+ DHD_RTT(("Queue full, Geofencing RTT add request dropped\n"));
+ err = BCME_NORESOURCE;
+ goto exit;
+ }
+
+ /* Add Geofence RTT request and increment target count */
+ geofence_target_info = rtt_status->geofence_cfg.geofence_target_info;
+ /* src and dest buffer len same, pointers of same DS statically allocated */
+ (void)memcpy_s(&geofence_target_info[geofence_target_cnt],
+ sizeof(geofence_target_info[geofence_target_cnt]), target,
+ sizeof(*target));
+ geofence_target_info[geofence_target_cnt].valid = TRUE;
+ rtt_status->geofence_cfg.geofence_target_cnt++;
+ if (rtt_status->geofence_cfg.geofence_target_cnt == 1) {
+ /* Adding first target */
+ rtt_status->geofence_cfg.cur_target_idx = 0;
+ }
+
+exit:
+ GEOFENCE_RTT_UNLOCK(rtt_status);
+ return err;
+}
+
+/* removes geofence target from the target list */
+int
+dhd_rtt_remove_geofence_target(dhd_pub_t *dhd, struct ether_addr *peer_addr)
+{
+ int err = BCME_OK;
+ rtt_status_info_t *rtt_status;
+ rtt_geofence_target_info_t *geofence_target_info;
+ int8 geofence_target_cnt, j, index = 0;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+
+ GEOFENCE_RTT_LOCK(rtt_status);
+
+ geofence_target_cnt = dhd_rtt_get_geofence_target_cnt(dhd);
+ if (geofence_target_cnt == 0) {
+ DHD_RTT(("Queue Empty, Geofencing RTT remove request dropped\n"));
+ ASSERT(0);
+ goto exit;
+ }
+
+ /* Get the geofence_target via peer addr */
+ geofence_target_info = dhd_rtt_get_geofence_target(dhd, peer_addr, &index);
+ if (geofence_target_info == NULL) {
+ DHD_RTT(("Geofencing RTT target not found, remove request dropped\n"));
+ err = BCME_NOTFOUND;
+ goto exit;
+ }
+
+	/* left-shift all the valid entries, as we don't keep holes in the list */
+ for (j = index; (j+1) < geofence_target_cnt; j++) {
+ if (geofence_target_info[j].valid == TRUE) {
+ /*
+ * src and dest buffer len same, pointers of same DS
+ * statically allocated
+ */
+ (void)memcpy_s(&geofence_target_info[j], sizeof(geofence_target_info[j]),
+ &geofence_target_info[j + 1],
+ sizeof(geofence_target_info[j + 1]));
+ } else {
+ break;
+ }
+ }
+ rtt_status->geofence_cfg.geofence_target_cnt--;
+ if ((rtt_status->geofence_cfg.geofence_target_cnt == 0) ||
+ (index == rtt_status->geofence_cfg.cur_target_idx)) {
+ /* Move cur_idx to next target */
+ dhd_rtt_move_geofence_cur_target_idx_to_next(dhd);
+ } else if (index < rtt_status->geofence_cfg.cur_target_idx) {
+ /* Decrement cur index if cur target position changed */
+ rtt_status->geofence_cfg.cur_target_idx--;
+ }
+
+exit:
+ GEOFENCE_RTT_UNLOCK(rtt_status);
+ return err;
+}
+
+/* deletes/empty geofence target list */
+int
+dhd_rtt_delete_geofence_target_list(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status;
+
+ int err = BCME_OK;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+ GEOFENCE_RTT_LOCK(rtt_status);
+ memset_s(&rtt_status->geofence_cfg, sizeof(rtt_geofence_cfg_t),
+ 0, sizeof(rtt_geofence_cfg_t));
+ GEOFENCE_RTT_UNLOCK(rtt_status);
+ return err;
+}
+
+int
+dhd_rtt_sched_geofencing_target(dhd_pub_t *dhd)
+{
+ rtt_geofence_target_info_t *geofence_target_info;
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+ int ret = BCME_OK;
+ bool geofence_state;
+ bool role_concurrency_state;
+ u8 rtt_invalid_reason = RTT_STATE_VALID;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ NAN_MUTEX_LOCK();
+
+ if ((cfg->nan_init_state == FALSE) ||
+ (cfg->nan_enable == FALSE)) {
+ ret = BCME_NOTENABLED;
+ goto done;
+ }
+ geofence_state = dhd_rtt_get_geofence_rtt_state(dhd);
+ role_concurrency_state = dhd_rtt_get_role_concurrency_state(dhd);
+
+ DHD_RTT_ERR(("dhd_rtt_sched_geofencing_target: sched_reason = %d\n",
+ rtt_status->rtt_sched_reason));
+
+ if (geofence_state == TRUE || role_concurrency_state == TRUE) {
+ ret = BCME_ERROR;
+ DHD_RTT_ERR(("geofencing constraint , sched request dropped,"
+ " geofence_state = %d, role_concurrency_state = %d\n",
+ geofence_state, role_concurrency_state));
+ goto done;
+ }
+
+ /* Get current geofencing target */
+ geofence_target_info = dhd_rtt_get_geofence_current_target(dhd);
+
+	/* call cfg API for triggering geofencing RTT */
+ if (geofence_target_info) {
+ /* check for dp/others concurrency */
+ rtt_invalid_reason = dhd_rtt_invalid_states(dev,
+ &geofence_target_info->peer_addr);
+ if (rtt_invalid_reason != RTT_STATE_VALID) {
+ ret = BCME_BUSY;
+ DHD_RTT_ERR(("DRV State is not valid for RTT, "
+ "invalid_state = %d\n", rtt_invalid_reason));
+ goto done;
+ }
+
+ ret = wl_cfgnan_trigger_geofencing_ranging(dev,
+ &geofence_target_info->peer_addr);
+ if (ret == BCME_OK) {
+ dhd_rtt_set_geofence_rtt_state(dhd, TRUE);
+ }
+ } else {
+ DHD_RTT(("No RTT target to schedule\n"));
+ ret = BCME_NOTFOUND;
+ }
+
+done:
+ NAN_MUTEX_UNLOCK();
+ return ret;
+}
+#endif /* WL_NAN */
+
+#ifdef WL_CFG80211
+#ifdef WL_NAN
+static void
+dhd_rtt_retry(dhd_pub_t *dhd)
+{
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ rtt_geofence_target_info_t *geofence_target = NULL;
+ nan_ranging_inst_t *ranging_inst = NULL;
+
+ geofence_target = dhd_rtt_get_geofence_current_target(dhd);
+ if (!geofence_target) {
+ DHD_RTT(("dhd_rtt_retry: geofence target null\n"));
+ goto exit;
+ }
+ ranging_inst = wl_cfgnan_get_ranging_inst(cfg,
+ &geofence_target->peer_addr, NAN_RANGING_ROLE_INITIATOR);
+ if (!ranging_inst) {
+ DHD_RTT(("dhd_rtt_retry: ranging instance null\n"));
+ goto exit;
+ }
+ wl_cfgnan_reset_geofence_ranging(cfg,
+ ranging_inst, RTT_SCHED_RTT_RETRY_GEOFENCE);
+
+exit:
+ return;
+}
+
+static void
+dhd_rtt_retry_work(struct work_struct *work)
+{
+ rtt_status_info_t *rtt_status = NULL;
+ dhd_pub_t *dhd = NULL;
+ struct net_device *dev = NULL;
+ struct bcm_cfg80211 *cfg = NULL;
+
+ if (!work) {
+ goto exit;
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ rtt_status = container_of(work, rtt_status_info_t, rtt_retry_timer.work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+
+ dhd = rtt_status->dhd;
+ if (dhd == NULL) {
+ DHD_RTT_ERR(("%s : dhd is NULL\n", __FUNCTION__));
+ goto exit;
+ }
+ dev = dhd_linux_get_primary_netdev(dhd);
+ cfg = wl_get_cfg(dev);
+
+ NAN_MUTEX_LOCK();
+ mutex_lock(&rtt_status->rtt_mutex);
+ (void) dhd_rtt_retry(dhd);
+ mutex_unlock(&rtt_status->rtt_mutex);
+ NAN_MUTEX_UNLOCK();
+
+exit:
+ return;
+}
+#endif /* WL_NAN */
+
+/*
+ * Return zero (0)
+ * for valid RTT state
+ * means if RTT is applicable
+ */
+uint8
+dhd_rtt_invalid_states(struct net_device *ndev, struct ether_addr *peer_addr)
+{
+ uint8 invalid_reason = RTT_STATE_VALID;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+
+ UNUSED_PARAMETER(cfg);
+ UNUSED_PARAMETER(invalid_reason);
+
+ /* Make sure peer addr is not NULL in caller */
+ ASSERT(peer_addr);
+ /*
+ * Keep adding prohibited drv states here
+ * Only generic conditions which block
+ * All RTTs like NDP connection
+ */
+
+#ifdef WL_NAN
+ if (wl_cfgnan_data_dp_exists_with_peer(cfg, peer_addr)) {
+ invalid_reason = RTT_STATE_INV_REASON_NDP_EXIST;
+ DHD_RTT(("NDP in progress/connected, RTT prohibited\n"));
+ goto exit;
+ }
+#endif /* WL_NAN */
+
+ /* Remove below #defines once more exit calls come */
+#ifdef WL_NAN
+exit:
+#endif /* WL_NAN */
+ return invalid_reason;
+}
+#endif /* WL_CFG80211 */
+
+void
+dhd_rtt_schedule_rtt_work_thread(dhd_pub_t *dhd, int sched_reason)
+{
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ if (rtt_status == NULL) {
+ ASSERT(0);
+ } else {
+ /* Cancel pending retry timer if any */
+ if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
+ cancel_delayed_work(&rtt_status->rtt_retry_timer);
+ }
+ rtt_status->rtt_sched_reason = sched_reason;
+ schedule_work(&rtt_status->work);
+ }
+ return;
+}
+
+int
+dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt)
+{
+ int err = BCME_OK;
+#ifdef WL_CFG80211
+ int i = 0, j = 0;
+ rtt_status_info_t *rtt_status;
+ rtt_results_header_t *entry, *next;
+ rtt_result_t *rtt_result, *next2;
+ struct rtt_noti_callback *iter;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+ if (rtt_status->status == RTT_STOPPED) {
+ DHD_RTT_ERR(("rtt is not started\n"));
+ return BCME_OK;
+ }
+ DHD_RTT(("%s enter\n", __FUNCTION__));
+ mutex_lock(&rtt_status->rtt_mutex);
+ for (i = 0; i < mac_cnt; i++) {
+ for (j = 0; j < rtt_status->rtt_config.rtt_target_cnt; j++) {
+ if (!bcmp(&mac_list[i], &rtt_status->rtt_config.target_info[j].addr,
+ ETHER_ADDR_LEN)) {
+ rtt_status->rtt_config.target_info[j].disable = TRUE;
+ }
+ }
+ }
+ if (rtt_status->all_cancel) {
+ /* cancel all of request */
+ rtt_status->status = RTT_STOPPED;
+ DHD_RTT(("current RTT process is cancelled\n"));
+ /* remove the rtt results in cache */
+ if (!list_empty(&rtt_status->rtt_results_cache)) {
+ /* Iterate rtt_results_header list */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry_safe(entry, next,
+ &rtt_status->rtt_results_cache, list) {
+ list_del(&entry->list);
+ /* Iterate rtt_result list */
+ list_for_each_entry_safe(rtt_result, next2,
+ &entry->result_list, list) {
+ list_del(&rtt_result->list);
+ kfree(rtt_result);
+ }
+ kfree(entry);
+ }
+ GCC_DIAGNOSTIC_POP();
+ }
+ /* send the rtt complete event to wake up the user process */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+ GCC_DIAGNOSTIC_POP();
+ iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache);
+ }
+ /* reinitialize the HEAD */
+ INIT_LIST_HEAD(&rtt_status->rtt_results_cache);
+ /* clear information for rtt_config */
+ rtt_status->rtt_config.rtt_target_cnt = 0;
+ memset(rtt_status->rtt_config.target_info, 0,
+ TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
+ rtt_status->cur_idx = 0;
+ /* Cancel pending proxd timeout work if any */
+ if (delayed_work_pending(&rtt_status->proxd_timeout)) {
+ cancel_delayed_work(&rtt_status->proxd_timeout);
+ }
+ dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION);
+#ifdef WL_NAN
+ dhd_rtt_delete_nan_session(dhd);
+#endif /* WL_NAN */
+ dhd_rtt_ftm_enable(dhd, FALSE);
+ }
+ mutex_unlock(&rtt_status->rtt_mutex);
+#endif /* WL_CFG80211 */
+ return err;
+}
+
+#ifdef WL_CFG80211
+static void
+dhd_rtt_timeout(dhd_pub_t *dhd)
+{
+ rtt_status_info_t *rtt_status;
+#ifndef DHD_DUMP_ON_RTT_TIMEOUT
+ rtt_target_info_t *rtt_target = NULL;
+ rtt_target_info_t *rtt_target_info = NULL;
+#ifdef WL_NAN
+ nan_ranging_inst_t *ranging_inst = NULL;
+ int ret = BCME_OK;
+ uint32 status;
+ struct net_device *ndev = dhd_linux_get_primary_netdev(dhd);
+ struct bcm_cfg80211 *cfg = wiphy_priv(ndev->ieee80211_ptr->wiphy);
+#endif /* WL_NAN */
+#endif /* !DHD_DUMP_ON_RTT_TIMEOUT */
+
+ rtt_status = GET_RTTSTATE(dhd);
+ if (!rtt_status) {
+ DHD_RTT_ERR(("Proxd timer expired but no RTT status\n"));
+ goto exit;
+ }
+
+ if (RTT_IS_STOPPED(rtt_status)) {
+ DHD_RTT_ERR(("Proxd timer expired but no RTT Request\n"));
+ goto exit;
+ }
+
+#ifdef DHD_DUMP_ON_RTT_TIMEOUT
+ /* Dump, and Panic depending on memdump.info */
+ if (dhd_query_bus_erros(dhd)) {
+ goto exit;
+ }
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* Behave based on user memdump info */
+ dhd->memdump_type = DUMP_TYPE_PROXD_TIMEOUT;
+ dhd_bus_mem_dump(dhd);
+ }
+#endif /* DHD_FW_COREDUMP */
+#else /* DHD_DUMP_ON_RTT_TIMEOUT */
+ /* Cancel RTT for target and proceed to next target */
+ rtt_target_info = rtt_status->rtt_config.target_info;
+ if ((!rtt_target_info) ||
+ (rtt_status->cur_idx >= rtt_status->rtt_config.rtt_target_cnt)) {
+ goto exit;
+ }
+ rtt_target = &rtt_target_info[rtt_status->cur_idx];
+ WL_ERR(("Proxd timer expired for Target: "MACDBG" \n", MAC2STRDBG(&rtt_target->addr)));
+#ifdef WL_NAN
+ if (rtt_target->peer == RTT_PEER_NAN) {
+ ranging_inst = wl_cfgnan_check_for_ranging(cfg, &rtt_target->addr);
+ if (!ranging_inst) {
+ goto exit;
+ }
+ ret = wl_cfgnan_cancel_ranging(ndev, cfg, ranging_inst->range_id,
+ NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("%s:nan range cancel failed ret = %d status = %d\n",
+ __FUNCTION__, ret, status));
+ }
+ } else
+#endif /* WL_NAN */
+ {
+ /* For Legacy RTT */
+ dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION);
+ }
+ dhd_rtt_create_failure_result(rtt_status, &rtt_target->addr);
+ dhd_rtt_handle_rtt_session_end(dhd);
+#endif /* DHD_DUMP_ON_RTT_TIMEOUT */
+exit:
+ return;
+}
+
+static void
+dhd_rtt_timeout_work(struct work_struct *work)
+{
+ rtt_status_info_t *rtt_status = NULL;
+ dhd_pub_t *dhd = NULL;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif // endif
+ rtt_status = container_of(work, rtt_status_info_t, proxd_timeout.work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif // endif
+
+ dhd = rtt_status->dhd;
+ if (dhd == NULL) {
+ DHD_RTT_ERR(("%s : dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+ mutex_lock(&rtt_status->rtt_mutex);
+ (void) dhd_rtt_timeout(dhd);
+ mutex_unlock(&rtt_status->rtt_mutex);
+}
+
+static int
+dhd_rtt_start(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+ int err_at = 0;
+ char eabuf[ETHER_ADDR_STR_LEN];
+ char chanbuf[CHANSPEC_STR_LEN];
+ int pm = PM_OFF;
+ int ftm_cfg_cnt = 0;
+ int ftm_param_cnt = 0;
+ uint32 rspec = 0;
+ ftm_config_options_info_t ftm_configs[FTM_MAX_CONFIGS];
+ ftm_config_param_info_t ftm_params[FTM_MAX_PARAMS];
+ rtt_target_info_t *rtt_target;
+ rtt_status_info_t *rtt_status;
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+ u8 rtt_invalid_reason = RTT_STATE_VALID;
+ int rtt_sched_type = RTT_TYPE_INVALID;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+
+ mutex_lock(&rtt_status->rtt_work_mutex);
+
+ DHD_RTT(("Enter %s\n", __FUNCTION__));
+
+ if (RTT_IS_STOPPED(rtt_status)) {
+ DHD_RTT(("No Directed RTT target to process, check for geofence\n"));
+ goto geofence;
+ }
+
+ if (rtt_status->cur_idx >= rtt_status->rtt_config.rtt_target_cnt) {
+ err = BCME_RANGE;
+ err_at = 1;
+ DHD_RTT(("%s : idx %d is out of range\n", __FUNCTION__, rtt_status->cur_idx));
+ if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) {
+ DHD_RTT_ERR(("STA is set as Target/Responder \n"));
+ err = BCME_ERROR;
+ err_at = 1;
+ }
+ goto exit;
+ }
+
+ rtt_status->pm = PM_OFF;
+ err = wldev_ioctl_get(dev, WLC_GET_PM, &rtt_status->pm, sizeof(rtt_status->pm));
+ if (err) {
+ DHD_RTT_ERR(("Failed to get the PM value\n"));
+ } else {
+ err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
+ if (err) {
+ DHD_RTT_ERR(("Failed to set the PM\n"));
+ rtt_status->pm_restore = FALSE;
+ } else {
+ rtt_status->pm_restore = TRUE;
+ }
+ }
+
+ mutex_lock(&rtt_status->rtt_mutex);
+ /* Get a target information */
+ rtt_target = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
+ mutex_unlock(&rtt_status->rtt_mutex);
+ DHD_RTT(("%s enter\n", __FUNCTION__));
+
+ if (ETHER_ISNULLADDR(rtt_target->addr.octet)) {
+ err = BCME_BADADDR;
+ err_at = 2;
+ DHD_RTT(("RTT Target addr is NULL\n"));
+ goto exit;
+ }
+
+ /* check for dp/others concurrency */
+ rtt_invalid_reason = dhd_rtt_invalid_states(dev, &rtt_target->addr);
+ if (rtt_invalid_reason != RTT_STATE_VALID) {
+ err = BCME_BUSY;
+ err_at = 3;
+ DHD_RTT(("DRV State is not valid for RTT\n"));
+ goto exit;
+ }
+
+#ifdef WL_NAN
+ if (rtt_target->peer == RTT_PEER_NAN) {
+ rtt_sched_type = RTT_TYPE_NAN_DIRECTED;
+ rtt_status->status = RTT_ENABLED;
+		/* Ignore the return value; failure is handled inside the API */
+ dhd_rtt_nan_start_session(dhd, rtt_target);
+ goto exit;
+ }
+#endif /* WL_NAN */
+ if (!RTT_IS_ENABLED(rtt_status)) {
+ /* enable ftm */
+ err = dhd_rtt_ftm_enable(dhd, TRUE);
+ if (err) {
+ DHD_RTT_ERR(("failed to enable FTM (%d)\n", err));
+ err_at = 5;
+ goto exit;
+ }
+ }
+
+	/* delete the session at the default session index */
+ err = dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION);
+ if (err < 0 && err != BCME_NOTFOUND) {
+ DHD_RTT_ERR(("failed to delete session of FTM (%d)\n", err));
+ err_at = 6;
+ goto exit;
+ }
+ rtt_status->status = RTT_ENABLED;
+ memset(ftm_configs, 0, sizeof(ftm_configs));
+ memset(ftm_params, 0, sizeof(ftm_params));
+
+ /* configure the session 1 as initiator */
+ ftm_configs[ftm_cfg_cnt].enable = TRUE;
+ ftm_configs[ftm_cfg_cnt++].flags =
+ WL_PROXD_SESSION_FLAG_INITIATOR | WL_PROXD_SESSION_FLAG_RANDMAC;
+ dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_OPTIONS,
+ ftm_configs, ftm_cfg_cnt);
+
+ memset(ioctl_buf, 0, WLC_IOCTL_SMLEN);
+
+ /* Rand Mac for newer version in place of cur_eth */
+ if (dhd->wlc_ver_major < RTT_IOV_CUR_ETH_OBSOLETE) {
+ err = wldev_iovar_getbuf(dev, "cur_etheraddr", NULL, 0,
+ ioctl_buf, WLC_IOCTL_SMLEN, NULL);
+ if (err) {
+ DHD_RTT_ERR(("WLC_GET_CUR_ETHERADDR failed, error %d\n", err));
+ err_at = 7;
+ goto exit;
+ }
+ memcpy(rtt_target->local_addr.octet, ioctl_buf, ETHER_ADDR_LEN);
+
+ /* local mac address */
+ if (!ETHER_ISNULLADDR(rtt_target->local_addr.octet)) {
+ ftm_params[ftm_param_cnt].mac_addr = rtt_target->local_addr;
+ ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_CUR_ETHER_ADDR;
+ bcm_ether_ntoa(&rtt_target->local_addr, eabuf);
+ DHD_RTT((">\t local %s\n", eabuf));
+ }
}
/* target's mac address */
if (!ETHER_ISNULLADDR(rtt_target->addr.octet)) {
DHD_RTT((">\t num of burst : %d\n", rtt_target->num_burst));
}
/* number of frame per burst */
- if (rtt_target->num_frames_per_burst == 0) {
- rtt_target->num_frames_per_burst =
- CHSPEC_IS20(rtt_target->chanspec) ? FTM_DEFAULT_CNT_20M :
- CHSPEC_IS40(rtt_target->chanspec) ? FTM_DEFAULT_CNT_40M :
- FTM_DEFAULT_CNT_80M;
+ rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_80M;
+ if (CHSPEC_IS80(rtt_target->chanspec)) {
+ rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_80M;
+ } else if (CHSPEC_IS40(rtt_target->chanspec)) {
+ rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_40M;
+ } else if (CHSPEC_IS20(rtt_target->chanspec)) {
+ rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_20M;
}
ftm_params[ftm_param_cnt].data16 = htol16(rtt_target->num_frames_per_burst);
ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_BURST_NUM_FTM;
DHD_RTT((">\t number of frame per burst : %d\n", rtt_target->num_frames_per_burst));
+
/* FTM retry count */
if (rtt_target->num_retries_per_ftm) {
ftm_params[ftm_param_cnt].data8 = rtt_target->num_retries_per_ftm;
ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_BURST_PERIOD;
DHD_RTT((">\t burst period : %d ms\n", rtt_target->burst_period));
}
+	/* Set both burst duration and timeout to the MAX duration
+	 * to cope with congested environments.
+	 * Hence the user config is ignored.
+	 */
/* burst-duration */
+ rtt_target->burst_duration = FTM_MAX_BURST_DUR_TMO_MS;
if (rtt_target->burst_duration) {
ftm_params[ftm_param_cnt].data_intvl.intvl =
htol32(rtt_target->burst_duration); /* ms */
DHD_RTT((">\t burst duration : %d ms\n",
rtt_target->burst_duration));
}
+ /* burst-timeout */
+ rtt_target->burst_timeout = FTM_MAX_BURST_DUR_TMO_MS;
+ if (rtt_target->burst_timeout) {
+ ftm_params[ftm_param_cnt].data_intvl.intvl =
+ htol32(rtt_target->burst_timeout); /* ms */
+ ftm_params[ftm_param_cnt].data_intvl.tmu = WL_PROXD_TMU_MILLI_SEC;
+ ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_BURST_TIMEOUT;
+ DHD_RTT((">\t burst timeout : %d ms\n",
+ rtt_target->burst_timeout));
+ }
+ /* event_mask..applicable for only Legacy RTT.
+ * For nan-rtt config happens from firmware
+ */
+ ftm_params[ftm_param_cnt].event_mask = ((1 << WL_PROXD_EVENT_BURST_END) |
+ (1 << WL_PROXD_EVENT_SESSION_END));
+ ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_EVENT_MASK;
+
if (rtt_target->bw && rtt_target->preamble) {
bool use_default = FALSE;
int nss;
int mcs;
switch (rtt_target->preamble) {
- case RTT_PREAMBLE_LEGACY:
- rspec |= WL_RSPEC_ENCODE_RATE; /* 11abg */
- rspec |= WL_RATE_6M;
- break;
- case RTT_PREAMBLE_HT:
- rspec |= WL_RSPEC_ENCODE_HT; /* 11n HT */
- mcs = 0; /* default MCS 0 */
- rspec |= mcs;
- break;
- case RTT_PREAMBLE_VHT:
- rspec |= WL_RSPEC_ENCODE_VHT; /* 11ac VHT */
- mcs = 0; /* default MCS 0 */
- nss = 1; /* default Nss = 1 */
- rspec |= (nss << WL_RSPEC_VHT_NSS_SHIFT) | mcs;
- break;
- default:
- DHD_RTT(("doesn't support this preamble : %d\n", rtt_target->preamble));
- use_default = TRUE;
- break;
+ case RTT_PREAMBLE_LEGACY:
+ rspec |= WL_RSPEC_ENCODE_RATE; /* 11abg */
+ rspec |= WL_RATE_6M;
+ break;
+ case RTT_PREAMBLE_HT:
+ rspec |= WL_RSPEC_ENCODE_HT; /* 11n HT */
+ mcs = 0; /* default MCS 0 */
+ rspec |= mcs;
+ break;
+ case RTT_PREAMBLE_VHT:
+ rspec |= WL_RSPEC_ENCODE_VHT; /* 11ac VHT */
+ mcs = 0; /* default MCS 0 */
+ nss = 1; /* default Nss = 1 */
+ rspec |= (nss << WL_RSPEC_VHT_NSS_SHIFT) | mcs;
+ break;
+ default:
+ DHD_RTT(("doesn't support this preamble : %d\n",
+ rtt_target->preamble));
+ use_default = TRUE;
+ break;
}
switch (rtt_target->bw) {
- case RTT_BW_20:
- rspec |= WL_RSPEC_BW_20MHZ;
- break;
- case RTT_BW_40:
- rspec |= WL_RSPEC_BW_40MHZ;
- break;
- case RTT_BW_80:
- rspec |= WL_RSPEC_BW_80MHZ;
- break;
- default:
- DHD_RTT(("doesn't support this BW : %d\n", rtt_target->bw));
- use_default = TRUE;
- break;
+ case RTT_BW_20:
+ rspec |= WL_RSPEC_BW_20MHZ;
+ break;
+ case RTT_BW_40:
+ rspec |= WL_RSPEC_BW_40MHZ;
+ break;
+ case RTT_BW_80:
+ rspec |= WL_RSPEC_BW_80MHZ;
+ break;
+ default:
+ DHD_RTT(("doesn't support this BW : %d\n", rtt_target->bw));
+ use_default = TRUE;
+ break;
}
if (!use_default) {
ftm_params[ftm_param_cnt].data32 = htol32(rspec);
}
dhd_set_rand_mac_oui(dhd);
dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_GENERAL,
- ftm_params, ftm_param_cnt);
+ ftm_params, ftm_param_cnt);
+ rtt_sched_type = RTT_TYPE_LEGACY;
err = dhd_rtt_start_session(dhd, FTM_DEFAULT_SESSION, TRUE);
if (err) {
- DHD_ERROR(("failed to start session of FTM : error %d\n", err));
+ DHD_RTT_ERR(("failed to start session of FTM : error %d\n", err));
+ err_at = 8;
+ } else {
+ /* schedule proxd timeout */
+ schedule_delayed_work(&rtt_status->proxd_timeout,
+ msecs_to_jiffies(DHD_NAN_RTT_TIMER_INTERVAL_MS));
+
}
+
+ goto exit;
+geofence:
+#ifdef WL_NAN
+ /* sched geofencing rtt */
+ rtt_sched_type = RTT_TYPE_NAN_GEOFENCE;
+ if ((err = dhd_rtt_sched_geofencing_target(dhd)) != BCME_OK) {
+ DHD_RTT_ERR(("geofencing sched failed, err = %d\n", err));
+ err_at = 9;
+ }
+#endif /* WL_NAN */
+
exit:
if (err) {
- DHD_ERROR(("rtt is stopped %s \n", __FUNCTION__));
+ /* RTT Failed */
+ DHD_RTT_ERR(("dhd_rtt_start: Failed & RTT_STOPPED, err = %d,"
+ " err_at = %d, rtt_sched_type = %d, rtt_invalid_reason = %d\n"
+ " sched_reason = %d",
+ err, err_at, rtt_sched_type, rtt_invalid_reason,
+ rtt_status->rtt_sched_reason));
rtt_status->status = RTT_STOPPED;
/* disable FTM */
dhd_rtt_ftm_enable(dhd, FALSE);
if (rtt_status->pm_restore) {
pm = PM_FAST;
- DHD_ERROR(("pm_restore =%d func =%s \n",
+ DHD_RTT_ERR(("pm_restore =%d func =%s \n",
rtt_status->pm_restore, __FUNCTION__));
err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
if (err) {
- DHD_ERROR(("Failed to set PM \n"));
+ DHD_RTT_ERR(("Failed to set PM \n"));
} else {
rtt_status->pm_restore = FALSE;
}
}
}
+ mutex_unlock(&rtt_status->rtt_work_mutex);
return err;
}
#endif /* WL_CFG80211 */
rtt_status = GET_RTTSTATE(dhd);
NULL_CHECK(rtt_status, "rtt_status is NULL", err);
spin_lock_bh(¬i_list_lock);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (iter->noti_fn == noti_fn) {
goto exit;
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
cb = kmalloc(sizeof(struct rtt_noti_callback), GFP_ATOMIC);
if (!cb) {
err = -ENOMEM;
rtt_status = GET_RTTSTATE(dhd);
NULL_CHECK(rtt_status, "rtt_status is NULL", err);
spin_lock_bh(¬i_list_lock);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (iter->noti_fn == noti_fn) {
cb = iter;
list_del(&cb->list);
break;
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
spin_unlock_bh(¬i_list_lock);
if (cb) {
#define FTM_FRAME_TYPES {"SETUP", "TRIGGER", "TIMESTAMP"}
static int
-dhd_rtt_convert_results_to_host_v1(rtt_report_t *rtt_report, const uint8 *p_data,
+dhd_rtt_convert_results_to_host_v1(rtt_result_t *rtt_result, const uint8 *p_data,
uint16 tlvid, uint16 len)
{
int i;
wl_proxd_session_state_t session_state;
wl_proxd_status_t proxd_status;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
- struct timespec ts;
+ struct osl_timespec ts;
#endif /* LINUX_VER >= 2.6.39 */
uint32 ratespec;
uint32 avg_dist;
int32 dist = 0;
uint8 num_ftm = 0;
char *ftm_frame_types[] = FTM_FRAME_TYPES;
+ rtt_report_t *rtt_report = &(rtt_result->report);
BCM_REFERENCE(ftm_frame_types);
BCM_REFERENCE(dist);
BCM_REFERENCE(tof_phy_error);
BCM_REFERENCE(bitflips);
BCM_REFERENCE(snr);
+ BCM_REFERENCE(session_state);
+ BCM_REFERENCE(ftm_session_state_value_to_logstr);
NULL_CHECK(rtt_report, "rtt_report is NULL", err);
NULL_CHECK(p_data, "p_data is NULL", err);
session_state = ltoh16_ua(&p_data_info->state);
proxd_status = ltoh32_ua(&p_data_info->status);
bcm_ether_ntoa((&(p_data_info->peer)), eabuf);
- ftm_session_state_value_to_logstr(session_state);
ftm_status_value_to_logstr(proxd_status);
DHD_RTT((">\tTarget(%s) session state=%d(%s), status=%d(%s)\n",
eabuf,
/* time stamp */
/* get the time elapsed from boot time */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
- get_monotonic_boottime(&ts);
+ osl_get_monotonic_boottime(&ts);
rtt_report->ts = (uint64)TIMESPEC_TO_US(ts);
#endif /* LINUX_VER >= 2.6.39 */
}
DHD_RTT((">\t sample[%d]: id=%d rssi=%d snr=0x%x bitflips=%d"
" tof_phy_error %x tof_phy_tgt_error %x target_snr=0x%x"
- " target_bitflips=%d dist=%d rtt=%d%s status %s"
- " Type %s coreid=%d\n",
+ " target_bitflips=%d dist=%d rtt=%d%s status %s"
+ " Type %s coreid=%d\n",
+ i, p_sample->id, rssi, snr,
+ bitflips, tof_phy_error, tof_phy_tgt_error,
+ tof_target_snr,
+ tof_target_bitflips, dist,
+ ltoh32_ua(&p_sample->rtt.intvl),
+ ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample->rtt.tmu)),
+ ftm_status_value_to_logstr(ltoh32_ua(&p_sample->status)),
+ ftm_frame_types[i % num_ftm], p_sample->coreid));
+ p_sample++;
+ }
+ }
+ return err;
+}
+
+static int
+dhd_rtt_convert_results_to_host_v2(rtt_result_t *rtt_result, const uint8 *p_data,
+ uint16 tlvid, uint16 len)
+{
+ int i;
+ int err = BCME_OK;
+ char eabuf[ETHER_ADDR_STR_LEN];
+ wl_proxd_result_flags_t flags;
+ wl_proxd_session_state_t session_state;
+ wl_proxd_status_t proxd_status;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+ struct osl_timespec ts;
+#endif /* LINUX_VER >= 2.6.39 */
+ uint32 ratespec;
+ uint32 avg_dist;
+ const wl_proxd_rtt_result_v2_t *p_data_info = NULL;
+ const wl_proxd_rtt_sample_v2_t *p_sample_avg = NULL;
+ const wl_proxd_rtt_sample_v2_t *p_sample = NULL;
+ uint16 num_rtt = 0;
+ wl_proxd_intvl_t rtt;
+ wl_proxd_intvl_t p_time;
+ uint16 snr = 0, bitflips = 0;
+ wl_proxd_phy_error_t tof_phy_error = 0;
+ wl_proxd_phy_error_t tof_phy_tgt_error = 0;
+ wl_proxd_snr_t tof_target_snr = 0;
+ wl_proxd_bitflips_t tof_target_bitflips = 0;
+ int16 rssi = 0;
+ int32 dist = 0;
+ uint32 chanspec = 0;
+ uint8 num_ftm = 0;
+ char *ftm_frame_types[] = FTM_FRAME_TYPES;
+ rtt_report_t *rtt_report = &(rtt_result->report);
+
+ BCM_REFERENCE(ftm_frame_types);
+ BCM_REFERENCE(dist);
+ BCM_REFERENCE(rssi);
+ BCM_REFERENCE(tof_target_bitflips);
+ BCM_REFERENCE(tof_target_snr);
+ BCM_REFERENCE(tof_phy_tgt_error);
+ BCM_REFERENCE(tof_phy_error);
+ BCM_REFERENCE(bitflips);
+ BCM_REFERENCE(snr);
+ BCM_REFERENCE(chanspec);
+ BCM_REFERENCE(session_state);
+ BCM_REFERENCE(ftm_session_state_value_to_logstr);
+
+ NULL_CHECK(rtt_report, "rtt_report is NULL", err);
+ NULL_CHECK(p_data, "p_data is NULL", err);
+ DHD_RTT(("%s enter\n", __FUNCTION__));
+ p_data_info = (const wl_proxd_rtt_result_v2_t *) p_data;
+ /* unpack and format 'flags' for display */
+ flags = ltoh16_ua(&p_data_info->flags);
+ /* session state and status */
+ session_state = ltoh16_ua(&p_data_info->state);
+ proxd_status = ltoh32_ua(&p_data_info->status);
+ bcm_ether_ntoa((&(p_data_info->peer)), eabuf);
+
+ if (proxd_status != BCME_OK) {
+ DHD_RTT_ERR((">\tTarget(%s) session state=%d(%s), status=%d(%s) "
+ "num_meas_ota %d num_valid_rtt %d result_flags %x\n",
+ eabuf, session_state,
+ ftm_session_state_value_to_logstr(session_state),
+ proxd_status, ftm_status_value_to_logstr(proxd_status),
+ p_data_info->num_meas, p_data_info->num_valid_rtt,
+ p_data_info->flags));
+ } else {
+ DHD_RTT((">\tTarget(%s) session state=%d(%s), status=%d(%s)\n",
+ eabuf, session_state,
+ ftm_session_state_value_to_logstr(session_state),
+ proxd_status, ftm_status_value_to_logstr(proxd_status)));
+ }
+ /* show avg_dist (1/256m units), burst_num */
+ avg_dist = ltoh32_ua(&p_data_info->avg_dist);
+ if (avg_dist == 0xffffffff) { /* report 'failure' case */
+ DHD_RTT((">\tavg_dist=-1m, burst_num=%d, valid_measure_cnt=%d\n",
+ ltoh16_ua(&p_data_info->burst_num),
+ p_data_info->num_valid_rtt)); /* in a session */
+ avg_dist = FTM_INVALID;
+ } else {
+ DHD_RTT((">\tavg_dist=%d.%04dm, burst_num=%d, valid_measure_cnt=%d num_ftm=%d "
+ "num_meas_ota=%d, result_flags=%x\n", avg_dist >> 8, /* 1/256m units */
+ ((avg_dist & 0xff) * 625) >> 4,
+ ltoh16_ua(&p_data_info->burst_num),
+ p_data_info->num_valid_rtt,
+ p_data_info->num_ftm, p_data_info->num_meas,
+ p_data_info->flags)); /* in a session */
+ }
+ rtt_result->rtt_detail.num_ota_meas = p_data_info->num_meas;
+ rtt_result->rtt_detail.result_flags = p_data_info->flags;
+ /* show 'avg_rtt' sample */
+ /* in v2, avg_rtt is the first element of the variable rtt[] */
+ p_sample_avg = &p_data_info->rtt[0];
+ ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu));
+ DHD_RTT((">\tavg_rtt sample: rssi=%d rtt=%d%s std_deviation =%d.%d"
+ "ratespec=0x%08x chanspec=0x%08x\n",
+ (int16) ltoh16_ua(&p_sample_avg->rssi),
+ ltoh32_ua(&p_sample_avg->rtt.intvl),
+ ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu)),
+ ltoh16_ua(&p_data_info->sd_rtt)/10, ltoh16_ua(&p_data_info->sd_rtt)%10,
+ ltoh32_ua(&p_sample_avg->ratespec),
+ ltoh32_ua(&p_sample_avg->chanspec)));
+
+ /* set peer address */
+ rtt_report->addr = p_data_info->peer;
+
+ /* burst num */
+ rtt_report->burst_num = ltoh16_ua(&p_data_info->burst_num);
+
+ /* success num */
+ rtt_report->success_num = p_data_info->num_valid_rtt;
+
+ /* num-ftm configured */
+ rtt_report->ftm_num = p_data_info->num_ftm;
+
+ /* actual number of FTM supported by peer */
+ rtt_report->num_per_burst_peer = p_data_info->num_ftm;
+ rtt_report->negotiated_burst_num = p_data_info->num_ftm;
+
+ /* status */
+ rtt_report->status = ftm_get_statusmap_info(proxd_status,
+ &ftm_status_map_info[0], ARRAYSIZE(ftm_status_map_info));
+
+ /* Framework expects status as SUCCESS else all results will be
+ * set to zero even if we have partial valid result.
+ * So setting status as SUCCESS if we have a valid_rtt
+ * On burst timeout we stop burst with "timeout" reason and
+ * on msch end we set status as "cancel"
+ */
+ if ((proxd_status == WL_PROXD_E_TIMEOUT ||
+ proxd_status == WL_PROXD_E_CANCELED) &&
+ rtt_report->success_num) {
+ rtt_report->status = RTT_STATUS_SUCCESS;
+ }
+
+ /* rssi (0.5db) */
+ rtt_report->rssi = ABS((wl_proxd_rssi_t)ltoh16_ua(&p_sample_avg->rssi)) * 2;
+
+ /* rx rate */
+ ratespec = ltoh32_ua(&p_sample_avg->ratespec);
+ rtt_report->rx_rate = dhd_rtt_convert_rate_to_host(ratespec);
+
+ /* tx rate */
+ if (flags & WL_PROXD_RESULT_FLAG_VHTACK) {
+ rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0x2010010);
+ } else {
+ rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0xc);
+ }
+
+ /* rtt_sd */
+ rtt.tmu = ltoh16_ua(&p_sample_avg->rtt.tmu);
+ rtt.intvl = ltoh32_ua(&p_sample_avg->rtt.intvl);
+ rtt_report->rtt = (wifi_timespan)FTM_INTVL2NSEC(&rtt) * 1000; /* nano -> pico seconds */
+ rtt_report->rtt_sd = ltoh16_ua(&p_data_info->sd_rtt); /* nano -> 0.1 nano */
+ DHD_RTT(("rtt_report->rtt : %llu\n", rtt_report->rtt));
+ DHD_RTT(("rtt_report->rssi : %d (0.5db)\n", rtt_report->rssi));
+
+ /* average distance */
+ if (avg_dist != FTM_INVALID) {
+ rtt_report->distance = (avg_dist >> 8) * 1000; /* meter -> mm */
+ rtt_report->distance += (avg_dist & 0xff) * 1000 / 256;
+ /* rtt_sd is in 0.1 ns.
+ * host needs distance_sd in milli mtrs
+ * (0.1 * rtt_sd/2 * 10^-9) * C * 1000
+ */
+ rtt_report->distance_sd = rtt_report->rtt_sd * 15; /* mm */
+ } else {
+ rtt_report->distance = FTM_INVALID;
+ }
+ /* time stamp */
+ /* get the time elapsed from boot time */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+ osl_get_monotonic_boottime(&ts);
+ rtt_report->ts = (uint64)TIMESPEC_TO_US(ts);
+#endif /* LINUX_VER >= 2.6.39 */
+
+ if (proxd_status == WL_PROXD_E_REMOTE_FAIL) {
+ /* retry time after failure */
+ p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl);
+ p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu);
+ rtt_report->retry_after_duration = FTM_INTVL2SEC(&p_time); /* s -> s */
+ DHD_RTT((">\tretry_after: %d%s\n",
+ ltoh32_ua(&p_data_info->u.retry_after.intvl),
+ ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.retry_after.tmu))));
+ } else {
+ /* burst duration */
+ p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl);
+ p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu);
+ rtt_report->burst_duration = FTM_INTVL2MSEC(&p_time); /* s -> ms */
+ DHD_RTT((">\tburst_duration: %d%s\n",
+ ltoh32_ua(&p_data_info->u.burst_duration.intvl),
+ ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.burst_duration.tmu))));
+ DHD_RTT(("rtt_report->burst_duration : %d\n", rtt_report->burst_duration));
+ }
+ /* display detail if available */
+ num_rtt = ltoh16_ua(&p_data_info->num_rtt);
+ if (num_rtt > 0) {
+ DHD_RTT((">\tnum rtt: %d samples\n", num_rtt));
+ p_sample = &p_data_info->rtt[1];
+ for (i = 0; i < num_rtt; i++) {
+ snr = 0;
+ bitflips = 0;
+ tof_phy_error = 0;
+ tof_phy_tgt_error = 0;
+ tof_target_snr = 0;
+ tof_target_bitflips = 0;
+ rssi = 0;
+ dist = 0;
+ num_ftm = p_data_info->num_ftm;
+ /* FTM frames 1,4,7,11 have valid snr, rssi and bitflips */
+ if ((i % num_ftm) == 1) {
+ rssi = (wl_proxd_rssi_t) ltoh16_ua(&p_sample->rssi);
+ snr = (wl_proxd_snr_t) ltoh16_ua(&p_sample->snr);
+ bitflips = (wl_proxd_bitflips_t) ltoh16_ua(&p_sample->bitflips);
+ tof_phy_error =
+ (wl_proxd_phy_error_t)
+ ltoh32_ua(&p_sample->tof_phy_error);
+ tof_phy_tgt_error =
+ (wl_proxd_phy_error_t)
+ ltoh32_ua(&p_sample->tof_tgt_phy_error);
+ tof_target_snr =
+ (wl_proxd_snr_t)
+ ltoh16_ua(&p_sample->tof_tgt_snr);
+ tof_target_bitflips =
+ (wl_proxd_bitflips_t)
+ ltoh16_ua(&p_sample->tof_tgt_bitflips);
+ dist = ltoh32_ua(&p_sample->distance);
+ chanspec = ltoh32_ua(&p_sample->chanspec);
+ } else {
+ rssi = -1;
+ snr = 0;
+ bitflips = 0;
+ dist = 0;
+ tof_target_bitflips = 0;
+ tof_target_snr = 0;
+ tof_phy_tgt_error = 0;
+ }
+ DHD_RTT((">\t sample[%d]: id=%d rssi=%d snr=0x%x bitflips=%d"
+ " tof_phy_error %x tof_phy_tgt_error %x target_snr=0x%x"
+ " target_bitflips=%d dist=%d rtt=%d%s status %s Type %s"
+ " coreid=%d chanspec=0x%08x\n",
i, p_sample->id, rssi, snr,
bitflips, tof_phy_error, tof_phy_tgt_error,
tof_target_snr,
ltoh32_ua(&p_sample->rtt.intvl),
ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample->rtt.tmu)),
ftm_status_value_to_logstr(ltoh32_ua(&p_sample->status)),
- ftm_frame_types[i % num_ftm], p_sample->coreid));
+ ftm_frame_types[i % num_ftm], p_sample->coreid,
+ chanspec));
p_sample++;
}
}
return err;
}
+#ifdef WL_CFG80211
+/* Common API for handling Session End.
+* This API will flush out the results for a peer MAC.
+*
+* @For legacy FTM session, this API will be called
+* when legacy FTM_SESSION_END event is received.
+* @For legacy Nan-RTT , this API will be called when
+* we are cancelling the nan-ranging session or on
+* nan-ranging-end event.
+*/
+static void
+dhd_rtt_handle_rtt_session_end(dhd_pub_t *dhd)
+{
+
+ int idx;
+ struct rtt_noti_callback *iter;
+ rtt_results_header_t *entry, *next;
+ rtt_result_t *next2;
+ rtt_result_t *rtt_result;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ /* Cancel pending proxd timeout work if any */
+ if (delayed_work_pending(&rtt_status->proxd_timeout)) {
+ cancel_delayed_work(&rtt_status->proxd_timeout);
+ }
+
+ /* find next target to trigger RTT */
+ for (idx = (rtt_status->cur_idx + 1);
+ idx < rtt_status->rtt_config.rtt_target_cnt; idx++) {
+ /* skip the disabled device */
+ if (rtt_status->rtt_config.target_info[idx].disable) {
+ continue;
+ } else {
+ /* set the idx to cur_idx */
+ rtt_status->cur_idx = idx;
+ break;
+ }
+ }
+ if (idx < rtt_status->rtt_config.rtt_target_cnt) {
+ /* restart to measure RTT from next device */
+ DHD_INFO(("restart to measure rtt\n"));
+ schedule_work(&rtt_status->work);
+ } else {
+ DHD_RTT(("RTT_STOPPED\n"));
+ rtt_status->status = RTT_STOPPED;
+ /* notify the completed information to others */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+ iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache);
+ }
+ /* remove the rtt results in cache */
+ if (!list_empty(&rtt_status->rtt_results_cache)) {
+ /* Iterate rtt_results_header list */
+ list_for_each_entry_safe(entry, next,
+ &rtt_status->rtt_results_cache, list) {
+ list_del(&entry->list);
+ /* Iterate rtt_result list */
+ list_for_each_entry_safe(rtt_result, next2,
+ &entry->result_list, list) {
+ list_del(&rtt_result->list);
+ kfree(rtt_result);
+ }
+ kfree(entry);
+ }
+ }
+ GCC_DIAGNOSTIC_POP();
+ /* reinitialize the HEAD */
+ INIT_LIST_HEAD(&rtt_status->rtt_results_cache);
+ /* clear information for rtt_config */
+ rtt_status->rtt_config.rtt_target_cnt = 0;
+ memset_s(rtt_status->rtt_config.target_info, TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT),
+ 0, TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
+ rtt_status->cur_idx = 0;
+ }
+}
+#endif /* WL_CFG80211 */
+#ifdef WL_CFG80211
static int
-dhd_rtt_convert_results_to_host_v2(rtt_report_t *rtt_report, const uint8 *p_data,
- uint16 tlvid, uint16 len)
+dhd_rtt_create_failure_result(rtt_status_info_t *rtt_status,
+ struct ether_addr *addr)
{
- int i;
- int err = BCME_OK;
- char eabuf[ETHER_ADDR_STR_LEN];
- wl_proxd_result_flags_t flags;
- wl_proxd_session_state_t session_state;
- wl_proxd_status_t proxd_status;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
- struct timespec ts;
-#endif /* LINUX_VER >= 2.6.39 */
- uint32 ratespec;
- uint32 avg_dist;
- const wl_proxd_rtt_result_v2_t *p_data_info = NULL;
- const wl_proxd_rtt_sample_v2_t *p_sample_avg = NULL;
- const wl_proxd_rtt_sample_v2_t *p_sample = NULL;
- uint16 num_rtt = 0;
- wl_proxd_intvl_t rtt;
- wl_proxd_intvl_t p_time;
- uint16 snr = 0, bitflips = 0;
- wl_proxd_phy_error_t tof_phy_error = 0;
- wl_proxd_phy_error_t tof_phy_tgt_error = 0;
- wl_proxd_snr_t tof_target_snr = 0;
- wl_proxd_bitflips_t tof_target_bitflips = 0;
- int16 rssi = 0;
- int32 dist = 0;
- uint32 chanspec = 0;
- uint8 num_ftm = 0;
- char *ftm_frame_types[] = FTM_FRAME_TYPES;
+ rtt_results_header_t *rtt_results_header = NULL;
+ rtt_target_info_t *rtt_target_info;
+ int ret = BCME_OK;
+ rtt_result_t *rtt_result;
- BCM_REFERENCE(ftm_frame_types);
- BCM_REFERENCE(dist);
- BCM_REFERENCE(rssi);
- BCM_REFERENCE(tof_target_bitflips);
- BCM_REFERENCE(tof_target_snr);
- BCM_REFERENCE(tof_phy_tgt_error);
- BCM_REFERENCE(tof_phy_error);
- BCM_REFERENCE(bitflips);
- BCM_REFERENCE(snr);
- BCM_REFERENCE(chanspec);
+ /* allocate new header for rtt_results */
+ rtt_results_header = (rtt_results_header_t *)MALLOCZ(rtt_status->dhd->osh,
+ sizeof(rtt_results_header_t));
+ if (!rtt_results_header) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ rtt_target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
+ /* Initialize the head of list for rtt result */
+ INIT_LIST_HEAD(&rtt_results_header->result_list);
+ /* same src and dest len */
+ (void)memcpy_s(&rtt_results_header->peer_mac,
+ ETHER_ADDR_LEN, addr, ETHER_ADDR_LEN);
+ list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache);
+
+ /* allocate rtt_results for new results */
+ rtt_result = (rtt_result_t *)MALLOCZ(rtt_status->dhd->osh,
+ sizeof(rtt_result_t));
+ if (!rtt_result) {
+ ret = -ENOMEM;
+ kfree(rtt_results_header);
+ goto exit;
+ }
+ /* fill out the results from the configuration param */
+ rtt_result->report.ftm_num = rtt_target_info->num_frames_per_burst;
+ rtt_result->report.type = RTT_TWO_WAY;
+ DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num));
+ rtt_result->report_len = RTT_REPORT_SIZE;
+ rtt_result->report.status = RTT_STATUS_FAIL_NO_RSP;
+ /* same src and dest len */
+ (void)memcpy_s(&rtt_result->report.addr, ETHER_ADDR_LEN,
+ &rtt_target_info->addr, ETHER_ADDR_LEN);
+ rtt_result->report.distance = FTM_INVALID;
+ list_add_tail(&rtt_result->list, &rtt_results_header->result_list);
+ rtt_results_header->result_cnt++;
+ rtt_results_header->result_tot_len += rtt_result->report_len;
+exit:
+ return ret;
+}
- NULL_CHECK(rtt_report, "rtt_report is NULL", err);
- NULL_CHECK(p_data, "p_data is NULL", err);
- DHD_RTT(("%s enter\n", __FUNCTION__));
- p_data_info = (const wl_proxd_rtt_result_v2_t *) p_data;
- /* unpack and format 'flags' for display */
- flags = ltoh16_ua(&p_data_info->flags);
- /* session state and status */
- session_state = ltoh16_ua(&p_data_info->state);
- proxd_status = ltoh32_ua(&p_data_info->status);
- bcm_ether_ntoa((&(p_data_info->peer)), eabuf);
- ftm_session_state_value_to_logstr(session_state);
- ftm_status_value_to_logstr(proxd_status);
- DHD_RTT((">\tTarget(%s) session state=%d(%s), status=%d(%s)\n",
- eabuf,
- session_state,
- ftm_session_state_value_to_logstr(session_state),
- proxd_status,
- ftm_status_value_to_logstr(proxd_status)));
- /* show avg_dist (1/256m units), burst_num */
- avg_dist = ltoh32_ua(&p_data_info->avg_dist);
- if (avg_dist == 0xffffffff) { /* report 'failure' case */
- DHD_RTT((">\tavg_dist=-1m, burst_num=%d, valid_measure_cnt=%d\n",
- ltoh16_ua(&p_data_info->burst_num),
- p_data_info->num_valid_rtt)); /* in a session */
- avg_dist = FTM_INVALID;
- } else {
- DHD_RTT((">\tavg_dist=%d.%04dm, burst_num=%d, valid_measure_cnt=%d num_ftm=%d\n",
- avg_dist >> 8, /* 1/256m units */
- ((avg_dist & 0xff) * 625) >> 4,
- ltoh16_ua(&p_data_info->burst_num),
- p_data_info->num_valid_rtt,
- p_data_info->num_ftm)); /* in a session */
+static bool
+dhd_rtt_get_report_header(rtt_status_info_t *rtt_status,
+ rtt_results_header_t **rtt_results_header, struct ether_addr *addr)
+{
+ rtt_results_header_t *entry;
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ /* find a rtt_report_header for this mac address */
+ list_for_each_entry(entry, &rtt_status->rtt_results_cache, list) {
+ GCC_DIAGNOSTIC_POP();
+ if (!memcmp(&entry->peer_mac, addr, ETHER_ADDR_LEN)) {
+ /* found a rtt_report_header for peer_mac in the list */
+ if (rtt_results_header) {
+ *rtt_results_header = entry;
+ }
+ return TRUE;
+ }
}
+ return FALSE;
+}
- /* show 'avg_rtt' sample */
- /* in v2, avg_rtt is the first element of the variable rtt[] */
- p_sample_avg = &p_data_info->rtt[0];
- ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu));
- DHD_RTT((">\tavg_rtt sample: rssi=%d rtt=%d%s std_deviation =%d.%d"
- "ratespec=0x%08x chanspec=0x%08x\n",
- (int16) ltoh16_ua(&p_sample_avg->rssi),
- ltoh32_ua(&p_sample_avg->rtt.intvl),
- ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu)),
- ltoh16_ua(&p_data_info->sd_rtt)/10, ltoh16_ua(&p_data_info->sd_rtt)%10,
- ltoh32_ua(&p_sample_avg->ratespec),
- ltoh32_ua(&p_sample_avg->chanspec)));
+int
+dhd_rtt_handle_nan_rtt_session_end(dhd_pub_t *dhd, struct ether_addr *peer)
+{
+ bool is_new = TRUE;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ mutex_lock(&rtt_status->rtt_mutex);
+ is_new = !dhd_rtt_get_report_header(rtt_status, NULL, peer);
- /* set peer address */
- rtt_report->addr = p_data_info->peer;
+ if (is_new) { /* no FTM result..create failure result */
+ dhd_rtt_create_failure_result(rtt_status, peer);
+ }
+ dhd_rtt_handle_rtt_session_end(dhd);
+ mutex_unlock(&rtt_status->rtt_mutex);
+ return BCME_OK;
+}
+#endif /* WL_CFG80211 */
- /* burst num */
- rtt_report->burst_num = ltoh16_ua(&p_data_info->burst_num);
+static bool
+dhd_rtt_is_valid_measurement(rtt_result_t *rtt_result)
+{
+ bool ret = FALSE;
- /* success num */
- rtt_report->success_num = p_data_info->num_valid_rtt;
+ if (rtt_result && (rtt_result->report.success_num != 0)) {
+ ret = TRUE;
+ }
+ return ret;
+}
- /* actual number of FTM supported by peer */
- rtt_report->num_per_burst_peer = p_data_info->num_ftm;
- rtt_report->negotiated_burst_num = p_data_info->num_ftm;
+static int
+dhd_rtt_parse_result_event(wl_proxd_event_t *proxd_ev_data,
+ int tlvs_len, rtt_result_t *rtt_result)
+{
+ int ret = BCME_OK;
- /* status */
- rtt_report->status = ftm_get_statusmap_info(proxd_status,
- &ftm_status_map_info[0], ARRAYSIZE(ftm_status_map_info));
+ /* unpack TLVs and invokes the cbfn to print the event content TLVs */
+ ret = bcm_unpack_xtlv_buf((void *) rtt_result,
+ (uint8 *)&proxd_ev_data->tlvs[0], tlvs_len,
+ BCM_XTLV_OPTION_ALIGN32, rtt_unpack_xtlv_cbfn);
+ if (ret != BCME_OK) {
+ DHD_RTT_ERR(("%s : Failed to unpack xtlv for an event\n",
+ __FUNCTION__));
+ goto exit;
+ }
+ /* fill out the results from the configuration param */
+ rtt_result->report.type = RTT_TWO_WAY;
+ DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num));
+ rtt_result->report_len = RTT_REPORT_SIZE;
+ rtt_result->detail_len = sizeof(rtt_result->rtt_detail);
- /* rssi (0.5db) */
- rtt_report->rssi = ABS((wl_proxd_rssi_t)ltoh16_ua(&p_sample_avg->rssi)) * 2;
+exit:
+ return ret;
- /* rx rate */
- ratespec = ltoh32_ua(&p_sample_avg->ratespec);
- rtt_report->rx_rate = dhd_rtt_convert_rate_to_host(ratespec);
+}
- /* tx rate */
- if (flags & WL_PROXD_RESULT_FLAG_VHTACK) {
- rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0x2010010);
+static int
+dhd_rtt_handle_directed_rtt_burst_end(dhd_pub_t *dhd, struct ether_addr *peer_addr,
+ wl_proxd_event_t *proxd_ev_data, int tlvs_len, rtt_result_t *rtt_result, bool is_nan)
+{
+ rtt_status_info_t *rtt_status;
+ rtt_results_header_t *rtt_results_header = NULL;
+ bool is_new = TRUE;
+ int ret = BCME_OK;
+ int err_at = 0;
+
+ rtt_status = GET_RTTSTATE(dhd);
+ is_new = !dhd_rtt_get_report_header(rtt_status,
+ &rtt_results_header, peer_addr);
+
+ if (tlvs_len > 0) {
+ if (is_new) {
+ /* allocate new header for rtt_results */
+ rtt_results_header = (rtt_results_header_t *)MALLOCZ(rtt_status->dhd->osh,
+ sizeof(rtt_results_header_t));
+ if (!rtt_results_header) {
+ ret = BCME_NORESOURCE;
+ err_at = 1;
+ goto exit;
+ }
+ /* Initialize the head of list for rtt result */
+ INIT_LIST_HEAD(&rtt_results_header->result_list);
+			/* same src and dest len */
+ (void)memcpy_s(&rtt_results_header->peer_mac, ETHER_ADDR_LEN,
+ peer_addr, ETHER_ADDR_LEN);
+ list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache);
+ }
+
+ ret = dhd_rtt_parse_result_event(proxd_ev_data, tlvs_len, rtt_result);
+ if ((ret == BCME_OK) && ((!is_nan) ||
+ dhd_rtt_is_valid_measurement(rtt_result))) {
+ /*
+ * Add to list, if non-nan RTT (legacy) or
+ * valid measurement in nan rtt case
+ */
+ list_add_tail(&rtt_result->list, &rtt_results_header->result_list);
+ rtt_results_header->result_cnt++;
+ rtt_results_header->result_tot_len += rtt_result->report_len +
+ rtt_result->detail_len;
+ } else {
+ err_at = 2;
+ if (ret == BCME_OK) {
+ /* Case for nan rtt invalid measurement */
+ ret = BCME_ERROR;
+ err_at = 3;
+ }
+ goto exit;
+ }
} else {
- rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0xc);
+ ret = BCME_ERROR;
+ err_at = 4;
+ goto exit;
}
- /* rtt_sd */
- rtt.tmu = ltoh16_ua(&p_sample_avg->rtt.tmu);
- rtt.intvl = ltoh32_ua(&p_sample_avg->rtt.intvl);
- rtt_report->rtt = (wifi_timespan)FTM_INTVL2NSEC(&rtt) * 1000; /* nano -> pico seconds */
- rtt_report->rtt_sd = ltoh16_ua(&p_data_info->sd_rtt); /* nano -> 0.1 nano */
- DHD_RTT(("rtt_report->rtt : %llu\n", rtt_report->rtt));
- DHD_RTT(("rtt_report->rssi : %d (0.5db)\n", rtt_report->rssi));
+exit:
+ if (ret != BCME_OK) {
+ DHD_RTT_ERR(("dhd_rtt_handle_directed_rtt_burst_end: failed, "
+ " ret = %d, err_at = %d\n", ret, err_at));
+ if (rtt_results_header) {
+ list_del(&rtt_results_header->list);
+ kfree(rtt_results_header);
+ rtt_results_header = NULL;
+ }
+ }
+ return ret;
+}
+
+#ifdef WL_NAN
+static void
+dhd_rtt_nan_range_report(struct bcm_cfg80211 *cfg,
+ rtt_result_t *rtt_result)
+{
+ wl_nan_ev_rng_rpt_ind_t range_res;
+
+ UNUSED_PARAMETER(range_res);
+ if (!dhd_rtt_is_valid_measurement(rtt_result)) {
+ /* Drop Invalid Measurements for NAN RTT report */
+ DHD_RTT(("dhd_rtt_nan_range_report: Drop Invalid Measurements\n"));
+ goto exit;
+ }
+ bzero(&range_res, sizeof(range_res));
+ range_res.indication = 0;
+ range_res.dist_mm = rtt_result->report.distance;
+	/* same src and dest len, ignoring ret val here */
+ (void)memcpy_s(&range_res.peer_m_addr, ETHER_ADDR_LEN,
+ &rtt_result->report.addr, ETHER_ADDR_LEN);
+ wl_cfgnan_process_range_report(cfg, &range_res);
+
+exit:
+ return;
+}
+
+static int
+dhd_rtt_handle_nan_burst_end(dhd_pub_t *dhd, struct ether_addr *peer_addr,
+ wl_proxd_event_t *proxd_ev_data, int tlvs_len)
+{
+ struct net_device *ndev = NULL;
+ struct bcm_cfg80211 *cfg = NULL;
+ nan_ranging_inst_t *rng_inst = NULL;
+ rtt_status_info_t *rtt_status = NULL;
+ rtt_result_t *rtt_result = NULL;
+ bool is_geofence = FALSE;
+ int ret = BCME_OK;
+
+ ndev = dhd_linux_get_primary_netdev(dhd);
+ cfg = wiphy_priv(ndev->ieee80211_ptr->wiphy);
+
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", ret);
+ NAN_MUTEX_LOCK();
+ mutex_lock(&rtt_status->rtt_mutex);
- /* average distance */
- if (avg_dist != FTM_INVALID) {
- rtt_report->distance = (avg_dist >> 8) * 1000; /* meter -> mm */
- rtt_report->distance += (avg_dist & 0xff) * 1000 / 256;
- } else {
- rtt_report->distance = FTM_INVALID;
+ if ((cfg->nan_enable == FALSE) ||
+ ETHER_ISNULLADDR(peer_addr)) {
+ DHD_RTT_ERR(("Received Burst End with NULL ether addr, "
+ "or nan disable, nan_enable = %d\n", cfg->nan_enable));
+ ret = BCME_UNSUPPORTED;
+ goto exit;
}
- /* time stamp */
- /* get the time elapsed from boot time */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
- get_monotonic_boottime(&ts);
- rtt_report->ts = (uint64)TIMESPEC_TO_US(ts);
-#endif /* LINUX_VER >= 2.6.39 */
- if (proxd_status == WL_PROXD_E_REMOTE_FAIL) {
- /* retry time after failure */
- p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl);
- p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu);
- rtt_report->retry_after_duration = FTM_INTVL2SEC(&p_time); /* s -> s */
- DHD_RTT((">\tretry_after: %d%s\n",
- ltoh32_ua(&p_data_info->u.retry_after.intvl),
- ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.retry_after.tmu))));
+ rng_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
+ if (rng_inst) {
+ is_geofence = (rng_inst->range_type
+ == RTT_TYPE_NAN_GEOFENCE);
} else {
- /* burst duration */
- p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl);
- p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu);
- rtt_report->burst_duration = FTM_INTVL2MSEC(&p_time); /* s -> ms */
- DHD_RTT((">\tburst_duration: %d%s\n",
- ltoh32_ua(&p_data_info->u.burst_duration.intvl),
- ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.burst_duration.tmu))));
- DHD_RTT(("rtt_report->burst_duration : %d\n", rtt_report->burst_duration));
+ DHD_RTT_ERR(("Received Burst End without Ranging Instance\n"));
+ ret = BCME_ERROR;
+ goto exit;
}
- /* display detail if available */
- num_rtt = ltoh16_ua(&p_data_info->num_rtt);
- if (num_rtt > 0) {
- DHD_RTT((">\tnum rtt: %d samples\n", num_rtt));
- p_sample = &p_data_info->rtt[1];
- for (i = 0; i < num_rtt; i++) {
- snr = 0;
- bitflips = 0;
- tof_phy_error = 0;
- tof_phy_tgt_error = 0;
- tof_target_snr = 0;
- tof_target_bitflips = 0;
- rssi = 0;
- dist = 0;
- num_ftm = p_data_info->num_ftm;
- /* FTM frames 1,4,7,11 have valid snr, rssi and bitflips */
- if ((i % num_ftm) == 1) {
- rssi = (wl_proxd_rssi_t) ltoh16_ua(&p_sample->rssi);
- snr = (wl_proxd_snr_t) ltoh16_ua(&p_sample->snr);
- bitflips = (wl_proxd_bitflips_t) ltoh16_ua(&p_sample->bitflips);
- tof_phy_error =
- (wl_proxd_phy_error_t)
- ltoh32_ua(&p_sample->tof_phy_error);
- tof_phy_tgt_error =
- (wl_proxd_phy_error_t)
- ltoh32_ua(&p_sample->tof_tgt_phy_error);
- tof_target_snr =
- (wl_proxd_snr_t)
- ltoh16_ua(&p_sample->tof_tgt_snr);
- tof_target_bitflips =
- (wl_proxd_bitflips_t)
- ltoh16_ua(&p_sample->tof_tgt_bitflips);
- dist = ltoh32_ua(&p_sample->distance);
- chanspec = ltoh32_ua(&p_sample->chanspec);
+
+ /* allocate rtt_results for new results */
+ rtt_result = (rtt_result_t *)MALLOCZ(dhd->osh, sizeof(rtt_result_t));
+ if (!rtt_result) {
+ ret = BCME_NORESOURCE;
+ goto exit;
+ }
+
+ if (is_geofence) {
+ ret = dhd_rtt_parse_result_event(proxd_ev_data, tlvs_len, rtt_result);
+ if (ret != BCME_OK) {
+ DHD_RTT_ERR(("dhd_rtt_handle_nan_burst_end: "
+ "dhd_rtt_parse_result_event failed\n"));
+ goto exit;
+ }
+ } else {
+ if (RTT_IS_STOPPED(rtt_status)) {
+ /* Ignore the Proxd event */
+ DHD_RTT((" event handler rtt is stopped \n"));
+ if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) {
+ DHD_RTT(("Device is target/Responder. Recv the event. \n"));
} else {
- rssi = -1;
- snr = 0;
- bitflips = 0;
- dist = 0;
- tof_target_bitflips = 0;
- tof_target_snr = 0;
- tof_phy_tgt_error = 0;
+ ret = BCME_UNSUPPORTED;
+ goto exit;
}
- DHD_RTT((">\t sample[%d]: id=%d rssi=%d snr=0x%x bitflips=%d"
- " tof_phy_error %x tof_phy_tgt_error %x target_snr=0x%x"
- " target_bitflips=%d dist=%d rtt=%d%s status %s Type %s"
- " coreid=%d chanspec=0x%08x\n",
- i, p_sample->id, rssi, snr,
- bitflips, tof_phy_error, tof_phy_tgt_error,
- tof_target_snr,
- tof_target_bitflips, dist,
- ltoh32_ua(&p_sample->rtt.intvl),
- ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample->rtt.tmu)),
- ftm_status_value_to_logstr(ltoh32_ua(&p_sample->status)),
- ftm_frame_types[i % num_ftm], p_sample->coreid,
- chanspec));
- p_sample++;
}
+ ret = dhd_rtt_handle_directed_rtt_burst_end(dhd, peer_addr,
+ proxd_ev_data, tlvs_len, rtt_result, TRUE);
+ if (ret != BCME_OK) {
+ goto exit;
+ }
+
}
- return err;
+
+exit:
+ mutex_unlock(&rtt_status->rtt_mutex);
+ if (ret == BCME_OK) {
+ dhd_rtt_nan_range_report(cfg, rtt_result);
+ }
+ if (rtt_result &&
+ ((ret != BCME_OK) || is_geofence)) {
+ kfree(rtt_result);
+ rtt_result = NULL;
+ }
+ NAN_MUTEX_UNLOCK();
+ return ret;
}
+#endif /* WL_NAN */
int
dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data)
wl_proxd_ftm_session_status_t session_status;
const ftm_strmap_entry_t *p_loginfo;
rtt_result_t *rtt_result;
- gfp_t kflags;
#ifdef WL_CFG80211
- int idx;
rtt_status_info_t *rtt_status;
- rtt_target_info_t *rtt_target_info;
- struct rtt_noti_callback *iter;
- rtt_results_header_t *entry, *next, *rtt_results_header = NULL;
- rtt_result_t *next2;
+ rtt_results_header_t *rtt_results_header = NULL;
bool is_new = TRUE;
#endif /* WL_CFG80211 */
DHD_RTT(("Enter %s \n", __FUNCTION__));
NULL_CHECK(dhd, "dhd is NULL", ret);
-#ifdef WL_CFG80211
- rtt_status = GET_RTTSTATE(dhd);
- NULL_CHECK(rtt_status, "rtt_status is NULL", ret);
-
- if (RTT_IS_STOPPED(rtt_status)) {
- /* Ignore the Proxd event */
- DHD_RTT((" event handler rtt is stopped \n"));
- if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) {
- DHD_RTT(("Device is target/Responder. Recv the event. \n"));
- } else {
- return ret;
- }
- }
-#endif /* WL_CFG80211 */
if (ntoh32_ua((void *)&event->datalen) < OFFSETOF(wl_proxd_event_t, tlvs)) {
DHD_RTT(("%s: wrong datalen:%d\n", __FUNCTION__,
ntoh32_ua((void *)&event->datalen)));
}
event_type = ntoh32_ua((void *)&event->event_type);
if (event_type != WLC_E_PROXD) {
- DHD_ERROR((" failed event \n"));
+ DHD_RTT_ERR((" failed event \n"));
return -EINVAL;
}
if (!event_data) {
- DHD_ERROR(("%s: event_data:NULL\n", __FUNCTION__));
+ DHD_RTT_ERR(("%s: event_data:NULL\n", __FUNCTION__));
return -EINVAL;
}
p_event = (wl_proxd_event_t *) event_data;
version = ltoh16(p_event->version);
if (version < WL_PROXD_API_VERSION) {
- DHD_ERROR(("ignore non-ftm event version = 0x%0x < WL_PROXD_API_VERSION (0x%x)\n",
+ DHD_RTT_ERR(("ignore non-ftm event version = 0x%0x < WL_PROXD_API_VERSION (0x%x)\n",
version, WL_PROXD_API_VERSION));
return ret;
}
-#ifdef WL_CFG80211
- if (!in_atomic()) {
- mutex_lock(&rtt_status->rtt_mutex);
- }
-#endif /* WL_CFG80211 */
- event_type = (wl_proxd_event_type_t) ltoh16(p_event->type);
- kflags = in_softirq()? GFP_ATOMIC : GFP_KERNEL;
+ event_type = (wl_proxd_event_type_t) ltoh16(p_event->type);
DHD_RTT(("event_type=0x%x, ntoh16()=0x%x, ltoh16()=0x%x\n",
p_event->type, ntoh16(p_event->type), ltoh16(p_event->type)));
p_loginfo = ftm_get_event_type_loginfo(event_type);
if (p_loginfo == NULL) {
- DHD_ERROR(("receive an invalid FTM event %d\n", event_type));
+ DHD_RTT_ERR(("receive an invalid FTM event %d\n", event_type));
ret = -EINVAL;
- goto exit; /* ignore this event */
+ return ret; /* ignore this event */
}
/* get TLVs len, skip over event header */
if (ltoh16(p_event->len) < OFFSETOF(wl_proxd_event_t, tlvs)) {
- DHD_ERROR(("invalid FTM event length:%d\n", ltoh16(p_event->len)));
+ DHD_RTT_ERR(("invalid FTM event length:%d\n", ltoh16(p_event->len)));
ret = -EINVAL;
- goto exit;
+ return ret;
}
tlvs_len = ltoh16(p_event->len) - OFFSETOF(wl_proxd_event_t, tlvs);
DHD_RTT(("receive '%s' event: version=0x%x len=%d method=%d sid=%d tlvs_len=%d\n",
ltoh16(p_event->sid),
tlvs_len));
#ifdef WL_CFG80211
- rtt_target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- /* find a rtt_report_header for this mac address */
- list_for_each_entry(entry, &rtt_status->rtt_results_cache, list) {
- if (!memcmp(&entry->peer_mac, &event->addr, ETHER_ADDR_LEN)) {
- /* found a rtt_report_header for peer_mac in the list */
- is_new = FALSE;
- rtt_results_header = entry;
- break;
- }
+#ifdef WL_NAN
+ if ((event_type == WL_PROXD_EVENT_BURST_END) &&
+ dhd_rtt_is_nan_peer(dhd, &event->addr)) {
+ DHD_RTT(("WL_PROXD_EVENT_BURST_END for NAN RTT\n"));
+ ret = dhd_rtt_handle_nan_burst_end(dhd, &event->addr, p_event, tlvs_len);
+ return ret;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+#endif /* WL_NAN */
+
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", ret);
+ mutex_lock(&rtt_status->rtt_mutex);
+
+ if (RTT_IS_STOPPED(rtt_status)) {
+ /* Ignore the Proxd event */
+ DHD_RTT((" event handler rtt is stopped \n"));
+ if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) {
+ DHD_RTT(("Device is target/Responder. Recv the event. \n"));
+ } else {
+ ret = BCME_NOTREADY;
+ goto exit;
+ }
+ }
+#endif /* WL_CFG80211 */
+
+#ifdef WL_CFG80211
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ is_new = !dhd_rtt_get_report_header(rtt_status,
+ &rtt_results_header, &event->addr);
+ GCC_DIAGNOSTIC_POP();
#endif /* WL_CFG80211 */
switch (event_type) {
case WL_PROXD_EVENT_SESSION_CREATE:
DHD_RTT(("WL_PROXD_EVENT_BURST_START\n"));
break;
case WL_PROXD_EVENT_BURST_END:
- DHD_RTT(("WL_PROXD_EVENT_BURST_END\n"));
-#ifdef WL_CFG80211
- if (is_new) {
- /* allocate new header for rtt_results */
- rtt_results_header = kzalloc(sizeof(rtt_results_header_t), kflags);
- if (!rtt_results_header) {
- ret = -ENOMEM;
- goto exit;
- }
- /* Initialize the head of list for rtt result */
- INIT_LIST_HEAD(&rtt_results_header->result_list);
- rtt_results_header->peer_mac = event->addr;
- list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache);
+ DHD_RTT(("WL_PROXD_EVENT_BURST_END for Legacy RTT\n"));
+ /* allocate rtt_results for new legacy rtt results */
+ rtt_result = (rtt_result_t *)MALLOCZ(dhd->osh, sizeof(rtt_result_t));
+ if (!rtt_result) {
+ ret = -ENOMEM;
+ goto exit;
}
-#endif /* WL_CFG80211 */
- if (tlvs_len > 0) {
- /* allocate rtt_results for new results */
- rtt_result = kzalloc(sizeof(rtt_result_t), kflags);
- if (!rtt_result) {
- ret = -ENOMEM;
- goto exit;
- }
- /* unpack TLVs and invokes the cbfn to print the event content TLVs */
- ret = bcm_unpack_xtlv_buf((void *) &(rtt_result->report),
- (uint8 *)&p_event->tlvs[0], tlvs_len,
- BCM_XTLV_OPTION_ALIGN32, rtt_unpack_xtlv_cbfn);
- if (ret != BCME_OK) {
- DHD_ERROR(("%s : Failed to unpack xtlv for an event\n",
- __FUNCTION__));
- goto exit;
- }
-#ifdef WL_CFG80211
- /* fill out the results from the configuration param */
- rtt_result->report.ftm_num = rtt_target_info->num_frames_per_burst;
- rtt_result->report.type = RTT_TWO_WAY;
- DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num));
- rtt_result->report_len = RTT_REPORT_SIZE;
-
- list_add_tail(&rtt_result->list, &rtt_results_header->result_list);
- rtt_results_header->result_cnt++;
- rtt_results_header->result_tot_len += rtt_result->report_len;
-#endif /* WL_CFG80211 */
+ ret = dhd_rtt_handle_directed_rtt_burst_end(dhd, &event->addr,
+ p_event, tlvs_len, rtt_result, FALSE);
+ if (rtt_result && (ret != BCME_OK)) {
+ kfree(rtt_result);
+ rtt_result = NULL;
+ goto exit;
}
break;
case WL_PROXD_EVENT_SESSION_END:
- DHD_RTT(("WL_PROXD_EVENT_SESSION_END\n"));
+ DHD_RTT(("WL_PROXD_EVENT_SESSION_END\n"));
+ if (dhd_rtt_is_nan_peer(dhd, &event->addr)) {
+ /*
+ * Nothing to do for session end for nan peer
+			 * All handled in burst end and NAN range report
+ */
+ break;
+ }
#ifdef WL_CFG80211
if (!RTT_IS_ENABLED(rtt_status)) {
DHD_RTT(("Ignore the session end evt\n"));
(uint8 *)&p_event->tlvs[0], tlvs_len,
BCM_XTLV_OPTION_ALIGN32, rtt_unpack_xtlv_cbfn);
if (ret != BCME_OK) {
- DHD_ERROR(("%s : Failed to unpack xtlv for an event\n",
+ DHD_RTT_ERR(("%s : Failed to unpack xtlv for an event\n",
__FUNCTION__));
goto exit;
}
#ifdef WL_CFG80211
/* In case of no result for the peer device, make fake result for error case */
if (is_new) {
- /* allocate new header for rtt_results */
- rtt_results_header = kzalloc(sizeof(rtt_results_header_t), GFP_KERNEL);
- if (!rtt_results_header) {
- ret = -ENOMEM;
- goto exit;
- }
- /* Initialize the head of list for rtt result */
- INIT_LIST_HEAD(&rtt_results_header->result_list);
- rtt_results_header->peer_mac = event->addr;
- list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache);
-
- /* allocate rtt_results for new results */
- rtt_result = kzalloc(sizeof(rtt_result_t), kflags);
- if (!rtt_result) {
- ret = -ENOMEM;
- kfree(rtt_results_header);
- goto exit;
- }
- /* fill out the results from the configuration param */
- rtt_result->report.ftm_num = rtt_target_info->num_frames_per_burst;
- rtt_result->report.type = RTT_TWO_WAY;
- DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num));
- rtt_result->report_len = RTT_REPORT_SIZE;
- rtt_result->report.status = RTT_STATUS_FAIL_NO_RSP;
- rtt_result->report.addr = rtt_target_info->addr;
- rtt_result->report.distance = FTM_INVALID;
- list_add_tail(&rtt_result->list, &rtt_results_header->result_list);
- rtt_results_header->result_cnt++;
- rtt_results_header->result_tot_len += rtt_result->report_len;
- }
- /* find next target to trigger RTT */
- for (idx = (rtt_status->cur_idx + 1);
- idx < rtt_status->rtt_config.rtt_target_cnt; idx++) {
- /* skip the disabled device */
- if (rtt_status->rtt_config.target_info[idx].disable) {
- continue;
- } else {
- /* set the idx to cur_idx */
- rtt_status->cur_idx = idx;
- break;
- }
- }
- if (idx < rtt_status->rtt_config.rtt_target_cnt) {
- /* restart to measure RTT from next device */
- DHD_ERROR(("restart to measure rtt\n"));
- schedule_work(&rtt_status->work);
- } else {
- DHD_RTT(("RTT_STOPPED\n"));
- rtt_status->status = RTT_STOPPED;
- schedule_work(&rtt_status->work);
- /* notify the completed information to others */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
- iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache);
- }
- /* remove the rtt results in cache */
- if (!list_empty(&rtt_status->rtt_results_cache)) {
- /* Iterate rtt_results_header list */
- list_for_each_entry_safe(entry, next,
- &rtt_status->rtt_results_cache, list) {
- list_del(&entry->list);
- /* Iterate rtt_result list */
- list_for_each_entry_safe(rtt_result, next2,
- &entry->result_list, list) {
- list_del(&rtt_result->list);
- kfree(rtt_result);
- }
- kfree(entry);
- }
- }
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- /* reinitialize the HEAD */
- INIT_LIST_HEAD(&rtt_status->rtt_results_cache);
- /* clear information for rtt_config */
- rtt_status->rtt_config.rtt_target_cnt = 0;
- memset(rtt_status->rtt_config.target_info, 0,
- TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
- rtt_status->cur_idx = 0;
+ dhd_rtt_create_failure_result(rtt_status, &event->addr);
}
+ DHD_RTT(("\n Not Nan peer..proceed to notify result and restart\n"));
+ dhd_rtt_handle_rtt_session_end(dhd);
#endif /* WL_CFG80211 */
break;
case WL_PROXD_EVENT_SESSION_RESTART:
DHD_RTT(("WL_PROXD_EVENT_COLLECT\n"));
if (tlvs_len > 0) {
void *buffer = NULL;
- if (!(buffer = kzalloc(tlvs_len, kflags))) {
+ if (!(buffer = (void *)MALLOCZ(dhd->osh, tlvs_len))) {
ret = -ENOMEM;
goto exit;
}
BCM_XTLV_OPTION_NONE, rtt_unpack_xtlv_cbfn);
kfree(buffer);
if (ret != BCME_OK) {
- DHD_ERROR(("%s : Failed to unpack xtlv for event %d\n",
+ DHD_RTT_ERR(("%s : Failed to unpack xtlv for event %d\n",
__FUNCTION__, event_type));
goto exit;
}
DHD_RTT(("WL_PROXD_EVENT_MF_STATS\n"));
if (tlvs_len > 0) {
void *buffer = NULL;
- if (!(buffer = kzalloc(tlvs_len, kflags))) {
+ if (!(buffer = (void *)MALLOCZ(dhd->osh, tlvs_len))) {
ret = -ENOMEM;
goto exit;
}
BCM_XTLV_OPTION_NONE, rtt_unpack_xtlv_cbfn);
kfree(buffer);
if (ret != BCME_OK) {
- DHD_ERROR(("%s : Failed to unpack xtlv for event %d\n",
+ DHD_RTT_ERR(("%s : Failed to unpack xtlv for event %d\n",
__FUNCTION__, event_type));
goto exit;
}
break;
default:
- DHD_ERROR(("WLC_E_PROXD: not supported EVENT Type:%d\n", event_type));
+ DHD_RTT_ERR(("WLC_E_PROXD: not supported EVENT Type:%d\n", event_type));
break;
}
exit:
#ifdef WL_CFG80211
- if (!in_atomic()) {
- mutex_unlock(&rtt_status->rtt_mutex);
- }
+ mutex_unlock(&rtt_status->rtt_mutex);
#endif /* WL_CFG80211 */
return ret;
{
rtt_status_info_t *rtt_status;
dhd_pub_t *dhd;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
rtt_status = container_of(work, rtt_status_info_t, work);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- if (rtt_status == NULL) {
- DHD_ERROR(("%s : rtt_status is NULL\n", __FUNCTION__));
- return;
- }
+ GCC_DIAGNOSTIC_POP();
+
dhd = rtt_status->dhd;
if (dhd == NULL) {
- DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
+ DHD_RTT_ERR(("%s : dhd is NULL\n", __FUNCTION__));
return;
}
(void) dhd_rtt_start(dhd);
ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);
}
} else {
- DHD_ERROR(("Failed to get the chanspec \n"));
+ DHD_RTT_ERR(("Failed to get the chanspec \n"));
}
return err;
}
wf_chspec_ntoa(chanspec, chanbuf)));
err = wldev_iovar_setint(dev, "chanspec", chanspec);
if (err) {
- DHD_ERROR(("Failed to set the chanspec \n"));
+ DHD_RTT_ERR(("Failed to set the chanspec \n"));
}
}
rtt_status->pm = PM_OFF;
err = wldev_ioctl_get(dev, WLC_GET_PM, &rtt_status->pm, sizeof(rtt_status->pm));
DHD_RTT(("Current PM value read %d\n", rtt_status->pm));
if (err) {
- DHD_ERROR(("Failed to get the PM value \n"));
+ DHD_RTT_ERR(("Failed to get the PM value \n"));
} else {
err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
if (err) {
- DHD_ERROR(("Failed to set the PM \n"));
+ DHD_RTT_ERR(("Failed to set the PM \n"));
rtt_status->pm_restore = FALSE;
} else {
rtt_status->pm_restore = TRUE;
if (!RTT_IS_ENABLED(rtt_status)) {
err = dhd_rtt_ftm_enable(dhd, TRUE);
if (err) {
- DHD_ERROR(("Failed to enable FTM (%d)\n", err));
+ DHD_RTT_ERR(("Failed to enable FTM (%d)\n", err));
goto exit;
}
DHD_RTT(("FTM enabled \n"));
exit:
if (err) {
rtt_status->status = RTT_STOPPED;
- DHD_ERROR(("rtt is stopped %s \n", __FUNCTION__));
+ DHD_RTT_ERR(("rtt is stopped %s \n", __FUNCTION__));
dhd_rtt_ftm_enable(dhd, FALSE);
DHD_RTT(("restoring the PM value \n"));
if (rtt_status->pm_restore) {
pm = PM_FAST;
err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
if (err) {
- DHD_ERROR(("Failed to restore PM \n"));
+ DHD_RTT_ERR(("Failed to restore PM \n"));
} else {
rtt_status->pm_restore = FALSE;
}
DHD_RTT(("Enter %s \n", __FUNCTION__));
err = dhd_rtt_ftm_enable(dhd, FALSE);
if (err) {
- DHD_ERROR(("failed to disable FTM (%d)\n", err));
+ DHD_RTT_ERR(("failed to disable FTM (%d)\n", err));
}
rtt_status->status = RTT_STOPPED;
if (rtt_status->pm_restore) {
DHD_RTT(("pm_restore =%d \n", rtt_status->pm_restore));
err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
if (err) {
- DHD_ERROR(("Failed to restore PM \n"));
+ DHD_RTT_ERR(("Failed to restore PM \n"));
} else {
rtt_status->pm_restore = FALSE;
}
int32 drv_up = 1;
int32 version;
rtt_status_info_t *rtt_status;
+ ftm_config_param_info_t ftm_params[FTM_MAX_PARAMS];
+ int ftm_param_cnt = 0;
+
NULL_CHECK(dhd, "dhd is NULL", err);
dhd->rtt_supported = FALSE;
if (dhd->rtt_state) {
return err;
}
- dhd->rtt_state = kzalloc(sizeof(rtt_status_info_t), GFP_KERNEL);
+ dhd->rtt_state = (rtt_status_info_t *)MALLOCZ(dhd->osh,
+ sizeof(rtt_status_info_t));
if (dhd->rtt_state == NULL) {
err = BCME_NOMEM;
- DHD_ERROR(("%s : failed to create rtt_state\n", __FUNCTION__));
+ DHD_RTT_ERR(("%s : failed to create rtt_state\n", __FUNCTION__));
return err;
}
bzero(dhd->rtt_state, sizeof(rtt_status_info_t));
rtt_status = GET_RTTSTATE(dhd);
rtt_status->rtt_config.target_info =
- kzalloc(TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT), GFP_KERNEL);
+ (rtt_target_info_t *)MALLOCZ(dhd->osh,
+ TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
if (rtt_status->rtt_config.target_info == NULL) {
- DHD_ERROR(("%s failed to allocate the target info for %d\n",
+ DHD_RTT_ERR(("%s failed to allocate the target info for %d\n",
__FUNCTION__, RTT_MAX_TARGET_CNT));
err = BCME_NOMEM;
goto exit;
ret = dhd_rtt_get_version(dhd, &version);
if (ret == BCME_OK && (version == WL_PROXD_API_VERSION)) {
- DHD_ERROR(("%s : FTM is supported\n", __FUNCTION__));
+ DHD_RTT_ERR(("%s : FTM is supported\n", __FUNCTION__));
dhd->rtt_supported = TRUE;
/* rtt_status->rtt_capa.proto |= RTT_CAP_ONE_WAY; */
rtt_status->rtt_capa.proto |= RTT_CAP_FTM_WAY;
rtt_status->rtt_capa.bw |= RTT_BW_80;
} else {
if ((ret != BCME_OK) || (version == 0)) {
- DHD_ERROR(("%s : FTM is not supported\n", __FUNCTION__));
+ DHD_RTT_ERR(("%s : FTM is not supported\n", __FUNCTION__));
} else {
- DHD_ERROR(("%s : FTM version mismatch between HOST (%d) and FW (%d)\n",
+ DHD_RTT_ERR(("%s : FTM version mismatch between HOST (%d) and FW (%d)\n",
__FUNCTION__, WL_PROXD_API_VERSION, version));
}
}
/* cancel all of RTT request once we got the cancel request */
rtt_status->all_cancel = TRUE;
mutex_init(&rtt_status->rtt_mutex);
+ mutex_init(&rtt_status->rtt_work_mutex);
+ mutex_init(&rtt_status->geofence_mutex);
INIT_LIST_HEAD(&rtt_status->noti_fn_list);
INIT_LIST_HEAD(&rtt_status->rtt_results_cache);
INIT_WORK(&rtt_status->work, dhd_rtt_work);
+ /* initialize proxd timer */
+ INIT_DELAYED_WORK(&rtt_status->proxd_timeout, dhd_rtt_timeout_work);
+#ifdef WL_NAN
+ /* initialize proxd retry timer */
+ INIT_DELAYED_WORK(&rtt_status->rtt_retry_timer, dhd_rtt_retry_work);
+#endif /* WL_NAN */
+ /* Global proxd config */
+ ftm_params[ftm_param_cnt].event_mask = ((1 << WL_PROXD_EVENT_BURST_END) |
+ (1 << WL_PROXD_EVENT_SESSION_END));
+ ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_EVENT_MASK;
+ dhd_rtt_ftm_config(dhd, 0, FTM_CONFIG_CAT_GENERAL,
+ ftm_params, ftm_param_cnt);
exit:
if (err < 0) {
kfree(rtt_status->rtt_config.target_info);
rtt_status->status = RTT_STOPPED;
DHD_RTT(("rtt is stopped %s \n", __FUNCTION__));
/* clear evt callback list */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
-
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
if (!list_empty(&rtt_status->noti_fn_list)) {
list_for_each_entry_safe(iter, iter2, &rtt_status->noti_fn_list, list) {
list_del(&iter->list);
kfree(rtt_header);
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
+
+ if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
+ cancel_delayed_work(&rtt_status->rtt_retry_timer);
+ }
kfree(rtt_status->rtt_config.target_info);
kfree(dhd->rtt_state);
dhd->rtt_state = NULL;
/*
* Broadcom Dongle Host Driver (DHD), RTT
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
#define DEFAULT_RETRY_CNT 6
#define DEFAULT_FTM_FREQ 5180
#define DEFAULT_FTM_CNTR_FREQ0 5210
+#define RTT_MAX_GEOFENCE_TARGET_CNT 8
#define TARGET_INFO_SIZE(count) (sizeof(rtt_target_info_t) * count)
#define TARGET_TYPE(target) (target->type)
+#define RTT_IS_ENABLED(rtt_status) (rtt_status->status == RTT_ENABLED)
+#define RTT_IS_STOPPED(rtt_status) (rtt_status->status == RTT_STOPPED)
+
+#define GEOFENCE_RTT_LOCK(rtt_status) mutex_lock(&(rtt_status)->geofence_mutex)
+#define GEOFENCE_RTT_UNLOCK(rtt_status) mutex_unlock(&(rtt_status)->geofence_mutex)
+
#ifndef BIT
#define BIT(x) (1 << (x))
#endif // endif
#define WL_RATE_54M 108 /* in 500kbps units */
#define GET_RTTSTATE(dhd) ((rtt_status_info_t *)dhd->rtt_state)
+#ifdef WL_NAN
+/* RTT Retry Timer Interval */
+#define DHD_RTT_RETRY_TIMER_INTERVAL_MS 3000u
+#endif /* WL_NAN */
+
+#define DHD_RTT_INVALID_TARGET_INDEX -1
+
enum rtt_role {
RTT_INITIATOR = 0,
RTT_TARGET = 1
RTT_STATUS_FAIL_SCHEDULE = 11, // request could not be scheduled
RTT_STATUS_FAIL_BUSY_TRY_LATER = 12, // responder cannot collaborate at time of request
RTT_STATUS_INVALID_REQ = 13, // bad request args
- RTT_STATUS_NO_WIFI = 14, // WiFi not enabled
- // Responder overrides param info, cannot range with new params
+	RTT_STATUS_NO_WIFI = 14, // WiFi not enabled
+	// Responder overrides param info, cannot range with new params
RTT_STATUS_FAIL_FTM_PARAM_OVERRIDE = 15
} rtt_reason_t;
RTT_RATE_160M
};
+typedef enum ranging_type {
+ RTT_TYPE_INVALID = 0,
+ RTT_TYPE_LEGACY = 1,
+ RTT_TYPE_NAN_DIRECTED = 2,
+ RTT_TYPE_NAN_GEOFENCE = 3
+} ranging_type_t;
+
#define FTM_MAX_NUM_BURST_EXP 14
#define HAS_11MC_CAP(cap) (cap & RTT_CAP_FTM_WAY)
#define HAS_ONEWAY_CAP(cap) (cap & RTT_CAP_ONE_WAY)
* in a single frame
*/
uint32 num_frames_per_burst;
- /* num of frames in each RTT burst
+ /*
+ * num of frames in each RTT burst
* for single side, measurement result num = frame number
* for 2 side RTT, measurement result num = frame number - 1
*/
* at the end of the burst_duration it requested.
*/
uint32 burst_duration;
+ uint32 burst_timeout;
uint8 preamble; /* 1 - Legacy, 2 - HT, 4 - VHT */
uint8 bw; /* 5, 10, 20, 40, 80, 160 */
} rtt_target_info_t;
+typedef struct rtt_goefence_target_info {
+ bool valid;
+ struct ether_addr peer_addr;
+} rtt_geofence_target_info_t;
+
typedef struct rtt_config_params {
int8 rtt_target_cnt;
rtt_target_info_t *target_info;
} rtt_config_params_t;
+typedef struct rtt_geofence_cfg {
+ int8 geofence_target_cnt;
+ bool rtt_in_progress;
+ bool role_concurr_state;
+ int8 cur_target_idx;
+ rtt_geofence_target_info_t geofence_target_info[RTT_MAX_GEOFENCE_TARGET_CNT];
+ int geofence_rtt_interval;
+#ifdef RTT_GEOFENCE_CONT
+ bool geofence_cont;
+#endif /* RTT_GEOFENCE_CONT */
+} rtt_geofence_cfg_t;
+
+/*
+ * Keep Adding more reasons
+ * going forward if needed
+ */
+enum rtt_schedule_reason {
+ RTT_SCHED_HOST_TRIGGER = 1, /* On host command for directed RTT */
+ RTT_SCHED_SUB_MATCH = 2, /* on Sub Match for svc with range req */
+ RTT_SCHED_DIR_TRIGGER_FAIL = 3, /* On failure of Directed RTT Trigger */
+ RTT_SCHED_DP_END = 4, /* ON NDP End event from fw */
+	RTT_SCHED_DP_REJECTED = 5, /* On receiving reject dp event from fw */
+ RTT_SCHED_RNG_RPT_DIRECTED = 6, /* On Ranging report for directed RTT */
+ RTT_SCHED_RNG_TERM = 7, /* On Range Term Indicator */
+ RTT_SHCED_HOST_DIRECTED_TERM = 8, /* On host terminating directed RTT sessions */
+ RTT_SCHED_RNG_RPT_GEOFENCE = 9, /* On Ranging report for geofence RTT */
+ RTT_SCHED_RTT_RETRY_GEOFENCE = 10, /* On Geofence Retry */
+ RTT_SCHED_RNG_TERM_PEND_ROLE_CHANGE = 11 /* On Rng Term, while pending role change */
+};
+
+/*
+ * Keep Adding more invalid RTT states
+ * going forward if needed
+ */
+enum rtt_invalid_state {
+ RTT_STATE_VALID = 0, /* RTT state is valid */
+ RTT_STATE_INV_REASON_NDP_EXIST = 1 /* RTT state invalid as ndp exists */
+};
+
typedef struct rtt_status_info {
- dhd_pub_t *dhd;
- int8 status; /* current status for the current entry */
- int8 txchain; /* current device tx chain */
- int pm; /* to save current value of pm */
- int8 pm_restore; /* flag to reset the old value of pm */
- int8 cur_idx; /* current entry to do RTT */
- bool all_cancel; /* cancel all request once we got the cancel requet */
- uint32 flags; /* indicate whether device is configured as initiator or target */
- struct capability {
+ dhd_pub_t *dhd;
+ int8 status; /* current status for the current entry */
+ int8 txchain; /* current device tx chain */
+ int pm; /* to save current value of pm */
+ int8 pm_restore; /* flag to reset the old value of pm */
+ int8 cur_idx; /* current entry to do RTT */
+	bool all_cancel; /* cancel all request once we got the cancel request */
+ uint32 flags; /* indicate whether device is configured as initiator or target */
+ struct capability {
int32 proto :8;
int32 feature :8;
int32 preamble :8;
int32 bw :8;
} rtt_capa; /* rtt capability */
- struct mutex rtt_mutex;
- rtt_config_params_t rtt_config;
- struct work_struct work;
- struct list_head noti_fn_list;
- struct list_head rtt_results_cache; /* store results for RTT */
+ struct mutex rtt_mutex;
+ struct mutex rtt_work_mutex;
+ struct mutex geofence_mutex;
+ rtt_config_params_t rtt_config;
+ rtt_geofence_cfg_t geofence_cfg;
+ struct work_struct work;
+ struct list_head noti_fn_list;
+ struct list_head rtt_results_cache; /* store results for RTT */
+ int rtt_sched_reason; /* rtt_schedule_reason: what scheduled RTT */
+ struct delayed_work proxd_timeout; /* Proxd Timeout work */
+ struct delayed_work rtt_retry_timer; /* Timer for retry RTT after all targets done */
} rtt_status_info_t;
typedef struct rtt_report {
struct list_head list;
struct list_head result_list;
} rtt_results_header_t;
-
+struct rtt_result_detail {
+ uint8 num_ota_meas;
+ uint32 result_flags;
+};
/* rtt_result to link all of rtt_report */
typedef struct rtt_result {
struct list_head list;
struct rtt_report report;
int32 report_len; /* total length of rtt_report */
+ struct rtt_result_detail rtt_detail;
+ int32 detail_len;
} rtt_result_t;
/* RTT Capabilities */
int
dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params);
+#ifdef WL_NAN
+void dhd_rtt_initialize_geofence_cfg(dhd_pub_t *dhd);
+#ifdef RTT_GEOFENCE_CONT
+void dhd_rtt_set_geofence_cont_ind(dhd_pub_t *dhd, bool geofence_cont);
+
+void dhd_rtt_get_geofence_cont_ind(dhd_pub_t *dhd, bool* geofence_cont);
+#endif /* RTT_GEOFENCE_CONT */
+
+#ifdef RTT_GEOFENCE_INTERVAL
+void dhd_rtt_set_geofence_rtt_interval(dhd_pub_t *dhd, int interval);
+#endif /* RTT_GEOFENCE_INTERVAL */
+
+void dhd_rtt_set_role_concurrency_state(dhd_pub_t *dhd, bool state);
+
+bool dhd_rtt_get_role_concurrency_state(dhd_pub_t *dhd);
+
+int8 dhd_rtt_get_geofence_target_cnt(dhd_pub_t *dhd);
+
+void dhd_rtt_set_geofence_rtt_state(dhd_pub_t *dhd, bool state);
+
+bool dhd_rtt_get_geofence_rtt_state(dhd_pub_t *dhd);
+
+rtt_geofence_target_info_t*
+dhd_rtt_get_geofence_target_head(dhd_pub_t *dhd);
+
+rtt_geofence_target_info_t*
+dhd_rtt_get_geofence_current_target(dhd_pub_t *dhd);
+
+rtt_geofence_target_info_t*
+dhd_rtt_get_geofence_target(dhd_pub_t *dhd, struct ether_addr* peer_addr,
+ int8 *index);
+
+int
+dhd_rtt_add_geofence_target(dhd_pub_t *dhd, rtt_geofence_target_info_t *target);
+
+int
+dhd_rtt_remove_geofence_target(dhd_pub_t *dhd, struct ether_addr *peer_addr);
+
+int
+dhd_rtt_delete_geofence_target_list(dhd_pub_t *dhd);
+
+int
+dhd_rtt_delete_nan_session(dhd_pub_t *dhd);
+#endif /* WL_NAN */
+
+uint8
+dhd_rtt_invalid_states(struct net_device *ndev, struct ether_addr *peer_addr);
+
+void
+dhd_rtt_schedule_rtt_work_thread(dhd_pub_t *dhd, int sched_reason);
+
int
dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt);
int
dhd_rtt_deinit(dhd_pub_t *dhd);
+
+#ifdef WL_CFG80211
+int dhd_rtt_handle_nan_rtt_session_end(dhd_pub_t *dhd,
+ struct ether_addr *peer);
+
+void dhd_rtt_move_geofence_cur_target_idx_to_next(dhd_pub_t *dhd);
+
+int8 dhd_rtt_get_geofence_cur_target_idx(dhd_pub_t *dhd);
+#endif /* WL_CFG80211 */
+
#endif /* __DHD_RTT_H__ */
/*
* DHD Bus Module for SDIO
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_sdio.c 771911 2018-07-12 12:35:33Z $
+ * $Id: dhd_sdio.c 825481 2019-06-14 10:06:03Z $
*/
#include <typedefs.h>
/* Device console log buffer state */
#define CONSOLE_LINE_MAX 192
-#define CONSOLE_BUFFER_MAX 2024
+#define CONSOLE_BUFFER_MAX 8192
typedef struct dhd_console {
uint count; /* Poll interval msec counter */
uint log_addr; /* Log struct address (fixed) */
bool usebufpool;
int32 txinrx_thres; /* num of in-queued pkts */
int32 dotxinrx; /* tx first in dhdsdio_readframes */
+#ifdef BCMSDIO_RXLIM_POST
+ bool rxlim_en;
+ uint32 rxlim_addr;
+#endif /* BCMSDIO_RXLIM_POST */
#ifdef SDTEST
/* external loopback */
bool ext_loop;
#if defined(DHD_FW_COREDUMP)
static int dhdsdio_mem_dump(dhd_bus_t *bus);
+static int dhdsdio_get_mem_dump(dhd_bus_t *bus);
#endif /* DHD_FW_COREDUMP */
static int dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap);
static int dhdsdio_download_state(dhd_bus_t *bus, bool enter);
(bus->sih->chip == BCM4362_CHIP_ID) ||
(bus->sih->chip == BCM43012_CHIP_ID) ||
(bus->sih->chip == BCM43014_CHIP_ID) ||
- (bus->sih->chip == BCM43751_CHIP_ID)) {
+ (bus->sih->chip == BCM43751_CHIP_ID) ||
+ (bus->sih->chip == BCM43752_CHIP_ID)) {
core_capext = TRUE;
} else {
core_capext = bcmsdh_reg_read(bus->sdh,
CHIPID(bus->sih->chip) == BCM43012_CHIP_ID ||
CHIPID(bus->sih->chip) == BCM4362_CHIP_ID ||
CHIPID(bus->sih->chip) == BCM43014_CHIP_ID ||
- CHIPID(bus->sih->chip) == BCM43751_CHIP_ID)
+ CHIPID(bus->sih->chip) == BCM43751_CHIP_ID ||
+ CHIPID(bus->sih->chip) == BCM43752_CHIP_ID)
dhdsdio_devcap_set(bus, SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC);
if (bus->sih->chip == BCM43012_CHIP_ID) {
DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
else if (ht_avail_error == HT_AVAIL_ERROR_MAX) {
bus->dhd->hang_reason = HANG_REASON_HT_AVAIL_ERROR;
dhd_os_send_hang_message(bus->dhd);
}
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
return BCME_ERROR;
} else {
ht_avail_error = 0;
}
#endif /* BT_OVER_SDIO */
+#ifdef USE_DYNAMIC_F2_BLKSIZE
int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size)
{
int func_blk_size = function_num;
bcmerr = dhd_bus_iovar_op(dhd, "sd_blocksize", NULL,
0, &func_blk_size, sizeof(int32), IOV_SET);
if (bcmerr != BCME_OK) {
- DHD_ERROR(("%s: Set F2 Block size error\n", __FUNCTION__));
+ DHD_ERROR(("%s: Set F%d Block size error\n", __FUNCTION__, function_num));
return BCME_ERROR;
}
}
return BCME_OK;
}
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(FORCE_WOWLAN)
void
prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK));
/* move from dhdsdio_sendfromq(), try to orphan skb early */
- if (bus->dhd->conf->orphan_move)
+ if (bus->dhd->conf->orphan_move == 1)
PKTORPHAN(pkt, bus->dhd->conf->tsq);
/* Check for existing queue, current flow-control, pending event, or pending clock */
#ifdef DHD_LOSSLESS_ROAMING
uint8 *pktdata;
struct ether_header *eh;
+#ifdef BDC
+ struct bdc_header *bdc_header;
+ uint8 data_offset;
+#endif // endif
#endif /* DHD_LOSSLESS_ROAMING */
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
pktdata = (uint8 *)PKTDATA(osh, pkts[i]);
#ifdef BDC
/* Skip BDC header */
- pktdata += BDC_HEADER_LEN + ((struct bdc_header *)pktdata)->dataOffset;
+ bdc_header = (struct bdc_header *)pktdata;
+ data_offset = bdc_header->dataOffset;
+ pktdata += BDC_HEADER_LEN + (data_offset << 2);
#endif // endif
eh = (struct ether_header *)pktdata;
if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
/* Restore to original priority for 802.1X packet */
if (prio == PRIO_8021D_NC) {
PKTSETPRIO(pkts[i], dhd->prio_8021x);
+#ifdef BDC
+ /* Restore to original priority in BDC header */
+ bdc_header->priority =
+ (dhd->prio_8021x & BDC_PRIORITY_MASK);
+#endif // endif
}
}
#endif /* DHD_LOSSLESS_ROAMING */
} else {
bus->dhd->txcnt_timeout++;
if (!bus->dhd->hang_was_sent) {
- DHD_ERROR(("%s: ctrl_frame_stat == TRUE txcnt_timeout=%d, bus->tx_max %d, bus->tx_seq %d\n",
- __FUNCTION__, bus->dhd->txcnt_timeout, bus->tx_max, bus->tx_seq));
+ DHD_ERROR(("%s: ctrl_frame_stat == TRUE txcnt_timeout=%d\n",
+ __FUNCTION__, bus->dhd->txcnt_timeout));
+#ifdef BCMSDIO_RXLIM_POST
+ DHD_ERROR(("%s: rxlim_en=%d, rxlim enable=%d, rxlim_addr=%d\n",
+ __FUNCTION__,
+ bus->dhd->conf->rxlim_en, bus->rxlim_en, bus->rxlim_addr));
+#endif /* BCMSDIO_RXLIM_POST */
}
#ifdef DHD_FW_COREDUMP
/* Collect socram dump */
return -EIO;
/* Wait until control frame is available */
- timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, false);
+ timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen);
dhd_os_sdlock(bus->dhd);
rxlen = bus->rxlen;
dhd_sched_dpc(bus->dhd);
/* Wait until control frame is available */
- timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, true);
+ timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen);
dhd_os_sdlock(bus->dhd);
rxlen = bus->rxlen;
sh->console_addr = ltoh32(sh->console_addr);
sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
+#ifdef BCMSDIO_RXLIM_POST
+ if (sh->flags & SDPCM_SHARED_RXLIM_POST) {
+ if (bus->dhd->conf->rxlim_en)
+ bus->rxlim_en = !!sh->msgtrace_addr;
+ bus->rxlim_addr = sh->msgtrace_addr;
+ DHD_INFO(("%s: rxlim_en=%d, rxlim enable=%d, rxlim_addr=%d\n",
+ __FUNCTION__,
+ bus->dhd->conf->rxlim_en, bus->rxlim_en, bus->rxlim_addr));
+ sh->flags &= ~SDPCM_SHARED_RXLIM_POST;
+ } else {
+ bus->rxlim_en = 0;
+ DHD_INFO(("%s: FW has no rx limit post support\n", __FUNCTION__));
+ }
+#endif /* BCMSDIO_RXLIM_POST */
+
if ((sh->flags & SDPCM_SHARED_VERSION_MASK) == 3 && SDPCM_SHARED_VERSION == 1)
return BCME_OK;
return dhdsdio_mem_dump(bus);
}
+int
+dhd_bus_get_mem_dump(dhd_pub_t *dhdp)
+{
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ return dhdsdio_get_mem_dump(dhdp->bus);
+}
+
static int
-dhdsdio_mem_dump(dhd_bus_t *bus)
+dhdsdio_get_mem_dump(dhd_bus_t *bus)
{
- int ret = 0;
- int size; /* Full mem size */
+ int ret = BCME_ERROR;
+ int size = bus->ramsize; /* Full mem size */
uint32 start = bus->dongle_ram_base; /* Start address */
uint read_size = 0; /* Read size of each iteration */
- uint8 *buf = NULL, *databuf = NULL;
+ uint8 *p_buf = NULL, *databuf = NULL;
/* Get full mem size */
- size = bus->ramsize;
- buf = dhd_get_fwdump_buf(bus->dhd, size);
- if (!buf) {
- DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size));
- return -1;
+ p_buf = dhd_get_fwdump_buf(bus->dhd, size);
+ if (!p_buf) {
+ DHD_ERROR(("%s: Out of memory (%d bytes)\n",
+ __FUNCTION__, size));
+ return BCME_ERROR;
}
dhd_os_sdlock(bus->dhd);
/* Read mem content */
DHD_ERROR(("Dump dongle memory\n"));
- databuf = buf;
- while (size)
- {
+ databuf = p_buf;
+ while (size) {
read_size = MIN(MEMBLOCK, size);
- if ((ret = dhdsdio_membytes(bus, FALSE, start, databuf, read_size)))
- {
+ ret = dhdsdio_membytes(bus, FALSE, start, databuf, read_size);
+ if (ret) {
DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
ret = BCME_ERROR;
break;
dhd_os_sdunlock(bus->dhd);
- /* schedule a work queue to perform actual memdump. dhd_mem_dump() performs the job */
- if (!ret) {
- /* buf, actually soc_ram free handled in dhd_{free,clear} */
- dhd_schedule_memdump(bus->dhd, buf, bus->ramsize);
+ return ret;
+}
+
+static int
+dhdsdio_mem_dump(dhd_bus_t *bus)
+{
+ dhd_pub_t *dhdp;
+ int ret = BCME_ERROR;
+
+ dhdp = bus->dhd;
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return ret;
+ }
+
+ ret = dhdsdio_get_mem_dump(bus);
+ if (ret) {
+ DHD_ERROR(("%s: failed to get mem dump, err=%d\n",
+ __FUNCTION__, ret));
+ } else {
+ /* schedule a work queue to perform actual memdump.
+ * dhd_mem_dump() performs the job
+ */
+ dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
+ /* soc_ram free handled in dhd_{free,clear} */
}
return ret;
return bcmerror;
}
+bool
+dhd_bus_is_multibp_capable(struct dhd_bus *bus)
+{
+ return MULTIBP_CAP(bus->sih);
+}
+
static int
dhdsdio_download_state(dhd_bus_t *bus, bool enter)
{
if (bcmsdh_iovar_op(bus->sdh, "sd_blocksize", &fnum, sizeof(int32),
&bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
bus->blocksize = 0;
- DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+ DHD_ERROR(("%s: fail on fn %d %s get\n",
+ __FUNCTION__, fnum, "sd_blocksize"));
} else {
- DHD_INFO(("%s: noted %s update, value now %d\n",
- __FUNCTION__, "sd_blocksize", bus->blocksize));
+ DHD_INFO(("%s: noted fn %d %s update, value now %d\n",
+ __FUNCTION__, fnum, "sd_blocksize", bus->blocksize));
dhdsdio_tune_fifoparam(bus);
}
if ((bus->dhd->busstate == DHD_BUS_DOWN) || bus->dhd->hang_was_sent) {
/* if Firmware already hangs disbale any interrupt */
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
bus->hostintmask = 0;
bcmsdh_intr_disable(bus->sdh);
/* Change our idea of bus state */
DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
}
#else /* BCMSPI */
uint8 saveclk;
#endif /* BCMSPI */
+#if defined(SDIO_ISR_THREAD)
+ int intr_extn;
+#endif
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
DHD_ERROR(("%s: enable 0x%02x, ready 0x%02x (waited %uus)\n",
__FUNCTION__, enable, ready, tmo.elapsed));
+#if defined(SDIO_ISR_THREAD)
+ if (dhdp->conf->intr_extn) {
+ intr_extn = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_INTR_EXTN, NULL);
+ if (intr_extn & 0x1) {
+ intr_extn |= 0x2;
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_INTR_EXTN, intr_extn, NULL);
+ }
+ }
+#endif
+
/* If F2 successfully enabled, set core and enable interrupts */
if (ready == enable) {
/* Make sure we're talking to the core. */
fail:
/* If we can't reach the device, signal failure */
- if (err || bcmsdh_regfail(sdh))
+ if (err || bcmsdh_regfail(sdh)) {
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
+ }
}
static void
/* Check window for sanity */
if ((uint8)(txmax - bus->tx_seq) > 0x70) {
- DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+ DHD_INFO(("%s: got unlikely tx max %d with tx_seq %d\n",
__FUNCTION__, txmax, bus->tx_seq));
txmax = bus->tx_max;
}
if ((uint8)(txmax - bus->tx_seq) > 0x70) {
#ifdef BCMSPI
if ((bus->bus == SPI_BUS) && !(dstatus & STATUS_F2_RX_READY)) {
- DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+ DHD_INFO(("%s: got unlikely tx max %d with tx_seq %d\n",
__FUNCTION__, txmax, bus->tx_seq));
txmax = bus->tx_seq + 2;
} else {
#endif /* BCMSPI */
- DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+ DHD_INFO(("%s: got unlikely tx max %d with tx_seq %d\n",
__FUNCTION__, txmax, bus->tx_seq));
txmax = bus->tx_max;
#ifdef BCMSPI
/* Check window for sanity */
if ((uint8)(txmax - bus->tx_seq) > 0x70) {
- DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+ DHD_INFO(("%s: got unlikely tx max %d with tx_seq %d\n",
__FUNCTION__, txmax, bus->tx_seq));
txmax = bus->tx_max;
}
devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
if (err) {
DHD_ERROR(("%s: error reading DEVCTL: %d\n", __FUNCTION__, err));
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
} else {
ASSERT(devctl & SBSDIO_DEVCTL_CA_INT_ONLY);
clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err) {
DHD_ERROR(("%s: error reading CSR: %d\n", __FUNCTION__, err));
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
}
if (err) {
DHD_ERROR(("%s: error reading DEVCTL: %d\n",
__FUNCTION__, err));
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
}
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
if (err) {
DHD_ERROR(("%s: error writing DEVCTL: %d\n",
__FUNCTION__, err));
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
}
bus->clkstate = CLK_AVAIL;
}
#endif /* defined(OOB_INTR_ONLY) && !defined(HW_OOB) */
+#ifdef BCMSDIO_RXLIM_POST
+ if (!DATAOK(bus) && bus->rxlim_en) {
+ uint8 rxlim = 0;
+ if (0 == dhdsdio_membytes(bus, FALSE, bus->rxlim_addr, (uint8 *)&rxlim, 1)) {
+ if (bus->tx_max != rxlim) {
+ DHD_INFO(("%s: bus->tx_max/rxlim=%d/%d\n", __FUNCTION__,
+ bus->tx_max, rxlim));
+ bus->tx_max = rxlim;
+ }
+ }
+ }
+#endif /* BCMSDIO_RXLIM_POST */
+
#ifdef PROP_TXSTATUS
dhd_wlfc_commit_packets(bus->dhd, (f_commitpkt_t)dhd_bus_txdata, (void *)bus, NULL, FALSE);
#endif // endif
} else {
DHD_ERROR(("%s: failed backplane access over SDIO, halting operation\n",
__FUNCTION__));
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
bus->intstatus = 0;
}
dhd_bus_t *bus = (dhd_bus_t*)arg;
bcmsdh_info_t *sdh;
- DHD_TRACE(("%s: Enter\n", __FUNCTION__));
-
if (!bus) {
DHD_ERROR(("%s : bus is null pointer , exit \n", __FUNCTION__));
return;
if (chipid == BCM4369_CHIP_ID)
return TRUE;
-
if (chipid == BCM4362_CHIP_ID)
return TRUE;
if (chipid == BCM43751_CHIP_ID)
return TRUE;
+ if (chipid == BCM43752_CHIP_ID)
+ return TRUE;
return FALSE;
}
#endif /* GET_CUSTOM_MAC_ENABLE */
/* Ok, have the per-port tell the stack we're open for business */
- if (dhd_register_if(bus->dhd, 0, TRUE) != 0) {
+ if (dhd_attach_net(bus->dhd, TRUE) != 0)
+ {
DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__));
goto fail;
}
value = F0_BLOCK_SIZE;
else
value = (cis[fn][25]<<8) | cis[fn][24] | (fn<<16);
+ /* Get block size from sd */
+ if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fn, sizeof(int32),
+ &size, sizeof(int32), FALSE) != BCME_OK) {
+ size = 0;
+ DHD_ERROR(("%s: fail on fn %d %s get\n",
+ __FUNCTION__, fn, "sd_blocksize"));
+ } else {
+ DHD_INFO(("%s: Initial value for fn %d %s is %d\n",
+ __FUNCTION__, fn, "sd_blocksize", size));
+ }
+ if (size != 0 && size < value) {
+ value = size;
+ }
+ value = fn << 16 | value;
if (bcmsdh_iovar_op(sdh, "sd_blocksize", NULL, 0, &value,
sizeof(value), TRUE) != BCME_OK) {
bus->blocksize = 0;
- DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__,
- "sd_blocksize"));
+ DHD_ERROR(("%s: fail on fn %d %s set\n", __FUNCTION__,
+ fn, "sd_blocksize"));
}
#endif
#ifdef DHD_DEBUG
case BCM43751_CHIP_ID:
bus->dongle_ram_base = CR4_43751_RAM_BASE;
break;
+ case BCM43752_CHIP_ID:
+ bus->dongle_ram_base = CR4_43752_RAM_BASE;
+ break;
case BCM4369_CHIP_ID:
bus->dongle_ram_base = CR4_4369_RAM_BASE;
break;
bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
#endif /* !BCMSPI */
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
bus->sleeping = FALSE;
bus->rxflow = FALSE;
if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fnum, sizeof(int32),
&bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
bus->blocksize = 0;
- DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+ DHD_ERROR(("%s: fail on fn %d %s get\n", __FUNCTION__, fnum, "sd_blocksize"));
} else {
- DHD_INFO(("%s: Initial value for %s is %d\n",
- __FUNCTION__, "sd_blocksize", bus->blocksize));
+ DHD_INFO(("%s: Initial value for fn %d %s is %d\n",
+ __FUNCTION__, fnum, "sd_blocksize", bus->blocksize));
dhdsdio_tune_fifoparam(bus);
}
return ret;
}
-void
-dhd_set_path_params(struct dhd_bus *bus)
-{
- /* External conf takes precedence if specified */
- dhd_conf_preinit(bus->dhd);
-
- if (bus->dhd->conf_path[0] == '\0') {
- dhd_conf_set_path(bus->dhd, "config.txt", bus->dhd->conf_path, bus->nv_path);
- }
- if (bus->dhd->clm_path[0] == '\0') {
- dhd_conf_set_path(bus->dhd, "clm.blob", bus->dhd->clm_path, bus->fw_path);
- }
-#ifdef CONFIG_PATH_AUTO_SELECT
- dhd_conf_set_conf_name_by_chip(bus->dhd, bus->dhd->conf_path);
-#endif
-
- dhd_conf_read_config(bus->dhd, bus->dhd->conf_path);
-
- dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path);
- dhd_conf_set_nv_name_by_chip(bus->dhd, bus->nv_path);
- dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path);
-
- dhd_conf_set_fw_name_by_mac(bus->dhd, bus->sdh, bus->sih, bus->fw_path);
- dhd_conf_set_nv_name_by_mac(bus->dhd, bus->sdh, bus->sih, bus->nv_path);
-
- printf("Final fw_path=%s\n", bus->fw_path);
- printf("Final nv_path=%s\n", bus->nv_path);
- printf("Final clm_path=%s\n", bus->dhd->clm_path);
- printf("Final conf_path=%s\n", bus->dhd->conf_path);
-
-}
-
void
dhd_set_bus_params(struct dhd_bus *bus)
{
/* Download the firmware */
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
- dhd_set_path_params(bus);
+ dhd_conf_set_path_params(bus->dhd, bus->sdh, bus->sih, bus->fw_path, bus->nv_path);
dhd_set_bus_params(bus);
ret = _dhdsdio_download_firmware(bus);
}
DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+	/* stop all interface network queues. */
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
bus->dhd->busstate = DHD_BUS_SUSPEND;
if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) {
DHD_ERROR(("Tx Request is not ended\n"));
bus->dhd->busstate = DHD_BUS_DATA;
+	/* resume all interface network queues. */
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
return -EBUSY;
}
DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
if (ret) {
bus->dhd->busstate = DHD_BUS_DATA;
+	/* resume all interface network queues. */
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
}
DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
dhd_os_busbusy_wake(bus->dhd);
DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
bus->dhd->busstate = DHD_BUS_DATA;
dhd_os_busbusy_wake(bus->dhd);
+	/* resume all interface network queues. */
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
return 0;
dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE, TRUE);
bus->dhd->dongle_reset = TRUE;
+ DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
bus->dhd->up = FALSE;
dhd_txglom_enable(dhdp, FALSE);
dhd_os_sdunlock(dhdp);
DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
} else {
/* App must have restored power to device before calling */
- printf("\n\n%s: == WLAN ON ==\n", __FUNCTION__);
+ printf("\n\n%s: == Power ON ==\n", __FUNCTION__);
if (bus->dhd->dongle_reset) {
/* Turn on WLAN */
bus->cl_devid)) {
DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+ DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
-#include <linux/module.h>\r
-#include <linux/kernel.h>\r
-#include <linux/init.h>\r
-#include <linux/platform_device.h>\r
-#include <linux/delay.h>\r
-#include <linux/err.h>\r
-#include <linux/skbuff.h>\r
-\r
-#define DHD_STATIC_VERSION_STR "100.10.315.2"\r
-\r
-#define BCMDHD_SDIO\r
-#define BCMDHD_PCIE\r
-\r
-enum dhd_prealloc_index {\r
- DHD_PREALLOC_PROT = 0,\r
-#if defined(BCMDHD_SDIO)\r
- DHD_PREALLOC_RXBUF = 1,\r
- DHD_PREALLOC_DATABUF = 2,\r
-#endif\r
- DHD_PREALLOC_OSL_BUF = 3,\r
- DHD_PREALLOC_SKB_BUF = 4,\r
- DHD_PREALLOC_WIPHY_ESCAN0 = 5,\r
- DHD_PREALLOC_WIPHY_ESCAN1 = 6,\r
- DHD_PREALLOC_DHD_INFO = 7,\r
- DHD_PREALLOC_DHD_WLFC_INFO = 8,\r
-#ifdef BCMDHD_PCIE\r
- DHD_PREALLOC_IF_FLOW_LKUP = 9,\r
-#endif\r
- DHD_PREALLOC_MEMDUMP_BUF = 10,\r
- DHD_PREALLOC_MEMDUMP_RAM = 11,\r
- DHD_PREALLOC_DHD_WLFC_HANGER = 12,\r
- DHD_PREALLOC_PKTID_MAP = 13,\r
- DHD_PREALLOC_PKTID_MAP_IOCTL = 14,\r
- DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15,\r
- DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16,\r
- DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17,\r
- DHD_PREALLOC_STAT_REPORT_BUF = 18,\r
- DHD_PREALLOC_WL_ESCAN_INFO = 19,\r
- DHD_PREALLOC_FW_VERBOSE_RING = 20,\r
- DHD_PREALLOC_FW_EVENT_RING = 21,\r
- DHD_PREALLOC_DHD_EVENT_RING = 22,\r
- DHD_PREALLOC_NAN_EVENT_RING = 23,\r
- DHD_PREALLOC_MAX\r
-};\r
-\r
-#define STATIC_BUF_MAX_NUM 20\r
-#define STATIC_BUF_SIZE (PAGE_SIZE*2)\r
-\r
-#define DHD_PREALLOC_PROT_SIZE (16 * 1024)\r
-#define DHD_PREALLOC_RXBUF_SIZE (24 * 1024)\r
-#define DHD_PREALLOC_DATABUF_SIZE (64 * 1024)\r
-#define DHD_PREALLOC_OSL_BUF_SIZE (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)\r
-#define DHD_PREALLOC_WIPHY_ESCAN0_SIZE (64 * 1024)\r
-#define DHD_PREALLOC_DHD_INFO_SIZE (32 * 1024)\r
-#define DHD_PREALLOC_MEMDUMP_RAM_SIZE (1290 * 1024)\r
-#define DHD_PREALLOC_DHD_WLFC_HANGER_SIZE (73 * 1024)\r
-#define DHD_PREALLOC_WL_ESCAN_INFO_SIZE (66 * 1024)\r
-#ifdef CONFIG_64BIT\r
-#define DHD_PREALLOC_IF_FLOW_LKUP_SIZE (20 * 1024 * 2)\r
-#else\r
-#define DHD_PREALLOC_IF_FLOW_LKUP_SIZE (20 * 1024)\r
-#endif\r
-#define FW_VERBOSE_RING_SIZE (256 * 1024)\r
-#define FW_EVENT_RING_SIZE (64 * 1024)\r
-#define DHD_EVENT_RING_SIZE (64 * 1024)\r
-#define NAN_EVENT_RING_SIZE (64 * 1024)\r
-\r
-#if defined(CONFIG_64BIT)\r
-#define WLAN_DHD_INFO_BUF_SIZE (24 * 1024)\r
-#define WLAN_DHD_WLFC_BUF_SIZE (64 * 1024)\r
-#define WLAN_DHD_IF_FLOW_LKUP_SIZE (64 * 1024)\r
-#else\r
-#define WLAN_DHD_INFO_BUF_SIZE (16 * 1024)\r
-#define WLAN_DHD_WLFC_BUF_SIZE (64 * 1024)\r
-#define WLAN_DHD_IF_FLOW_LKUP_SIZE (20 * 1024)\r
-#endif /* CONFIG_64BIT */\r
-#define WLAN_DHD_MEMDUMP_SIZE (800 * 1024)\r
-\r
-#define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1)\r
-#define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2)\r
-#define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4)\r
-\r
-#define DHD_SKB_1PAGE_BUF_NUM 8\r
-#ifdef BCMDHD_PCIE\r
-#define DHD_SKB_2PAGE_BUF_NUM 64\r
-#elif defined(BCMDHD_SDIO)\r
-#define DHD_SKB_2PAGE_BUF_NUM 8\r
-#endif\r
-#define DHD_SKB_4PAGE_BUF_NUM 1\r
-\r
-/* The number is defined in linux_osl.c\r
- * WLAN_SKB_1_2PAGE_BUF_NUM => STATIC_PKT_1_2PAGE_NUM\r
- * WLAN_SKB_BUF_NUM => STATIC_PKT_MAX_NUM\r
- */\r
-#define WLAN_SKB_1_2PAGE_BUF_NUM ((DHD_SKB_1PAGE_BUF_NUM) + \\r
- (DHD_SKB_2PAGE_BUF_NUM))\r
-#define WLAN_SKB_BUF_NUM ((WLAN_SKB_1_2PAGE_BUF_NUM) + (DHD_SKB_4PAGE_BUF_NUM))\r
-\r
-void *wlan_static_prot = NULL;\r
-void *wlan_static_rxbuf = NULL;\r
-void *wlan_static_databuf = NULL;\r
-void *wlan_static_osl_buf = NULL;\r
-void *wlan_static_scan_buf0 = NULL;\r
-void *wlan_static_scan_buf1 = NULL;\r
-void *wlan_static_dhd_info_buf = NULL;\r
-void *wlan_static_dhd_wlfc_info_buf = NULL;\r
-void *wlan_static_if_flow_lkup = NULL;\r
-void *wlan_static_dhd_memdump_ram_buf = NULL;\r
-void *wlan_static_dhd_wlfc_hanger_buf = NULL;\r
-void *wlan_static_wl_escan_info_buf = NULL;\r
-void *wlan_static_fw_verbose_ring_buf = NULL;\r
-void *wlan_static_fw_event_ring_buf = NULL;\r
-void *wlan_static_dhd_event_ring_buf = NULL;\r
-void *wlan_static_nan_event_ring_buf = NULL;\r
-\r
-static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM];\r
-\r
-void *dhd_wlan_mem_prealloc(int section, unsigned long size)\r
-{\r
- pr_err("%s: sectoin %d, %ld\n", __func__, section, size);\r
- if (section == DHD_PREALLOC_PROT)\r
- return wlan_static_prot;\r
-\r
-#if defined(BCMDHD_SDIO)\r
- if (section == DHD_PREALLOC_RXBUF)\r
- return wlan_static_rxbuf;\r
-\r
- if (section == DHD_PREALLOC_DATABUF)\r
- return wlan_static_databuf;\r
-#endif /* BCMDHD_SDIO */\r
-\r
- if (section == DHD_PREALLOC_SKB_BUF)\r
- return wlan_static_skb;\r
-\r
- if (section == DHD_PREALLOC_WIPHY_ESCAN0)\r
- return wlan_static_scan_buf0;\r
-\r
- if (section == DHD_PREALLOC_WIPHY_ESCAN1)\r
- return wlan_static_scan_buf1;\r
-\r
- if (section == DHD_PREALLOC_OSL_BUF) {\r
- if (size > DHD_PREALLOC_OSL_BUF_SIZE) {\r
- pr_err("request OSL_BUF(%lu) > %ld\n",\r
- size, DHD_PREALLOC_OSL_BUF_SIZE);\r
- return NULL;\r
- }\r
- return wlan_static_osl_buf;\r
- }\r
-\r
- if (section == DHD_PREALLOC_DHD_INFO) {\r
- if (size > DHD_PREALLOC_DHD_INFO_SIZE) {\r
- pr_err("request DHD_INFO size(%lu) > %d\n",\r
- size, DHD_PREALLOC_DHD_INFO_SIZE);\r
- return NULL;\r
- }\r
- return wlan_static_dhd_info_buf;\r
- }\r
- if (section == DHD_PREALLOC_DHD_WLFC_INFO) {\r
- if (size > WLAN_DHD_WLFC_BUF_SIZE) {\r
- pr_err("request DHD_WLFC_INFO size(%lu) > %d\n",\r
- size, WLAN_DHD_WLFC_BUF_SIZE);\r
- return NULL;\r
- }\r
- return wlan_static_dhd_wlfc_info_buf;\r
- }\r
-#ifdef BCMDHD_PCIE\r
- if (section == DHD_PREALLOC_IF_FLOW_LKUP) {\r
- if (size > DHD_PREALLOC_IF_FLOW_LKUP_SIZE) {\r
- pr_err("request DHD_IF_FLOW_LKUP size(%lu) > %d\n",\r
- size, DHD_PREALLOC_IF_FLOW_LKUP_SIZE);\r
- return NULL;\r
- }\r
-\r
- return wlan_static_if_flow_lkup;\r
- }\r
-#endif /* BCMDHD_PCIE */\r
- if (section == DHD_PREALLOC_MEMDUMP_RAM) {\r
- if (size > DHD_PREALLOC_MEMDUMP_RAM_SIZE) {\r
- pr_err("request DHD_PREALLOC_MEMDUMP_RAM_SIZE(%lu) > %d\n",\r
- size, DHD_PREALLOC_MEMDUMP_RAM_SIZE);\r
- return NULL;\r
- }\r
-\r
- return wlan_static_dhd_memdump_ram_buf;\r
- }\r
- if (section == DHD_PREALLOC_DHD_WLFC_HANGER) {\r
- if (size > DHD_PREALLOC_DHD_WLFC_HANGER_SIZE) {\r
- pr_err("request DHD_WLFC_HANGER size(%lu) > %d\n",\r
- size, DHD_PREALLOC_DHD_WLFC_HANGER_SIZE);\r
- return NULL;\r
- }\r
- return wlan_static_dhd_wlfc_hanger_buf;\r
- }\r
- if (section == DHD_PREALLOC_WL_ESCAN_INFO) {\r
- if (size > DHD_PREALLOC_WL_ESCAN_INFO_SIZE) {\r
- pr_err("request DHD_PREALLOC_WL_ESCAN_INFO_SIZE(%lu) > %d\n",\r
- size, DHD_PREALLOC_WL_ESCAN_INFO_SIZE);\r
- return NULL;\r
- }\r
-\r
- return wlan_static_wl_escan_info_buf;\r
- }\r
- if (section == DHD_PREALLOC_FW_VERBOSE_RING) {\r
- if (size > FW_VERBOSE_RING_SIZE) {\r
- pr_err("request DHD_PREALLOC_FW_VERBOSE_RING(%lu) > %d\n",\r
- size, FW_VERBOSE_RING_SIZE);\r
- return NULL;\r
- }\r
-\r
- return wlan_static_fw_verbose_ring_buf;\r
- }\r
- if (section == DHD_PREALLOC_FW_EVENT_RING) {\r
- if (size > FW_EVENT_RING_SIZE) {\r
- pr_err("request DHD_PREALLOC_FW_EVENT_RING(%lu) > %d\n",\r
- size, FW_EVENT_RING_SIZE);\r
- return NULL;\r
- }\r
-\r
- return wlan_static_fw_event_ring_buf;\r
- }\r
- if (section == DHD_PREALLOC_DHD_EVENT_RING) {\r
- if (size > DHD_EVENT_RING_SIZE) {\r
- pr_err("request DHD_PREALLOC_DHD_EVENT_RING(%lu) > %d\n",\r
- size, DHD_EVENT_RING_SIZE);\r
- return NULL;\r
- }\r
-\r
- return wlan_static_dhd_event_ring_buf;\r
- }\r
- if (section == DHD_PREALLOC_NAN_EVENT_RING) {\r
- if (size > NAN_EVENT_RING_SIZE) {\r
- pr_err("request DHD_PREALLOC_NAN_EVENT_RING(%lu) > %d\n",\r
- size, NAN_EVENT_RING_SIZE);\r
- return NULL;\r
- }\r
-\r
- return wlan_static_nan_event_ring_buf;\r
- }\r
- if ((section < 0) || (section > DHD_PREALLOC_MAX))\r
- pr_err("request section id(%d) is out of max index %d\n",\r
- section, DHD_PREALLOC_MAX);\r
-\r
- pr_err("%s: failed to alloc section %d, size=%ld\n",\r
- __func__, section, size);\r
-\r
- return NULL;\r
-}\r
-EXPORT_SYMBOL(dhd_wlan_mem_prealloc);\r
-\r
-static int dhd_init_wlan_mem(void)\r
-{\r
- int i;\r
- int j;\r
- printk(KERN_ERR "%s(): %s\n", __func__, DHD_STATIC_VERSION_STR);\r
-\r
- for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) {\r
- wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE);\r
- if (!wlan_static_skb[i]) {\r
- goto err_skb_alloc;\r
- }\r
- pr_err("%s: sectoin %d skb[%d], size=%ld\n", __func__,\r
- DHD_PREALLOC_SKB_BUF, i, DHD_SKB_1PAGE_BUFSIZE);\r
- }\r
-\r
- for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) {\r
- wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE);\r
- if (!wlan_static_skb[i]) {\r
- goto err_skb_alloc;\r
- }\r
- pr_err("%s: sectoin %d skb[%d], size=%ld\n", __func__,\r
- DHD_PREALLOC_SKB_BUF, i, DHD_SKB_2PAGE_BUFSIZE);\r
- }\r
-\r
-#if defined(BCMDHD_SDIO)\r
- wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE);\r
- if (!wlan_static_skb[i])\r
- goto err_skb_alloc;\r
- pr_err("%s: sectoin %d skb[%d], size=%ld\n", __func__,\r
- DHD_PREALLOC_SKB_BUF, i, DHD_SKB_4PAGE_BUFSIZE);\r
-#endif /* BCMDHD_SDIO */\r
-\r
- wlan_static_prot = kmalloc(DHD_PREALLOC_PROT_SIZE, GFP_KERNEL);\r
- if (!wlan_static_prot)\r
- goto err_mem_alloc;\r
- pr_err("%s: sectoin %d, size=%d\n", __func__,\r
- DHD_PREALLOC_PROT, DHD_PREALLOC_PROT_SIZE);\r
-\r
-#if defined(BCMDHD_SDIO)\r
- wlan_static_rxbuf = kmalloc(DHD_PREALLOC_RXBUF_SIZE, GFP_KERNEL);\r
- if (!wlan_static_rxbuf)\r
- goto err_mem_alloc;\r
- pr_err("%s: sectoin %d, size=%d\n", __func__,\r
- DHD_PREALLOC_RXBUF, DHD_PREALLOC_RXBUF_SIZE);\r
-\r
- wlan_static_databuf = kmalloc(DHD_PREALLOC_DATABUF_SIZE, GFP_KERNEL);\r
- if (!wlan_static_databuf)\r
- goto err_mem_alloc;\r
- pr_err("%s: sectoin %d, size=%d\n", __func__,\r
- DHD_PREALLOC_DATABUF, DHD_PREALLOC_DATABUF_SIZE);\r
-#endif /* BCMDHD_SDIO */\r
-\r
- wlan_static_osl_buf = kmalloc(DHD_PREALLOC_OSL_BUF_SIZE, GFP_KERNEL);\r
- if (!wlan_static_osl_buf)\r
- goto err_mem_alloc;\r
- pr_err("%s: sectoin %d, size=%ld\n", __func__,\r
- DHD_PREALLOC_OSL_BUF, DHD_PREALLOC_OSL_BUF_SIZE);\r
-\r
- wlan_static_scan_buf0 = kmalloc(DHD_PREALLOC_WIPHY_ESCAN0_SIZE, GFP_KERNEL);\r
- if (!wlan_static_scan_buf0)\r
- goto err_mem_alloc;\r
- pr_err("%s: sectoin %d, size=%d\n", __func__,\r
- DHD_PREALLOC_WIPHY_ESCAN0, DHD_PREALLOC_WIPHY_ESCAN0_SIZE);\r
-\r
- wlan_static_dhd_info_buf = kmalloc(DHD_PREALLOC_DHD_INFO_SIZE, GFP_KERNEL);\r
- if (!wlan_static_dhd_info_buf)\r
- goto err_mem_alloc;\r
- pr_err("%s: sectoin %d, size=%d\n", __func__,\r
- DHD_PREALLOC_DHD_INFO, DHD_PREALLOC_DHD_INFO_SIZE);\r
-\r
- wlan_static_dhd_wlfc_info_buf = kmalloc(WLAN_DHD_WLFC_BUF_SIZE, GFP_KERNEL);\r
- if (!wlan_static_dhd_wlfc_info_buf)\r
- goto err_mem_alloc;\r
- pr_err("%s: sectoin %d, size=%d\n", __func__,\r
- DHD_PREALLOC_DHD_WLFC_INFO, WLAN_DHD_WLFC_BUF_SIZE);\r
-\r
-#ifdef BCMDHD_PCIE\r
- wlan_static_if_flow_lkup = kmalloc(DHD_PREALLOC_IF_FLOW_LKUP_SIZE, GFP_KERNEL);\r
- if (!wlan_static_if_flow_lkup)\r
- goto err_mem_alloc;\r
- pr_err("%s: sectoin %d, size=%d\n", __func__,\r
- DHD_PREALLOC_IF_FLOW_LKUP, DHD_PREALLOC_IF_FLOW_LKUP_SIZE);\r
-#endif /* BCMDHD_PCIE */\r
-\r
- wlan_static_dhd_memdump_ram_buf = kmalloc(DHD_PREALLOC_MEMDUMP_RAM_SIZE, GFP_KERNEL);\r
- if (!wlan_static_dhd_memdump_ram_buf)\r
- goto err_mem_alloc;\r
- pr_err("%s: sectoin %d, size=%d\n", __func__,\r
- DHD_PREALLOC_MEMDUMP_RAM, DHD_PREALLOC_MEMDUMP_RAM_SIZE);\r
-\r
- wlan_static_dhd_wlfc_hanger_buf = kmalloc(DHD_PREALLOC_DHD_WLFC_HANGER_SIZE, GFP_KERNEL);\r
- if (!wlan_static_dhd_wlfc_hanger_buf)\r
- goto err_mem_alloc;\r
- pr_err("%s: sectoin %d, size=%d\n", __func__,\r
- DHD_PREALLOC_DHD_WLFC_HANGER, DHD_PREALLOC_DHD_WLFC_HANGER_SIZE);\r
-\r
- wlan_static_wl_escan_info_buf = kmalloc(DHD_PREALLOC_WL_ESCAN_INFO_SIZE, GFP_KERNEL);\r
- if (!wlan_static_wl_escan_info_buf)\r
- goto err_mem_alloc;\r
- pr_err("%s: sectoin %d, size=%d\n", __func__,\r
- DHD_PREALLOC_WL_ESCAN_INFO, DHD_PREALLOC_WL_ESCAN_INFO_SIZE);\r
-\r
- wlan_static_fw_verbose_ring_buf = kmalloc(FW_VERBOSE_RING_SIZE, GFP_KERNEL);\r
- if (!wlan_static_fw_verbose_ring_buf)\r
- goto err_mem_alloc;\r
- pr_err("%s: sectoin %d, size=%d\n", __func__,\r
- DHD_PREALLOC_FW_VERBOSE_RING, FW_VERBOSE_RING_SIZE);\r
-\r
- wlan_static_fw_event_ring_buf = kmalloc(FW_EVENT_RING_SIZE, GFP_KERNEL);\r
- if (!wlan_static_fw_event_ring_buf)\r
- goto err_mem_alloc;\r
- pr_err("%s: sectoin %d, size=%d\n", __func__,\r
- DHD_PREALLOC_FW_EVENT_RING, FW_EVENT_RING_SIZE);\r
-\r
- wlan_static_dhd_event_ring_buf = kmalloc(DHD_EVENT_RING_SIZE, GFP_KERNEL);\r
- if (!wlan_static_dhd_event_ring_buf)\r
- goto err_mem_alloc;\r
- pr_err("%s: sectoin %d, size=%d\n", __func__,\r
- DHD_PREALLOC_DHD_EVENT_RING, DHD_EVENT_RING_SIZE);\r
-\r
- wlan_static_nan_event_ring_buf = kmalloc(NAN_EVENT_RING_SIZE, GFP_KERNEL);\r
- if (!wlan_static_nan_event_ring_buf)\r
- goto err_mem_alloc;\r
- pr_err("%s: sectoin %d, size=%d\n", __func__,\r
- DHD_PREALLOC_NAN_EVENT_RING, NAN_EVENT_RING_SIZE);\r
-\r
- return 0;\r
-\r
-err_mem_alloc:\r
-\r
- if (wlan_static_prot)\r
- kfree(wlan_static_prot);\r
-\r
-#if defined(BCMDHD_SDIO)\r
- if (wlan_static_rxbuf)\r
- kfree(wlan_static_rxbuf);\r
-\r
- if (wlan_static_databuf)\r
- kfree(wlan_static_databuf);\r
-#endif /* BCMDHD_SDIO */\r
-\r
- if (wlan_static_osl_buf)\r
- kfree(wlan_static_osl_buf);\r
-\r
- if (wlan_static_scan_buf0)\r
- kfree(wlan_static_scan_buf0);\r
-\r
- if (wlan_static_scan_buf1)\r
- kfree(wlan_static_scan_buf1);\r
-\r
- if (wlan_static_dhd_info_buf)\r
- kfree(wlan_static_dhd_info_buf);\r
-\r
- if (wlan_static_dhd_wlfc_info_buf)\r
- kfree(wlan_static_dhd_wlfc_info_buf);\r
-\r
-#ifdef BCMDHD_PCIE\r
- if (wlan_static_if_flow_lkup)\r
- kfree(wlan_static_if_flow_lkup);\r
-#endif /* BCMDHD_PCIE */\r
-\r
- if (wlan_static_dhd_memdump_ram_buf)\r
- kfree(wlan_static_dhd_memdump_ram_buf);\r
-\r
- if (wlan_static_dhd_wlfc_hanger_buf)\r
- kfree(wlan_static_dhd_wlfc_hanger_buf);\r
-\r
- if (wlan_static_wl_escan_info_buf)\r
- kfree(wlan_static_wl_escan_info_buf);\r
-\r
-#ifdef BCMDHD_PCIE\r
- if (wlan_static_fw_verbose_ring_buf)\r
- kfree(wlan_static_fw_verbose_ring_buf);\r
-\r
- if (wlan_static_fw_event_ring_buf)\r
- kfree(wlan_static_fw_event_ring_buf);\r
-\r
- if (wlan_static_dhd_event_ring_buf)\r
- kfree(wlan_static_dhd_event_ring_buf);\r
-\r
- if (wlan_static_nan_event_ring_buf)\r
- kfree(wlan_static_nan_event_ring_buf);\r
-#endif /* BCMDHD_PCIE */\r
-\r
- pr_err("%s: Failed to mem_alloc for WLAN\n", __func__);\r
-\r
- i = WLAN_SKB_BUF_NUM;\r
-\r
-err_skb_alloc:\r
- pr_err("%s: Failed to skb_alloc for WLAN\n", __func__);\r
- for (j = 0; j < i; j++)\r
- dev_kfree_skb(wlan_static_skb[j]);\r
-\r
- return -ENOMEM;\r
-}\r
-\r
-static int __init\r
-dhd_static_buf_init(void)\r
-{\r
- dhd_init_wlan_mem();\r
-\r
- return 0;\r
-}\r
-\r
-static void __exit\r
-dhd_static_buf_exit(void)\r
-{\r
- int i;\r
-\r
- pr_err("%s()\n", __FUNCTION__);\r
-\r
- for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) {\r
- if (wlan_static_skb[i])\r
- dev_kfree_skb(wlan_static_skb[i]);\r
- }\r
-\r
- for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) {\r
- if (wlan_static_skb[i])\r
- dev_kfree_skb(wlan_static_skb[i]);\r
- }\r
-\r
-#if defined(BCMDHD_SDIO)\r
- if (wlan_static_skb[i])\r
- dev_kfree_skb(wlan_static_skb[i]);\r
-#endif /* BCMDHD_SDIO */\r
-\r
- if (wlan_static_prot)\r
- kfree(wlan_static_prot);\r
-\r
-#if defined(BCMDHD_SDIO)\r
- if (wlan_static_rxbuf)\r
- kfree(wlan_static_rxbuf);\r
-\r
- if (wlan_static_databuf)\r
- kfree(wlan_static_databuf);\r
-#endif /* BCMDHD_SDIO */\r
-\r
- if (wlan_static_osl_buf)\r
- kfree(wlan_static_osl_buf);\r
-\r
- if (wlan_static_scan_buf0)\r
- kfree(wlan_static_scan_buf0);\r
-\r
- if (wlan_static_scan_buf1)\r
- kfree(wlan_static_scan_buf1);\r
-\r
- if (wlan_static_dhd_info_buf)\r
- kfree(wlan_static_dhd_info_buf);\r
-\r
- if (wlan_static_dhd_wlfc_info_buf)\r
- kfree(wlan_static_dhd_wlfc_info_buf);\r
-\r
-#ifdef BCMDHD_PCIE\r
- if (wlan_static_if_flow_lkup)\r
- kfree(wlan_static_if_flow_lkup);\r
-#endif /* BCMDHD_PCIE */\r
-\r
- if (wlan_static_dhd_memdump_ram_buf)\r
- kfree(wlan_static_dhd_memdump_ram_buf);\r
-\r
- if (wlan_static_dhd_wlfc_hanger_buf)\r
- kfree(wlan_static_dhd_wlfc_hanger_buf);\r
-\r
- if (wlan_static_wl_escan_info_buf)\r
- kfree(wlan_static_wl_escan_info_buf);\r
-\r
-#ifdef BCMDHD_PCIE\r
- if (wlan_static_fw_verbose_ring_buf)\r
- kfree(wlan_static_fw_verbose_ring_buf);\r
-\r
- if (wlan_static_fw_event_ring_buf)\r
- kfree(wlan_static_fw_event_ring_buf);\r
-\r
- if (wlan_static_dhd_event_ring_buf)\r
- kfree(wlan_static_dhd_event_ring_buf);\r
-\r
- if (wlan_static_nan_event_ring_buf)\r
- kfree(wlan_static_nan_event_ring_buf);\r
-#endif\r
-\r
- return;\r
-}\r
-\r
-module_init(dhd_static_buf_init);\r
-\r
-module_exit(dhd_static_buf_exit);\r
+/*
+ * drivers/amlogic/wifi/dhd_static_buf.c
+ *
+ * Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#define pr_fmt(fmt) "Wifi: %s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/skbuff.h>
+#include <linux/wlan_plat.h>
+#include <linux/amlogic/dhd_buf.h>
+
+#define DHD_STATIC_VERSION_STR "100.10.545.3"
+
+#define BCMDHD_SDIO
+#define BCMDHD_PCIE
+
+enum dhd_prealloc_index {
+ DHD_PREALLOC_PROT = 0,
+#if defined(BCMDHD_SDIO)
+ DHD_PREALLOC_RXBUF = 1,
+ DHD_PREALLOC_DATABUF = 2,
+#endif
+ DHD_PREALLOC_OSL_BUF = 3,
+ DHD_PREALLOC_SKB_BUF = 4,
+ DHD_PREALLOC_WIPHY_ESCAN0 = 5,
+ DHD_PREALLOC_WIPHY_ESCAN1 = 6,
+ DHD_PREALLOC_DHD_INFO = 7,
+ DHD_PREALLOC_DHD_WLFC_INFO = 8,
+#ifdef BCMDHD_PCIE
+ DHD_PREALLOC_IF_FLOW_LKUP = 9,
+#endif
+ DHD_PREALLOC_MEMDUMP_BUF = 10,
+ DHD_PREALLOC_MEMDUMP_RAM = 11,
+ DHD_PREALLOC_DHD_WLFC_HANGER = 12,
+ DHD_PREALLOC_PKTID_MAP = 13,
+ DHD_PREALLOC_PKTID_MAP_IOCTL = 14,
+ DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15,
+ DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16,
+ DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17,
+ DHD_PREALLOC_STAT_REPORT_BUF = 18,
+ DHD_PREALLOC_WL_WEXT_INFO = 19,
+ DHD_PREALLOC_FW_VERBOSE_RING = 20,
+ DHD_PREALLOC_FW_EVENT_RING = 21,
+ DHD_PREALLOC_DHD_EVENT_RING = 22,
+ DHD_PREALLOC_NAN_EVENT_RING = 23,
+ DHD_PREALLOC_MAX
+};
+
+#define STATIC_BUF_MAX_NUM 20
+#define STATIC_BUF_SIZE (PAGE_SIZE*2)
+
+#define DHD_PREALLOC_PROT_SIZE (16 * 1024)
+#define DHD_PREALLOC_RXBUF_SIZE (24 * 1024)
+#define DHD_PREALLOC_DATABUF_SIZE (64 * 1024)
+#define DHD_PREALLOC_OSL_BUF_SIZE (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
+#define DHD_PREALLOC_WIPHY_ESCAN0_SIZE (64 * 1024)
+#define DHD_PREALLOC_DHD_INFO_SIZE (32 * 1024)
+#define DHD_PREALLOC_MEMDUMP_RAM_SIZE (1290 * 1024)
+#define DHD_PREALLOC_DHD_WLFC_HANGER_SIZE (73 * 1024)
+#define DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE (1024 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
+#define DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE (8 * 1024)
+#define DHD_PREALLOC_WL_WEXT_INFO_SIZE (70 * 1024)
+#ifdef CONFIG_64BIT
+#define DHD_PREALLOC_IF_FLOW_LKUP_SIZE (20 * 1024 * 2)
+#else
+#define DHD_PREALLOC_IF_FLOW_LKUP_SIZE (20 * 1024)
+#endif
+#define FW_VERBOSE_RING_SIZE (256 * 1024)
+#define FW_EVENT_RING_SIZE (64 * 1024)
+#define DHD_EVENT_RING_SIZE (64 * 1024)
+#define NAN_EVENT_RING_SIZE (64 * 1024)
+
+#if defined(CONFIG_64BIT)
+#define WLAN_DHD_INFO_BUF_SIZE (24 * 1024)
+#define WLAN_DHD_WLFC_BUF_SIZE (64 * 1024)
+#define WLAN_DHD_IF_FLOW_LKUP_SIZE (64 * 1024)
+#else
+#define WLAN_DHD_INFO_BUF_SIZE (16 * 1024)
+#define WLAN_DHD_WLFC_BUF_SIZE (64 * 1024)
+#define WLAN_DHD_IF_FLOW_LKUP_SIZE (20 * 1024)
+#endif /* CONFIG_64BIT */
+#define WLAN_DHD_MEMDUMP_SIZE (800 * 1024)
+
+#define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1)
+#define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2)
+#define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4)
+
+#define DHD_SKB_1PAGE_BUF_NUM 8
+#define DHD_SKB_2PAGE_BUF_NUM 64
+#define DHD_SKB_4PAGE_BUF_NUM 1
+
+/* The number is defined in linux_osl.c
+ * WLAN_SKB_1_2PAGE_BUF_NUM => STATIC_PKT_1_2PAGE_NUM
+ * WLAN_SKB_BUF_NUM => STATIC_PKT_MAX_NUM
+ */
+#define WLAN_SKB_1_2PAGE_BUF_NUM ((DHD_SKB_1PAGE_BUF_NUM) + \
+ (DHD_SKB_2PAGE_BUF_NUM))
+#define WLAN_SKB_BUF_NUM ((WLAN_SKB_1_2PAGE_BUF_NUM) + (DHD_SKB_4PAGE_BUF_NUM))
+
+void *wlan_static_prot;
+void *wlan_static_rxbuf;
+void *wlan_static_databuf;
+void *wlan_static_osl_buf;
+void *wlan_static_scan_buf0;
+void *wlan_static_scan_buf1;
+void *wlan_static_dhd_info_buf;
+void *wlan_static_dhd_wlfc_info_buf;
+void *wlan_static_if_flow_lkup;
+void *wlan_static_dhd_memdump_ram_buf;
+void *wlan_static_dhd_wlfc_hanger_buf;
+void *wlan_static_dhd_log_dump_buf;
+void *wlan_static_dhd_log_dump_buf_ex;
+void *wlan_static_wl_escan_info_buf;
+void *wlan_static_fw_verbose_ring_buf;
+void *wlan_static_fw_event_ring_buf;
+void *wlan_static_dhd_event_ring_buf;
+void *wlan_static_nan_event_ring_buf;
+
+static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM];
+
+/*
+ * bcmdhd_mem_prealloc() - hand out the preallocated buffer for a section.
+ * @section: DHD_PREALLOC_* section id requested by the driver.
+ * @size: number of bytes the caller needs.
+ *
+ * Buffers are allocated once in bcmdhd_init_wlan_mem(). Returns the
+ * matching static buffer, or NULL when the section is unknown or @size
+ * exceeds the section's preallocated size.
+ */
+void *bcmdhd_mem_prealloc(int section, unsigned long size)
+{
+	/* 'size' is unsigned long, so print with %lu (was mis-spelled/%ld). */
+	pr_info("section %d, size %lu\n", section, size);
+	if (section == DHD_PREALLOC_PROT)
+		return wlan_static_prot;
+
+#if defined(BCMDHD_SDIO)
+	if (section == DHD_PREALLOC_RXBUF)
+		return wlan_static_rxbuf;
+
+	if (section == DHD_PREALLOC_DATABUF)
+		return wlan_static_databuf;
+#endif /* BCMDHD_SDIO */
+
+	if (section == DHD_PREALLOC_SKB_BUF)
+		return wlan_static_skb;
+
+	if (section == DHD_PREALLOC_WIPHY_ESCAN0)
+		return wlan_static_scan_buf0;
+
+	if (section == DHD_PREALLOC_WIPHY_ESCAN1)
+		return wlan_static_scan_buf1;
+
+	if (section == DHD_PREALLOC_OSL_BUF) {
+		if (size > DHD_PREALLOC_OSL_BUF_SIZE) {
+			pr_err("request OSL_BUF(%lu) > %ld\n",
+				size, DHD_PREALLOC_OSL_BUF_SIZE);
+			return NULL;
+		}
+		return wlan_static_osl_buf;
+	}
+
+	if (section == DHD_PREALLOC_DHD_INFO) {
+		if (size > DHD_PREALLOC_DHD_INFO_SIZE) {
+			pr_err("request DHD_INFO size(%lu) > %d\n",
+				size, DHD_PREALLOC_DHD_INFO_SIZE);
+			return NULL;
+		}
+		return wlan_static_dhd_info_buf;
+	}
+	if (section == DHD_PREALLOC_DHD_WLFC_INFO) {
+		if (size > WLAN_DHD_WLFC_BUF_SIZE) {
+			pr_err("request DHD_WLFC_INFO size(%lu) > %d\n",
+				size, WLAN_DHD_WLFC_BUF_SIZE);
+			return NULL;
+		}
+		return wlan_static_dhd_wlfc_info_buf;
+	}
+#ifdef BCMDHD_PCIE
+	if (section == DHD_PREALLOC_IF_FLOW_LKUP) {
+		if (size > DHD_PREALLOC_IF_FLOW_LKUP_SIZE) {
+			pr_err("request DHD_IF_FLOW_LKUP size(%lu) > %d\n",
+				size, DHD_PREALLOC_IF_FLOW_LKUP_SIZE);
+			return NULL;
+		}
+
+		return wlan_static_if_flow_lkup;
+	}
+#endif /* BCMDHD_PCIE */
+	if (section == DHD_PREALLOC_MEMDUMP_RAM) {
+		if (size > DHD_PREALLOC_MEMDUMP_RAM_SIZE) {
+			pr_err("request DHD_PREALLOC_MEMDUMP_RAM_SIZE(%lu) > %d\n",
+				size, DHD_PREALLOC_MEMDUMP_RAM_SIZE);
+			return NULL;
+		}
+
+		return wlan_static_dhd_memdump_ram_buf;
+	}
+	if (section == DHD_PREALLOC_DHD_WLFC_HANGER) {
+		if (size > DHD_PREALLOC_DHD_WLFC_HANGER_SIZE) {
+			pr_err("request DHD_WLFC_HANGER size(%lu) > %d\n",
+				size, DHD_PREALLOC_DHD_WLFC_HANGER_SIZE);
+			return NULL;
+		}
+		return wlan_static_dhd_wlfc_hanger_buf;
+	}
+	if (section == DHD_PREALLOC_DHD_LOG_DUMP_BUF) {
+		if (size > DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE) {
+			pr_err("request DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE(%lu) > %d\n",
+				size, DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE);
+			return NULL;
+		}
+
+		return wlan_static_dhd_log_dump_buf;
+	}
+	if (section == DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX) {
+		if (size > DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE) {
+			pr_err("request DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE(%lu) > %d\n",
+				size, DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE);
+			return NULL;
+		}
+
+		return wlan_static_dhd_log_dump_buf_ex;
+	}
+	if (section == DHD_PREALLOC_WL_WEXT_INFO) {
+		if (size > DHD_PREALLOC_WL_WEXT_INFO_SIZE) {
+			pr_err("request DHD_PREALLOC_WL_WEXT_INFO_SIZE(%lu) > %d\n",
+				size, DHD_PREALLOC_WL_WEXT_INFO_SIZE);
+			return NULL;
+		}
+
+		return wlan_static_wl_escan_info_buf;
+	}
+	if (section == DHD_PREALLOC_FW_VERBOSE_RING) {
+		if (size > FW_VERBOSE_RING_SIZE) {
+			pr_err("request DHD_PREALLOC_FW_VERBOSE_RING(%lu) > %d\n",
+				size, FW_VERBOSE_RING_SIZE);
+			return NULL;
+		}
+
+		return wlan_static_fw_verbose_ring_buf;
+	}
+	if (section == DHD_PREALLOC_FW_EVENT_RING) {
+		if (size > FW_EVENT_RING_SIZE) {
+			pr_err("request DHD_PREALLOC_FW_EVENT_RING(%lu) > %d\n",
+				size, FW_EVENT_RING_SIZE);
+			return NULL;
+		}
+
+		return wlan_static_fw_event_ring_buf;
+	}
+	if (section == DHD_PREALLOC_DHD_EVENT_RING) {
+		if (size > DHD_EVENT_RING_SIZE) {
+			pr_err("request DHD_PREALLOC_DHD_EVENT_RING(%lu) > %d\n",
+				size, DHD_EVENT_RING_SIZE);
+			return NULL;
+		}
+
+		return wlan_static_dhd_event_ring_buf;
+	}
+	if (section == DHD_PREALLOC_NAN_EVENT_RING) {
+		if (size > NAN_EVENT_RING_SIZE) {
+			pr_err("request DHD_PREALLOC_NAN_EVENT_RING(%lu) > %d\n",
+				size, NAN_EVENT_RING_SIZE);
+			return NULL;
+		}
+
+		return wlan_static_nan_event_ring_buf;
+	}
+	/* NOTE(review): bound check uses '>' not '>=' -- confirm that
+	 * DHD_PREALLOC_MAX is itself a valid section id.
+	 */
+	if ((section < 0) || (section > DHD_PREALLOC_MAX))
+		pr_err("request section id(%d) is out of max index %d\n",
+			section, DHD_PREALLOC_MAX);
+
+	pr_err("%s: failed to alloc section %d, size=%lu\n",
+		__func__, section, size);
+
+	return NULL;
+}
+EXPORT_SYMBOL(bcmdhd_mem_prealloc);
+
+/*
+ * bcmdhd_init_wlan_mem() - allocate all static WLAN buffers up front.
+ *
+ * Fills the skb pool and every per-section buffer handed out by
+ * bcmdhd_mem_prealloc(). On any allocation failure, everything allocated
+ * so far is released and -ENOMEM is returned; returns 0 on success.
+ */
+int bcmdhd_init_wlan_mem(void)
+{
+	int i;
+	int j;
+	pr_info("%s(): %s\n", __func__, DHD_STATIC_VERSION_STR);
+
+	for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) {
+		wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE);
+		if (!wlan_static_skb[i])
+			goto err_skb_alloc;
+	}
+
+	for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) {
+		wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE);
+		if (!wlan_static_skb[i])
+			goto err_skb_alloc;
+	}
+
+#if defined(BCMDHD_SDIO)
+	/* The single 4-page skb is only needed for SDIO builds; the slot
+	 * stays NULL otherwise.
+	 */
+	wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE);
+	if (!wlan_static_skb[i])
+		goto err_skb_alloc;
+#endif /* BCMDHD_SDIO */
+
+	wlan_static_prot = kmalloc(DHD_PREALLOC_PROT_SIZE, GFP_KERNEL);
+	if (!wlan_static_prot)
+		goto err_mem_alloc;
+
+#if defined(BCMDHD_SDIO)
+	wlan_static_rxbuf = kmalloc(DHD_PREALLOC_RXBUF_SIZE, GFP_KERNEL);
+	if (!wlan_static_rxbuf)
+		goto err_mem_alloc;
+
+	wlan_static_databuf = kmalloc(DHD_PREALLOC_DATABUF_SIZE, GFP_KERNEL);
+	if (!wlan_static_databuf)
+		goto err_mem_alloc;
+#endif /* BCMDHD_SDIO */
+
+	wlan_static_osl_buf = kmalloc(DHD_PREALLOC_OSL_BUF_SIZE, GFP_KERNEL);
+	if (!wlan_static_osl_buf)
+		goto err_mem_alloc;
+
+	wlan_static_scan_buf0 = kmalloc(DHD_PREALLOC_WIPHY_ESCAN0_SIZE, GFP_KERNEL);
+	if (!wlan_static_scan_buf0)
+		goto err_mem_alloc;
+
+	wlan_static_dhd_info_buf = kmalloc(DHD_PREALLOC_DHD_INFO_SIZE, GFP_KERNEL);
+	if (!wlan_static_dhd_info_buf)
+		goto err_mem_alloc;
+
+	wlan_static_dhd_wlfc_info_buf = kmalloc(WLAN_DHD_WLFC_BUF_SIZE, GFP_KERNEL);
+	if (!wlan_static_dhd_wlfc_info_buf)
+		goto err_mem_alloc;
+
+#ifdef BCMDHD_PCIE
+	wlan_static_if_flow_lkup = kmalloc(DHD_PREALLOC_IF_FLOW_LKUP_SIZE, GFP_KERNEL);
+	if (!wlan_static_if_flow_lkup)
+		goto err_mem_alloc;
+#endif /* BCMDHD_PCIE */
+
+	wlan_static_dhd_memdump_ram_buf = kmalloc(DHD_PREALLOC_MEMDUMP_RAM_SIZE, GFP_KERNEL);
+	if (!wlan_static_dhd_memdump_ram_buf)
+		goto err_mem_alloc;
+
+	wlan_static_dhd_wlfc_hanger_buf = kmalloc(DHD_PREALLOC_DHD_WLFC_HANGER_SIZE, GFP_KERNEL);
+	if (!wlan_static_dhd_wlfc_hanger_buf)
+		goto err_mem_alloc;
+
+	wlan_static_dhd_log_dump_buf = kmalloc(DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE, GFP_KERNEL);
+	if (!wlan_static_dhd_log_dump_buf)
+		goto err_mem_alloc;
+
+	wlan_static_dhd_log_dump_buf_ex = kmalloc(DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE, GFP_KERNEL);
+	if (!wlan_static_dhd_log_dump_buf_ex)
+		goto err_mem_alloc;
+
+	wlan_static_wl_escan_info_buf = kmalloc(DHD_PREALLOC_WL_WEXT_INFO_SIZE, GFP_KERNEL);
+	if (!wlan_static_wl_escan_info_buf)
+		goto err_mem_alloc;
+
+	wlan_static_fw_verbose_ring_buf = kmalloc(FW_VERBOSE_RING_SIZE, GFP_KERNEL);
+	if (!wlan_static_fw_verbose_ring_buf)
+		goto err_mem_alloc;
+
+	wlan_static_fw_event_ring_buf = kmalloc(FW_EVENT_RING_SIZE, GFP_KERNEL);
+	if (!wlan_static_fw_event_ring_buf)
+		goto err_mem_alloc;
+
+	wlan_static_dhd_event_ring_buf = kmalloc(DHD_EVENT_RING_SIZE, GFP_KERNEL);
+	if (!wlan_static_dhd_event_ring_buf)
+		goto err_mem_alloc;
+
+	wlan_static_nan_event_ring_buf = kmalloc(NAN_EVENT_RING_SIZE, GFP_KERNEL);
+	if (!wlan_static_nan_event_ring_buf)
+		goto err_mem_alloc;
+
+	pr_info("bcmdhd_init_wlan_mem prealloc ok\n");
+	return 0;
+
+err_mem_alloc:
+
+	if (wlan_static_prot)
+		kfree(wlan_static_prot);
+
+#if defined(BCMDHD_SDIO)
+	if (wlan_static_rxbuf)
+		kfree(wlan_static_rxbuf);
+
+	if (wlan_static_databuf)
+		kfree(wlan_static_databuf);
+#endif /* BCMDHD_SDIO */
+
+	if (wlan_static_osl_buf)
+		kfree(wlan_static_osl_buf);
+
+	if (wlan_static_scan_buf0)
+		kfree(wlan_static_scan_buf0);
+
+	if (wlan_static_scan_buf1)
+		kfree(wlan_static_scan_buf1);
+
+	if (wlan_static_dhd_info_buf)
+		kfree(wlan_static_dhd_info_buf);
+
+	if (wlan_static_dhd_wlfc_info_buf)
+		kfree(wlan_static_dhd_wlfc_info_buf);
+
+#ifdef BCMDHD_PCIE
+	if (wlan_static_if_flow_lkup)
+		kfree(wlan_static_if_flow_lkup);
+#endif /* BCMDHD_PCIE */
+
+	if (wlan_static_dhd_memdump_ram_buf)
+		kfree(wlan_static_dhd_memdump_ram_buf);
+
+	if (wlan_static_dhd_wlfc_hanger_buf)
+		kfree(wlan_static_dhd_wlfc_hanger_buf);
+
+	if (wlan_static_dhd_log_dump_buf)
+		kfree(wlan_static_dhd_log_dump_buf);
+
+	if (wlan_static_dhd_log_dump_buf_ex)
+		kfree(wlan_static_dhd_log_dump_buf_ex);
+
+	if (wlan_static_wl_escan_info_buf)
+		kfree(wlan_static_wl_escan_info_buf);
+
+	/* The four ring buffers are allocated unconditionally above, so they
+	 * must be freed unconditionally here as well (guarding them with
+	 * BCMDHD_PCIE would leak them on non-PCIE builds).
+	 */
+	if (wlan_static_fw_verbose_ring_buf)
+		kfree(wlan_static_fw_verbose_ring_buf);
+
+	if (wlan_static_fw_event_ring_buf)
+		kfree(wlan_static_fw_event_ring_buf);
+
+	if (wlan_static_dhd_event_ring_buf)
+		kfree(wlan_static_dhd_event_ring_buf);
+
+	if (wlan_static_nan_event_ring_buf)
+		kfree(wlan_static_nan_event_ring_buf);
+
+	pr_err("%s: Failed to mem_alloc for WLAN\n", __func__);
+
+	i = WLAN_SKB_BUF_NUM;
+
+err_skb_alloc:
+	pr_err("%s: Failed to skb_alloc for WLAN\n", __func__);
+	for (j = 0; j < i; j++) {
+		/* Slots may be NULL (e.g. the 4-page slot on non-SDIO builds
+		 * when arriving from err_mem_alloc); skip them.
+		 */
+		if (wlan_static_skb[j])
+			dev_kfree_skb(wlan_static_skb[j]);
+	}
+
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(bcmdhd_init_wlan_mem);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("AMLOGIC");
+MODULE_DESCRIPTION("wifi device tree driver");
/*
* DHD PROP_TXSTATUS Module.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
dhd_os_wlfc_block(dhd);
if (dhd->wlfc_enabled) {
- DHD_ERROR(("%s():%d, Already enabled!\n", __FUNCTION__, __LINE__));
+ DHD_INFO(("%s():%d, Already enabled!\n", __FUNCTION__, __LINE__));
dhd_os_wlfc_unblock(dhd);
return BCME_OK;
}
	Leaving the message for now; it should be removed after a while,
	once the tlv situation is stable.
*/
- DHD_ERROR(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n",
+ DHD_INFO(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n",
dhd->wlfc_enabled?"enabled":"disabled", tlv));
}
ret = dhd_wl_ioctl_get_intiovar(dhd, "wlfc_mode", &fw_caps, WLC_GET_VAR, FALSE, 0);
if (!ret) {
- DHD_ERROR(("%s: query wlfc_mode succeed, fw_caps=0x%x\n", __FUNCTION__, fw_caps));
+ DHD_INFO(("%s: query wlfc_mode succeed, fw_caps=0x%x\n", __FUNCTION__, fw_caps));
if (WLFC_IS_OLD_DEF(fw_caps)) {
#ifdef BCMDBUS
}
}
- DHD_ERROR(("dhd_wlfc_init(): wlfc_mode=0x%x, ret=%d\n", dhd->wlfc_mode, ret));
+ DHD_INFO(("dhd_wlfc_init(): wlfc_mode=0x%x, ret=%d\n", dhd->wlfc_mode, ret));
#ifdef LIMIT_BORROW
dhd->wlfc_borrow_allowed = TRUE;
#endif // endif
dhd->proptxstatus_mode = hostreorder ?
WLFC_ONLY_AMPDU_HOSTREORDER : WLFC_FCMODE_NONE;
- DHD_ERROR(("%s: wlfc_mode=0x%x, tlv=%d\n", __FUNCTION__, dhd->wlfc_mode, tlv));
-
dhd_os_wlfc_unblock(dhd);
if (dhd->plat_deinit)
/*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* Common stats definitions for clients of dongle
* ports
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Dongle WL Header definitions
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* IE/TLV fragmentation/defragmentation support for
* Broadcom 802.11bang Networking Device Driver
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
}
/* adjust buf_len to length after ie including it */
- buf_len -= ((const uint8 *)ie - (const uint8 *)buf);
+ buf_len -= (uint)(((const uint8 *)ie - (const uint8 *)buf));
/* update length from fragments, okay if no next ie */
while ((ie = bcm_next_tlv(ie, &buf_len)) &&
* IE/TLV (de)fragmentation declarations/definitions for
* Broadcom 802.11abgn Networking Device Driver
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* HND generic packet pool operation primitives
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* HND generic pktq operation primitives
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* Misc utility routines for accessing lhl specific features
* of the SiliconBackplane-based Broadcom chips.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Utility routines for configuring different memories in Broadcom chips.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* Misc utility routines for accessing PMU corerev specific features
* of the SiliconBackplane-based Broadcom chips.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: hndpmu.c 714395 2017-08-04 08:22:31Z $
+ * $Id: hndpmu.c 783841 2018-10-09 06:24:16Z $
*/
/**
/*
* Fundamental types and constants relating to 802.11
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: 802.11.h 765782 2018-06-05 10:47:00Z $
+ * $Id: 802.11.h 814166 2019-04-10 06:14:49Z $
*/
#ifndef _802_11_H_
uint16 capability;
} BWL_POST_PACKED_STRUCT;
#define DOT11_BCN_PRB_LEN 12 /* 802.11 beacon/probe frame fixed length */
-#define DOT11_BCN_PRB_FIXED_LEN 12 /* 802.11 beacon/probe frame fixed length */
+#define DOT11_BCN_PRB_FIXED_LEN 12u /* 802.11 beacon/probe frame fixed length */
BWL_PRE_PACKED_STRUCT struct dot11_auth {
uint16 alg; /* algorithm */
#define DOT11_EXTCAP_LEN_WNM_NOTIFICATION 6
#define DOT11_EXTCAP_LEN_TDLS_WBW 8
#define DOT11_EXTCAP_LEN_OPMODE_NOTIFICATION 8
+#define DOT11_EXTCAP_LEN_TWT 10u
/* TDLS Capabilities */
#define DOT11_TDLS_CAP_TDLS 37 /* TDLS support */
#define DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID 73 /* d11 mgmt OBSS Intolerant Channel list */
#define DOT11_MNG_HT_OBSS_ID 74 /* d11 mgmt OBSS HT info */
#define DOT11_MNG_MMIE_ID 76 /* d11 mgmt MIC IE */
+#define DOT11_MNG_NONTRANS_BSSID_CAP_ID 83 /* 11k nontransmitted BSSID capability */
+#define DOT11_MNG_MULTIPLE_BSSIDINDEX_ID 85 /* 11k multiple BSSID index */
#define DOT11_MNG_FMS_DESCR_ID 86 /* 11v FMS descriptor */
#define DOT11_MNG_FMS_REQ_ID 87 /* 11v FMS request id */
#define DOT11_MNG_FMS_RESP_ID 88 /* 11v FMS response id */
#define DOT11_MNG_HE_CAP_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_HE_CAP_ID)
#define EXT_MNG_HE_OP_ID 36u /* HE Operation IE, 11ax */
#define DOT11_MNG_HE_OP_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_HE_OP_ID)
-#define EXT_MNG_RAPS_ID 37u /* OFDMA Random Access Parameter Set */
-#define DOT11_MNG_RAPS_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_RAPS_ID)
+#define EXT_MNG_UORA_ID 37u /* UORA Parameter Set */
+#define DOT11_MNG_UORA_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_UORA_ID)
#define EXT_MNG_MU_EDCA_ID 38u /* MU EDCA Parameter Set */
#define DOT11_MNG_MU_EDCA_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_MU_EDCA_ID)
#define EXT_MNG_SRPS_ID 39u /* Spatial Reuse Parameter Set */
#define FILS_HLP_CONTAINER_EXT_ID FILS_EXTID_MNG_HLP_CONTAINER_ID
#define DOT11_ESP_EXT_ID OCE_EXTID_MNG_ESP_ID
#define FILS_REQ_PARAMS_EXT_ID FILS_EXTID_MNG_REQ_PARAMS
+#define EXT_MNG_RAPS_ID 37u /* OFDMA Random Access Parameter Set */
+#define DOT11_MNG_RAPS_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_RAPS_ID)
/* End of deprecated definitions */
#define DOT11_MNG_IE_ID_EXT_MATCH(_ie, _id) (\
#define DOT11_EXT_CAP_TIMBC 18
/* BSS Transition Management support bit position */
#define DOT11_EXT_CAP_BSSTRANS_MGMT 19
+/* Multiple BSSID support position */
+#define DOT11_EXT_CAP_MULTIBSSID 22
/* Direct Multicast Service */
#define DOT11_EXT_CAP_DMS 26
/* Interworking support bit position */
#define DOT11_EXT_CAP_FTM_INITIATOR 71 /* tentative 11mcd3.0 */
#define DOT11_EXT_CAP_FILS 72 /* FILS Capability */
/* TWT support */
-#define DOT11_EXT_CAP_TWT_REQUESTER 75
-#define DOT11_EXT_CAP_TWT_RESPONDER 76
+#define DOT11_EXT_CAP_TWT_REQUESTER 77
+#define DOT11_EXT_CAP_TWT_RESPONDER 78
+#define DOT11_EXT_CAP_OBSS_NB_RU_OFDMA 79
+#define DOT11_EXT_CAP_EMBSS_ADVERTISE 80
/* TODO: Update DOT11_EXT_CAP_MAX_IDX to reflect the highest offset.
* Note: DOT11_EXT_CAP_MAX_IDX must only be used in attach path.
* It will cause ROM invalidation otherwise.
*/
-#define DOT11_EXT_CAP_MAX_IDX 76
+#define DOT11_EXT_CAP_MAX_IDX 80
#define DOT11_EXT_CAP_MAX_BIT_IDX 95 /* !!!update this please!!! */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_bsstrans_req dot11_bsstrans_req_t;
#define DOT11_BSSTRANS_REQ_LEN 7 /* Fixed length */
+#define DOT11_BSSTRANS_REQ_FIXED_LEN 7u /* Fixed length */
/* BSS Mgmt Transition Request Mode Field - 802.11v */
#define DOT11_BSSTRANS_REQMODE_PREF_LIST_INCL 0x01
struct ether_addr bssid;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_rmreq_bcn dot11_rmreq_bcn_t;
-#define DOT11_RMREQ_BCN_LEN 18
+#define DOT11_RMREQ_BCN_LEN 18u
BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn {
uint8 reg;
#define DOT11_RMREP_BCN_LAST_RPT_IND 164
#define DOT11_RMREP_BCN_FRM_BODY_LEN_MAX 224 /* 802.11k-2008 7.3.2.22.6 */
+/* Refer IEEE P802.11-REVmd/D1.0 9.4.2.21.7 Beacon report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn_frm_body_fragmt_id {
+ uint8 id; /* DOT11_RMREP_BCN_FRM_BODY_FRAG_ID */
+ uint8 len; /* length of remaining fields */
+ /* More fragments(B15), fragment Id(B8-B14), Bcn rpt instance ID (B0 - B7) */
+ uint16 frag_info_rpt_id;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct dot11_rmrep_bcn_frm_body_fragmt_id dot11_rmrep_bcn_frm_body_fragmt_id_t;
+
BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn_frm_body_frag_id {
uint8 id; /* DOT11_RMREP_BCN_FRM_BODY_FRAG_ID */
uint8 len; /* length of remaining fields */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_rmrep_bcn_frm_body_frag_id dot11_rmrep_bcn_frm_body_frag_id_t;
-#define DOT11_RMREP_BCNRPT_FRAG_ID_DATA_LEN 2
+#define DOT11_RMREP_BCNRPT_FRAG_ID_DATA_LEN 2u
#define DOT11_RMREP_BCNRPT_FRAG_ID_SE_LEN sizeof(dot11_rmrep_bcn_frm_body_frag_id_t)
-#define DOT11_RMREP_BCNRPT_FRAG_ID_NUM_SHIFT 1
+#define DOT11_RMREP_BCNRPT_FRAG_ID_NUM_SHIFT 1u
+#define DOT11_RMREP_BCNRPT_FRAGMT_ID_SE_LEN sizeof(dot11_rmrep_bcn_frm_body_fragmt_id_t)
+#define DOT11_RMREP_BCNRPT_BCN_RPT_ID_MASK 0x00FFu
+#define DOT11_RMREP_BCNRPT_FRAGMT_ID_NUM_SHIFT 8u
+#define DOT11_RMREP_BCNRPT_FRAGMT_ID_NUM_MASK 0x7F00u
+#define DOT11_RMREP_BCNRPT_MORE_FRAG_SHIFT 15u
+#define DOT11_RMREP_BCNRPT_MORE_FRAG_MASK 0x8000u
BWL_PRE_PACKED_STRUCT struct dot11_rmrep_last_bcn_rpt_ind {
uint8 id; /* DOT11_RMREP_BCN_LAST_RPT_IND */
BRCM_FTM_VS_COLLECT_SUBTYPE = 2, /* FTM Collect debug protocol */
};
+/*
+ * This BRCM_PROP_OUI types is intended for use in events to embed additional
+ * data, and would not be expected to appear on the air -- but having an IE
+ * format allows IE frame data with extra data in events in that allows for
+ * more flexible parsing.
+ */
+#define BRCM_EVT_WL_BSS_INFO 64
+
+/**
+ * Following is the generic structure for brcm_prop_ie (uses BRCM_PROP_OUI).
+ * DPT uses this format with type set to DPT_IE_TYPE
+ */
+BWL_PRE_PACKED_STRUCT struct brcm_prop_ie_s {
+ uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */
+ uint8 len; /* IE length */
+ uint8 oui[3];
+ uint8 type; /* type of this IE */
+ uint16 cap; /* DPT capabilities */
+} BWL_POST_PACKED_STRUCT;
+typedef struct brcm_prop_ie_s brcm_prop_ie_t;
+
+#define BRCM_PROP_IE_LEN 6 /* len of fixed part of brcm_prop ie */
+
+#define DPT_IE_TYPE 2
+
+#define BRCM_SYSCAP_IE_TYPE 3
+#define WET_TUNNEL_IE_TYPE 3
+
/* brcm syscap_ie cap */
#define BRCM_SYSCAP_WET_TUNNEL 0x0100 /* Device with WET_TUNNEL support */
VHT_MCS_SS_SUPPORTED(2, mcsMap) ? 2 : \
VHT_MCS_SS_SUPPORTED(1, mcsMap) ? 1 : 0
+#ifdef IBSS_RMC
+/* customer's OUI */
+#define RMC_PROP_OUI "\x00\x16\x32"
+#endif // endif
+
/* ************* WPA definitions. ************* */
#define WPA_OUI "\x00\x50\xF2" /* WPA OUI */
#define WPA_OUI_LEN 3 /* WPA OUI length */
#define WPA_OUI_TYPE 1
#define WPA_VERSION 1 /* WPA version */
#define WPA_VERSION_LEN 2 /* WPA version length */
+
+/* ************* WPA2 definitions. ************* */
#define WPA2_OUI "\x00\x0F\xAC" /* WPA2 OUI */
#define WPA2_OUI_LEN 3 /* WPA2 OUI length */
#define WPA2_VERSION 1 /* WPA2 version */
#define WPA2_VERSION_LEN 2 /* WAP2 version length */
-#define MAX_RSNE_SUPPORTED_VERSION WPA2_VERSION /* Max supported version */
+#define MAX_RSNE_SUPPORTED_VERSION WPA2_VERSION /* Max supported version */
/* ************* WPS definitions. ************* */
#define WPS_OUI "\x00\x50\xF2" /* WPS OUI */
#define WPS_OUI_LEN 3 /* WPS OUI length */
#define WPS_OUI_TYPE 4
+/* ************* TPC definitions. ************* */
+#define TPC_OUI "\x00\x50\xF2" /* TPC OUI */
+#define TPC_OUI_LEN 3 /* TPC OUI length */
+#define TPC_OUI_TYPE 8
+#define WFA_OUI_TYPE_TPC 8 /* deprecated */
+
/* ************* WFA definitions. ************* */
#define WFA_OUI "\x50\x6F\x9A" /* WFA OUI */
-#define WFA_OUI_LEN 3 /* WFA OUI length */
+#define WFA_OUI_LEN 3 /* WFA OUI length */
#define WFA_OUI_TYPE_P2P 9
#ifdef WL_LEGACY_P2P
-#define APPLE_OUI "\x00\x17\xF2" /* MACOSX OUI */
+#define APPLE_OUI "\x00\x17\xF2" /* MACOSX OUI */
#define APPLE_OUI_LEN 3
#define APPLE_OUI_TYPE_P2P 5
#endif /* WL_LEGACY_P2P */
#define P2P_OUI_TYPE APPLE_OUI_TYPE_P2P
#endif /* !WL_LEGACY_P2P */
-#define WFA_OUI_TYPE_TPC 8
#ifdef WLTDLS
#define WFA_OUI_TYPE_TPQ 4 /* WFD Tunneled Probe ReQuest */
#define WFA_OUI_TYPE_TPS 5 /* WFD Tunneled Probe ReSponse */
/* length of data portion of Mobility Domain IE */
#define DOT11_MDID_IE_DATA_LEN 3
-
+#define DOT11_MDID_LEN 2
#define FBT_MDID_CAP_OVERDS 0x01 /* Fast Bss transition over the DS support */
#define FBT_MDID_CAP_RRP 0x02 /* Resource request protocol support */
-/** Fast Bss Transition IE */
+/* Fast Bss Transition IE */
+#ifdef FT_IE_VER_V2
+typedef BWL_PRE_PACKED_STRUCT struct dot11_ft_ie_v2 {
+ uint8 id;
+ uint8 len;
+ uint16 mic_control;
+ /* dynamic offset to following mic[], anonce[], snonce[] */
+} BWL_POST_PACKED_STRUCT dot11_ft_ie_v2;
+typedef struct dot11_ft_ie_v2 dot11_ft_ie_t;
+#else
BWL_PRE_PACKED_STRUCT struct dot11_ft_ie {
uint8 id;
uint8 len; /* At least equal to DOT11_FT_IE_FIXED_LEN (82) */
* optional parameters, which if present, could raise the FT IE length to 255.
*/
#define DOT11_FT_IE_FIXED_LEN 82
-
+#endif /* FT_IE_VER_V2 */
+
+#ifdef FT_IE_VER_V2
+#define DOT11_FT_IE_LEN(mic_len) (sizeof(dot11_ft_ie_v2) + mic_len + EAPOL_WPA_KEY_NONCE_LEN *2)
+#define FT_IE_MIC(pos) ((uint8 *)pos + sizeof(dot11_ft_ie_v2))
+#define FT_IE_ANONCE(pos, mic_len) ((uint8 *)pos + sizeof(dot11_ft_ie_v2) + mic_len)
+#define FT_IE_SNONCE(pos, mic_len) ((uint8 *)pos + sizeof(dot11_ft_ie_v2) + mic_len + \
+ EAPOL_WPA_KEY_NONCE_LEN)
+#else
+#define DOT11_FT_IE_LEN(mic_len) sizeof(dot11_ft_ie)
+#define FT_IE_MIC(pos) ((uint8 *)&pos->mic)
+#define FT_IE_ANONCE(pos, mic_len) ((uint8 *)&pos->anonce)
+#define FT_IE_SNONCE(pos, mic_len) ((uint8 *)&pos->snonce)
+#endif /* FT_IE_VER_V2 */
#define TIE_TYPE_RESERVED 0
#define TIE_TYPE_REASSOC_DEADLINE 1
#define TIE_TYPE_KEY_LIEFTIME 2
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_ftm_vs_ie dot11_ftm_vs_ie_t;
+/* Same as the payload of dot11_ftm_vs_ie.
+ * This definition provides structured access to the payload
+ * while building the FTM VS IE from other modules (e.g. NAN).
+ */
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_vs_ie_pyld {
+ uint8 sub_type; /* BRCM_FTM_IE_TYPE (or Customer) */
+ uint8 version;
+ ftm_vs_tlv_t tlvs[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_vs_ie_pyld dot11_ftm_vs_ie_pyld_t;
+
/* ftm vs api version */
#define BCM_FTM_VS_PARAMS_VERSION 0x01
#define BCM_AIBSS_IE_TYPE 56
+#define SSE_OUI "\x00\x00\xF0"
+#define VENDOR_ENTERPRISE_STA_OUI_TYPE 0x22
+#define MAX_VSIE_DISASSOC (1)
+#define DISCO_VSIE_LEN 0x09u
+
/* This marks the end of a packed structure section. */
#include <packed_section_end.h>
/*
* 802.11e protocol header file
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: 802.11e.h 700076 2017-05-17 14:42:22Z $
+ * $Id: 802.11e.h 785355 2018-10-18 05:32:56Z $
*/
#ifndef _802_11e_H_
/*
* Fundamental types and constants relating to 802.11s Mesh
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Fundamental types and constants relating to 802.1D
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Fundamental constants relating to 802.3
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Broadcom AMBA Interconnect definitions.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* BCM common config options
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* and instrumentation on top of the heap, without modifying the heap
* allocation implementation.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* NOTE: A ring of size N, may only hold N-1 elements.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* private L1 data cache.
* +----------------------------------------------------------------------------
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
--- /dev/null
+/*
+ * Fundamental constants relating to ARP Protocol
+ *
+ * Copyright (C) 1999-2019, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmarp.h 701633 2017-05-25 23:07:17Z $
+ */
+
+#ifndef _bcmarp_h_
+#define _bcmarp_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif // endif
+#include <bcmip.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* Offsets below are relative to the start of the ARP data
+ * (struct bcmarp), not the Ethernet frame.
+ */
+#define ARP_OPC_OFFSET 6 /* option code offset */
+#define ARP_SRC_ETH_OFFSET 8 /* src h/w address offset */
+#define ARP_SRC_IP_OFFSET 14 /* src IP address offset */
+#define ARP_TGT_ETH_OFFSET 18 /* target h/w address offset */
+#define ARP_TGT_IP_OFFSET 24 /* target IP address offset */
+
+#define ARP_OPC_REQUEST 1 /* ARP request */
+#define ARP_OPC_REPLY 2 /* ARP reply */
+
+#define ARP_DATA_LEN 28 /* ARP data length */
+
+#define HTYPE_ETHERNET 1 /* htype for ethernet */
+/* ARP message for IPv4 over Ethernet; fixed 28-byte layout
+ * (matches ARP_DATA_LEN and the *_OFFSET macros above).
+ */
+BWL_PRE_PACKED_STRUCT struct bcmarp {
+	uint16 htype; /* Header type (1 = ethernet) */
+	uint16 ptype; /* Protocol type (0x800 = IP) */
+	uint8 hlen; /* Hardware address length (Eth = 6) */
+	uint8 plen; /* Protocol address length (IP = 4) */
+	uint16 oper; /* ARP_OPC_... */
+	uint8 src_eth[ETHER_ADDR_LEN]; /* Source hardware address */
+	uint8 src_ip[IPV4_ADDR_LEN]; /* Source protocol address (not aligned) */
+	uint8 dst_eth[ETHER_ADDR_LEN]; /* Destination hardware address */
+	uint8 dst_ip[IPV4_ADDR_LEN]; /* Destination protocol address */
+} BWL_POST_PACKED_STRUCT;
+
+/* Ethernet header + Arp message */
+BWL_PRE_PACKED_STRUCT struct bcmetharp {
+	struct ether_header eh;
+	struct bcmarp arp;
+} BWL_POST_PACKED_STRUCT;
+
+/* IPv6 Neighbor Advertisement */
+#define NEIGHBOR_ADVERTISE_SRC_IPV6_OFFSET 8 /* src IPv6 address offset */
+#define NEIGHBOR_ADVERTISE_TYPE_OFFSET 40 /* type offset */
+#define NEIGHBOR_ADVERTISE_CHECKSUM_OFFSET 42 /* check sum offset */
+#define NEIGHBOR_ADVERTISE_FLAGS_OFFSET 44 /* R,S and O flags offset */
+#define NEIGHBOR_ADVERTISE_TGT_IPV6_OFFSET 48 /* target IPv6 address offset */
+#define NEIGHBOR_ADVERTISE_OPTION_OFFSET 64 /* options offset */
+#define NEIGHBOR_ADVERTISE_TYPE 136
+#define NEIGHBOR_SOLICITATION_TYPE 135
+
+#define OPT_TYPE_SRC_LINK_ADDR 1
+#define OPT_TYPE_TGT_LINK_ADDR 2
+
+#define NEIGHBOR_ADVERTISE_DATA_LEN 72 /* neighbor advertisement data length */
+#define NEIGHBOR_ADVERTISE_FLAGS_VALUE 0x60 /* R=0, S=1 and O=1 */
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* !defined(_bcmarp_h_) */
/*
* Bloom filter support
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* Definitions subject to change without notice.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Misc system wide definitions
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmdefs.h 700870 2017-05-22 19:05:22Z $
+ * $Id: bcmdefs.h 788740 2018-11-13 21:45:01Z $
*/
#ifndef _bcmdefs_h_
* Define these diagnostic macros to help suppress cast-qual warning
* until all the work can be done to fix the casting issues.
*/
-#if defined(__GNUC__) && defined(STRICT_GCC_WARNINGS) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
+#if (defined(__GNUC__) && defined(STRICT_GCC_WARNINGS) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6)))
#define GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST() \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
#define GCC_DIAGNOSTIC_POP()
#endif /* Diagnostic macros not defined */
+/* Support clang for MACOSX compiler */
+#ifdef __clang__
+#define CLANG_DIAGNOSTIC_PUSH_SUPPRESS_CAST() \
+ _Pragma("clang diagnostic push") \
+ _Pragma("clang diagnostic ignored \"-Wcast-qual\"")
+#define CLANG_DIAGNOSTIC_PUSH_SUPPRESS_FORMAT() \
+ _Pragma("clang diagnostic push") \
+ _Pragma("clang diagnostic ignored \"-Wformat-nonliteral\"")
+#define CLANG_DIAGNOSTIC_POP() \
+ _Pragma("clang diagnostic pop")
+#else
+#define CLANG_DIAGNOSTIC_PUSH_SUPPRESS_CAST()
+#define CLANG_DIAGNOSTIC_PUSH_SUPPRESS_FORMAT()
+#define CLANG_DIAGNOSTIC_POP()
+#endif // endif
/* Compile-time assert can be used in place of ASSERT if the expression evaluates
* to a constant at compile time.
*/
/*
* Broadcom device-specific manifest constants.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmdevs.h 760196 2018-04-30 10:57:59Z $
+ * $Id: bcmdevs.h 825481 2019-06-14 10:06:03Z $
*/
#ifndef _BCMDEVS_H
#define BCM43751_D11AX_ID 0x4490 /* 43751 802.11ax dualband device */
#define BCM43751_D11AX2G_ID 0x4491 /* 43751 802.11ax 2.4G device */
#define BCM43751_D11AX5G_ID 0x4492 /* 43751 802.11ax 5G device */
+#define BCM43752_D11AX_ID 0x4490 /* 43752 802.11ax dualband device */
+#define BCM43752_D11AX2G_ID 0x4491 /* 43752 802.11ax 2.4G device */
+#define BCM43752_D11AX5G_ID 0x4492 /* 43752 802.11ax 5G device */
#define BCM4364_D11AC_ID 0x4464 /* 4364 802.11ac dualband device */
#define BCM4364_D11AC2G_ID 0x446a /* 4364 802.11ac 2.4G device */
#define BCM4377_CHIP_ID 0x4377 /* 4377/ chipcommon chipid */
#define BCM4362_CHIP_ID 0x4362 /* 4362 chipcommon chipid */
#define BCM43751_CHIP_ID 0xAAE7 /* 43751 chipcommon chipid */
+#define BCM43752_CHIP_ID 0xAAE8 /* 43752 chipcommon chipid */
#define BCM4347_CHIP(chipid) ((CHIPID(chipid) == BCM4347_CHIP_ID) || \
(CHIPID(chipid) == BCM4357_CHIP_ID) || \
/*
* Fundamental constants relating to DHCP Protocol
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Byte order utilities
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmendian.h 633810 2016-04-25 16:46:55Z $
+ * $Id: bcmendian.h 788572 2018-11-13 03:52:19Z $
*
* This file by default provides proper behavior on little-endian architectures.
* On big-endian architectures, IL_BIGENDIAN should be defined.
#define load64_ua(a) ltoh64_ua(a)
#define store64_ua(a, v) htol64_ua_store(v, a)
-#define _LTOH16_UA(cp) ((cp)[0] | ((cp)[1] << 8))
-#define _LTOH32_UA(cp) ((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24))
-#define _NTOH16_UA(cp) (((cp)[0] << 8) | (cp)[1])
-#define _NTOH32_UA(cp) (((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3])
+#define _LTOH16_UA(cp) (uint16)((cp)[0] | ((cp)[1] << 8))
+#define _LTOH32_UA(cp) (uint32)((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24))
+#define _NTOH16_UA(cp) (uint16)(((cp)[0] << 8) | (cp)[1])
+#define _NTOH32_UA(cp) (uint32)(((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3])
#define _LTOH64_UA(cp) ((uint64)(cp)[0] | ((uint64)(cp)[1] << 8) | \
((uint64)(cp)[2] << 16) | ((uint64)(cp)[3] << 24) | \
/*
* Broadcom Ethernettype protocol definitions
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* Dependencies: bcmeth.h
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmevent.h 768756 2018-06-21 05:44:28Z $
+ * $Id: bcmevent.h 822151 2019-05-28 18:37:23Z $
*
*/
#define WLC_E_INVALID_IE 162 /* Received invalid IE */
#define WLC_E_MODE_SWITCH 163 /* Mode switch event */
#define WLC_E_PKT_FILTER 164 /* Packet filter event */
-#define WLC_E_DMA_TXFLUSH_COMPLETE 165 /* TxFlush done before changing
- * tx/rxchain
- */
+#define WLC_E_DMA_TXFLUSH_COMPLETE 165 /* TxFlush done before changing tx/rxchain */
#define WLC_E_FBT 166 /* FBT event */
-#define WLC_E_PFN_SCAN_BACKOFF 167 /* PFN SCAN Backoff event */
+#define WLC_E_PFN_SCAN_BACKOFF 167 /* PFN SCAN Backoff event */
#define WLC_E_PFN_BSSID_SCAN_BACKOFF 168 /* PFN BSSID SCAN BAckoff event */
#define WLC_E_AGGR_EVENT 169 /* Aggregated event */
#define WLC_E_TVPM_MITIGATION 171 /* Change in mitigation applied by TVPM */
-#define WLC_E_SCAN_START 172 /* Deprecated */
-#define WLC_E_SCAN 172 /* Scan event */
-#define WLC_E_MBO 173 /* MBO event */
-#define WLC_E_PHY_CAL 174 /* Phy calibration start indication to host */
-#define WLC_E_RPSNOA 175 /* Radio power save start/end indication to host */
-#define WLC_E_ADPS 176 /* ADPS event */
+#define WLC_E_SCAN_START 172 /* Deprecated */
+#define WLC_E_SCAN 172 /* Scan event */
+#define WLC_E_MBO 173 /* MBO event */
+#define WLC_E_PHY_CAL 174 /* Phy calibration start indication to host */
+#define WLC_E_RPSNOA 175 /* Radio power save start/end indication to host */
+#define WLC_E_ADPS 176 /* ADPS event */
#define WLC_E_SLOTTED_BSS_PEER_OP 177 /* Per peer SCB delete */
-#define WLC_E_HWA 178 /* HWA events */
-#define WLC_E_GTK_KEYROT_NO_CHANSW 179 /* Avoid Chanswitch while GTK key rotation */
+#define WLC_E_HWA 178 /* HWA events */
+#define WLC_E_GTK_KEYROT_NO_CHANSW 179 /* Avoid Chanswitch while GTK key rotation */
#define WLC_E_ONBODY_STATUS_CHANGE 180 /* Indication of onbody status change */
-#define WLC_E_LAST 181 /* highest val + 1 for range checking */
-#if (WLC_E_LAST > 181)
-#error "WLC_E_LAST: Invalid value for last event; must be <= 181."
+#define WLC_E_BCNRECV_ABORTED 181 /* Fake AP bcnrecv aborted roam event */
+#define WLC_E_PMK_INFO 182 /* PMK,PMKID information event */
+#define WLC_E_BSSTRANS 183 /* BSS Transition request / Response */
+#define WLC_E_WA_LQM 184 /* link quality monitoring */
+#define WLC_E_ACTION_FRAME_OFF_CHAN_DWELL_COMPLETE 185 /* action frame off channel
+ * dwell time complete
+ */
+#define WLC_E_WSEC 186 /* wsec keymgmt event */
+#define WLC_E_LAST 187 /* highest val + 1 for range checking */
+#if (WLC_E_LAST > 187)
+#error "WLC_E_LAST: Invalid value for last event; must be <= 187."
#endif /* WLC_E_LAST */
/* define an API for getting the string name of an event */
#define WLC_E_STATUS_SLOTTED_PEER_ADD 17 /* Slotted scb for peer addition status */
#define WLC_E_STATUS_SLOTTED_PEER_DEL 18 /* Slotted scb for peer deletion status */
#define WLC_E_STATUS_RXBCN 19 /* Rx Beacon event for FAKEAP feature */
+#define WLC_E_STATUS_RXBCN_ABORT 20 /* Rx Beacon abort event for FAKEAP feature */
+#define WLC_E_STATUS_LOWPOWER_ON_LOWSPAN 21 /* LOWPOWER scan request during LOWSPAN */
#define WLC_E_STATUS_INVALID 0xff /* Invalid status code to init variables. */
/* 4-way handshake event type */
#define WLC_E_REASON_INFRA_ROAM 2
#define WLC_E_REASON_INFRA_DISASSOC 3
#define WLC_E_REASON_NO_MODE_CHANGE_NEEDED 4
-#define WLC_E_REASON_AWDL_ENABLE 5
-#define WLC_E_REASON_AWDL_DISABLE 6
/* WLC_E_SDB_TRANSITION event data */
#define WL_MAX_BSSCFG 4
/* reason codes for WLC_E_GTK_KEYROT_NO_CHANSW event */
#define WLC_E_GTKKEYROT_SCANDELAY 0 /* Delay scan while gtk in progress */
-#define WLC_E_GTKKEYROT_SKIPCHANSW_AWDL 1 /* Avoid chansw by awdl while gtk in progress */
#define WLC_E_GTKKEYROT_SKIPCHANSW_P2P 2 /* Avoid chansw by p2p while gtk in progress */
/* roam reason codes */
#define WLC_E_REASON_BSSTRANS_REQ 11 /* roamed due to BSS Transition request by AP */
#define WLC_E_REASON_LOW_RSSI_CU 12 /* roamed due to low RSSI and Channel Usage */
#define WLC_E_REASON_RADAR_DETECTED 13 /* roamed due to radar detection by STA */
-#define WLC_E_REASON_CSA 14 /* roamed due to CSA from AP */
-#define WLC_E_REASON_ESTM_LOW 15 /* roamed due to ESTM low tput */
-#define WLC_E_REASON_LAST 16 /* NOTE: increment this as you add reasons above */
+#define WLC_E_REASON_CSA 14 /* roamed due to CSA from AP */
+#define WLC_E_REASON_ESTM_LOW 15 /* roamed due to ESTM low tput */
+#define WLC_E_REASON_SILENT_ROAM 16 /* roamed due to Silent roam */
+#define WLC_E_REASON_INACTIVITY 17 /* full roam scan due to inactivity */
+#define WLC_E_REASON_ROAM_SCAN_TIMEOUT 18 /* roam scan timer timeout */
+#define WLC_E_REASON_LAST 19 /* NOTE: increment this as you add reasons above */
/* prune reason codes */
#define WLC_E_PRUNE_ENCR_MISMATCH 1 /* encryption mismatch */
#define WLC_E_PRUNE_NO_DIAG_SUPPORT 19 /* prune due to diagnostic mode not supported */
#endif /* BCMCCX */
#define WLC_E_PRUNE_AUTH_RESP_MAC 20 /* suppress auth resp by MAC filter */
+#define WLC_E_PRUNE_ASSOC_RETRY_DELAY 21 /* MBO assoc retry delay */
+#define WLC_E_PRUNE_RSSI_ASSOC_REJ 22 /* OCE RSSI-based assoc rejection */
/* WPA failure reason codes carried in the WLC_E_PSK_SUP event */
#define WLC_E_SUP_OTHER 0 /* Other reason */
#define WLC_E_SUP_GTK_UPDATE_FAIL 18 /* GTK update failure */
#define WLC_E_SUP_TK_UPDATE_FAIL 19 /* TK update failure */
#define WLC_E_SUP_KEY_INSTALL_FAIL 20 /* Buffered key install failure */
+#define WLC_E_SUP_PTK_UPDATE 21 /* PTK update */
+#define WLC_E_SUP_MSG1_PMKID_MISMATCH 22 /* MSG1 PMKID not matched to PMKSA cache list */
+
+/* event msg for WLC_E_SUP_PTK_UPDATE */
+typedef struct wlc_sup_ptk_update {
+ uint16 version; /* 0x0001 */
+ uint16 length; /* length of data that follows */
+ uint32 tsf_low; /* tsf at which ptk updated by internal supplicant */
+ uint32 tsf_high;
+ uint8 key_id; /* always 0 for PTK update */
+ uint8 tid; /* tid for the PN below - PTK refresh is per key */
+ uint16 pn_low;
+ uint32 pn_high; /* local highest PN of any tid of the key when M4 was sent */
+} wlc_sup_ptk_update_t;
+
+/* sub event of WLC_E_WSEC */
+typedef enum {
+ WLC_WSEC_EVENT_PTK_PN_SYNC_ERROR = 0x01
+} wl_wsec_event_type_t;
+
+/* sub event msg - WLC_WSEC_EVENT_PTK_PN_SYNC_ERROR */
+struct wlc_wsec_ptk_pn_sync_error_v1 {
+ uint32 tsf_low; /* tsf at which PN sync error happened */
+ uint32 tsf_high;
+ uint8 key_id; /* always 0 for PTK update */
+ uint8 tid; /* tid for the PN below - PTK refresh is per key */
+ uint16 PAD1;
+ uint16 rx_seqn; /* d11 seq number */
+ uint16 pn_low;
+ uint32 pn_high; /* local PN window start for the tid */
+ uint16 key_idx; /* key idx in the keymgmt */
+ uint16 rx_pn_low;
+ uint32 rx_pn_high; /* Rx PN window start for the tid */
+ uint32 span_time; /* time elapsed since replay */
+ uint32 span_pkts; /* pkt count since replay */
+};
+
+typedef struct wlc_wsec_ptk_pn_sync_error_v1 wlc_wsec_ptk_pn_sync_error_t;
+
+/* WLC_E_WSEC event msg */
+typedef struct wlc_wsec_event {
+ uint16 version; /* 0x0001 */
+ uint16 length; /* length of data that follows */
+ uint16 type; /* wsec_event_type_t */
+ uint16 PAD1;
+ union {
+ wlc_wsec_ptk_pn_sync_error_t pn_sync_err;
+ } data;
+} wlc_wsec_event_t;
/* Ucode reason codes carried in the WLC_E_MACDBG event */
#define WLC_E_MACDBG_LIST_PSM 0 /* Dump list update for PSM registers */
#define WLC_E_IF_ROLE_WDS 2 /* WDS link */
#define WLC_E_IF_ROLE_P2P_GO 3 /* P2P Group Owner */
#define WLC_E_IF_ROLE_P2P_CLIENT 4 /* P2P Client */
-#define WLC_E_IF_ROLE_IBSS 8 /* IBSS */
-#define WLC_E_IF_ROLE_NAN 9 /* NAN */
+#define WLC_E_IF_ROLE_IBSS 8 /* IBSS */
+#define WLC_E_IF_ROLE_NAN 9 /* NAN */
/* WLC_E_RSSI event data */
typedef struct wl_event_data_rssi {
int32 noise;
} wl_event_data_rssi_t;
+#define WL_EVENT_WA_LQM_VER 0 /* initial version */
+
+#define WL_EVENT_WA_LQM_BASIC 0 /* event sub-types */
+typedef struct { /* payload of subevent in xtlv */
+ int32 rssi;
+ int32 snr;
+ uint32 tx_rate;
+ uint32 rx_rate;
+} wl_event_wa_lqm_basic_t;
+
+typedef struct wl_event_wa_lqm {
+ uint16 ver; /* version */
+	uint16 len; /* total length of structure */
+ uint8 subevent[]; /* sub-event data in bcm_xtlv_t format */
+} wl_event_wa_lqm_t;
+
/* WLC_E_IF flag */
#define WLC_E_IF_FLAGS_BSSCFG_NOIF 0x1 /* no host I/F creation needed */
#define WLC_E_LINK_DISASSOC 2 /* Link down because of disassoc */
#define WLC_E_LINK_ASSOC_REC 3 /* Link down because assoc recreate failed */
#define WLC_E_LINK_BSSCFG_DIS 4 /* Link down due to bsscfg down */
+#define WLC_E_LINK_ASSOC_FAIL 5 /* Link down because assoc to new AP during roaming failed */
/* WLC_E_NDIS_LINK event data */
typedef BWL_PRE_PACKED_STRUCT struct ndis_link_parms {
WL_NAN_EVENT_SLOT_END = 43, /* SLOT_END event */
WL_NAN_EVENT_HOST_ASSIST_REQ = 44, /* Requesting host assist */
WL_NAN_EVENT_RX_MGMT_FRM = 45, /* NAN management frame received */
+ WL_NAN_EVENT_DISC_CACHE_TIMEOUT = 46, /* Disc cache timeout */
WL_NAN_EVENT_INVALID /* delimiter for max value */
} nan_app_events_e;
CHANSW_NAN = 18, /* channel switch due to NAN */
CHANSW_NAN_DISC = 19, /* channel switch due to NAN Disc */
CHANSW_NAN_SCHED = 20, /* channel switch due to NAN Sched */
- CHANSW_AWDL_AW = 21, /* channel switch due to AWDL aw */
- CHANSW_AWDL_SYNC = 22, /* channel switch due to AWDL sync */
- CHANSW_AWDL_CAL = 23, /* channel switch due to AWDL Cal */
- CHANSW_AWDL_PSF = 24, /* channel switch due to AWDL PSF */
- CHANSW_AWDL_OOB_AF = 25, /* channel switch due to AWDL OOB action frame */
CHANSW_TDLS = 26, /* channel switch due to TDLS */
CHANSW_PROXD = 27, /* channel switch due to PROXD */
CHANSW_SLOTTED_BSS = 28, /* channel switch due to slotted bss */
typedef enum wl_mbo_event_type {
WL_MBO_E_CELLULAR_NW_SWITCH = 1,
+ WL_MBO_E_BTM_RCVD = 2,
/* ADD before this */
- WL_MBO_E_LAST = 2, /* highest val + 1 for range checking */
+ WL_MBO_E_LAST = 3 /* highest val + 1 for range checking */
} wl_mbo_event_type_t;
/* WLC_E_MBO event structure version */
uint32 assoc_time_remain;
};
+/* WLC_E_MBO_BTM_RCVD event structure version */
+#define WL_BTM_EVENT_DATA_VER_1 1
+/* Specific btm event type data */
+struct wl_btm_event_type_data {
+ uint16 version;
+ uint16 len;
+ uint8 transition_reason; /* transition reason code */
+ uint8 pad[3]; /* pad */
+};
+
+/* WLC_E_PRUNE event structure version */
+#define WL_BSSID_PRUNE_EVT_VER_1 1
+/* MBO-OCE params */
+struct wl_bssid_prune_evt_info {
+ uint16 version;
+ uint16 len;
+ uint8 SSID[32];
+ uint32 time_remaining; /* Time remaining */
+ struct ether_addr BSSID;
+ uint8 SSID_len;
+ uint8 reason; /* Reason code */
+ int8 rssi_threshold; /* RSSI threshold */
+ uint8 pad[3]; /* pad */
+};
+
/* WLC_E_HWA Event structure */
typedef struct wl_event_hwa {
uint16 version; /* structure version */
} wl_event_adps_v1_t;
typedef wl_event_adps_v1_t wl_event_adps_t;
+
+#define WLC_USER_E_KEY_UPDATE 1 /* Key add/remove */
+
#endif /* _BCMEVENT_H_ */
--- /dev/null
+/*
+ * Fundamental constants relating to ICMP Protocol
+ *
+ * Copyright (C) 1999-2019, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmicmp.h 700076 2017-05-17 14:42:22Z $
+ */
+
+#ifndef _bcmicmp_h_
+#define _bcmicmp_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif // endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define ICMP_TYPE_ECHO_REQUEST 8 /* ICMP type echo request */
+#define ICMP_TYPE_ECHO_REPLY 0 /* ICMP type echo reply */
+
+#define ICMP_CHKSUM_OFFSET 2 /* ICMP body checksum offset */
+
+/* ICMP6 error and control message types */
+#define ICMP6_DEST_UNREACHABLE 1
+#define ICMP6_PKT_TOO_BIG 2
+#define ICMP6_TIME_EXCEEDED 3
+#define ICMP6_PARAM_PROBLEM 4
+#define ICMP6_ECHO_REQUEST 128
+#define ICMP6_ECHO_REPLY 129
+#define ICMP_MCAST_LISTENER_QUERY 130
+#define ICMP_MCAST_LISTENER_REPORT 131
+#define ICMP_MCAST_LISTENER_DONE 132
+#define ICMP6_RTR_SOLICITATION 133
+#define ICMP6_RTR_ADVERTISEMENT 134
+#define ICMP6_NEIGH_SOLICITATION 135
+#define ICMP6_NEIGH_ADVERTISEMENT 136
+#define ICMP6_REDIRECT 137
+
+#define ICMP6_RTRSOL_OPT_OFFSET 8
+#define ICMP6_RTRADV_OPT_OFFSET 16
+#define ICMP6_NEIGHSOL_OPT_OFFSET 24
+#define ICMP6_NEIGHADV_OPT_OFFSET 24
+#define ICMP6_REDIRECT_OPT_OFFSET 40
+
+BWL_PRE_PACKED_STRUCT struct icmp6_opt {
+ uint8 type; /* Option identifier */
+	uint8	length;		/* Length including type and length */
+ uint8 data[0]; /* Variable length data */
+} BWL_POST_PACKED_STRUCT;
+
+#define ICMP6_OPT_TYPE_SRC_LINK_LAYER 1
+#define ICMP6_OPT_TYPE_TGT_LINK_LAYER 2
+#define ICMP6_OPT_TYPE_PREFIX_INFO 3
+#define ICMP6_OPT_TYPE_REDIR_HDR 4
+#define ICMP6_OPT_TYPE_MTU 5
+
+/* These fields are stored in network order */
+BWL_PRE_PACKED_STRUCT struct bcmicmp_hdr {
+ uint8 type; /* Echo or Echo-reply */
+ uint8 code; /* Always 0 */
+ uint16 chksum; /* Icmp packet checksum */
+} BWL_POST_PACKED_STRUCT;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* #ifndef _bcmicmp_h_ */
* To be used in firmware and host apps or dhd - reducing code size,
* duplication, and maintenance overhead.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Fundamental constants relating to IP Protocol
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmip.h 705929 2017-06-20 00:06:46Z $
+ * $Id: bcmip.h 785436 2018-10-18 17:54:25Z $
*/
#ifndef _bcmip_h_
if (eh->nexthdr == IPV6_EXTHDR_NONE)
return -1;
else if (eh->nexthdr == IPV6_EXTHDR_FRAGMENT)
- hlen = 8;
+ hlen = 8U;
else if (eh->nexthdr == IPV6_EXTHDR_AUTH)
- hlen = (eh->hdrlen + 2) << 2;
+ hlen = (uint16)((eh->hdrlen + 2U) << 2U);
else
- hlen = IPV6_EXTHDR_LEN(eh);
+ hlen = (uint16)IPV6_EXTHDR_LEN(eh);
len += hlen;
eh = (struct ipv6_exthdr *)(h + len);
/*
* Fundamental constants relating to Neighbor Discovery Protocol
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* Definitions subject to change without notice.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmmsgbuf.h 764293 2018-05-24 17:44:43Z $
+ * $Id: bcmmsgbuf.h 814986 2019-04-15 21:18:21Z $
*/
#ifndef _bcmmsgbuf_h_
#define _bcmmsgbuf_h_
#define H2DRING_DYNAMIC_INFO_MAX_ITEM 32
#define D2HRING_DYNAMIC_INFO_MAX_ITEM 32
+#define D2HRING_EDL_HDR_SIZE 48u
#define D2HRING_EDL_ITEMSIZE 2048u
#define D2HRING_EDL_MAX_ITEM 256u
#define D2HRING_EDL_WATERMARK (D2HRING_EDL_MAX_ITEM >> 5u)
uint8 flags;
} pcie_dma_xfer_params_t;
+#define BCMPCIE_FLOW_RING_INTF_HP2P 0x1
/** Complete msgbuf hdr for flow ring update from host to dongle */
typedef struct tx_flowring_create_request {
cmn_msg_hdr_t msg;
/** Control Completion messages (20 bytes) */
typedef struct compl_msg_hdr {
- /** status for the completion */
- int16 status;
+ union {
+ /** status for the completion */
+ int16 status;
+
+ /* mutually exclusive with pkt fate debug feature */
+ struct pktts_compl_hdr {
+ uint16 d_t4; /* Delta TimeStamp 3: T4-tref */
+ } tx_pktts;
+ };
/** submisison flow ring id which generated this status */
union {
uint16 ring_id;
typedef ts_timestamp_t ts_correction_m_t;
typedef ts_timestamp_t ts_correction_b_t;
+typedef struct _pktts {
+ uint32 tref; /* Ref Clk in uSec (currently, tsf) */
+ uint16 d_t2; /* Delta TimeStamp 1: T2-tref */
+ uint16 d_t3; /* Delta TimeStamp 2: T3-tref */
+} pktts_t;
+
/* completion header status codes */
#define BCMPCIE_SUCCESS 0
#define BCMPCIE_NOTFOUND 1
/** rx status */
uint32 rx_status_0;
uint32 rx_status_1;
- /** XOR checksum or a magic number to audit DMA done */
- /* This is for rev6 only. For IPC rev7, this is a reserved field */
- dma_done_t marker;
- /* timestamp */
- ipc_timestamp_t ts;
+
+ union { /* size per IPC = (3 x uint32) bytes */
+ struct {
+ /* used by Monitor mode */
+ uint32 marker;
+ /* timestamp */
+ ipc_timestamp_t ts;
+ };
+
+ /* LatTS_With_XORCSUM */
+ struct {
+ /* latency timestamp */
+ pktts_t rx_pktts;
+ /* XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker_ext;
+ };
+ };
} host_rxbuf_cmpl_t;
typedef union rxbuf_complete_item {
struct {
/** extended transmit flags */
uint8 ext_flags;
- uint8 rsvd1;
+ uint8 scale_factor;
/** user defined rate */
uint8 rate;
- uint8 rsvd2;
+ uint8 exp_time;
};
/** XOR checksum or a magic number to audit DMA done */
dma_done_t marker;
#define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_MASK 0x03 /* Exempt uses 2 bits */
#define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_SHIFT 0x02 /* needs to be shifted past other bits */
+#define BCMPCIE_PKT_FLAGS_EPOCH_SHIFT 3u
+#define BCMPCIE_PKT_FLAGS_EPOCH_MASK (1u << BCMPCIE_PKT_FLAGS_EPOCH_SHIFT)
+
#define BCMPCIE_PKT_FLAGS_PRIO_SHIFT 5
#define BCMPCIE_PKT_FLAGS_PRIO_MASK (7 << BCMPCIE_PKT_FLAGS_PRIO_SHIFT)
#define BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU 0x00
cmn_msg_hdr_t cmn_hdr;
/** completion message header */
compl_msg_hdr_t compl_hdr;
- union {
+
+ union { /* size per IPC = (3 x uint32) bytes */
+ /* Usage 1: TxS_With_TimeSync */
+ struct {
+ struct {
+ union {
+ /** provided meta data len */
+ uint16 metadata_len;
+ /** provided extended TX status */
+ uint16 tx_status_ext;
+ }; /*Ext_TxStatus */
+
+ /** WLAN side txstatus */
+ uint16 tx_status;
+ }; /* TxS */
+ /* timestamp */
+ ipc_timestamp_t ts;
+ }; /* TxS_with_TS */
+
+ /* Usage 2: LatTS_With_XORCSUM */
struct {
- union {
- /** provided meta data len */
- uint16 metadata_len;
- /** provided extended TX status */
- uint16 tx_status_ext;
- };
- /** WLAN side txstatus */
- uint16 tx_status;
+ /* latency timestamp */
+ pktts_t tx_pktts;
+ /* XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker_ext;
};
- /** XOR checksum or a magic number to audit DMA done */
- /* This is for rev6 only. For IPC rev7, this is not used */
- dma_done_t marker;
};
- /* timestamp */
- ipc_timestamp_t ts;
} host_txbuf_cmpl_t;
unsigned char check[D2HRING_TXCMPLT_ITEMSIZE];
} txbuf_complete_item_t;
+#define PCIE_METADATA_VER 1u
+
+/* version and length are not part of this structure.
+ * dhd queries version and length through bus iovar "bus:metadata_info".
+ */
+struct metadata_txcmpl_v1 {
+ uint32 tref; /* TSF or Ref Clock in uSecs */
+ uint16 d_t2; /* T2-fwt1 delta */
+ uint16 d_t3; /* T3-fwt1 delta */
+ uint16 d_t4; /* T4-fwt1 delta */
+ uint16 rsvd; /* reserved */
+};
+typedef struct metadata_txcmpl_v1 metadata_txcmpl_t;
+
#define BCMPCIE_D2H_METADATA_HDRLEN 4
#define BCMPCIE_D2H_METADATA_MINLEN (BCMPCIE_D2H_METADATA_HDRLEN + 4)
/*
* NVRAM variable manipulation
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* Software-specific definitions shared between device and host side
* Explains the shared area between host and dongle
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmpcie.h 765083 2018-05-31 02:45:13Z $
+ * $Id: bcmpcie.h 821465 2019-05-23 19:50:00Z $
*/
#ifndef _bcmpcie_h_
#define PCIE_SHARED_IDLE_FLOW_RING 0x80000
#define PCIE_SHARED_2BYTE_INDICES 0x100000
-#define PCIE_SHARED2_EXTENDED_TRAP_DATA 0x00000001 /* using flags2 in shared area */
-#define PCIE_SHARED2_TXSTATUS_METADATA 0x00000002
-#define PCIE_SHARED2_BT_LOGGING 0x00000004 /* BT logging support */
-#define PCIE_SHARED2_SNAPSHOT_UPLOAD 0x00000008 /* BT/WLAN snapshot upload support */
-#define PCIE_SHARED2_SUBMIT_COUNT_WAR 0x00000010 /* submission count WAR */
-#define PCIE_SHARED2_FW_SMALL_MEMDUMP 0x00000200 /* FW small memdump */
-#define PCIE_SHARED2_DEBUG_BUF_DEST 0x00002000 /* debug buf dest support */
#define PCIE_SHARED_FAST_DELETE_RING 0x00000020 /* Fast Delete Ring */
#define PCIE_SHARED_EVENT_BUF_POOL_MAX 0x000000c0 /* event buffer pool max bits */
#define PCIE_SHARED_EVENT_BUF_POOL_MAX_POS 6 /* event buffer pool max bit position */
* field got added and the definition for these flags come here:
*/
/* WAR: D11 txstatus through unused status field of PCIe completion header */
-#define PCIE_SHARED2_D2H_D11_TX_STATUS 0x40000000
-#define PCIE_SHARED2_H2D_D11_TX_STATUS 0x80000000
+#define PCIE_SHARED2_EXTENDED_TRAP_DATA 0x00000001 /* using flags2 in shared area */
+#define PCIE_SHARED2_TXSTATUS_METADATA 0x00000002
+#define PCIE_SHARED2_BT_LOGGING 0x00000004 /* BT logging support */
+#define PCIE_SHARED2_SNAPSHOT_UPLOAD 0x00000008 /* BT/WLAN snapshot upload support */
+#define PCIE_SHARED2_SUBMIT_COUNT_WAR 0x00000010 /* submission count WAR */
+#define PCIE_SHARED2_FAST_DELETE_RING 0x00000020 /* Fast Delete ring support */
+#define PCIE_SHARED2_EVTBUF_MAX_MASK 0x000000C0 /* 0:32, 1:64, 2:128, 3: 256 */
-#define PCIE_SHARED2_EXTENDED_TRAP_DATA 0x00000001
+/* using flags2 to indicate firmware support added to reuse timesync to update PKT txstatus */
+#define PCIE_SHARED2_PKT_TX_STATUS 0x00000100
+#define PCIE_SHARED2_FW_SMALL_MEMDUMP 0x00000200 /* FW small memdump */
+#define PCIE_SHARED2_FW_HC_ON_TRAP 0x00000400
+#define PCIE_SHARED2_HSCB 0x00000800 /* Host SCB support */
-#define PCIE_SHARED2_TXSTATUS_METADATA 0x00000002
+#define PCIE_SHARED2_EDL_RING 0x00001000 /* Support Enhanced Debug Lane */
+#define PCIE_SHARED2_DEBUG_BUF_DEST 0x00002000 /* debug buf dest support */
+#define PCIE_SHARED2_PCIE_ENUM_RESET_FLR 0x00004000 /* BT producer index reset WAR */
+#define PCIE_SHARED2_PKT_TIMESTAMP 0x00008000 /* Timestamp in packet */
-/* BT logging support */
-#define PCIE_SHARED2_BT_LOGGING 0x00000004
-/* BT/WLAN snapshot upload support */
-#define PCIE_SHARED2_SNAPSHOT_UPLOAD 0x00000008
-/* submission count WAR */
-#define PCIE_SHARED2_SUBMIT_COUNT_WAR 0x00000010
+#define PCIE_SHARED2_HP2P 0x00010000u /* HP2P feature */
+#define PCIE_SHARED2_HWA 0x00020000u /* HWA feature */
+#define PCIE_SHARED2_TRAP_ON_HOST_DB7 0x00040000u /* can take a trap on DB7 from host */
-/* Fast Delete ring support */
-#define PCIE_SHARED2_FAST_DELETE_RING 0x00000020
+#define PCIE_SHARED2_DURATION_SCALE 0x00100000u
-/* Host SCB support */
-#define PCIE_SHARED2_HSCB 0x00000800
+#define PCIE_SHARED2_D2H_D11_TX_STATUS 0x40000000
+#define PCIE_SHARED2_H2D_D11_TX_STATUS 0x80000000
#define PCIE_SHARED_D2H_MAGIC 0xFEDCBA09
#define PCIE_SHARED_H2D_MAGIC 0x12345678
-#define PCIE_SHARED2_PKT_TX_STATUS 0x00000100 /* using flags2 to indicate
- firmware support added to reuse
- timesync to update PKT txstatus
- */
-/* Support Enhanced Debug Lane */
-#define PCIE_SHARED2_EDL_RING 0x00001000
+typedef uint16 pcie_hwa_db_index_t; /* 16 bit HWA index (IPC Rev 7) */
+#define PCIE_HWA_DB_INDEX_SZ (2u) /* 2 bytes sizeof(pcie_hwa_db_index_t) */
/**
* Message rings convey messages between host and device. They are unidirectional, and are located
#define BCMPCIE_D2H_RING_TYPE_RX_CPL 0x3
#define BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL 0x4
#define BCMPCIE_D2H_RING_TYPE_AC_RX_COMPLETE 0x5
-#define BCMPCIE_D2H_RING_TYPE_BTLOG_CPL 0x6
+#define BCMPCIE_D2H_RING_TYPE_BTLOG_CPL 0x6
#define BCMPCIE_D2H_RING_TYPE_EDL 0x7
+#define BCMPCIE_D2H_RING_TYPE_HPP_TX_CPL 0x8
+#define BCMPCIE_D2H_RING_TYPE_HPP_RX_CPL 0x9
/**
* H2D and D2H, WR and RD index, are maintained in the following arrays:
uint16 max_vdevs; /* max number of virtual interfaces supported */
sh_addr_t ifrm_w_idx_hostaddr; /* Array of all H2D ring's WR indices for IFRM */
+
+ /* 32bit ptr to arrays of HWA DB indices for all rings in dongle memory */
+ uint32 h2d_hwa_db_idx_ptr; /* Array of all H2D ring's HWA DB indices */
+ uint32 d2h_hwa_db_idx_ptr; /* Array of all D2H ring's HWA DB indices */
+
} ring_info_t;
/**
/* location in host memory for offloaded modules */
sh_addr_t hoffload_addr;
+ uint32 flags3;
+ uint32 host_cap2;
+ uint32 host_cap3;
} pciedev_shared_t;
/* Device F/W provides the following access function:
#define HOSTCAP_EXT_TRAP_DBGBUF 0x04000000
/* Host support for enhanced debug lane */
#define HOSTCAP_EDL_RING 0x10000000
+#define HOSTCAP_PKT_TIMESTAMP 0x20000000
+#define HOSTCAP_PKT_HP2P 0x40000000
+#define HOSTCAP_HWA 0x80000000
+#define HOSTCAP2_DURATION_SCALE_MASK 0x0000003Fu
/* extended trap debug buffer allocation sizes. Note that this buffer can be used for
* other trap related purposes also.
/* D2H mail box Data */
#define D2H_DEV_D3_ACK 0x00000001
-#define D2H_DEV_DS_ENTER_REQ 0x00000002
-#define D2H_DEV_DS_EXIT_NOTE 0x00000004
-#define D2HMB_DS_HOST_SLEEP_EXIT_ACK 0x00000008
+#define D2H_DEV_DS_ENTER_REQ 0x00000002
+#define D2H_DEV_DS_EXIT_NOTE 0x00000004
+#define D2HMB_DS_HOST_SLEEP_EXIT_ACK 0x00000008
#define D2H_DEV_IDMA_INITED 0x00000010
+#define D2HMB_DS_HOST_SLEEP_ACK D2H_DEV_D3_ACK
+#define D2HMB_DS_DEVICE_SLEEP_ENTER_REQ D2H_DEV_DS_ENTER_REQ
+#define D2HMB_DS_DEVICE_SLEEP_EXIT D2H_DEV_DS_EXIT_NOTE
+
+#define D2H_DEV_MB_MASK (D2H_DEV_D3_ACK | D2H_DEV_DS_ENTER_REQ | \
+ D2H_DEV_DS_EXIT_NOTE | D2H_DEV_IDMA_INITED)
+#define D2H_DEV_MB_INVALIDATED(x) ((!x) || (x & ~D2H_DEV_MB_MASK))
+
+/* trap data codes */
#define D2H_DEV_FWHALT 0x10000000
-#define D2H_DEV_TRAP_PING_HOST_FAILURE 0x08000000
-#define D2H_DEV_EXT_TRAP_DATA 0x20000000
-#define D2H_DEV_TRAP_IN_TRAP 0x40000000
-#define D2H_DEV_TRAP_DUE_TO_BT 0x01000000
+#define D2H_DEV_EXT_TRAP_DATA 0x20000000
+#define D2H_DEV_TRAP_IN_TRAP 0x40000000
+#define D2H_DEV_TRAP_HOSTDB 0x80000000 /* trap as set by host DB */
+#define D2H_DEV_TRAP_DUE_TO_BT 0x01000000
/* Indicates trap due to HMAP violation */
-#define D2H_DEV_TRAP_DUE_TO_HMAP 0x02000000
+#define D2H_DEV_TRAP_DUE_TO_HMAP 0x02000000
/* Indicates whether HMAP violation was Write */
-#define D2H_DEV_TRAP_HMAP_WRITE 0x04000000
+#define D2H_DEV_TRAP_HMAP_WRITE 0x04000000
+#define D2H_DEV_TRAP_PING_HOST_FAILURE 0x08000000
+#define D2H_FWTRAP_MASK 0x0000001F /* Adding maskbits for TRAP information */
-#define D2HMB_DS_HOST_SLEEP_ACK D2H_DEV_D3_ACK
-#define D2HMB_DS_DEVICE_SLEEP_ENTER_REQ D2H_DEV_DS_ENTER_REQ
-#define D2HMB_DS_DEVICE_SLEEP_EXIT D2H_DEV_DS_EXIT_NOTE
#define D2HMB_FWHALT D2H_DEV_FWHALT
#define D2HMB_TRAP_IN_TRAP D2H_DEV_TRAP_IN_TRAP
#define D2HMB_EXT_TRAP_DATA D2H_DEV_EXT_TRAP_DATA
-#define D2H_FWTRAP_MASK 0x0000001F /* Adding maskbits for TRAP information */
-#define D2H_DEV_MB_MASK (D2H_DEV_D3_ACK | D2H_DEV_DS_ENTER_REQ | \
- D2H_DEV_DS_EXIT_NOTE | D2H_DEV_IDMA_INITED | D2H_DEV_FWHALT | \
- D2H_FWTRAP_MASK | D2H_DEV_EXT_TRAP_DATA | D2H_DEV_TRAP_IN_TRAP)
-#define D2H_DEV_MB_INVALIDATED(x) ((!x) || (x & ~D2H_DEV_MB_MASK))
/* Size of Extended Trap data Buffer */
#define BCMPCIE_EXT_TRAP_DATA_MAXLEN 4096
/*
* Broadcom PCI-SPI Host Controller Register Definitions
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Performance counters software interface.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* Definitions for API from sdio common code (bcmsdh) to individual
* host controller drivers.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* export functions to client drivers
* abstract OS and BUS specific details of SDIO
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* Broadcom SDIO/PCMCIA
* Software-specific definitions shared between device and host side
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
#define SDPCM_SHARED_SET_BRPT 0x1000
#define SDPCM_SHARED_PENDING_BRPT 0x2000
#define SDPCM_SHARED_FATAL_LOGBUF_VALID 0x100000
+#define SDPCM_SHARED_RXLIM_POST 0x4000
typedef struct {
uint32 flags;
/*
* SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* 'Standard' SDIO HOST CONTROLLER driver
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Broadcom SPI Low-Level Hardware Driver API
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* SD-SPI Protocol Conversion - BCMSDH->gSPI Translation Layer
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* SROM format definition.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Table that encodes the srom formats for PCI/PCIe NICs.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Broadcom Secure Standard Library.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
#ifndef _bcmstdlib_s_h_
#define _bcmstdlib_s_h_
+#ifndef BWL_NO_INTERNAL_STDLIB_SUPPORT
#if !defined(__STDC_WANT_SECURE_LIB__) && !(defined(__STDC_LIB_EXT1__) && \
defined(__STDC_WANT_LIB_EXT1__))
extern int memmove_s(void *dest, size_t destsz, const void *src, size_t n);
extern size_t strlcpy(char *dest, const char *src, size_t size);
#endif // endif
extern size_t strlcat_s(char *dest, const char *src, size_t size);
-
+#endif /* !BWL_NO_INTERNAL_STDLIB_SUPPORT */
#endif /* _bcmstdlib_s_h_ */
/*
* Fundamental constants relating to TCP Protocol
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* TLV and XTLV support
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/* find the next tlv */
bcm_tlv_t *bcm_next_tlv(const bcm_tlv_t *elt, uint *buflen);
+/* move buffer/buflen up to the given tlv, or set to NULL/0 on error */
+void bcm_tlv_buffer_advance_to(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen);
+
+/* move buffer/buflen past the given tlv, or set to NULL/0 on error */
+void bcm_tlv_buffer_advance_past(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen);
+
/* find the tlv for a given id */
bcm_tlv_t *bcm_parse_tlvs(const void *buf, uint buflen, uint key);
#define BCM_XTLV_OPTION_ALIGN32 0x0001 /* 32bit alignment of type.len.data */
#define BCM_XTLV_OPTION_IDU8 0x0002 /* shorter id */
#define BCM_XTLV_OPTION_LENU8 0x0004 /* shorted length */
+#define BCM_XTLV_OPTION_IDBE 0x0008 /* big endian format id */
+#define BCM_XTLV_OPTION_LENBE 0x0010 /* big endian format length */
typedef uint16 bcm_xtlv_opts_t;
/* header size. depends on options. Macros names ending w/ _EX are where
/*
* Fundamental constants relating to UDP Protocol
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Misc useful os-independent macros and functions.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmutils.h 769659 2018-06-27 05:22:10Z $
+ * $Id: bcmutils.h 813798 2019-04-08 10:20:21Z $
*/
#ifndef _bcmutils_h_
#define DSCP_AF21 0x12
#define DSCP_AF22 0x14
#define DSCP_AF23 0x16
+/* CS2: OAM (RFC2474) */
+#define DSCP_CS2 0x10
/* AF3x: Multimedia Streaming (RFC2597) */
#define DSCP_AF31 0x1A
#define DSCP_AF32 0x1C
#define DSCP_AF33 0x1E
+/* CS3: Broadcast Video (RFC2474) */
+#define DSCP_CS3 0x18
+/* VA: VOICE-ADMIT (RFC5865) */
+#define DSCP_VA 0x2C
/* EF: Telephony (RFC3246) */
#define DSCP_EF 0x2E
+/* CS6: Network Control (RFC2474) */
+#define DSCP_CS6 0x30
+/* CS7: Network Control (RFC2474) */
+#define DSCP_CS7 0x38
extern uint pktsetprio(void *pkt, bool update_vtag);
extern uint pktsetprio_qms(void *pkt, uint8* up_table, bool update_vtag);
#define SIZE_OF(type, field) sizeof(((type *)0)->field)
#ifndef ARRAYSIZE
-#define ARRAYSIZE(a) (sizeof(a) / sizeof(a[0]))
+#define ARRAYSIZE(a) (uint32)(sizeof(a) / sizeof(a[0]))
#endif // endif
#ifndef ARRAYLAST /* returns pointer to last array element */
return ((*a >> pos) & MSK); \
}
-DECLARE_MAP_API(2, 4, 1, 15U, 0x0003) /* setbit2() and getbit2() */
-DECLARE_MAP_API(4, 3, 2, 7U, 0x000F) /* setbit4() and getbit4() */
-DECLARE_MAP_API(8, 2, 3, 3U, 0x00FF) /* setbit8() and getbit8() */
+DECLARE_MAP_API(2, 4, 1, 15U, 0x0003U) /* setbit2() and getbit2() */
+DECLARE_MAP_API(4, 3, 2, 7U, 0x000FU) /* setbit4() and getbit4() */
+DECLARE_MAP_API(8, 2, 3, 3U, 0x00FFU) /* setbit8() and getbit8() */
/* basic mux operation - can be optimized on several architectures */
#define MUX(pred, true, false) ((pred) ? (true) : (false))
char *buf, uint32 bufsize);
extern uint bcm_bitcount(uint8 *bitmap, uint bytelength);
-extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...);
+extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...)
+ __attribute__ ((format (__printf__, 2, 0)));
/* power conversion */
extern uint16 bcm_qdbm_to_mw(uint8 qdbm);
while (u32arg) {
shifts++; u32arg >>= 1;
}
- return (32U - shifts);
+ return (32 - shifts);
}
/* the format of current TCM layout during boot
#endif // endif
#ifdef SHOW_LOGTRACE
#define TRACE_LOG_BUF_MAX_SIZE 1700
+#define RTT_LOG_BUF_MAX_SIZE 1700
#define BUF_NOT_AVAILABLE 0
#define NEXT_BUF_NOT_AVAIL 1
#define NEXT_BUF_AVAIL 2
/*
* Definitions for nl80211 vendor command/event access to host driver
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: brcm_nl80211.h 768773 2018-06-21 08:38:23Z $
+ * $Id: brcm_nl80211.h 787269 2018-11-01 11:46:31Z $
*
*/
#define OUI_GOOGLE 0x001A11
enum wl_vendor_subcmd {
- BRCM_VENDOR_SCMD_UNSPEC,
- BRCM_VENDOR_SCMD_PRIV_STR,
- BRCM_VENDOR_SCMD_BCM_STR,
- BRCM_VENDOR_SCMD_BCM_PSK
-};
-
-enum brcm_nl80211_vendor_events {
- BRCM_VENDOR_EVENT_UNSPEC,
- BRCM_VENDOR_EVENT_PRIV_STR,
- BRCM_VENDOR_EVENT_HANGED = 33,
- BRCM_VENDOR_EVENT_SAE_KEY = 34,
- BRCM_VENDOR_EVENT_BEACON_RECV = 35
+ BRCM_VENDOR_SCMD_UNSPEC = 0,
+ BRCM_VENDOR_SCMD_PRIV_STR = 1,
+ BRCM_VENDOR_SCMD_BCM_STR = 2,
+ BRCM_VENDOR_SCMD_BCM_PSK = 3,
+ BRCM_VENDOR_SCMD_SET_PMK = 4,
+ BRCM_VENDOR_SCMD_GET_FEATURES = 5,
+ BRCM_VENDOR_SCMD_MAX = 6
};
struct bcm_nlmsg_hdr {
* Dongle BUS interface Abstraction layer
* target serial buses like USB, SDIO, SPI, etc.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Header file for DHD daemon to handle timeouts
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_daemon.h 671442 2016-11-22 05:16:18Z $
+ * $Id: dhd_daemon.h 802947 2019-02-05 14:53:08Z $
*/
#ifndef __BCM_DHDD_H__
#define NO_TRAP 0
#define DO_TRAP 1
-#define BCM_NL_USER 31
+/* Keep common BCM netlink macros here */
+#define BCM_NL_USER 31
+#define BCM_NL_OXYGEN 30
+#define BCM_NL_TS 29
+/* ====== !! ADD NEW NL socket related defines here !! ====== */
typedef enum notify_dhd_daemon_reason {
REASON_COMMAND_TO,
*
* Definitions subject to change without notice.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhdioctl.h 765806 2018-06-05 13:56:08Z $
+ * $Id: dhdioctl.h 800512 2019-01-22 09:31:01Z $
*/
#ifndef _dhdioctl_h_
DMA_XFER_FAILED
} dma_xfer_status_t;
+typedef enum d11_lpbk_type {
+ M2M_DMA_LPBK = 0,
+ D11_LPBK = 1,
+ BMC_LPBK = 2,
+ M2M_NON_DMA_LPBK = 3,
+ D11_HOST_MEM_LPBK = 4,
+ BMC_HOST_MEM_LPBK = 5,
+ MAX_LPBK = 6
+} dma_xfer_type_t;
+
+typedef struct dmaxfer_info {
+ uint16 version;
+ uint16 length;
+ dma_xfer_status_t status;
+ dma_xfer_type_t type;
+ uint src_delay;
+ uint dest_delay;
+ uint should_wait;
+ uint core_num;
+ int error_code;
+ uint32 num_bytes;
+ uint64 time_taken;
+ uint64 tput;
+} dma_xfer_info_t;
+
+#define DHD_DMAXFER_VERSION 0x1
+
typedef struct tput_test {
uint16 version;
uint16 length;
typedef struct debug_buf_dest_stat {
uint32 stat[DEBUG_BUF_DEST_MAX];
} debug_buf_dest_stat_t;
+
#endif /* _dhdioctl_h_ */
/*
- * Broadcom Event protocol definitions
+ * Broadcom Event protocol definitions
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Dependencies: bcmeth.h
+ *
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
- * Dependencies: bcmeth.h
- *
- * $Id: dnglevent.h $
*
* <<Broadcom-WL-IPTag/Open:>>
*
+ * $Id: dnglevent.h $
+ *
* -----------------------------------------------------------------------------
*
*/
#define HEALTH_CHECK_PCIEDEV_D3ACK_STALL_IND 0x5
#define HEALTH_CHECK_PCIEDEV_NODS_IND 0x6
#define HEALTH_CHECK_PCIEDEV_LINKSPEED_FALLBACK_IND 0x7
+#define HEALTH_CHECK_PCIEDEV_DSACK_STALL_IND 0x8
-#define HC_PCIEDEV_CONFIG_REGLIST_MAX 20
+#define HC_PCIEDEV_CONFIG_REGLIST_MAX 25
typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_pcie_hc {
uint16 version; /* HEALTH_CHECK_PCIEDEV_VERSION_1 */
uint16 reserved;
* IEEE Std 802.1X-2001
* IEEE 802.1X RADIUS Usage Guidelines
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: eapol.h 767212 2018-06-13 00:17:23Z $
+ * $Id: eapol.h 809460 2019-03-14 00:35:24Z $
*/
#ifndef _eapol_h_
#define EAPOL_WPA_PMK_SHA384_LEN 48u
#define EAPOL_WPA_PMK_DEFAULT_LEN 32u
#define EAPOL_WPA_KCK_DEFAULT_LEN 16u
+#define EAPOL_WPA_KCK_SHA384_LEN 24u
#define EAPOL_WPA_KCK_MIC_DEFAULT_LEN 16u
+#define EAPOL_WPA_KCK_MIC_SHA384_LEN 24u
#define EAPOL_WPA_ENCR_KEY_DEFAULT_LEN 16u
+#define EAPOL_WPA_KEK2_SHA256_LEN 16u
+#define EAPOL_WPA_KEK2_SHA384_LEN 32u
+#define EAPOL_WPA_KCK2_SHA256_LEN 16u
+#define EAPOL_WPA_KCK2_SHA384_LEN 24u
+
#ifndef EAPOL_KEY_HDR_VER_V2
#define EAPOL_WPA_KEY_MIC_LEN 16u /* deprecated */
#define EAPOL_WPA_KEY_LEN 95u /* deprecated */
#endif // endif
+#define EAPOL_PTK_KEY_MAX_LEN (EAPOL_WPA_KEY_MAX_MIC_LEN +\
+ EAPOL_WPA_ENCR_KEY_MAX_LEN +\
+ EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN +\
+ EAPOL_WPA_KCK2_SHA384_LEN +\
+ EAPOL_WPA_KEK2_SHA384_LEN)
+
#ifndef EAPOL_KEY_HDR_VER_V2
+
/* WPA EAPOL-Key : deprecated */
typedef BWL_PRE_PACKED_STRUCT struct {
unsigned char type; /* Key Descriptor Type */
#define WPA_KEY_SECURE 0x200
#define WPA_KEY_ERROR 0x400
#define WPA_KEY_REQ 0x800
+#define WPA_KEY_ENC_KEY_DATA 0x01000 /* Encrypted Key Data */
+#define WPA_KEY_SMK_MESSAGE 0x02000 /* SMK Message */
#define WPA_KEY_DESC_VER(_ki) ((_ki) & 0x03u)
#define WPA_KEY_DESC_V2_OR_V3 WPA_KEY_DESC_V2
/*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
#define EPI_MINOR_VERSION 10
-#define EPI_RC_NUMBER 315
+#define EPI_RC_NUMBER 545
#define EPI_INCREMENTAL_NUMBER 0
#define EPI_BUILD_NUMBER 0
-#define EPI_VERSION 100, 10, 315, 0
+#define EPI_VERSION 100, 10, 545, 0
-#define EPI_VERSION_NUM 0x640a13b0
+#define EPI_VERSION_NUM 0x640a2210
-#define EPI_VERSION_DEV 100.10.315
+#define EPI_VERSION_DEV 100.10.545
/* Driver Version String, ASCII, 32 chars max */
-#define EPI_VERSION_STR "100.10.315.3 (r771911)"
+#define EPI_VERSION_STR "100.10.545.4 (r826445-20190826-1) (amlogic-dbg-20190830-1)"
#endif /* _epivers_h_ */
/*
* Extended Trap data component interface file.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: etd.h 751605 2018-03-13 03:32:30Z $
+ * $Id: etd.h 813064 2019-04-03 11:29:38Z $
*/
#ifndef _ETD_H_
TAG_TRAP_LOG_DATA = 20,
TAG_TRAP_CODE = 21, /* The trap type */
TAG_TRAP_HMAP = 22, /* HMAP violation Address and Info */
+ TAG_TRAP_PCIE_ERR_ATTN = 23, /* PCIE error attn log */
+ TAG_TRAP_AXI_ERROR = 24, /* AXI Error */
+ TAG_TRAP_AXI_HOST_INFO = 25, /* AXI Host log */
+ TAG_TRAP_AXI_SR_ERROR = 26, /* AXI SR error log */
TAG_TRAP_LAST /* This must be the last entry */
} hnd_ext_tag_trap_t;
uint32 itipoobdout;
} hnd_ext_trap_bp_err_t;
+#define HND_EXT_TRAP_AXISR_INFO_VER_1 1
+typedef struct hnd_ext_trap_axi_sr_err_v1
+{
+ uint8 version;
+ uint8 pad[3];
+ uint32 error;
+ uint32 coreid;
+ uint32 baseaddr;
+ uint32 ioctrl;
+ uint32 iostatus;
+ uint32 resetctrl;
+ uint32 resetstatus;
+ uint32 resetreadid;
+ uint32 resetwriteid;
+ uint32 errlogctrl;
+ uint32 errlogdone;
+ uint32 errlogstatus;
+ uint32 errlogaddrlo;
+ uint32 errlogaddrhi;
+ uint32 errlogid;
+ uint32 errloguser;
+ uint32 errlogflags;
+ uint32 itipoobaout;
+ uint32 itipoobbout;
+ uint32 itipoobcout;
+ uint32 itipoobdout;
+
+ /* axi_sr_issue_debug */
+ uint32 sr_pwr_control;
+ uint32 sr_corereset_wrapper_main;
+ uint32 sr_corereset_wrapper_aux;
+ uint32 sr_main_gci_status_0;
+ uint32 sr_aux_gci_status_0;
+ uint32 sr_dig_gci_status_0;
+} hnd_ext_trap_axi_sr_err_v1_t;
+
#define HND_EXT_TRAP_PSMWD_INFO_VER 1
typedef struct hnd_ext_trap_psmwd_v1 {
uint16 xtag;
uint16 txqueue_len[MEM_TRAP_NUM_WLC_TX_QUEUES];
} hnd_ext_trap_wlc_mem_err_v2_t;
+#define HND_EXT_TRAP_WLC_MEM_ERR_VER_V3 3
+
+typedef struct hnd_ext_trap_wlc_mem_err_v3 {
+ uint8 version;
+ uint8 instance;
+ uint8 stas_associated;
+ uint8 aps_associated;
+ uint8 soft_ap_client_cnt;
+ uint8 peer_cnt;
+ uint16 txqueue_len[MEM_TRAP_NUM_WLC_TX_QUEUES];
+} hnd_ext_trap_wlc_mem_err_v3_t;
+
typedef struct hnd_ext_trap_pcie_mem_err {
uint16 d2h_queue_len;
uint16 d2h_req_queue_len;
} hnd_ext_trap_pcie_mem_err_t;
+#define MAX_DMAFIFO_ENTRIES_V1 1
+#define MAX_DMAFIFO_DESC_ENTRIES_V1 2
+#define HND_EXT_TRAP_AXIERROR_SIGNATURE 0xbabebabe
+#define HND_EXT_TRAP_AXIERROR_VERSION_1 1
+
+/* Structure to collect debug info of descriptor entry for dma channel on encountering AXI Error */
+/* Below three structures are dependent; any change will bump the version of all three */
+
+typedef struct hnd_ext_trap_desc_entry_v1 {
+ uint32 ctrl1; /* descriptor entry at din < misc control bits > */
+ uint32 ctrl2; /* descriptor entry at din <buffer count and address extension> */
+ uint32 addrlo; /* descriptor entry at din <address of data buffer, bits 31:0> */
+ uint32 addrhi; /* descriptor entry at din <address of data buffer, bits 63:32> */
+} dma_dentry_v1_t;
+
+/* Structure to collect debug info about a dma channel on encountering AXI Error */
+typedef struct hnd_ext_trap_dma_fifo_v1 {
+ uint8 valid; /* no of valid desc entries filled, non zero = fifo entry valid */
+ uint8 direction; /* TX=1, RX=2, currently only using TX */
+ uint16 index; /* Index of the DMA channel in system */
+ uint32 dpa; /* Expected Address of Descriptor table from software state */
+ uint32 desc_lo; /* Low Address of Descriptor table programmed in DMA register */
+ uint32 desc_hi; /* High Address of Descriptor table programmed in DMA register */
+ uint16 din; /* rxin / txin */
+ uint16 dout; /* rxout / txout */
+	dma_dentry_v1_t dentry[MAX_DMAFIFO_DESC_ENTRIES_V1]; /* Descriptor Entries */
+} dma_fifo_v1_t;
+
+typedef struct hnd_ext_trap_axi_error_v1 {
+ uint8 version; /* version = 1 */
+ uint8 dma_fifo_valid_count; /* Number of valid dma_fifo entries */
+ uint16 length; /* length of whole structure */
+ uint32 signature; /* indicate that its filled with AXI Error data */
+ uint32 axi_errorlog_status; /* errlog_status from slave wrapper */
+ uint32 axi_errorlog_core; /* errlog_core from slave wrapper */
+ uint32 axi_errorlog_lo; /* errlog_lo from slave wrapper */
+ uint32 axi_errorlog_hi; /* errlog_hi from slave wrapper */
+ uint32 axi_errorlog_id; /* errlog_id from slave wrapper */
+ dma_fifo_v1_t dma_fifo[MAX_DMAFIFO_ENTRIES_V1];
+} hnd_ext_trap_axi_error_v1_t;
+
#define HND_EXT_TRAP_MACSUSP_INFO_VER 1
typedef struct hnd_ext_trap_macsusp {
uint16 xtag;
uint16 PAD;
} hnd_ext_trap_macenab_t;
+#define HND_EXT_TRAP_PHY_INFO_VER_1 (1)
typedef struct hnd_ext_trap_phydbg {
uint16 err;
uint16 RxFeStatus;
uint16 core_mask;
} reg_dump_config_t;
-#define HND_EXT_TRAP_PHY_INFO_VER 2
+#define HND_EXT_TRAP_PHY_INFO_VER 2
typedef struct hnd_ext_trap_phydbg_v2 {
uint8 version;
uint8 len;
uint32 additional_regs[1];
} hnd_ext_trap_phydbg_v2_t;
+#define HND_EXT_TRAP_PHY_INFO_VER_3 (3)
+typedef struct hnd_ext_trap_phydbg_v3 {
+ uint8 version;
+ uint8 len;
+ uint16 err;
+ uint16 RxFeStatus;
+ uint16 TxFIFOStatus0;
+ uint16 TxFIFOStatus1;
+ uint16 RfseqMode;
+ uint16 RfseqStatus0;
+ uint16 RfseqStatus1;
+ uint16 RfseqStatus_Ocl;
+ uint16 RfseqStatus_Ocl1;
+ uint16 OCLControl1;
+ uint16 TxError;
+ uint16 bphyTxError;
+ uint16 TxCCKError;
+ uint16 TxCtrlWrd0;
+ uint16 TxCtrlWrd1;
+ uint16 TxCtrlWrd2;
+ uint16 TxLsig0;
+ uint16 TxLsig1;
+ uint16 TxVhtSigA10;
+ uint16 TxVhtSigA11;
+ uint16 TxVhtSigA20;
+ uint16 TxVhtSigA21;
+ uint16 txPktLength;
+ uint16 txPsdulengthCtr;
+ uint16 gpioClkControl;
+ uint16 gpioSel;
+ uint16 pktprocdebug;
+ uint32 gpioOut[3];
+ uint16 HESigURateFlagStatus;
+ uint16 HESigUsRateFlagStatus;
+ uint32 additional_regs[1];
+} hnd_ext_trap_phydbg_v3_t;
+
/* Phy TxErr Dump Structure */
#define HND_EXT_TRAP_PHYTXERR_INFO_VER 1
#define HND_EXT_TRAP_PHYTXERR_INFO_VER_V2 2
uint32 recv_fifo_status[3][2]; /* Rcv Status0 & Rcv Status1 for 3 Rx fifos */
} hnd_ext_trap_macphytxerr_v2_t;
+#define HND_EXT_TRAP_PCIE_ERR_ATTN_VER_1 (1u)
+#define MAX_AER_HDR_LOG_REGS (4u)
+typedef struct hnd_ext_trap_pcie_err_attn_v1 {
+ uint8 version;
+ uint8 pad[3];
+ uint32 err_hdr_logreg1;
+ uint32 err_hdr_logreg2;
+ uint32 err_hdr_logreg3;
+ uint32 err_hdr_logreg4;
+ uint32 err_code_logreg;
+ uint32 err_type;
+ uint32 err_code_state;
+ uint32 last_err_attn_ts;
+ uint32 cfg_tlp_hdr[MAX_AER_HDR_LOG_REGS];
+} hnd_ext_trap_pcie_err_attn_v1_t;
+
#define MAX_EVENTLOG_BUFFERS 48
typedef struct eventlog_trapdata_info {
uint32 num_elements;
/*
* From FreeBSD 2.2.7: Fundamental constants relating to ethernet.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* EVENT_LOG system definitions
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* This file describes the payloads of event log entries that are data buffers
* rather than formatted string entries. The contents are generally XTLVs.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: event_log_payload.h 768232 2018-06-19 05:28:22Z $
+ * $Id: event_log_payload.h 825102 2019-06-12 22:26:41Z $
*/
#ifndef _EVENT_LOG_PAYLOAD_H_
#include <ethernet.h>
#include <event_log_tag.h>
+/**
+ * A (legacy) timestamp message
+ */
+typedef struct ts_message {
+ uint32 timestamp;
+ uint32 cyclecount;
+} ts_msg_t;
+
+/**
+ * Enhanced timestamp message
+ */
+typedef struct enhanced_ts_message {
+ uint32 version;
+ /* More data, depending on version */
+ uint8 data[];
+} ets_msg_t;
+
+#define ENHANCED_TS_MSG_VERSION_1 (1u)
+
+/**
+ * Enhanced timestamp message, version 1
+ */
+typedef struct enhanced_ts_message_v1 {
+ uint32 version;
+ uint32 timestamp; /* PMU time, in milliseconds */
+ uint32 cyclecount;
+ uint32 cpu_freq;
+} ets_msg_v1_t;
+
#define EVENT_LOG_XTLV_ID_STR 0 /**< XTLV ID for a string */
#define EVENT_LOG_XTLV_ID_TXQ_SUM 1 /**< XTLV ID for txq_summary_t */
#define EVENT_LOG_XTLV_ID_SCBDATA_SUM 2 /**< XTLV ID for cb_subq_summary_t */
/* Scan flags */
#define SCAN_SUM_CHAN_INFO 0x1
/* Scan_sum flags */
-#define BAND5G_SIB_ENAB 0x2
-#define BAND2G_SIB_ENAB 0x4
-#define PARALLEL_SCAN 0x8
-#define SCAN_ABORT 0x10
+#define BAND5G_SIB_ENAB 0x2
+#define BAND2G_SIB_ENAB 0x4
+#define PARALLEL_SCAN 0x8
+#define SCAN_ABORT 0x10
+#define SC_LOWSPAN_SCAN 0x20
+#define SC_SCAN 0x40
/* scan_channel_info flags */
#define ACTIVE_SCAN_SCN_SUM 0x2
#define SCAN_SUM_WLC_CORE0 0x4
#define SCAN_SUM_WLC_CORE1 0x8
-#define HOME_CHAN 0x10
+#define HOME_CHAN 0x10
+#define SCAN_SUM_SCAN_CORE 0x20
typedef struct wl_scan_ssid_info
{
/* Sub-block type for EVENT_LOG_TAG_AMPDU_DUMP */
typedef enum {
- WL_AMPDU_STATS_TYPE_RXMCSx1 = 0, /* RX MCS rate (Nss = 1) */
- WL_AMPDU_STATS_TYPE_RXMCSx2 = 1,
- WL_AMPDU_STATS_TYPE_RXMCSx3 = 2,
- WL_AMPDU_STATS_TYPE_RXMCSx4 = 3,
- WL_AMPDU_STATS_TYPE_RXVHTx1 = 4, /* RX VHT rate (Nss = 1) */
- WL_AMPDU_STATS_TYPE_RXVHTx2 = 5,
- WL_AMPDU_STATS_TYPE_RXVHTx3 = 6,
- WL_AMPDU_STATS_TYPE_RXVHTx4 = 7,
- WL_AMPDU_STATS_TYPE_TXMCSx1 = 8, /* TX MCS rate (Nss = 1) */
- WL_AMPDU_STATS_TYPE_TXMCSx2 = 9,
- WL_AMPDU_STATS_TYPE_TXMCSx3 = 10,
- WL_AMPDU_STATS_TYPE_TXMCSx4 = 11,
- WL_AMPDU_STATS_TYPE_TXVHTx1 = 12, /* TX VHT rate (Nss = 1) */
- WL_AMPDU_STATS_TYPE_TXVHTx2 = 13,
- WL_AMPDU_STATS_TYPE_TXVHTx3 = 14,
- WL_AMPDU_STATS_TYPE_TXVHTx4 = 15,
- WL_AMPDU_STATS_TYPE_RXMCSSGI = 16, /* RX SGI usage (for all MCS rates) */
- WL_AMPDU_STATS_TYPE_TXMCSSGI = 17, /* TX SGI usage (for all MCS rates) */
- WL_AMPDU_STATS_TYPE_RXVHTSGI = 18, /* RX SGI usage (for all VHT rates) */
- WL_AMPDU_STATS_TYPE_TXVHTSGI = 19, /* TX SGI usage (for all VHT rates) */
- WL_AMPDU_STATS_TYPE_RXMCSPER = 20, /* RX PER (for all MCS rates) */
- WL_AMPDU_STATS_TYPE_TXMCSPER = 21, /* TX PER (for all MCS rates) */
- WL_AMPDU_STATS_TYPE_RXVHTPER = 22, /* RX PER (for all VHT rates) */
- WL_AMPDU_STATS_TYPE_TXVHTPER = 23, /* TX PER (for all VHT rates) */
- WL_AMPDU_STATS_TYPE_RXDENS = 24, /* RX AMPDU density */
- WL_AMPDU_STATS_TYPE_TXDENS = 25, /* TX AMPDU density */
- WL_AMPDU_STATS_TYPE_RXMCSOK = 26, /* RX all MCS rates */
- WL_AMPDU_STATS_TYPE_RXVHTOK = 27, /* RX all VHT rates */
- WL_AMPDU_STATS_TYPE_TXMCSALL = 28, /* TX all MCS rates */
- WL_AMPDU_STATS_TYPE_TXVHTALL = 29, /* TX all VHT rates */
- WL_AMPDU_STATS_TYPE_TXMCSOK = 30, /* TX all MCS rates */
- WL_AMPDU_STATS_TYPE_TXVHTOK = 31, /* TX all VHT rates */
- WL_AMPDU_STATS_MAX_CNTS = 64
+ WL_AMPDU_STATS_TYPE_RXMCSx1 = 0, /* RX MCS rate (Nss = 1) */
+ WL_AMPDU_STATS_TYPE_RXMCSx2 = 1,
+ WL_AMPDU_STATS_TYPE_RXMCSx3 = 2,
+ WL_AMPDU_STATS_TYPE_RXMCSx4 = 3,
+ WL_AMPDU_STATS_TYPE_RXVHTx1 = 4, /* RX VHT rate (Nss = 1) */
+ WL_AMPDU_STATS_TYPE_RXVHTx2 = 5,
+ WL_AMPDU_STATS_TYPE_RXVHTx3 = 6,
+ WL_AMPDU_STATS_TYPE_RXVHTx4 = 7,
+ WL_AMPDU_STATS_TYPE_TXMCSx1 = 8, /* TX MCS rate (Nss = 1) */
+ WL_AMPDU_STATS_TYPE_TXMCSx2 = 9,
+ WL_AMPDU_STATS_TYPE_TXMCSx3 = 10,
+ WL_AMPDU_STATS_TYPE_TXMCSx4 = 11,
+ WL_AMPDU_STATS_TYPE_TXVHTx1 = 12, /* TX VHT rate (Nss = 1) */
+ WL_AMPDU_STATS_TYPE_TXVHTx2 = 13,
+ WL_AMPDU_STATS_TYPE_TXVHTx3 = 14,
+ WL_AMPDU_STATS_TYPE_TXVHTx4 = 15,
+ WL_AMPDU_STATS_TYPE_RXMCSSGI = 16, /* RX SGI usage (for all MCS rates) */
+ WL_AMPDU_STATS_TYPE_TXMCSSGI = 17, /* TX SGI usage (for all MCS rates) */
+ WL_AMPDU_STATS_TYPE_RXVHTSGI = 18, /* RX SGI usage (for all VHT rates) */
+ WL_AMPDU_STATS_TYPE_TXVHTSGI = 19, /* TX SGI usage (for all VHT rates) */
+ WL_AMPDU_STATS_TYPE_RXMCSPER = 20, /* RX PER (for all MCS rates) */
+ WL_AMPDU_STATS_TYPE_TXMCSPER = 21, /* TX PER (for all MCS rates) */
+ WL_AMPDU_STATS_TYPE_RXVHTPER = 22, /* RX PER (for all VHT rates) */
+ WL_AMPDU_STATS_TYPE_TXVHTPER = 23, /* TX PER (for all VHT rates) */
+ WL_AMPDU_STATS_TYPE_RXDENS = 24, /* RX AMPDU density */
+ WL_AMPDU_STATS_TYPE_TXDENS = 25, /* TX AMPDU density */
+ WL_AMPDU_STATS_TYPE_RXMCSOK = 26, /* RX all MCS rates */
+ WL_AMPDU_STATS_TYPE_RXVHTOK = 27, /* RX all VHT rates */
+ WL_AMPDU_STATS_TYPE_TXMCSALL = 28, /* TX all MCS rates */
+ WL_AMPDU_STATS_TYPE_TXVHTALL = 29, /* TX all VHT rates */
+ WL_AMPDU_STATS_TYPE_TXMCSOK = 30, /* TX all MCS rates */
+ WL_AMPDU_STATS_TYPE_TXVHTOK = 31, /* TX all VHT rates */
+ WL_AMPDU_STATS_TYPE_RX_HE_SUOK = 32, /* DL SU MPDU frame per MCS */
+ WL_AMPDU_STATS_TYPE_RX_HE_SU_DENS = 33, /* DL SU AMPDU DENSITY */
+ WL_AMPDU_STATS_TYPE_RX_HE_MUMIMOOK = 34, /* DL MUMIMO Frame per MCS */
+ WL_AMPDU_STATS_TYPE_RX_HE_MUMIMO_DENS = 35, /* DL MUMIMO AMPDU Density */
+ WL_AMPDU_STATS_TYPE_RX_HE_DLOFDMAOK = 36, /* DL OFDMA Frame per MCS */
+ WL_AMPDU_STATS_TYPE_RX_HE_DLOFDMA_DENS = 37, /* DL OFDMA AMPDU Density */
+ WL_AMPDU_STATS_TYPE_RX_HE_DLOFDMA_HIST = 38, /* DL OFDMA frame RU histogram */
+ WL_AMPDU_STATS_TYPE_TX_HE_MCSALL = 39, /* TX HE (SU+MU) frames, all rates */
+ WL_AMPDU_STATS_TYPE_TX_HE_MCSOK = 40, /* TX HE (SU+MU) frames succeeded */
+ WL_AMPDU_STATS_TYPE_TX_HE_MUALL = 41, /* TX MU (UL OFDMA) frames all rates */
+ WL_AMPDU_STATS_TYPE_TX_HE_MUOK = 42, /* TX MU (UL OFDMA) frames succeeded */
+ WL_AMPDU_STATS_TYPE_TX_HE_RUBW = 43, /* TX UL RU by BW histogram */
+ WL_AMPDU_STATS_TYPE_TX_HE_PADDING = 44, /* TX padding total (single value) */
+ WL_AMPDU_STATS_MAX_CNTS = 64
} wl_ampdu_stat_enum_t;
typedef struct {
uint16 type; /* AMPDU statistics sub-type */
uint32 aggr_dist[WL_AMPDU_STATS_MAX_CNTS + 1];
} wl_ampdu_stats_aggrsz_t;
+/* Sub-block type for WL_IFSTATS_XTLV_HE_TXMU_STATS */
+typedef enum {
+ /* Reserve 0 to avoid potential concerns */
+ WL_HE_TXMU_STATS_TYPE_TIME = 1, /* per-dBm, total usecs transmitted */
+ WL_HE_TXMU_STATS_TYPE_PAD_TIME = 2, /* per-dBm, padding usecs transmitted */
+} wl_he_txmu_stat_enum_t;
+#define WL_IFSTATS_HE_TXMU_MAX 32u
+
/* Sub-block type for EVENT_LOG_TAG_MSCHPROFILE */
#define WL_MSCH_PROFILER_START 0 /* start event check */
#define WL_MSCH_PROFILER_EXIT 1 /* exit event check */
phy_periodic_log_core_t phy_perilog_core[1];
} phy_periodic_log_v1_t;
+/* Note: The version 2 is reserved for 4357 only. Future chips must not use this version. */
+
+#define MAX_CORE_4357 (2u)
+#define PHYCAL_LOG_VER2 (2u)
+#define PHY_PERIODIC_LOG_VER2 (2u)
+
+typedef struct {
+ uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 rxstrt; /**< number of received frames with a good PLCP */
+ uint32 rxbadplcp; /**< number of parity check of the PLCP header failed */
+ uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
+ uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
+ uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
+ uint32 rxbeaconmbss; /**< beacons received from member of BSS */
+ uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
+ uint32 rxf0ovfl; /**< Rx FIFO0 overflow counters information */
+ uint32 rxf1ovfl; /**< Rx FIFO1 overflow counters information */
+ uint32 rxdtocast; /**< number of received DATA frames (good FCS and no matching RA) */
+ uint32 rxtoolate; /**< receive too late */
+ uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
+} phy_periodic_counters_v2_t;
+
+/* Note: The version 2 is reserved for 4357 only. All future chips must not use this version. */
+
+typedef struct phycal_log_core_v2 {
+ uint16 ofdm_txa; /* OFDM Tx IQ Cal a coeff */
+ uint16 ofdm_txb; /* OFDM Tx IQ Cal b coeff */
+ uint16 ofdm_txd; /* contain di & dq */
+ uint16 rxa; /* Rx IQ Cal A coefficient */
+ uint16 rxb; /* Rx IQ Cal B coefficient */
+ uint8 baseidx; /* TPC Base index */
+ uint8 pad;
+ int32 rxs; /* FDIQ Slope coefficient */
+} phycal_log_core_v2_t;
+
+/* Note: The version 2 is reserved for 4357 only. All future chips must not use this version. */
+
+typedef struct phycal_log_v2 {
+ uint8 version; /* Logging structure version */
+ uint16 length; /* Length of the entire structure */
+ uint8 pad;
+ phycal_log_cmn_t phycal_log_cmn; /* Logging common structure */
+ phycal_log_core_v2_t phycal_log_core[MAX_CORE_4357];
+} phycal_log_v2_t;
+
+/* Note: The version 2 is reserved for 4357 only. All future chips must not use this version. */
+
+typedef struct phy_periodic_log_v2 {
+ uint8 version; /* Logging structure version */
+ uint16 length; /* Length of the entire structure */
+ uint8 pad;
+ phy_periodic_log_cmn_t phy_perilog_cmn;
+ phy_periodic_counters_v2_t counters_peri_log;
+ phy_periodic_log_core_t phy_perilog_core[MAX_CORE_4357];
+} phy_periodic_log_v2_t;
+
+/* Event log payload for enhanced roam log */
+typedef enum {
+ ROAM_LOG_SCANSTART = 1, /* EVT log for roam scan start */
+ ROAM_LOG_SCAN_CMPLT = 2, /* EVT log for roam scan completed */
+ ROAM_LOG_ROAM_CMPLT = 3, /* EVT log for roam done */
+ ROAM_LOG_NBR_REQ = 4, /* EVT log for Neighbor REQ */
+ ROAM_LOG_NBR_REP = 5, /* EVT log for Neighbor REP */
+ ROAM_LOG_BCN_REQ = 6, /* EVT log for BCNRPT REQ */
+ ROAM_LOG_BCN_REP = 7, /* EVT log for BCNRPT REP */
+ PRSV_PERIODIC_ID_MAX
+} prsv_periodic_id_enum_t;
+
+typedef struct prsv_periodic_log_hdr {
+ uint8 version;
+ uint8 id;
+ uint16 length;
+} prsv_periodic_log_hdr_t;
+
+#define ROAM_LOG_VER_1 (1u)
+#define ROAM_LOG_TRIG_VER (1u)
+#define ROAM_SSID_LEN (32u)
+typedef struct roam_log_trig_v1 {
+ prsv_periodic_log_hdr_t hdr;
+ int8 rssi;
+ uint8 current_cu;
+ uint8 pad[2];
+ uint reason;
+ int result;
+ union {
+ struct {
+ uint rcvd_reason;
+ } prt_roam;
+ struct {
+ uint8 req_mode;
+ uint8 token;
+ uint16 nbrlist_size;
+ uint32 disassoc_dur;
+ uint32 validity_dur;
+ uint32 bss_term_dur;
+ } bss_trans;
+ };
+} roam_log_trig_v1_t;
+
+#define ROAM_LOG_RPT_SCAN_LIST_SIZE 3
+#define ROAM_LOG_INVALID_TPUT 0xFFFFFFFFu
+typedef struct roam_scan_ap_info {
+ int8 rssi;
+ uint8 pad[3];
+ uint32 score;
+ uint16 chanspec;
+ struct ether_addr addr;
+ uint32 estm_tput;
+} roam_scan_ap_info_t;
+
+typedef struct roam_log_scan_cmplt_v1 {
+ prsv_periodic_log_hdr_t hdr;
+ uint8 full_scan;
+ uint8 scan_count;
+ uint8 scan_list_size;
+ uint8 pad;
+ int32 score_delta;
+ roam_scan_ap_info_t cur_info;
+ roam_scan_ap_info_t scan_list[ROAM_LOG_RPT_SCAN_LIST_SIZE];
+} roam_log_scan_cmplt_v1_t;
+
+typedef struct roam_log_cmplt_v1 {
+ prsv_periodic_log_hdr_t hdr;
+ uint status;
+ uint reason;
+ uint16 chanspec;
+ struct ether_addr addr;
+ uint8 pad[3];
+ uint8 retry;
+} roam_log_cmplt_v1_t;
+
+typedef struct roam_log_nbrrep {
+ prsv_periodic_log_hdr_t hdr;
+ uint channel_num;
+} roam_log_nbrrep_v1_t;
+
+typedef struct roam_log_nbrreq {
+ prsv_periodic_log_hdr_t hdr;
+ uint token;
+} roam_log_nbrreq_v1_t;
+
+typedef struct roam_log_bcnrptreq {
+ prsv_periodic_log_hdr_t hdr;
+ int32 result;
+ uint8 reg;
+ uint8 channel;
+ uint8 mode;
+ uint8 bssid_wild;
+ uint8 ssid_len;
+ uint8 pad;
+ uint16 duration;
+ uint8 ssid[ROAM_SSID_LEN];
+} roam_log_bcnrpt_req_v1_t;
+
+typedef struct roam_log_bcnrptrep {
+ prsv_periodic_log_hdr_t hdr;
+ uint32 count;
+} roam_log_bcnrpt_rep_v1_t;
+
#endif /* _EVENT_LOG_PAYLOAD_H_ */
/*
* EVENT_LOG system definitions
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: event_log_set.h 771154 2018-07-09 05:46:33Z $
+ * $Id: event_log_set.h 818566 2019-05-08 04:01:45Z $
*/
#ifndef _EVENT_LOG_SET_H_
#define _EVENT_LOG_SET_H_
-#ifndef NUM_EVENT_LOG_SETS
-/* Set a maximum number of sets here. It is not dynamic for
- * efficiency of the EVENT_LOG calls. Old branches could define
- * this to an appropriat enumber in their makefiles to reduce
- * ROM invalidation
- */
-#define NUM_EVENT_LOG_SETS (24)
-#endif // endif
-
/* Set assignments */
#define EVENT_LOG_SET_BUS (0u)
#define EVENT_LOG_SET_WL (1u)
/* Used for timestamp plotting, TS_LOG() */
#define EVENT_LOG_SET_TS_LOG (23u)
+/* BUS preserve chatty */
+#define EVENT_LOG_SET_PRSRV_BUS_CHATTY (24u)
+
+/* PRESERVE_PERIODIC_LOG_SET */
+/* flush if host is in D0 at every period */
+#define EVENT_LOG_SET_PRSV_PERIODIC (25u)
+
+#ifndef NUM_EVENT_LOG_SETS
+/* Set a maximum number of sets here. It is not dynamic for
+ * efficiency of the EVENT_LOG calls. Old branches could define
+ * this to an appropriate number in their makefiles to reduce
+ * ROM invalidation
+ */
+#ifdef NUM_EVENT_LOG_SETS_V2
+/* for v2, everything has become unsigned */
+#define NUM_EVENT_LOG_SETS (26u)
+#else /* NUM_EVENT_LOG_SETS_V2 */
+#define NUM_EVENT_LOG_SETS (26)
+#endif /* NUM_EVENT_LOG_SETS_V2 */
+#endif /* NUM_EVENT_LOG_SETS */
+
/* send delayed logs when >= 50% of buffer is full */
#ifndef ECOUNTERS_DELAYED_FLUSH_PERCENTAGE
#define ECOUNTERS_DELAYED_FLUSH_PERCENTAGE (50)
/*
* EVENT_LOG system definitions
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: event_log_tag.h 771154 2018-07-09 05:46:33Z $
+ * $Id: event_log_tag.h 820429 2019-05-17 22:30:04Z $
*/
#ifndef _EVENT_LOG_TAG_H_
#define EVENT_LOG_TAG_PCI_DBG 52
#define EVENT_LOG_TAG_PCI_DATA 53
#define EVENT_LOG_TAG_PCI_RING 54
-/* EVENT_LOG_TAG_AWDL_TRACE_RANGING will be removed after wlc_ranging merge from IGUANA
- * keeping it here to avoid compilation error on trunk
- */
-#define EVENT_LOG_TAG_AWDL_TRACE_RANGING 55
#define EVENT_LOG_TAG_RANGING_TRACE 55
#define EVENT_LOG_TAG_WL_ERROR 56
#define EVENT_LOG_TAG_PHY_ERROR 57
#define EVENT_LOG_TAG_MIMO_PS_INFO 94
#define EVENT_LOG_TAG_BTCX_STATS 95
#define EVENT_LOG_TAG_LEAKY_AP_STATS 96
-#define EVENT_LOG_TAG_AWDL_TRACE_ELECTION 97
#define EVENT_LOG_TAG_MIMO_PS_STATS 98
#define EVENT_LOG_TAG_PWRSTATS_PHY 99
#define EVENT_LOG_TAG_PWRSTATS_SCAN 100
-#define EVENT_LOG_TAG_PWRSTATS_AWDL 101
#define EVENT_LOG_TAG_PWRSTATS_WAKE_V2 102
#define EVENT_LOG_TAG_LQM 103
#define EVENT_LOG_TAG_TRACE_WL_INFO 104
#define EVENT_LOG_TAG_STATS 153
#define EVENT_LOG_TAG_BAM 154
#define EVENT_LOG_TAG_TXFAIL 155
-#define EVENT_LOG_TAG_AWDL_CONFIG_DBG 156
-#define EVENT_LOG_TAG_AWDL_SYNC_DBG 157
-#define EVENT_LOG_TAG_AWDL_PEER_DBG 158
#define EVENT_LOG_TAG_RANDMAC_INFO 159
#define EVENT_LOG_TAG_RANDMAC_DBG 160
#define EVENT_LOG_TAG_RANDMAC_ERR 161
-#define EVENT_LOG_TAG_AWDL_DFSP_DBG 162
#define EVENT_LOG_TAG_MSCH_CAL 163
#define EVENT_LOG_TAG_MSCH_OPP_CAL 164
#define EVENT_LOG_TAG_MSCH 165
#define EVENT_LOG_TAG_PHY_ACI_INFO 208
#define EVENT_LOG_TAG_WL_COUNTERS_AUX 209
#define EVENT_LOG_TAG_AMPDU_DUMP_AUX 210
-#define EVENT_LOG_TAG_PWRSTATS_AWDL_AUX 211
#define EVENT_LOG_TAG_PWRSTATS_PHY_AUX 212
#define EVENT_LOG_TAG_PWRSTATS_SCAN_AUX 213
#define EVENT_LOG_TAG_PWRSTATS_WAKE_V2_AUX 214
#define EVENT_LOG_TAG_FILS_ERROR 221
#define EVENT_LOG_TAG_HWA_TXPOST 222
#define EVENT_LOG_TAG_HWA_TXDMA 223
+#define EVENT_LOG_TAG_PPR_ERROR 224
+
/* Arbitrator callback log tags */
#define EVENT_LOG_TAG_STF_ARB_CB_TRACE 224
#define EVENT_LOG_TAG_STF_ARB_CB_ERROR 225
#define EVENT_LOG_TAG_PHY_PERIODIC_SEC 226
+#define EVENT_LOG_TAG_RTE_ERROR 227
+#define EVENT_LOG_TAG_CPLT_ERROR 228
+#define EVENT_LOG_TAG_DNGL_ERROR 229
+#define EVENT_LOG_TAG_NVRAM_ERROR 230
+#define EVENT_LOG_TAG_NAC 231
+#define EVENT_LOG_TAG_HP2P_ERR 232
+#define EVENT_LOG_TAG_SB_SCHED_DBG_SYNC 233
+#define EVENT_LOG_TAG_ENHANCED_TS 234
+
+/* Available space for new tags for Dingo, Iguana and branches
+ * prior to Koala only. From Koala onwards, new tags must be greater
+ * than 255. If a tag is required for Koala and legacy productization branches,
+ * add that tag here. Tags > 255 will generate extended header. Legacy code
+ * does not understand extended header.
+ */
/* Debug tags for making debug builds */
#define EVENT_LOG_TAG_DBG1 251
/* RTE */
#define EVENT_LOG_TAG_RTE_ERR 312
+/* TX FIFO */
+#define EVENT_LOG_TAG_FIFO_INFO 313
+
+/* PKTTS */
+#define EVENT_LOG_TAG_LATENCY_INFO 314
+
+/* TDLS */
+#define EVENT_LOG_TAG_WL_TDLS_INFO 315
+#define EVENT_LOG_TAG_WL_TDLS_DBG 316
+#define EVENT_LOG_TAG_WL_TDLS_ERR 317
+
+/* MSCH messages */
+#define EVENT_LOG_TAG_MSCH_DATASTRUCT 319
+#define EVENT_LOG_TAG_MSCH_REGISTER 320
+#define EVENT_LOG_TAG_MSCH_CALLBACK 321
+#define EVENT_LOG_TAG_MSCH_ERROR 322
+#define EVENT_LOG_TAG_MSCH_DEBUG 323
+#define EVENT_LOG_TAG_MSCH_INFORM 324
+#define EVENT_LOG_TAG_MSCH_TRACE 325
+
+/* bus low power related info messages */
+#define EVENT_LOG_TAG_WL_BUS_LP_INFO 326
+#define EVENT_LOG_TAG_PCI_LP_INFO 327
+
+/* SBSS BT-Coex */
+#define EVENT_LOG_TAG_SB_BTCX_INFO 328
+
+/* wbus */
+#define EVENT_LOG_TAG_WBUS_ERR 329
+#define EVENT_LOG_TAG_WBUS_INFO 330
+#define EVENT_LOG_TAG_WBUS_SCHED 331
+
+/* MODESW */
+#define EVENT_LOG_TAG_MODESW_ERR 332
+
+/* LPHS */
+#define EVENT_LOG_TAG_LPHS_ERR 333
+
+/* CPU statistics */
+#define EVENT_LOG_TAG_ARM_STAT 334
+
+/* Event log tags for SOE */
+#define EVENT_LOG_TAG_SOE_ERROR 335
+#define EVENT_LOG_TAG_SOE_INFO 336
+
+/* Event log tags for GCI Shared Memory */
+#define EVENT_LOG_TAG_GCISHM_ERR 337
+#define EVENT_LOG_TAG_GCISHM_INFO 338
+
+/* Event log tags for Enhanced Roam Log */
+#define EVENT_LOG_TAG_ROAM_ENHANCED_LOG 339
+
+/* WL BTCEC */
+#define EVENT_LOG_TAG_BTCEC_ERR 340
+#define EVENT_LOG_TAG_BTCEC_INFO 341
+#define EVENT_LOG_TAG_BTCEC_SCHED 342
+
/* EVENT_LOG_TAG_MAX = Set to the same value of last tag, not last tag + 1 */
-#define EVENT_LOG_TAG_MAX 312
+#define EVENT_LOG_TAG_MAX 342
typedef enum wl_el_set_type_def {
EVENT_LOG_SET_TYPE_DEFAULT = 0, /* flush the log buffer when it is full - Default option */
/*
* Trace log blocks sent over HBUS
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Fundamental types and constants relating to FILS AUTHENTICATION
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* HND arm trap handling.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Console support for RTE - for host use only.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* HND Run Time Environment debug info area
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* HND generic packet pool operation primitives
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* HND generic pktq operation primitives
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* HND Trap handling.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* HND SiliconBackplane chipcommon support - OS independent.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* HND SiliconBackplane PMU support.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Utility routines for configuring different memories in Broadcom chips.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
--- /dev/null
+/*
+ * HND OOBR interface header
+ *
+ * Copyright (C) 1999-2019, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: hndoobr.h 772387 2018-07-17 00:58:05Z $
+ */
+
+#ifndef _hndoobr_h_
+#define _hndoobr_h_
+
+#include <typedefs.h>
+#include <siutils.h>
+
+/* for 'srcpidx' of hnd_oobr_get_intr_config() */
+#define HND_CORE_MAIN_INTR 0
+#define HND_CORE_ALT_INTR 1
+
+uint32 hnd_oobr_get_intstatus(si_t *sih);
+int hnd_oobr_get_intr_config(si_t *sih, uint srccidx, uint srcpidx, uint dstcidx, uint *dstpidx);
+int hnd_oobr_set_intr_src(si_t *sih, uint dstcidx, uint dstpidx, uint intrnum);
+void hnd_oobr_init(si_t *sih);
+
+#define OOBR_INVALID_PORT 0xFFu
+
+/* per core source/dest sel reg */
+#define OOBR_INTR_PER_CONFREG 4u /* 4 interrupts per configure reg */
+#define OOBR_INTR_NUM_MASK 0x7Fu
+#define OOBR_INTR_EN 0x80u
+/* per core config reg */
+#define OOBR_CORECNF_OUTPUT_MASK 0x0000FF00u
+#define OOBR_CORECNF_OUTPUT_SHIFT 8u
+#define OOBR_CORECNF_INPUT_MASK 0x00FF0000u
+#define OOBR_CORECNF_INPUT_SHIFT 16u
+
+typedef volatile struct hndoobr_percore_reg {
+ uint32 sourcesel[OOBR_INTR_PER_CONFREG]; /* 0x00 - 0x0c */
+ uint32 destsel[OOBR_INTR_PER_CONFREG]; /* 0x10 - 0x1c */
+ uint32 reserved[6];
+ uint32 config; /* 0x38 */
+ uint32 reserved1[17]; /* 0x3c to 0x7c */
+} hndoobr_percore_reg_t;
+
+/* capability reg */
+#define OOBR_CAP_CORECNT_MASK 0x1fu
+typedef volatile struct hndoobr_reg {
+ uint32 capability; /* 0x00 */
+ uint32 reserved[3];
+ uint32 intstatus[4]; /* 0x10 - 0x1c */
+ uint32 reserved1[56]; /* 0x20 - 0xfc */
+ hndoobr_percore_reg_t percore_reg[1]; /* 0x100 */
+} hndoobr_reg_t;
+
+#endif /* _hndoobr_h_ */
/*
* HND SiliconBackplane PMU support.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Broadcom HND chip & on-chip-interconnect-related definitions.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: hndsoc.h 672520 2016-11-28 23:30:55Z $
+ * $Id: hndsoc.h 795345 2018-12-18 16:52:03Z $
*/
#ifndef _HNDSOC_H
#define ARMCA7_CORE_ID 0x847 /* ARM CA7 CPU */
#define SYSMEM_CORE_ID 0x849 /* System memory core */
#define HUB_CORE_ID 0x84b /* Hub core ID */
+#define HND_OOBR_CORE_ID 0x85c /* Hnd oob router core ID */
#define APB_BRIDGE_CORE_ID 0x135 /* APB bridge core ID */
#define AXI_CORE_ID 0x301 /* AXI/GPV core ID */
#define EROM_CORE_ID 0x366 /* EROM core ID */
/*
* Linux OS Independent Layer
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: linux_osl.h 749612 2018-03-01 08:51:26Z $
+ * $Id: linux_osl.h 815919 2019-04-22 09:06:50Z $
*/
#ifndef _linux_osl_h_
extern void osl_set_bus_handle(osl_t *osh, void *bus_handle);
extern void* osl_get_bus_handle(osl_t *osh);
#ifdef DHD_MAP_LOGGING
-extern void osl_dma_map_dump(void);
+extern void osl_dma_map_dump(osl_t *osh);
+#define OSL_DMA_MAP_DUMP(osh) osl_dma_map_dump(osh)
+#else
+#define OSL_DMA_MAP_DUMP(osh) do {} while (0)
#endif /* DHD_MAP_LOGGING */
/* Global ASSERT type */
#include <linuxver.h> /* use current 2.4.x calling conventions */
#include <linux/kernel.h> /* for vsn/printf's */
#include <linux/string.h> /* for mem*, str* */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29)
extern uint64 osl_sysuptime_us(void);
#define OSL_SYSUPTIME() ((uint32)jiffies_to_msecs(jiffies))
#define OSL_SYSUPTIME_US() osl_sysuptime_us()
-#else
-#define OSL_SYSUPTIME() ((uint32)jiffies * (1000 / HZ))
-#error "OSL_SYSUPTIME_US() may need to be defined"
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29) */
+extern uint64 osl_localtime_ns(void);
+extern void osl_get_localtime(uint64 *sec, uint64 *usec);
+extern uint64 osl_systztime_us(void);
+#define OSL_LOCALTIME_NS() osl_localtime_ns()
+#define OSL_GET_LOCALTIME(sec, usec) osl_get_localtime((sec), (usec))
+#define OSL_SYSTZTIME_US() osl_systztime_us()
#define printf(fmt, args...) printk(fmt , ## args)
#include <linux/kernel.h> /* for vsn/printf's */
#include <linux/string.h> /* for mem*, str* */
#define PKTLIST_UNLINK(x, y) skb_unlink((struct sk_buff *)(y), (struct sk_buff_head *)(x))
#define PKTLIST_FINI(x) skb_queue_purge((struct sk_buff_head *)(x))
-#ifdef REPORT_FATAL_TIMEOUTS
+#ifndef _linuxver_h_
+typedef struct timer_list_compat timer_list_compat_t;
+#endif /* _linuxver_h_ */
typedef struct osl_timer {
- struct timer_list *timer;
+ timer_list_compat_t *timer;
bool set;
} osl_timer_t;
extern void osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic);
extern void osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic);
extern bool osl_timer_del(osl_t *osh, osl_timer_t *t);
-#endif
typedef atomic_t osl_atomic_t;
#define OSL_ATOMIC_SET(osh, v, x) atomic_set(v, x)
#define OSL_ATOMIC_READ(osh, v) atomic_read(v)
#define OSL_ATOMIC_ADD(osh, v, x) atomic_add(v, x)
+#ifndef atomic_set_mask
+#define OSL_ATOMIC_OR(osh, v, x) atomic_or(x, v)
+#define OSL_ATOMIC_AND(osh, v, x) atomic_and(x, v)
+#else
+#define OSL_ATOMIC_OR(osh, v, x) atomic_set_mask(x, v)
+#define OSL_ATOMIC_AND(osh, v, x) atomic_clear_mask(~x, v)
+#endif // endif
+
+#include <linux/rbtree.h>
+
+typedef struct rb_node osl_rb_node_t;
+typedef struct rb_root osl_rb_root_t;
+
+#define OSL_RB_ENTRY(ptr, type, member) rb_entry(ptr, type, member)
+#define OSL_RB_INSERT_COLOR(root, node) rb_insert_color(root, node)
+#define OSL_RB_ERASE(node, root) rb_erase(node, root)
+#define OSL_RB_FIRST(root) rb_first(root)
+#define OSL_RB_LAST(root) rb_last(root)
+#define OSL_RB_LINK_NODE(node, parent, rb_link) \
+ rb_link_node(node, parent, rb_link)
+
+extern void *osl_spin_lock_init(osl_t *osh);
+extern void osl_spin_lock_deinit(osl_t *osh, void *lock);
+extern unsigned long osl_spin_lock(void *lock);
+extern void osl_spin_unlock(void *lock, unsigned long flags);
+
+typedef struct osl_timespec {
+ __kernel_time_t tv_sec; /* seconds */
+ __kernel_suseconds_t tv_usec; /* microseconds */
+ long tv_nsec; /* nanoseconds */
+} osl_timespec_t;
+extern void osl_do_gettimeofday(struct osl_timespec *ts);
+extern void osl_get_monotonic_boottime(struct osl_timespec *ts);
#endif /* _linux_osl_h_ */
/*
* Linux Packet (skb) interface
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* Linux-specific abstractions to gain some independence from linux kernel versions.
* Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: linuxver.h 767291 2018-06-13 06:35:04Z $
+ * $Id: linuxver.h 806092 2019-02-21 08:19:13Z $
*/
#ifndef _linuxver_h_
#endif /* DMA mapping */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
+
+typedef struct timer_list timer_list_compat_t;
+
+#define init_timer_compat(timer_compat, cb, priv) \
+ init_timer(timer_compat); \
+ (timer_compat)->data = (ulong)priv; \
+ (timer_compat)->function = cb
+#define timer_set_private(timer_compat, priv) (timer_compat)->data = (ulong)priv
+#define timer_expires(timer_compat) (timer_compat)->expires
+
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */
+
+typedef struct timer_list_compat {
+ struct timer_list timer;
+ void *arg;
+ void (*callback)(ulong arg);
+} timer_list_compat_t;
+
+extern void timer_cb_compat(struct timer_list *tl);
+
+#define init_timer_compat(timer_compat, cb, priv) \
+ (timer_compat)->arg = priv; \
+ (timer_compat)->callback = cb; \
+ timer_setup(&(timer_compat)->timer, timer_cb_compat, 0);
+#define timer_set_private(timer_compat, priv) (timer_compat)->arg = priv
+#define timer_expires(timer_compat) (timer_compat)->timer.expires
+
+#define del_timer(t) del_timer(&((t)->timer))
+#define del_timer_sync(t) del_timer_sync(&((t)->timer))
+#define timer_pending(t) timer_pending(&((t)->timer))
+#define add_timer(t) add_timer(&((t)->timer))
+#define mod_timer(t, j) mod_timer(&((t)->timer), j)
+
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */
+
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))
#define dev_kfree_skb_any(a) dev_kfree_skb(a)
#define PROC_STOP(tsk_ctl) \
{ \
- uint timeout = msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
+ uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
(tsk_ctl)->terminated = TRUE; \
smp_wmb(); \
up(&((tsk_ctl)->sema)); \
DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \
(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
- timeout = wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \
+ timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \
if (timeout == 0) \
DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \
(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
#define PROC_STOP_USING_BINARY_SEMA(tsk_ctl) \
{ \
- uint timeout = msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
+ uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
(tsk_ctl)->terminated = TRUE; \
smp_wmb(); \
binary_sema_up(tsk_ctl); \
DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \
(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
- timeout = wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \
+ timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \
if (timeout == 0) \
DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \
(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
*/
#define PROC_FLUSH_USING_BINARY_SEMA(tsk_ctl) \
{ \
- uint timeout = msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
+ uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
(tsk_ctl)->flush_ind = TRUE; \
smp_wmb(); \
binary_sema_up(tsk_ctl); \
DBG_THR(("%s(): thread:%s:%lx wait for flush\n", __FUNCTION__, \
(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
- timeout = wait_for_completion_timeout(&((tsk_ctl)->flushed), timeout); \
+ timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->flushed), timeout); \
if (timeout == 0) \
DBG_THR(("%s(): thread:%s:%lx flush timeout\n", __FUNCTION__, \
(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
/* ----------------------- */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)
+/* send_sig declaration moved */
+#include <linux/sched/signal.h>
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) */
+
#define KILL_PROC(nr, sig) \
{ \
struct task_struct *tsk; \
#endif // endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
+#include <linux/fs.h>
static inline struct inode *file_inode(const struct file *f)
{
return f->f_dentry->d_inode;
}
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+#define vfs_write(fp, buf, len, pos) kernel_write(fp, buf, len, pos)
+#define vfs_read(fp, buf, len, pos) kernel_read(fp, buf, len, pos)
+int kernel_read_compat(struct file *file, loff_t offset, char *addr, unsigned long count);
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
+#define kernel_read_compat(file, offset, addr, count) kernel_read(file, offset, addr, count)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
+
#endif /* _linuxver_h_ */
/*
* Chip related low power flags
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Fundamental types and constants relating to WFA MBO
* (Multiband Operation)
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Command line options parser.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Common interface to MSF (multi-segment format) definitions.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Trace messages sent over HBUS
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* Fundamental types and constants relating to WFA NAN
* (Neighbor Awareness Networking)
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: nan.h 758133 2018-04-17 19:07:15Z $
+ * $Id: nan.h 818571 2019-05-08 04:36:41Z $
*/
#ifndef _NAN_H_
#define _NAN_H_
uint8 avail_bmp[1];
} BWL_POST_PACKED_STRUCT wifi_nan_ibss_attr_t;
+/* Country code attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_country_code_attr_s {
+ /* Attribute ID - 0x0B. */
+ uint8 id;
+ /* Length of the following fields in the attribute */
+ uint16 len;
+ /* Condensed Country String first two octets */
+ uint8 country_str[2];
+} BWL_POST_PACKED_STRUCT wifi_nan_country_code_attr_t;
+
/* Further Availability MAP attr */
typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_favail_attr_s {
/* Attribute ID - 0x0A. */
#define NAN_CHAN_NUM_ENTRIES_MASK 0xF0
typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_band_entry_s {
- uint8 band[0];
+ uint8 band[1];
} BWL_POST_PACKED_STRUCT wifi_nan_band_entry_t;
/* Type of Availability: committed */
NDL_ATTR_TYPE_STATUS_ACCEPTED)
#define NAN_NDL_REJECT(_ndl) (((_ndl)->type_status & NAN_NDL_STATUS_MASK) == \
NDL_ATTR_TYPE_STATUS_REJECTED)
+#define NAN_NDL_FRM_STATUS(_ndl) \
+ (((_ndl)->type_status & NAN_NDL_STATUS_MASK) >> NAN_NDL_STATUS_SHIFT)
#define NDL_ATTR_CTRL_NONE 0
#define NDL_ATTR_CTRL_PEER_ID_PRESENT (1 << NDL_ATTR_CTRL_PEER_ID_PRESENT_SHIFT)
/* Schedule Update */
#define NAN_MGMT_FRM_SUBTYPE_SCHED_UPD 13
+#define NAN_SCHEDULE_AF(_naf_subtype) \
+ ((_naf_subtype >= NAN_MGMT_FRM_SUBTYPE_SCHED_REQ) && \
+ (_naf_subtype <= NAN_MGMT_FRM_SUBTYPE_SCHED_UPD))
+
/* Reason code defines */
#define NAN_REASON_RESERVED 0x0
#define NAN_REASON_UNSPECIFIED 0x1
NAN_NDP_STATUS_ACCEPT)
#define NAN_NDP_REJECT(_ndp) (((_ndp)->type_status & NAN_NDP_STATUS_MASK) == \
NAN_NDP_STATUS_REJECT)
+
+#define NAN_NDP_FRM_STATUS(_ndp) \
+ (((_ndp)->type_status & NAN_NDP_STATUS_MASK) >> NAN_NDP_STATUS_SHIFT)
+
/* NDP Setup Status */
#define NAN_NDP_SETUP_STATUS_OK 1
#define NAN_NDP_SETUP_STATUS_FAIL 0
#define NAN_NDP_SETUP_STATUS_REJECT 2
+/* NDPE TLV list */
+#define NDPE_TLV_TYPE_IPV6 0x00
+#define NDPE_TLV_TYPE_SVC_INFO 0x01
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndpe_tlv_s {
+ uint8 type; /* TLV type: NDPE_TLV_TYPE_xxx */
+ uint16 length; /* length of the following data */
+ uint8 data[];
+} BWL_POST_PACKED_STRUCT wifi_nan_ndpe_tlv_t;
+
/* Rng setup attribute type and status macros */
#define NAN_RNG_TYPE_MASK 0x0F
#define NAN_RNG_TYPE_REQUEST 0x0
/*
* OS Abstraction Layer
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: osl.h 768071 2018-06-18 09:23:56Z $
+ * $Id: osl.h 813810 2019-04-08 12:25:30Z $
*/
#ifndef _osl_h_
#define OSL_SYSUPTIME_SUPPORT TRUE
#endif /* OSL_SYSUPTIME */
+#ifndef OSL_GET_LOCALTIME
+#define OSL_GET_LOCALTIME(sec, usec) \
+ do { \
+ BCM_REFERENCE(sec); \
+ BCM_REFERENCE(usec); \
+ } while (0)
+#endif /* OSL_GET_LOCALTIME */
+
+#ifndef OSL_LOCALTIME_NS
+#define OSL_LOCALTIME_NS() (OSL_SYSUPTIME_US() * NSEC_PER_USEC)
+#endif /* OSL_LOCALTIME_NS */
+
+#ifndef OSL_SYSTZTIME_US
+#define OSL_SYSTZTIME_US() OSL_SYSUPTIME_US()
+#endif /* OSL_GET_SYSTZTIME */
+
#ifndef OSL_SYS_HALT
#define OSL_SYS_HALT() do {} while (0)
#endif // endif
/*
* osl forward declarations
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* OS Abstraction Layer Extension - the APIs defined by the "extension" API
* are only supported by a subset of all operating systems.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Fundamental types and constants relating to WFA P2P (aka WiFi Direct)
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* #include <packed_section_end.h>
*
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: packed_section_end.h 666738 2016-10-24 12:16:37Z $
+ * $Id: packed_section_end.h 776894 2018-08-16 05:50:57Z $
*/
/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h
* #include <packed_section_end.h>
*
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: packed_section_start.h 666738 2016-10-24 12:16:37Z $
+ * $Id: packed_section_start.h 776894 2018-08-16 05:50:57Z $
*/
#ifndef _alignment_test_
/*
* pcicfg.h: PCI configuration constants and structures.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: pcicfg.h 756835 2018-04-10 21:24:10Z $
+ * $Id: pcicfg.h 795237 2018-12-18 03:26:49Z $
*/
#ifndef _h_pcicfg_
#define PCI_CAP_MSICAP_ID 0x05
#define PCI_CAP_VENDSPEC_ID 0x09
#define PCI_CAP_PCIECAP_ID 0x10
+#define PCI_CAP_MSIXCAP_ID 0x11
/* Data structure to define the Message Signalled Interrupt facility
* Valid for PCI and PCIE configurations
/* PCIE Extended configuration */
#define PCIE_ADV_CORR_ERR_MASK 0x114
+#define PCIE_ADV_CORR_ERR_MASK_OFFSET 0x14
#define CORR_ERR_RE (1 << 0) /* Receiver */
-#define CORR_ERR_BT (1 << 6) /* Bad TLP */
+#define CORR_ERR_BT (1 << 6) /* Bad TLP */
#define CORR_ERR_BD (1 << 7) /* Bad DLLP */
#define CORR_ERR_RR (1 << 8) /* REPLAY_NUM rollover */
#define CORR_ERR_RT (1 << 12) /* Reply timer timeout */
+#define CORR_ERR_AE (1 << 13) /* Advisory Non-Fatal Error Mask */
#define ALL_CORR_ERRORS (CORR_ERR_RE | CORR_ERR_BT | CORR_ERR_BD | \
CORR_ERR_RR | CORR_ERR_RT)
#define PCI_PHY_DBG_CLKREG_3 0x1e1c
/* Bit settings for PCIE_CFG_SUBSYSTEM_CONTROL register */
+#define PCIE_BAR1COHERENTACCEN_BIT 8
+#define PCIE_BAR2COHERENTACCEN_BIT 9
#define PCIE_SSRESET_STATUS_BIT 13
#define PCIE_SSRESET_DISABLE_BIT 14
#define PCIE_SSRESET_DIS_ENUM_RST_BIT 15
+#define PCIE_BARCOHERENTACCEN_MASK 0x300
+
/* Bit settings for PCI_UC_ERR_STATUS register */
#define PCI_UC_ERR_URES (1 << 20) /* Unsupported Request Error Status */
#define PCI_UC_ERR_ECRCS (1 << 19) /* ECRC Error Status */
/*
* BCM43XX PCIE core hardware definitions.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: pcie_core.h 770722 2018-07-04 11:24:12Z $
+ * $Id: pcie_core.h 792442 2018-12-05 00:20:53Z $
*/
#ifndef _PCIE_CORE_H
#define _PCIE_CORE_H
uint32 clk_ctl_st; /* 0xAE0 */
uint32 PAD[1]; /* 0xAE4 */
uint32 powerctl; /* 0xAE8 */
+ uint32 PAD[5]; /* 0xAEC-0xAFF */
} dar;
/* corerev > = 64 */
struct {
uint32 erraddr; /* 0xA64 */
uint32 mbox_int; /* 0xA68 */
uint32 fis_ctrl; /* 0xA6C */
+ uint32 PAD[36]; /* 0xA70-0xAFF */
} dar_64;
} u1;
+ uint32 PAD[64]; /* 0xB00-0xBFF */
+ /* Function Control/Status Registers for corerev >= 64 */
+ /* 0xC00 - 0xCFF */
+ struct {
+ uint32 control; /* 0xC00 */
+ uint32 iostatus; /* 0xC04 */
+ uint32 capability; /* 0xC08 */
+ uint32 PAD[1]; /* 0xC0C */
+ uint32 intstatus; /* 0xC10 */
+ uint32 intmask; /* 0xC14 */
+ uint32 pwr_intstatus; /* 0xC18 */
+ uint32 pwr_intmask; /* 0xC1C */
+ uint32 msi_vector; /* 0xC20 */
+ uint32 msi_intmask; /* 0xC24 */
+ uint32 msi_intstatus; /* 0xC28 */
+ uint32 msi_pend_cnt; /* 0xC2C */
+ uint32 mbox_intstatus; /* 0xC30 */
+ uint32 mbox_intmask; /* 0xC34 */
+ uint32 ltr_state; /* 0xC38 */
+ uint32 PAD[1]; /* 0xC3C */
+ uint32 intr_vector; /* 0xC40 */
+ uint32 intr_addrlow; /* 0xC44 */
+ uint32 intr_addrhigh; /* 0xC48 */
+ uint32 PAD[45]; /* 0xC4C-0xCFF */
+ } ftn_ctrl;
} sbpcieregs_t;
#define PCIE_CFG_DA_OFFSET 0x400 /* direct access register offset for configuration space */
#define PCIE_IDMA_MODE_EN(rev) (REV_GE_64(rev) ? 0x1 : 0x800000) /* implicit M2M DMA mode */
#define PCIE_TL_CLK_DETCT 0x4000000 /* enable TL clk detection */
+/* Function control (corerev > 64) */
+#define PCIE_CPLCA_ENABLE 0x01
+/* 1: send CPL with CA on BP error, 0: send CPLD with SC and data is FFFF */
+#define PCIE_DLY_PERST_TO_COE 0x02
+/* when set, PERST is held asserted until sprom-related register updates have completed */
+
#define PCIE_CFGADDR 0x120 /* offsetof(configaddr) */
#define PCIE_CFGDATA 0x124 /* offsetof(configdata) */
#define PCIE_SWPME_FN0 0x10000
#define PCIH2D_MailBox_2 0x160 /* for dma channel2 which will be used for Implicit DMA */
#define PCIH2D_DB1_2 0x164
#define PCID2H_MailBox_2 0x168
+#define PCIE_CLK_CTRL 0x1E0
#define PCIE_PWR_CTRL 0x1E8
-#define PCIE_CLK_CTRL 0x1E8
#define PCIControl(rev) (REV_GE_64(rev) ? 0xC00 : 0x00)
/* for corerev < 64 idma_en is in PCIControl regsiter */
#define PCIMailBoxInt(rev) (REV_GE_64(rev) ? 0xC30 : 0x48)
#define PCIMailBoxMask(rev) (REV_GE_64(rev) ? 0xC34 : 0x4C)
#define PCIFunctionIntstatus(rev) (REV_GE_64(rev) ? 0xC10 : 0x20)
+#define PCIFunctionIntmask(rev) (REV_GE_64(rev) ? 0xC14 : 0x24)
+#define PCIPowerIntstatus(rev) (REV_GE_64(rev) ? 0xC18 : 0x1A4)
+#define PCIPowerIntmask(rev) (REV_GE_64(rev) ? 0xC1C : 0x1A8)
#define PCIDARClkCtl(rev) (REV_GE_64(rev) ? 0xA08 : 0xAE0)
#define PCIDARPwrCtl(rev) (REV_GE_64(rev) ? 0xA0C : 0xAE8)
#define PCIDARFunctionIntstatus(rev) (REV_GE_64(rev) ? 0xA10 : 0xA20)
/*
* HND Run Time Environment ioctl.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer,
* GPIO interface, extbus, and support for serial and parallel flashes.
*
- * $Id: sbchipc.h 763883 2018-05-22 17:57:56Z $
+ * $Id: sbchipc.h 825481 2019-06-14 10:06:03Z $
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
#define PMU_PLL_CONTROL_DATA 0x664
#define CC_SROM_CTRL 0x190
+#define CC_SROM_ADDRESS 0x194u
+#define CC_SROM_DATA 0x198u
#ifdef SROM16K_4364_ADDRSPACE
#define CC_SROM_OTP 0xa000 /* SROM/OTP address space */
#else
#define PMU_4369_MACCORE_0_RES_REQ_MASK 0x3FCBF7FF
#define PMU_4369_MACCORE_1_RES_REQ_MASK 0x7FFB3647
-/* 4367 related */
-#define RES4367_ABUCK 0
-#define RES4367_CBUCK 1
-#define RES4367_MISCLDO_PU 2
-#define RES4367_VBOOST 3
-#define RES4367_LDO3P3_PU 4
-#define RES4367_LAST_LPO_AVAIL 5
-#define RES4367_XTAL_PU 6
-#define RES4367_XTAL_STABLE 7
-#define RES4367_PWRSW_DIG 8
-#define RES4367_SR_DIG 9
-#define RES4367_SPARE10 10
-#define RES4367_PWRSW_AUX 11
-#define RES4367_SR_AUX 12
-#define RES4367_SPARE2 13
-#define RES4367_PWRSW_MAIN 14
-#define RES4367_SR_MAIN 15
-#define RES4367_ARMPLL_PWRUP 16
-#define RES4367_DIG_CORE_RDY 17
-#define RES4367_CORE_RDY_AUX 18
-#define RES4367_ALP_AVAIL 19
-#define RES4367_RADIO_AUX_PU 20
-#define RES4367_MINIPMU_AUX_PU 21
-#define RES4367_CORE_RDY_MAIN 22
-#define RES4367_RADIO_MAIN_PU 23
-#define RES4367_MINIPMU_MAIN_PU 24
-#define RES4367_PCIE_RET 25
-#define RES4367_COLD_START_WAIT 26
-#define RES4367_ARMPLL_HTAVAIL 27
-#define RES4367_HT_AVAIL 28
-#define RES4367_MACPHY_AUX_CLK_AVAIL 29
-#define RES4367_MACPHY_MAIN_CLK_AVAIL 30
-#define RES4367_RESERVED_31 31
-
-#define CST4367_SPROM_PRESENT (1 << 17)
-
/* 43430 PMU resources based on pmu_params.xls */
#define RES43430_LPLDO_PU 0
#define RES43430_BG_PU 1
#define CR4_4369_RAM_BASE (0x170000)
#define CR4_4377_RAM_BASE (0x170000)
#define CR4_43751_RAM_BASE (0x170000)
+#define CR4_43752_RAM_BASE (0x170000)
#define CA7_4367_RAM_BASE (0x200000)
#define CR4_4378_RAM_BASE (0x352000)
#define SRPWR_DMN3_MACMAIN (3) /* MAC/Phy Main */
#define SRPWR_DMN3_MACMAIN_SHIFT (SRPWR_DMN3_MACMAIN) /* MAC/Phy Main */
#define SRPWR_DMN3_MACMAIN_MASK (1 << SRPWR_DMN3_MACMAIN_SHIFT) /* MAC/Phy Main */
-#define SRPWR_DMN_ALL_MASK (0xF)
+
+#define SRPWR_DMN4_MACSCAN (4) /* MAC/Phy Scan */
+#define SRPWR_DMN4_MACSCAN_SHIFT (SRPWR_DMN4_MACSCAN) /* MAC/Phy Scan */
+#define SRPWR_DMN4_MACSCAN_MASK (1 << SRPWR_DMN4_MACSCAN_SHIFT) /* MAC/Phy Scan */
+
+/* all power domain mask */
+#define SRPWR_DMN_ALL_MASK(sih) si_srpwr_domain_all_mask(sih)
#define SRPWR_REQON_SHIFT (8) /* PowerOnRequest[11:8] */
-#define SRPWR_REQON_MASK (SRPWR_DMN_ALL_MASK << SRPWR_REQON_SHIFT)
+#define SRPWR_REQON_MASK(sih) (SRPWR_DMN_ALL_MASK(sih) << SRPWR_REQON_SHIFT)
+
#define SRPWR_STATUS_SHIFT (16) /* ExtPwrStatus[19:16], RO */
-#define SRPWR_STATUS_MASK (SRPWR_DMN_ALL_MASK << SRPWR_STATUS_SHIFT)
-#define SRPWR_DMN_SHIFT (28) /* PowerDomain[31:28], RO */
-#define SRPWR_DMN_MASK (SRPWR_DMN_ALL_MASK << SRPWR_DMN_SHIFT)
+#define SRPWR_STATUS_MASK(sih) (SRPWR_DMN_ALL_MASK(sih) << SRPWR_STATUS_SHIFT)
+
+#define SRPWR_DMN_ID_SHIFT (28) /* PowerDomain[31:28], RO */
+#define SRPWR_DMN_ID_MASK (0xF)
/* PMU Precision Usec Timer */
#define PMU_PREC_USEC_TIMER_ENABLE 0x1
/*
* Broadcom SiliconBackplane hardware register definitions.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* SiliconBackplane GCI core hardware definitions
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
--- /dev/null
+/*
+ * Broadcom SiliconBackplane ARM definitions
+ *
+ * Copyright (C) 1999-2019, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sbhndarm.h 799498 2019-01-16 06:02:27Z $
+ */
+
+#ifndef _sbhndarm_h_
+#define _sbhndarm_h_
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
+
+/* cortex-m3 */
+typedef volatile struct {
+ uint32 corecontrol; /* 0x0 */
+ uint32 corestatus; /* 0x4 */
+ uint32 PAD[1];
+ uint32 biststatus; /* 0xc */
+ uint32 nmiisrst; /* 0x10 */
+ uint32 nmimask; /* 0x14 */
+ uint32 isrmask; /* 0x18 */
+ uint32 PAD[1];
+ uint32 resetlog; /* 0x20 */
+ uint32 gpioselect; /* 0x24 */
+ uint32 gpioenable; /* 0x28 */
+ uint32 PAD[1];
+ uint32 bpaddrlo; /* 0x30 */
+ uint32 bpaddrhi; /* 0x34 */
+ uint32 bpdata; /* 0x38 */
+ uint32 bpindaccess; /* 0x3c */
+ uint32 ovlidx; /* 0x40 */
+ uint32 ovlmatch; /* 0x44 */
+ uint32 ovladdr; /* 0x48 */
+ uint32 PAD[13];
+ uint32 bwalloc; /* 0x80 */
+ uint32 PAD[3];
+ uint32 cyclecnt; /* 0x90 */
+ uint32 inttimer; /* 0x94 */
+ uint32 intmask; /* 0x98 */
+ uint32 intstatus; /* 0x9c */
+ uint32 PAD[80];
+ uint32 clk_ctl_st; /* 0x1e0 */
+ uint32 PAD[1];
+ uint32 powerctl; /* 0x1e8 */
+} cm3regs_t;
+#define ARM_CM3_REG(regs, reg) (&((cm3regs_t *)regs)->reg)
+
+/* cortex-R4 */
+typedef volatile struct {
+ uint32 corecontrol; /* 0x0 */
+ uint32 corecapabilities; /* 0x4 */
+ uint32 corestatus; /* 0x8 */
+ uint32 biststatus; /* 0xc */
+ uint32 nmiisrst; /* 0x10 */
+ uint32 nmimask; /* 0x14 */
+ uint32 isrmask; /* 0x18 */
+ uint32 swintreg; /* 0x1C */
+ uint32 intstatus; /* 0x20 */
+ uint32 intmask; /* 0x24 */
+ uint32 cyclecnt; /* 0x28 */
+ uint32 inttimer; /* 0x2c */
+ uint32 gpioselect; /* 0x30 */
+ uint32 gpioenable; /* 0x34 */
+ uint32 PAD[2];
+ uint32 bankidx; /* 0x40 */
+ uint32 bankinfo; /* 0x44 */
+ uint32 bankstbyctl; /* 0x48 */
+ uint32 bankpda; /* 0x4c */
+ uint32 PAD[6];
+ uint32 tcampatchctrl; /* 0x68 */
+ uint32 tcampatchtblbaseaddr; /* 0x6c */
+ uint32 tcamcmdreg; /* 0x70 */
+ uint32 tcamdatareg; /* 0x74 */
+ uint32 tcambankxmaskreg; /* 0x78 */
+ uint32 PAD[89];
+ uint32 clk_ctl_st; /* 0x1e0 */
+ uint32 PAD[1];
+ uint32 powerctl; /* 0x1e8 */
+} cr4regs_t;
+#define ARM_CR4_REG(regs, reg) (&((cr4regs_t *)regs)->reg)
+
+/* cortex-A7 */
+typedef volatile struct {
+ uint32 corecontrol; /* 0x0 */
+ uint32 corecapabilities; /* 0x4 */
+ uint32 corestatus; /* 0x8 */
+ uint32 tracecontrol; /* 0xc */
+ uint32 PAD[8];
+ uint32 gpioselect; /* 0x30 */
+ uint32 gpioenable; /* 0x34 */
+ uint32 PAD[106];
+ uint32 clk_ctl_st; /* 0x1e0 */
+ uint32 PAD[1];
+ uint32 powerctl; /* 0x1e8 */
+} ca7regs_t;
+#define ARM_CA7_REG(regs, reg) (&((ca7regs_t *)regs)->reg)
+
+#if defined(__ARM_ARCH_7M__)
+#define ARMREG(regs, reg) ARM_CM3_REG(regs, reg)
+#endif /* __ARM_ARCH_7M__ */
+
+#if defined(__ARM_ARCH_7R__)
+#define ARMREG(regs, reg) ARM_CR4_REG(regs, reg)
+#endif /* __ARM_ARCH_7R__ */
+
+#if defined(__ARM_ARCH_7A__)
+#define ARMREG(regs, reg) ARM_CA7_REG(regs, reg)
+#endif /* __ARM_ARCH_7A__ */
+
+#endif /* _LANGUAGE_ASSEMBLY */
+
+#endif /* _sbhndarm_h_ */
* Generic Broadcom Home Networking Division (HND) DMA engine HW interface
* This supports the following chips: BCM42xx, 44xx, 47xx .
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* SDIO core support 1bit, 4 bit SDIO mode as well as SPI mode.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific
* device core support
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* BCM47XX Sonics SiliconBackplane embedded ram core
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* SiliconBackplane System Memory core
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* SDIO spec header file
* Protocol and standard (common) device definitions
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* SDIO Host Controller Spec header file
* Register map and definitions for the Standard Host Controller
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* Structure used by apps whose drivers access SDIO drivers.
* Pulled out separately so dhdu and wlu can both use it.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* SD-SPI Protocol Standard
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* Misc utility routines for accessing the SOC Interconnects
* of Broadcom HNBU chips.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: siutils.h 769534 2018-06-26 21:19:11Z $
+ * $Id: siutils.h 798061 2019-01-04 23:27:15Z $
*/
#ifndef _siutils_h_
#define ARMCR4_TCBANB_SHIFT 0
#define SICF_CPUHALT (0x0020)
-#define ARMCR4_BSZ_MASK 0x3f
-#define ARMCR4_BSZ_MULT 8192
+#define ARMCR4_BSZ_MASK 0x7f
+#define ARMCR4_BUNITSZ_MASK 0x200
+#define ARMCR4_BSZ_8K 8192
+#define ARMCR4_BSZ_1K 1024
#define SI_BPIND_1BYTE 0x1
#define SI_BPIND_2BYTE 0x3
#define SI_BPIND_4BYTE 0xF
extern uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit);
extern volatile void *si_setcoreidx(si_t *sih, uint coreidx);
extern volatile void *si_setcore(si_t *sih, uint coreid, uint coreunit);
+extern uint32 si_oobr_baseaddr(si_t *sih, bool second);
extern volatile void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val);
extern void si_restore_core(si_t *sih, uint coreid, uint intr_val);
extern int si_numaddrspaces(si_t *sih);
extern uint32 si_srpwr_stat_spinwait(si_t *sih, uint32 mask, uint32 val);
extern uint32 si_srpwr_stat(si_t *sih);
extern uint32 si_srpwr_domain(si_t *sih);
+extern uint32 si_srpwr_domain_all_mask(si_t *sih);
/* SR Power Control */
/* No capabilities bit so using chipid for now */
* ARM, TCM, Main, Aux
* Host needs to power up
*/
+#define MULTIBP_CAP(sih) (FALSE)
#define MULTIBP_ENAB(sih) ((sih) && (sih)->_multibp_enable)
uint32 si_enum_base(uint devid);
void si_clrirq_idx(si_t *sih, uint core_idx);
+/* return if scan core is present */
+bool si_scan_core_present(si_t *sih);
+
#endif /* _siutils_h_ */
/*
* SPI device spec header file
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* TRX image file header format.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* 802.1Q VLAN protocol definitions
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* Definitions subject to change without notice.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wlioctl.h 771856 2018-07-12 05:11:56Z $
+ * $Id: wlioctl.h 824900 2019-06-12 05:42:13Z $
*/
#ifndef _wlioctl_h_
#include <bcm_mpool_pub.h>
#include <bcmcdc.h>
+#define SSSR_NEW_API
+
+/* Include bcmerror.h for error codes or aliases */
+#ifdef BCMUTILS_ERR_CODES
+#include <bcmerror.h>
+#endif /* BCMUTILS_ERR_CODES */
/* NOTE re: Module specific error codes.
*
*
* The error codes -4096 ... -5119 are reserved for firmware signing.
*
- * Next available (inclusive) range: [-7*1024 + 1, -6*1024]
+ * Next available (inclusive) range: [-8*1024 + 1, -7*1024]
*
* End Note
*/
uint8 bsscolor; /**<bsscolor value from 0 to 63 */
uint8 partial_bsscolor_ind;
uint8 disable_bsscolor_ind; /**< To disable particular bsscolor */
+ /* bsscolor_disable to be added as part of D1.0 */
uint16 staid_info[HE_MAX_STAID_PER_BSSCOLOR]; /**< 0-3 staid info of each bsscolor */
} wl_bsscolor_info_t;
#define WLC_EXTDSCAN_MAX_SSID 5
typedef struct wl_extdscan_params {
- int8 nprobes; /**< 0, passive, otherwise active */
- int8 split_scan; /**< split scan */
+ int8 nprobes; /**< 0, passive, otherwise active */
+ int8 split_scan; /**< split scan */
int8 band; /**< band */
int8 pad;
- wlc_ssid_t ssid[WLC_EXTDSCAN_MAX_SSID]; /**< ssid list */
+ wlc_ssid_t ssid[WLC_EXTDSCAN_MAX_SSID]; /**< ssid list */
uint32 tx_rate; /**< in 500ksec units */
wl_scan_type_t scan_type; /**< enum */
- int32 channel_num;
+ int32 channel_num;
chan_scandata_t channel_list[1]; /**< list of chandata structs */
} wl_extdscan_params_t;
-#define WL_EXTDSCAN_PARAMS_FIXED_SIZE (sizeof(wl_extdscan_params_t) - sizeof(chan_scandata_t))
+#define WL_EXTDSCAN_PARAMS_FIXED_SIZE (sizeof(wl_extdscan_params_t) - sizeof(chan_scandata_t))
-#define WL_SCAN_PARAMS_SSID_MAX 10
+#define WL_SCAN_PARAMS_SSID_MAX 10
-typedef struct wl_scan_params {
+struct wl_scan_params {
wlc_ssid_t ssid; /**< default: {0, ""} */
struct ether_addr bssid; /**< default: bcast */
int8 bss_type; /**< default: any,
* the fixed portion is ignored
*/
uint16 channel_list[1]; /**< list of chanspecs */
-} wl_scan_params_t;
+};
+
+/* changes in wl_scan_params_v2 as compared to wl_scan_params (v1)
+* uint8 scantype to uint32
+*/
+typedef struct wl_scan_params_v2 {
+ uint16 version; /* Version of wl_scan_params, change value of
+ * WL_SCAN_PARAM_VERSION on version update
+ */
+	uint16 length;		/* length of structure wl_scan_params_v2_t
+					 * without implicit pad
+					 */
+ wlc_ssid_t ssid; /**< default: {0, ""} */
+ struct ether_addr bssid; /**< default: bcast */
+ int8 bss_type; /**< default: any,
+ * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
+ */
+ uint8 PAD;
+ uint32 scan_type; /**< flags, 0 use default, and flags specified in
+ * WL_SCANFLAGS_XXX
+ */
+ int32 nprobes; /**< -1 use default, number of probes per channel */
+ int32 active_time; /**< -1 use default, dwell time per channel for
+ * active scanning
+ */
+ int32 passive_time; /**< -1 use default, dwell time per channel
+ * for passive scanning
+ */
+ int32 home_time; /**< -1 use default, dwell time for the home channel
+ * between channel scans
+ */
+ int32 channel_num; /**< count of channels and ssids that follow
+ *
+ * low half is count of channels in channel_list, 0
+ * means default (use all available channels)
+ *
+ * high half is entries in wlc_ssid_t array that
+ * follows channel_list, aligned for int32 (4 bytes)
+ * meaning an odd channel count implies a 2-byte pad
+ * between end of channel_list and first ssid
+ *
+ * if ssid count is zero, single ssid in the fixed
+ * parameter portion is assumed, otherwise ssid in
+ * the fixed portion is ignored
+ */
+ uint16 channel_list[1]; /**< list of chanspecs */
+} wl_scan_params_v2_t;
+
+#define WL_SCAN_PARAMS_VERSION_V2 2
/** size of wl_scan_params not including variable length array */
-#define WL_SCAN_PARAMS_FIXED_SIZE 64
-#define WL_MAX_ROAMSCAN_DATSZ (WL_SCAN_PARAMS_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(uint16)))
+#define WL_SCAN_PARAMS_V2_FIXED_SIZE (OFFSETOF(wl_scan_params_v2_t, channel_list))
+#define WL_MAX_ROAMSCAN_DATSZ \
+ (WL_SCAN_PARAMS_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(uint16)))
+#define WL_MAX_ROAMSCAN_V2_DATSZ \
+ (WL_SCAN_PARAMS_V2_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(uint16)))
#define ISCAN_REQ_VERSION 1
+#define ISCAN_REQ_VERSION_V2 2
+
+/** incremental scan struct */
+struct wl_iscan_params {
+ uint32 version;
+ uint16 action;
+ uint16 scan_duration;
+ struct wl_scan_params params;
+};
/** incremental scan struct */
-typedef struct wl_iscan_params {
+typedef struct wl_iscan_params_v2 {
uint32 version;
uint16 action;
uint16 scan_duration;
- wl_scan_params_t params;
-} wl_iscan_params_t;
+ wl_scan_params_v2_t params;
+} wl_iscan_params_v2_t;
/** 3 fields + size of wl_scan_params, not including variable length array */
-#define WL_ISCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_iscan_params_t, params) + sizeof(wlc_ssid_t))
+#define WL_ISCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_iscan_params_t, params) + sizeof(wlc_ssid_t))
+#define WL_ISCAN_PARAMS_V2_FIXED_SIZE \
+ (OFFSETOF(wl_iscan_params_v2_t, params) + sizeof(wlc_ssid_t))
typedef struct wl_scan_results {
uint32 buflen;
} iscan_buf_t;
#endif /* SIMPLE_ISCAN */
#define ESCAN_REQ_VERSION 1
+#define ESCAN_REQ_VERSION_V2 2
/** event scan reduces amount of SOC memory needed to store scan results */
-typedef struct wl_escan_params {
+struct wl_escan_params {
+ uint32 version;
+ uint16 action;
+ uint16 sync_id;
+ struct wl_scan_params params;
+};
+
+typedef struct wl_escan_params_v2 {
uint32 version;
uint16 action;
uint16 sync_id;
- wl_scan_params_t params;
-} wl_escan_params_t;
+ wl_scan_params_v2_t params;
+} wl_escan_params_v2_t;
#define WL_ESCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_escan_params_t, params) + sizeof(wlc_ssid_t))
+#define WL_ESCAN_PARAMS_V2_FIXED_SIZE (OFFSETOF(wl_escan_params_v2_t, params) + sizeof(wlc_ssid_t))
+
+/* New scan version is defined then change old version of scan to
+ * wl_scan_params_v1_t and new one to wl_scan_params_t
+ */
+#ifdef WL_SCAN_PARAMS_V2
+typedef struct wl_scan_params wl_scan_params_v1_t;
+typedef struct wl_escan_params wl_escan_params_v1_t;
+typedef struct wl_iscan_params wl_iscan_params_v1_t;
+typedef struct wl_scan_params_v2 wl_scan_params_t;
+typedef struct wl_escan_params_v2 wl_escan_params_t;
+typedef struct wl_iscan_params_v2 wl_iscan_params_t;
+#define WL_SCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_scan_params_t, channel_list))
+#else
+typedef struct wl_scan_params wl_scan_params_t;
+typedef struct wl_escan_params wl_escan_params_t;
+typedef struct wl_iscan_params wl_iscan_params_t;
+#define WL_SCAN_PARAMS_FIXED_SIZE 64
+#endif // endif
/** event scan reduces amount of SOC memory needed to store scan results */
typedef struct wl_escan_result {
#define RATESET_ARGS_V1 (1)
#define RATESET_ARGS_V2 (2)
+/* RATESET_VERSION_ENABLED is defined in wl.mk post J branch.
+ * Guidelines to use wl_rateset_args_t:
+ * [a] in wlioctl.h: Add macro RATESET_ARGS_VX where X is the new version number.
+ * [b] in wlioctl.h: Add a new structure with wl_rateset_args_vX_t
+ * [c] in wlu.c app: Add support to parse new structure under RATESET_ARGS_VX
+ * [d] in wlc_types.h: in respective branch and trunk: redefine wl_rateset_args_t with
+ * new wl_rateset_args_vX_t
+ */
#ifndef RATESET_VERSION_ENABLED
/* rateset structure before versioning. legacy. DONOT update anymore here */
#define RATESET_ARGS_VERSION (RATESET_ARGS_V1)
} wlc_antselcfg_t;
typedef struct {
- uint32 duration; /**< millisecs spent sampling this channel */
- uint32 congest_ibss; /**< millisecs in our bss (presumably this traffic will */
- /**< move if cur bss moves channels) */
- uint32 congest_obss; /**< traffic not in our bss */
- uint32 interference; /**< millisecs detecting a non 802.11 interferer. */
- uint32 timestamp; /**< second timestamp */
+ uint32 duration; /**< millisecs spent sampling this channel */
+ union {
+ uint32 congest_ibss; /**< millisecs in our bss (presumably this traffic will */
+ /**< move if cur bss moves channels) */
+ uint32 congest_me; /**< millisecs in my own traffic */
+ };
+ union {
+ uint32 congest_obss; /**< traffic not in our bss */
+ uint32 congest_notme; /**< traffic not from/to me (including bc/mc) */
+ };
+ uint32 interference; /**< millisecs detecting a non 802.11 interferer. */
+ uint32 timestamp; /**< second timestamp */
} cca_congest_t;
typedef struct {
- chanspec_t chanspec; /**< Which channel? */
- uint16 num_secs; /**< How many secs worth of data */
- cca_congest_t secs[1]; /**< Data */
+ chanspec_t chanspec; /**< Which channel? */
+ uint16 num_secs; /**< How many secs worth of data */
+ cca_congest_t secs[1]; /**< Data */
} cca_congest_channel_req_t;
+
+typedef struct {
+ uint32 timestamp; /**< second timestamp */
+
+ /* Base structure of cca_congest_t: CCA statistics all inclusive */
+ uint32 duration; /**< millisecs spent sampling this channel */
+ uint32 congest_meonly; /**< millisecs in my own traffic (TX + RX) */
+ uint32 congest_ibss; /**< millisecs in our bss (presumably this traffic will */
+ /**< move if cur bss moves channels) */
+ uint32 congest_obss; /**< traffic not in our bss */
+ uint32 interference; /**< millisecs detecting a non 802.11 interferer. */
+
+ /* CCA statistics for non PM only */
+ uint32 duration_nopm; /**< millisecs spent sampling this channel */
+ uint32 congest_meonly_nopm; /**< millisecs in my own traffic (TX + RX) */
+ uint32 congest_ibss_nopm; /**< millisecs in our bss (presumably this traffic will */
+ /**< move if cur bss moves channels) */
+ uint32 congest_obss_nopm; /**< traffic not in our bss */
+ uint32 interference_nopm; /**< millisecs detecting a non 802.11 interferer. */
+
+ /* CCA statistics for during PM only */
+ uint32 duration_pm; /**< millisecs spent sampling this channel */
+ uint32 congest_meonly_pm; /**< millisecs in my own traffic (TX + RX) */
+ uint32 congest_ibss_pm; /**< millisecs in our bss (presumably this traffic will */
+ /**< move if cur bss moves channels) */
+ uint32 congest_obss_pm; /**< traffic not in our bss */
+ uint32 interference_pm; /**< millisecs detecting a non 802.11 interferer. */
+} cca_congest_ext_t;
+
+#define WL_CCA_EXT_REQ_VER 0
+typedef struct {
+ uint16 ver; /**< version of this struct */
+ uint16 len; /**< len of this structure */
+ chanspec_t chanspec; /**< Which channel? */
+ uint16 num_secs; /**< How many secs worth of data */
+ cca_congest_ext_t secs[1]; /**< Data - 3 sets for ALL - non-PM - PM */
+} cca_congest_ext_channel_req_t;
+
typedef struct {
uint32 duration; /**< millisecs spent sampling this channel */
uint32 congest; /**< millisecs detecting busy CCA */
uint32 timestamp; /**< second timestamp */
} cca_congest_simple_t;
+/* The following two structure must have same first 4 fields.
+ * The cca_chan_qual_event_t is used to report CCA in older formats and NF.
+ * The cca_only_chan_qual_event_t is used to report CCA only with newer format.
+ */
typedef struct {
uint16 status;
uint16 id;
- chanspec_t chanspec; /**< Which channel? */
+ chanspec_t chanspec; /**< Which channel? */
uint16 len;
union {
- cca_congest_simple_t cca_busy; /**< CCA busy */
- cca_congest_t cca_busy_ext; /**< Extended CCA report */
- int32 noise; /**< noise floor */
+ cca_congest_simple_t cca_busy; /**< CCA busy */
+ cca_congest_t cca_busy_ext; /**< Extended CCA report */
+ int32 noise; /**< noise floor */
};
} cca_chan_qual_event_t;
+typedef struct {
+ uint16 status;
+ uint16 id;
+ chanspec_t chanspec; /**< Which channel? */
+ uint16 len;
+ union {
+ cca_congest_simple_t cca_busy; /**< CCA busy */
+ struct {
+ cca_congest_t cca_busy_ext; /**< Extended CCA report */
+ cca_congest_t cca_busy_nopm; /**< Extended CCA report (PM awake time) */
+ cca_congest_t cca_busy_pm; /**< Extended CCA report (PM sleep time) */
+ };
+ };
+} cca_only_chan_qual_event_t;
+
typedef struct {
uint32 msrmnt_time; /**< Time for Measurement (msec) */
uint32 msrmnt_done; /**< flag set when measurement complete */
wl_leap_info_t leap_info[1];
} wl_leap_list_t;
#endif /* BCMCCX */
+
typedef enum sup_auth_status {
/* Basic supplicant authentication states */
WLC_SUP_DISCONNECTED = 0,
uint16 PAD;
} wl_wsec_key_t;
+/* Min length for PSK passphrase */
#define WSEC_MIN_PSK_LEN 8
+/* Max length of supported passphrases for PSK */
#define WSEC_MAX_PSK_LEN 64
+/* Max length of supported passphrases for SAE */
+#define WSEC_MAX_PASSPHRASE_LEN 256u
-/** Flag for key material needing passhash'ing */
-#define WSEC_PASSPHRASE (1<<0)
+/* Flag for key material needing passhash'ing */
+#define WSEC_PASSPHRASE 1u
+/* Flag indicating an SAE passphrase */
+#define WSEC_SAE_PASSPHRASE 2u
/**receptacle for WLC_SET_WSEC_PMK parameter */
typedef struct wsec_pmk {
- ushort key_len; /**< octets in key material */
- ushort flags; /**< key handling qualification */
- uint8 key[WSEC_MAX_PSK_LEN]; /**< PMK material */
+ ushort key_len; /* octets in key material */
+ ushort flags; /* key handling qualification */
+ uint8 key[WSEC_MAX_PASSPHRASE_LEN]; /* PMK material */
} wsec_pmk_t;
#define WL_AUTH_EVENT_DATA_V1 0x1
#define WL_AUTH_EVENT_FIXED_LEN_V1 OFFSETOF(wl_auth_event_t, xtlvs)
-#define FILS_CACHE_ID_LEN 2
-#define PMK_LEN_MAX 48
+#define WL_PMKSA_EVENT_DATA_V1 1u
+
+/* tlv ids for PMKSA event */
+#define WL_PMK_TLV_ID 1u
+#define WL_PMKID_TLV_ID 2u
+#define WL_PEER_ADDR_TLV_ID 3u
+
+/* PMKSA event data structure */
+typedef struct wl_pmksa_event {
+ uint16 version;
+ uint16 length;
+ uint8 xtlvs[];
+} wl_pmksa_event_t;
+
+#define WL_PMKSA_EVENT_FIXED_LEN_V1 OFFSETOF(wl_pmksa_event_t, xtlvs)
+
+#define FILS_CACHE_ID_LEN 2u
+#define PMK_LEN_MAX 48u
typedef struct _pmkid_v1 {
struct ether_addr BSSID;
} pmkid_v2_t;
#define PMKID_LIST_VER_2 2
+
+typedef struct _pmkid_v3 {
+ struct ether_addr bssid;
+ uint8 pmkid[WPA2_PMKID_LEN];
+ uint8 pmkid_len;
+ uint8 pmk[PMK_LEN_MAX];
+ uint8 pmk_len;
+ uint16 fils_cache_id; /* 2-byte length */
+ uint8 pad;
+ uint8 ssid_len;
+ uint8 ssid[DOT11_MAX_SSID_LEN]; /* For FILS, to save ESSID */
+ /* one pmkid used in whole ESS */
+ uint32 time_left; /* remaining time until expiry in sec. */
+ /* 0 means expired, all 0xFF means never expire */
+} pmkid_v3_t;
+
+#define PMKID_LIST_VER_3 3
typedef struct _pmkid_list_v1 {
uint32 npmkid;
pmkid_v1_t pmkid[1];
pmkid_v2_t pmkid[1];
} pmkid_list_v2_t;
+typedef struct _pmkid_list_v3 {
+ uint16 version;
+ uint16 length;
+ uint16 count;
+ uint16 pad;
+ pmkid_v3_t pmkid[];
+} pmkid_list_v3_t;
+
#ifndef PMKID_VERSION_ENABLED
/* pmkid structure before versioning. legacy. DONOT update anymore here */
typedef pmkid_v1_t pmkid_t;
uint8 PAD;
} wl_pm_mute_tx_t;
+/*
+ * Please update the following when modifying this structure:
+ * StaInfo Twiki page flags section - description of the sta_info_t struct
+ * src/wl/exe/wlu.c - print of sta_info_t
+ * Pay attention to version if structure changes.
+ */
+
/* sta_info_t version 4 */
typedef struct {
uint16 ver; /**< version of this struct */
wl_rateset_args_v1_t rateset_adv; /* rateset along with mcs index bitmap */
} sta_info_v5_t;
+/*
+ * Please update the following when modifying this structure:
+ * StaInfo Twiki page flags section - description of the sta_info_t struct
+ * src/wl/exe/wlu.c - print of sta_info_t
+ * Pay attention to version if structure changes.
+ */
+
/* sta_info_t version 6
changes to wl_rateset_args_t is leading to update this struct version as well.
*/
wl_rateset_args_v2_t rateset_adv; /* rateset along with mcs index bitmap */
} sta_info_v6_t;
+/* define to help support one version older sta_info_t from user level
+ * applications.
+ */
#define WL_OLD_STAINFO_SIZE OFFSETOF(sta_info_t, tx_tot_pkts)
#define WL_STA_VER_4 4
uint8 num_pkts; /**< Number of packet entries to be averaged */
} wl_mac_ratehisto_cmd_t;
/** Get MAC rate histogram response */
+/* deprecated after JAGUAR branch */
typedef struct {
uint32 rate[DOT11_RATE_MAX + 1]; /**< Rates */
uint32 mcs[WL_RATESET_SZ_HT_IOCTL * WL_TX_CHAINS_MAX]; /**< MCS counts */
#define WL_NUM_RATES_VHT_ALL (WL_NUM_RATES_VHT + WL_NUM_RATES_EXTRA_VHT)
#define WL_NUM_RATES_HE 12
#define WL_NUM_RATES_MCS32 1
-#define UC_URL_LEN 128u /**< uCode URL length */
+#define UC_PATH_LEN 128u /**< uCode path length */
/*
* Structure for passing hardware and software
uint32 drvrev_rc_inc; /**< driver version: rc incremental */
uint16 ucodeprebuilt; /**< uCode prebuilt flag */
uint16 ucodediffct; /**< uCode diff count */
- uchar ucodeurl[UC_URL_LEN]; /**< uCode repo URL@cmt_id */
+ uchar ucodeurl[128u]; /* obsolete, kept for ROM compatibility */
+ uchar ucodepath[UC_PATH_LEN]; /**< uCode URL or path */
} wlc_rev_info_t;
#define WL_REV_INFO_LEGACY_LENGTH 48
} wl_aci_args_t;
#define WL_ACI_ARGS_LEGACY_LENGTH 16 /**< bytes of pre NPHY aci args */
+
+#define WL_MACFIFO_PLAY_ARGS_T_VERSION 1u /* version of wl_macfifo_play_args_t struct */
+
+enum wl_macfifo_play_flags {
+ WL_MACFIFO_PLAY_STOP = 0x00u, /* stop playing samples */
+ WL_MACFIFO_PLAY_START = 0x01u, /* start playing samples */
+ WL_MACFIFO_PLAY_LOAD = 0x02u, /* for set: load samples
+ for get: samples are loaded
+ */
+ WL_MACFIFO_PLAY_GET_MAX_SIZE = 0x10u, /* get the macfifo buffer size */
+ WL_MACFIFO_PLAY_GET_STATUS = 0x20u, /* get macfifo play status */
+};
+
+typedef struct wl_macfifo_play_args {
+ uint16 version; /* structure version */
+ uint16 len; /* size of structure */
+ uint16 flags;
+ uint8 PAD[2];
+ uint32 data_len; /* data length */
+} wl_macfifo_play_args_t;
+
+#define WL_MACFIFO_PLAY_DATA_T_VERSION 1u /* version of wl_macfifo_play_data_t struct */
+
+typedef struct wl_macfifo_play_data {
+ uint16 version; /* structure version */
+ uint16 len; /* size of structure */
+ uint32 data_len; /* data length */
+} wl_macfifo_play_data_t;
+
#define WL_SAMPLECOLLECT_T_VERSION 2 /**< version of wl_samplecollect_args_t struct */
typedef struct wl_samplecollect_args {
/* version 0 fields */
int8 snr_ant[WL_RSSI_ANT_MAX]; /* snr per antenna */
} wl_snr_ant_t;
+/* Weighted average support */
+#define WL_WA_VER 0 /* Initial version - Basic WA algorithm only */
+
+#define WL_WA_ALGO_BASIC 0 /* Basic weighted average algorithm (all 4 metrics) */
+#define WL_WA_TYPE_RSSI 0
+#define WL_WA_TYPE_SNR 1
+#define WL_WA_TYPE_TXRATE 2
+#define WL_WA_TYPE_RXRATE 3
+#define WL_WA_TYPE_MAX 4
+
+typedef struct { /* payload of subcmd in xtlv */
+ uint8 id;
+ uint8 n_total; /* Total number of samples (n_total >= n_recent) */
+ uint8 n_recent; /* Number of samples denoted as recent */
+ uint8 w_recent; /* Total weight for the recent samples (as percentage) */
+} wl_wa_basic_params_t;
+
+typedef struct {
+ uint16 ver;
+ uint16 len;
+ uint8 subcmd[]; /* sub-cmd in bcm_xtlv_t */
+} wl_wa_cmd_t;
+
/** data structure used in 'dfs_status' wl interface, which is used to query dfs status */
typedef struct {
uint32 state; /**< noted by WL_DFS_CACSTATE_XX. */
#define WL_TXPPR_VERSION 1
#define WL_TXPPR_LENGTH (sizeof(wl_txppr_t))
#define TX_POWER_T_VERSION 45
-/** number of ppr serialization buffers, it should be reg, board and target */
-#define WL_TXPPR_SER_BUF_NUM (3)
+
/* curpower ppr types */
enum {
PPRTYPE_TARGETPOWER = 1,
PPRTYPE_BOARDLIMITS = 2,
- PPRTYPE_REGLIMITS = 3
+ PPRTYPE_REGLIMITS = 3,
+ PPRTYPE_RU_REGLIMITS = 4,
+ PPRTYPE_RU_BOARDLIMITS = 5,
+ PPRTYPE_RU_TARGETPOWER = 6,
+ PPRTYPE_LAST
};
+/** number of ppr serialization buffers, it should be reg, board and target */
+#define WL_TXPPR_SER_BUF_NUM (PPRTYPE_LAST - 1)
+
typedef struct chanspec_txpwr_max {
chanspec_t chanspec; /**< chanspec */
uint8 txpwr_max; /**< max txpwr in all the rates */
#define WL_MIMO_PS_STATUS_HW_STATE_NONE 0
#define WL_MIMO_PS_STATUS_HW_STATE_LTECOEX (0x1 << 0)
#define WL_MIMO_PS_STATUS_HW_STATE_MIMOPS_BSS (0x1 << 1)
-#define WL_MIMO_PS_STATUS_HW_STATE_AWDL_BSS (0x1 << 2)
#define WL_MIMO_PS_STATUS_HW_STATE_SCAN (0x1 << 3)
#define WL_MIMO_PS_STATUS_HW_STATE_TXPPR (0x1 << 4)
#define WL_MIMO_PS_STATUS_HW_STATE_PWRTHOTTLE (0x1 << 5)
/* Bits for disable_reasons */
#define WL_PSBW_DISA_HOST 0x00000001 /* Host has disabled through psbw_cfg */
#define WL_PSBW_DISA_AP20M 0x00000002 /* AP is operating on 20 MHz */
-#define WL_PSBW_DISA_SLOTTED_BSS 0x00000004 /* AWDL or NAN active */
+#define WL_PSBW_DISA_SLOTTED_BSS 0x00000004 /* slot_bss active */
#define WL_PSBW_DISA_NOT_PMFAST 0x00000008 /* Not PM_FAST */
#define WL_PSBW_DISA_BASICRATESET 0x00000010 /* BasicRateSet is empty */
#define WL_PSBW_DISA_NOT_D3 0x00000020 /* PCIe not in D3 */
#define NFIFO 6 /**< # tx/rx fifopairs */
+#ifndef NFIFO_EXT
#if defined(BCM_AQM_DMA_DESC) && !defined(BCM_AQM_DMA_DESC_DISABLED)
-#if defined(WL_MU_TX) && !defined(WL_MU_TX_DISABLED)
-#define NFIFO_EXT 32 /* 6 traditional FIFOs + 2 rsvd + 24 MU FIFOs */
-#else
#define NFIFO_EXT 10 /* 4EDCA + 4 TWT + 1 Mcast/Bcast + 1 Spare */
-#endif // endif
#elif defined(WL11AX_TRIGGERQ) && !defined(WL11AX_TRIGGERQ_DISABLED)
#define NFIFO_EXT 10
#else
#define NFIFO_EXT NFIFO
#endif /* BCM_AQM_DMA_DESC && !BCM_AQM_DMA_DESC_DISABLED */
+#endif /* NFIFO_EXT */
+/* When new reason codes are added to list, Please update wl_reinit_names also */
/* Reinit reason codes */
enum {
WL_REINIT_RC_NONE = 0,
WL_REINIT_RC_LAST /* This must be the last entry */
};
+#define WL_REINIT_RC_INVALID 255
+
#define NREINITREASONCOUNT 8
+/* NREINITREASONCOUNT is 8 in other branches.
+ * Any change to this will break wl tool compatibility with other branches
+ * #define NREINITREASONCOUNT WL_REINIT_RC_LAST
+ */
#define REINITRSNIDX(_x) (((_x) < WL_REINIT_RC_LAST) ? (_x) : 0)
/* First two uint16 are version and lenght fields. So offset of the first counter will be 4 */
#define FIRST_COUNTER_OFFSET 0x04
+/* need for now due to src/wl/ndis automerged to other branches. e.g. BISON */
#define WLC_WITH_XTLV_CNT
/* Number of xtlv info as required to calculate subcounter offsets */
-#define WL_CNT_XTLV_ID_NUM 10
+#define WL_CNT_XTLV_ID_NUM 12
#define WL_TLV_IOV_VER 1
/**
WL_CNT_XTLV_WLC_RINIT_RSN = 0x101, /**< WLC layer reinitreason extension */
WL_CNT_XTLV_WLC_HE = 0x102, /* he counters */
WL_CNT_XTLV_WLC_SECVLN = 0x103, /* security vulnerabilities counters */
+ WL_CNT_XTLV_WLC_HE_OMI = 0x104, /* he omi counters */
WL_CNT_XTLV_CNTV_LE10_UCODE = 0x200, /**< wl counter ver < 11 UCODE MACSTAT */
WL_CNT_XTLV_LT40_UCODE_V1 = 0x300, /**< corerev < 40 UCODE MACSTAT */
WL_CNT_XTLV_GE40_UCODE_V1 = 0x400, /**< corerev >= 40 UCODE MACSTAT */
WL_STATE_COMPACT_HE_COUNTERS = 0x3
};
+/* Sub tlvs for chan_counters */
+enum wl_periodic_chan_xtlv_id {
+ WL_CHAN_GENERIC_COUNTERS = 0x1,
+ WL_CHAN_PERIODIC_COUNTERS = 0x2
+};
+
+#ifdef WLC_CHAN_ECNTR_TEST
+#define WL_CHAN_PERIODIC_CNTRS_VER_1 1
+typedef struct wlc_chan_periodic_cntr
+{
+ uint16 version;
+ uint16 pad;
+ uint32 rxstrt;
+} wlc_chan_periodic_cntr_t;
+#endif /* WLC_CHAN_ECNTR_TEST */
+
+#define WL_CHANCNTR_HDR_VER_1 1
+typedef struct wlc_chan_cntr_hdr_v1
+{
+ uint16 version;
+ uint16 pad;
+ chanspec_t chanspec; /* Dont add any fields above this */
+ uint16 pad1;
+ uint32 total_time;
+ uint32 chan_entry_cnt;
+} wlc_chan_cntr_hdr_v1_t;
+
/* tlv IDs uniquely identifies periodic state component */
enum wl_periodic_if_state_xtlv_id {
WL_STATE_IF_COMPACT_STATE = 0x1,
WL_STATE_IF_ADPS_STATE = 0x02
};
+enum wl_periodic_tdls_if_state_xtlv_id {
+ WL_STATE_IF_TDLS_STATE = 0x1
+};
+
#define TDMTX_CNT_VERSION_V1 1
#define TDMTX_CNT_VERSION_V2 2
#define WL_CNT_SECVLN_STRUCT_SZ ((uint32)sizeof(wl_secvln_cnt_t))
+#define WL_CNT_HE_OMI_STRUCT_SZ ((uint32)sizeof(wl_he_omi_cnt_wlc_v1_t))
#define INVALID_CNT_VAL (uint32)(-1)
#define WL_XTLV_CNTBUF_MAX_SIZE ((uint32)(OFFSETOF(wl_cnt_info_t, data)) + \
#define WL_CNTBUF_MAX_SIZE MAX(WL_XTLV_CNTBUF_MAX_SIZE, (uint32)sizeof(wl_cnt_ver_11_t))
+/* Please refer to the twiki for counters addition/deletion.
+ * http://hwnbu-twiki.sj.broadcom.com/bin/view/Mwgroup/WlCounters#Counter_Edition
+ */
+
/** Top structure of counters IOVar buffer */
typedef struct {
uint16 version; /**< see definition of WL_CNT_T_VERSION */
uint32 last_tx_toss_rsn; /* reason because of which last tx pkt tossed */
uint32 last_rx_toss_rsn; /* reason because of which last rx pkt tossed */
uint32 pmk_badlen_cnt; /* number of invalid pmk len */
-
+ uint32 txbar_notx; /* number of TX BAR not sent (may be suppressed or muted) */
+ uint32 txbar_noack; /* number of TX BAR sent, but not acknowledged by peer */
+ uint32 rxfrag_agedout; /**< # of aged out rx fragmentation */
+
+ /* Do not remove or rename in the middle of this struct.
+ * All counter variables have to be of uint32.
+ * Please follow the instruction in
+ * http://hwnbu-twiki.sj.broadcom.com/bin/view/Mwgroup/WlCounters#Counter_Edition
+ */
} wl_cnt_wlc_t;
/* he counters Version 1 */
} wl_he_cnt_wlc_v2_t;
/* he counters Version 3 */
+#define WL_RU_TYPE_MAX 6
#define HE_COUNTERS_V3 (3)
+
typedef struct wl_he_cnt_wlc_v3 {
uint16 version;
uint16 len;
uint32 he_null_fifo_empty; /**< null AMPDU's in response to basic trigger
* because of no frames in fifo's
*/
+ uint32 he_myAID_cnt;
+ uint32 he_rxtrig_bfm_cnt;
+ uint32 he_rxtrig_mubar;
+ uint32 rxheru[WL_RU_TYPE_MAX]; /**< HE of rx pkts */
+ uint32 txheru[WL_RU_TYPE_MAX];
+ uint32 he_mgmt_tbppdu;
+ uint32 he_cs_req_tx_cancel;
+ uint32 he_wrong_nss;
+ uint32 he_trig_unsupp_rate;
+ uint32 he_rxtrig_nfrp;
+ uint32 he_rxtrig_bqrp;
+ uint32 he_rxtrig_gcrmubar;
} wl_he_cnt_wlc_v3_t;
+/* he counters Version 4 */
+#define HE_COUNTERS_V4 (4)
+typedef struct wl_he_cnt_wlc_v4 {
+ uint16 version;
+ uint16 len;
+ uint32 he_rxtrig_myaid; /**< rxed valid trigger frame with myaid */
+ uint32 he_rxtrig_rand; /**< rxed valid trigger frame with random aid */
+ uint32 he_colormiss_cnt; /**< for bss color mismatch cases */
+ uint32 he_txmampdu; /**< for multi-TID AMPDU transmission */
+ uint32 he_txmtid_back; /**< for multi-TID BACK transmission */
+ uint32 he_rxmtid_back; /**< reception of multi-TID BACK */
+ uint32 he_rxmsta_back; /**< reception of multi-STA BACK */
+ uint32 he_txfrag; /**< transmission of Dynamic fragmented packets */
+ uint32 he_rxdefrag; /**< reception of dynamic fragmented packets */
+ uint32 he_txtrig; /**< transmission of trigger frames */
+ uint32 he_rxtrig_basic; /**< reception of basic trigger frame */
+ uint32 he_rxtrig_murts; /**< reception of MU-RTS trigger frame */
+ uint32 he_rxtrig_bsrp; /**< reception of BSR poll trigger frame */
+ uint32 he_rxtsrt_hemuppdu_cnt; /**< rxing HE MU PPDU */
+ uint32 he_physu_rx; /**< reception of SU frame */
+ uint32 he_phyru_rx; /**< reception of RU frame */
+ uint32 he_txtbppdu; /**< increments on transmission of every TB PPDU */
+ uint32 he_null_tbppdu; /**< null TB PPDU's sent as a response to basic trigger frame */
+ uint32 he_rxstrt_hesuppdu_cnt; /**< rxing SU PPDU */
+ uint32 he_rxstrt_hesureppdu_cnt; /**< rxing Range Extension(RE) SU PPDU */
+ uint32 he_null_zero_agg; /**< null AMPDU's transmitted in response to basic trigger
+ * because of zero aggregation
+ */
+ uint32 he_null_bsrp_rsp; /**< null AMPDU's txed in response to BSR poll */
+ uint32 he_null_fifo_empty; /**< null AMPDU's in response to basic trigger
+ * because of no frames in fifo's
+ */
+ uint32 he_myAID_cnt;
+ uint32 he_rxtrig_bfm_cnt;
+ uint32 he_rxtrig_mubar;
+ uint32 rxheru[WL_RU_TYPE_MAX]; /**< HE of rx pkts */
+ uint32 txheru[WL_RU_TYPE_MAX];
+ uint32 he_mgmt_tbppdu;
+ uint32 he_cs_req_tx_cancel;
+ uint32 he_wrong_nss;
+ uint32 he_trig_unsupp_rate;
+ uint32 he_rxtrig_nfrp;
+ uint32 he_rxtrig_bqrp;
+ uint32 he_rxtrig_gcrmubar;
+ uint32 he_rxtrig_basic_htpack; /**< triggers received with HTP ack policy */
+ uint32 he_rxtrig_ed_cncl; /**< count of cancelled packets
+ * because of cs_req in trigger frame
+ */
+ uint32 he_rxtrig_suppr_null_tbppdu; /**< count of null frame sent because of
+ * suppression scenarios
+ */
+ uint32 he_ulmu_disable; /**< number of UL MU disable scenario's handled in ucode */
+ uint32 he_ulmu_data_disable; /**< number of UL MU data disable scenarios
+ * handled in ucode
+ */
+} wl_he_cnt_wlc_v4_t;
+
#ifndef HE_COUNTERS_VERSION_ENABLED
#define HE_COUNTERS_VERSION (HE_COUNTERS_V1)
typedef wl_he_cnt_wlc_v1_t wl_he_cnt_wlc_t;
#endif /* HE_COUNTERS_VERSION_ENABLED */
+/* he omi counters Version 1 */
+#define HE_OMI_COUNTERS_V1 (1)
+typedef struct wl_he_omi_cnt_wlc_v1 {
+ uint16 version;
+ uint16 len;
+ uint32 he_omitx_sched; /* Count for total number of OMIs scheduled */
+ uint32 he_omitx_success; /* Count for OMI Tx success */
+ uint32 he_omitx_retries; /* Count for OMI retries as TxDone not set */
+ uint32 he_omitx_dur; /* Accumulated duration of OMI completion time */
+ uint32 he_omitx_ulmucfg; /* count for UL MU enable/disable change req */
+ uint32 he_omitx_ulmucfg_ack; /* count for UL MU enable/disable req txed successfully */
+ uint32 he_omitx_txnsts; /* count for Txnsts change req */
+ uint32 he_omitx_txnsts_ack; /* count for Txnsts change req txed successfully */
+ uint32 he_omitx_rxnss; /* count for Rxnss change req */
+ uint32 he_omitx_rxnss_ack; /* count for Rxnss change req txed successfully */
+ uint32 he_omitx_bw; /* count for BW change req */
+ uint32 he_omitx_bw_ack; /* count for BW change req txed successfully */
+ uint32 he_omitx_ersudis; /* count for ER SU enable/disable req */
+ uint32 he_omitx_ersudis_ack; /* count for ER SU enable/disable req txed successfully */
+ uint32 he_omitx_dlmursdrec; /* count for Resound recommendation change req */
+ uint32 he_omitx_dlmursdrec_ack; /* count for Resound recommendation req txed successfully */
+} wl_he_omi_cnt_wlc_v1_t;
+
+/* WL_IFSTATS_XTLV_WL_SLICE_TXBF */
+/* beamforming counters version 1 */
+#define TXBF_ECOUNTERS_V1 (1u)
+#define WL_TXBF_CNT_ARRAY_SZ (8u)
+typedef struct wl_txbf_ecounters_v1 {
+ uint16 version;
+ uint16 len;
+ /* transmit beamforming stats */
+ uint16 txndpa; /* null data packet announcements */
+ uint16 txndp; /* null data packets */
+ uint16 txbfpoll; /* beamforming report polls */
+ uint16 txsf; /* subframes */
+ uint16 txcwrts; /* contention window rts */
+ uint16 txcwcts; /* contention window cts */
+ uint16 txbfm;
+ /* receive beamforming stats */
+ uint16 rxndpa_u; /* unicast NDPAs */
+ uint16 rxndpa_m; /* multicast NDPAs */
+ uint16 rxbfpoll; /* unicast bf-polls */
+ uint16 bferpt; /* beamforming reports */
+ uint16 rxsf;
+ uint16 rxcwrts;
+ uint16 rxcwcts;
+ uint16 rxtrig_bfpoll;
+ uint16 unused_uint16; /* pad */
+ /* sounding stats - interval capture */
+ uint16 rxnontb_sound[WL_TXBF_CNT_ARRAY_SZ]; /* non-TB sounding for last 8 captures */
+ uint16 rxtb_sound[WL_TXBF_CNT_ARRAY_SZ]; /* TB sounding count for last 8 captures */
+ uint32 cap_dur_ms[WL_TXBF_CNT_ARRAY_SZ]; /* last 8 capture durations (in ms) */
+ uint32 cap_last_ts; /* timestamp of last sample capture */
+} wl_txbf_ecounters_v1_t;
+
/* security vulnerabilities counters */
typedef struct {
uint32 ie_unknown; /* number of unknown IEs */
uint32 rxdrop20s; /**< drop secondary cnt */
uint32 rxtoolate; /**< receive too late */
uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
+ /* All counter variables have to be of uint32. */
} wl_cnt_ge40mcst_v1_t;
/** MACSTAT counters for ucode (corerev < 40) */
uint32 phywatch;
uint32 rxtoolate; /**< receive too late */
uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
+ /* All counter variables have to be of uint32. */
} wl_cnt_lt40mcst_v1_t;
/** MACSTAT counters for ucode (corerev >= 80) */
uint32 PAD[6];
uint32 rxerr_stat;
uint32 ctx_fifo_full;
- uint32 PAD[38]; /* PAD added for counter elements to be added soon */
+ uint32 PAD0[9];
+ uint32 ctmode_ufc_cnt;
+ uint32 PAD1[28]; /* PAD added for counter elements to be added soon */
} wl_cnt_ge80mcst_v1_t;
typedef struct {
} wl_cnt_ge80_txfunfl_v1_t;
/** MACSTAT counters for "wl counter" version <= 10 */
+/* With ucode before its macstat cnts cleaned up */
typedef struct {
/* MAC counters: 32-bit version of d11.h's macstat_t */
uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
uint32 rxdrop20s; /**< drop secondary cnt */
uint32 rxtoolate; /**< receive too late */
uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
+ /* All counter variables have to be of uint32. */
} wl_cnt_v_le10_mcst_t;
#define MAX_RX_FIFO 3
uint32 rxbcast; /* BroadcastReceivedFrameCount */
uint32 rxdropped; /* rx dropped pkts (derived: sum of others) */
+ /* This structure is deprecated and used only for ver <= 11.
+ * All counter variables have to be of uint32.
+ * Please refer to the following twiki before editing.
+ * http://hwnbu-twiki.sj.broadcom.com/bin/view/
+ * Mwgroup/WlCounters#wlc_layer_counters_non_xTLV
+ */
} wl_cnt_ver_11_t;
typedef struct {
* fifo because a probe response could not be sent out within
* the time limit defined in M_PRS_MAXTIME
*/
- uint32 rxnack;
- uint32 frmscons;
+ uint32 rxnack; /**< Number of NACKS received (Afterburner) */
+ uint32 frmscons; /**< Number of frames completed without transmission because of an
+ * Afterburner re-queue
+ */
uint32 txnack; /**< obsolete */
uint32 rxback; /**< blockack rxcnt */
uint32 txback; /**< blockack txcnt */
uint32 rxmpdu_stbc; /**< count for stbc received */
uint32 rxdrop20s; /**< drop secondary cnt */
+ /* All counter variables have to be of uint32. */
} wl_cnt_ver_6_t;
#define WL_DELTA_STATS_T_VERSION 2 /**< current version of wl_delta_stats_t struct */
#define WL_MKEEP_ALIVE_VERSION 1
#define WL_MKEEP_ALIVE_FIXED_LEN OFFSETOF(wl_mkeep_alive_pkt_t, data)
+/* 1/2 second precision since idle time is a seconds counter anyway */
#define WL_MKEEP_ALIVE_PRECISION 500
#define WL_MKEEP_ALIVE_PERIOD_MASK 0x7FFFFFFF
#define WL_MKEEP_ALIVE_IMMEDIATE 0x80000000
uint32 stop_ptr; /* Stop address to store */
uint8 optn_bmp; /* Options */
uint8 PAD[3];
+ /* Don't change the order after this nor
+ * add anything in between. Code uses offsets to populate
+ * registers
+ */
uint32 tr_mask; /* Trigger Mask */
uint32 tr_val; /* Trigger Value */
uint32 s_mask; /* Store Mode Mask */
wl_apf_program_t apf_program; /* apf program */
wl_pkt_filter_pattern_timeout_t pattern_timeout; /* Pattern timeout event filter */
} u;
+ /* Do NOT add structure members after the filter definitions, since they
+ * may include variable length arrays.
+ */
} wl_pkt_filter_t;
/** IOVAR "tcp_keep_set" parameter. Used to install tcp keep_alive stuff. */
* used for report in struct cca_chan_qual_event_t. So the ID values beyond 8-bit are used
* for reporting purpose only.
*/
-#define WL_CHAN_QUAL_FULL_CCA (0x100 | WL_CHAN_QUAL_CCA)
+#define WL_CHAN_QUAL_FULL_CCA (0x100u | WL_CHAN_QUAL_CCA) /* CCA: ibss vs. obss */
+#define WL_CHAN_QUAL_FULLPM_CCA (0x200u | WL_CHAN_QUAL_CCA) /* CCA: me vs. notme, PM vs. !PM */
#define MAX_CHAN_QUAL_LEVELS 8
/** IOVar parameter block for small MAC address array with type indicator */
#define WL_IOV_MAC_PARAM_LEN 4
+/** This value is hardcoded to be 16 and MUST match PKTQ_MAX_PREC value defined elsewhere */
#define WL_IOV_PKTQ_LOG_PRECS 16
#include <packed_section_start.h>
} wl_iov_mac_full_params_t;
/** Parameter block for PKTQ_LOG statistics */
+/* NOTE: this structure cannot change! It is exported to wlu as a binary format
+ * A new format revision number must be created if the interface changes
+ * The latest is v05; previous v01...v03 are no longer supported, v04 has
+ * common base with v05
+*/
#define PKTQ_LOG_COUNTERS_V4 \
/* packets requested to be stored */ \
uint32 requested; \
#define NBR_ADD_DYNAMIC 1
#define WL_RRM_NBR_RPT_VER 1
+
+#define WL_NBR_RPT_FLAG_BSS_PREF_FROM_AP 0x01
/** 11k Neighbor Report element */
typedef struct nbr_rpt_elem {
uint8 version;
#define WLC_PMD_CHK_UNALIGN_TBTT 0x100
#define WLC_PMD_APSD_STA_UP 0x200
#define WLC_PMD_TX_PEND_WAR 0x400 /* obsolete, can be reused */
+#define WLC_PMD_NAN_AWAKE 0x400 /* Reusing for NAN */
#define WLC_PMD_GPTIMER_STAY_AWAKE 0x800
#define WLC_PMD_PM2_RADIO_SOFF_PEND 0x2000
#define WLC_PMD_NON_PRIM_STA_UP 0x4000
/* Below are latest definitions from PHO25178RC100_BRANCH_6_50 */
/* wl_pwr_pm_awake_stats_v1_t is used for WL_PWRSTATS_TYPE_PM_AWAKE */
+/* Use regs from d11.h instead of raw addresses for */
/* (at least) the chip independent registers */
typedef struct ucode_dbg_ext {
uint32 x120;
} BWL_POST_PACKED_STRUCT tx_pwr_rpt_t;
#include <packed_section_end.h>
+typedef struct tx_pwr_ru_rate_info {
+ uint16 version;
+ uint16 ru_alloc;
+ uint16 mcs;
+ uint16 nss;
+ uint16 num_he_ltf_syms;
+ uint16 ldpc;
+ uint16 gi;
+ uint16 txmode;
+ uint16 dcm;
+ uint16 tx_chain;
+} tx_pwr_ru_rate_info_t;
+
+#define TX_PWR_RU_RATE_INFO_VER 1
+
+/* TLV ID for curpower report, ID < 63 is reserved for ppr module */
+typedef enum tx_pwr_tlv_id {
+ TX_PWR_RPT_RU_RATE_INFO_ID = 64
+} tx_pwr_tlv_id_t;
+
#include <packed_section_start.h>
typedef BWL_PRE_PACKED_STRUCT struct {
struct ipv4_addr ipv4_addr;
uint16 rsvd; /* pad to align struct to 32bit bndry */
} wlc_btc_stats_v2_t;
+/* Durations for each bt task in millisecond */
+#define WL_BTCX_DURSTATS_VER_1 (1u)
+typedef struct wlc_btcx_durstats_v1 {
+ uint16 version; /* version number of struct */
+ uint16 valid; /* validity of this struct */
+ uint32 stats_update_timestamp; /* tStamp when data is updated */
+ uint16 bt_acl_dur; /* acl duration in ms */
+ uint16 bt_sco_dur; /* sco duration in ms */
+ uint16 bt_esco_dur; /* esco duration in ms */
+ uint16 bt_a2dp_dur; /* a2dp duration in ms */
+ uint16 bt_sniff_dur; /* sniff duration in ms */
+ uint16 bt_pscan_dur; /* page scan duration in ms */
+ uint16 bt_iscan_dur; /* inquiry scan duration in ms */
+ uint16 bt_page_dur; /* paging duration in ms */
+ uint16 bt_inquiry_dur; /* inquiry duration in ms */
+ uint16 bt_mss_dur; /* mss duration in ms */
+ uint16 bt_park_dur; /* park duration in ms */
+ uint16 bt_rssiscan_dur; /* rssiscan duration in ms */
+ uint16 bt_iscan_sco_dur; /* inquiry scan sco duration in ms */
+ uint16 bt_pscan_sco_dur; /* page scan sco duration in ms */
+ uint16 bt_tpoll_dur; /* tpoll duration in ms */
+ uint16 bt_sacq_dur; /* sacq duration in ms */
+ uint16 bt_sdata_dur; /* sdata duration in ms */
+ uint16 bt_rs_listen_dur; /* rs listen duration in ms */
+ uint16 bt_rs_burst_dur; /* rs burst duration in ms */
+ uint16 bt_ble_adv_dur; /* ble adv duration in ms */
+ uint16 bt_ble_scan_dur; /* ble scan duration in ms */
+ uint16 bt_ble_init_dur; /* ble init duration in ms */
+ uint16 bt_ble_conn_dur; /* ble connection duration in ms */
+ uint16 bt_task_lmp_dur; /* lmp duration in ms */
+ uint16 bt_esco_retran_dur; /* esco retransmission duration in ms */
+ uint16 bt_task26_dur; /* task26 duration in ms */
+ uint16 bt_task27_dur; /* task27 duration in ms */
+ uint16 bt_task28_dur; /* task28 duration in ms */
+ uint16 bt_task_pred_dur; /* prediction task duration in ms */
+ uint16 bt_multihid_dur; /* multihid duration in ms */
+} wlc_btcx_durstats_v1_t;
+
#define WL_IPFO_ROUTE_TBL_FIXED_LEN 4
#define WL_MAX_IPFO_ROUTE_TBL_ENTRY 64
#define LOGRRC_FIX_LEN 8
#define IOBUF_ALLOWED_NUM_OF_LOGREC(type, len) ((len - LOGRRC_FIX_LEN)/sizeof(type))
/* BCMWAPI_WAI */
-#define IV_LEN 16
+#define IV_LEN 16 /* same as SMS4_WPI_PN_LEN */
struct wapi_sta_msg_t
{
uint16 msg_type;
/* For NAN TX status */
typedef struct wl_nan_event_txs {
- uint8 status; /* For TX status, success or failure */
- uint8 reason_code; /* to identify reason when status is failure */
- uint16 host_seq; /* seq num to keep track of pkts sent by host */
- uint8 type; /* frame type */
+ uint8 status; /* For TX status, success or failure */
+ uint8 reason_code; /* to identify reason when status is failure */
+ uint16 host_seq; /* seq num to keep track of pkts sent by host */
+ uint8 type; /* wl_nan_frame_type_t */
uint8 pad;
uint16 opt_tlvs_len;
uint8 opt_tlvs[];
uint8 req_id; /* Requestor instance id */
} wl_nan_event_sd_txs_t;
+/* nanho fsm tlv WL_NAN_XTLV_NANHO_OOB_TXS(0x0b0a) */
+typedef struct wl_nan_event_nanho_txs {
+ uint32 fsm_id; /* nho fsm id */
+ uint16 seq_id; /* nho seq id */
+ uint16 pad;
+} wl_nan_event_nanho_txs_t;
+
/* Subscribe or Publish instance Terminated */
/* WL_NAN_EVENT_TERMINATED */
uint8 attr_list[0]; /* attributes payload */
} wl_nan_ev_receive_t;
+/* WL_NAN_EVENT_DISC_CACHE_TIMEOUT */
+#define WL_NAN_DISC_CACHE_EXPIRY_ENTRIES_MAX 8
+
+typedef struct wl_nan_disc_expired_cache_entry {
+ uint8 l_sub_id; /* local sub instance_id */
+ uint8 r_pub_id; /* remote-matched pub instance_id */
+ struct ether_addr r_nmi_addr; /* remote-matched pub nmi addr */
+} wl_nan_disc_expired_cache_entry_t;
+
+typedef struct wl_nan_ev_disc_cache_timeout {
+ uint16 count; /* no. of expired cache entries */
+ uint16 pad;
+ wl_nan_disc_expired_cache_entry_t cache_exp_list[];
+} wl_nan_ev_disc_cache_timeout_t;
+
/* For NAN event mask extention */
#define WL_NAN_EVMASK_EXTN_VER 1
#define WL_NAN_EVMASK_EXTN_LEN 16 /* 16*8 = 128 masks supported */
WL_NAN_XTLV_SD_NDP_SPEC_INFO = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0F),
WL_NAN_XTLV_SD_NDPE_TLV_LIST = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x10),
WL_NAN_XTLV_SD_NDL_QOS_UPD = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x11),
+ WL_NAN_XTLV_SD_DISC_CACHE_TIMEOUT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x12),
+ WL_NAN_XTLV_SD_PEER_NMI = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x13),
WL_NAN_XTLV_SYNC_BCN_RX = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x01),
WL_NAN_XTLV_EV_MR_CHANGED = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x02),
WL_NAN_XTLV_DAM_NA_ATTR = NAN_CMD(WL_NAN_CMD_DAM_COMP_ID, 0x01), /* na attr */
WL_NAN_XTLV_HOST_ASSIST_REQ = NAN_CMD(WL_NAN_CMD_DAM_COMP_ID, 0x02), /* host assist */
- WL_NAN_XTLV_GEN_FW_CAP = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x01), /* fw cap */
+ /* wl_nan_fw_cap_t */
+ WL_NAN_XTLV_GEN_FW_CAP = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x01),
+ /* wl_nan_fw_cap_v2_t */
+ WL_NAN_XTLV_GEN_FW_CAP_V2 = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x02),
WL_NAN_XTLV_SCHED_INFO = NAN_CMD(WL_NAN_CMD_SCHED_COMP_ID, 0x01),
WL_NAN_XTLV_NANHO_BLOB = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x04),
WL_NAN_XTLV_NANHO_NDP_STATE = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x05),
WL_NAN_XTLV_NANHO_FRM_TPLT = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x06),
- WL_NAN_XTLV_NANHO_OOB_NAF = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x07)
+ WL_NAN_XTLV_NANHO_OOB_NAF = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x07),
+ WL_NAN_XTLV_NANHO_LOG_ERR_CTRL = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x08),
+ WL_NAN_XTLV_NANHO_LOG_DBG_CTRL = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x09),
+ WL_NAN_XTLV_NANHO_OOB_TXS = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x0A),
+ WL_NAN_XTLV_NANHO_DCAP_ATTR = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x0B),
+ WL_NAN_XTLV_NANHO_ELEM_ATTR = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x0C),
+ WL_NAN_XTLV_NANHO_SEC_SA = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x0D)
} wl_nan_tlv_t;
/* Sub Module ID's for NAN */
WL_NAN_CMD_SD_FUP_TRANSMIT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0B),
WL_NAN_CMD_SD_CONNECTION = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0C),
WL_NAN_CMD_SD_SHOW = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0D),
- WL_NAN_CMD_SD_MAX = WL_NAN_CMD_SD_SHOW,
+ WL_NAN_CMD_SD_DISC_CACHE_TIMEOUT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0E),
+ WL_NAN_CMD_SD_DISC_CACHE_CLEAR = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0F),
+ WL_NAN_CMD_SD_MAX = WL_NAN_CMD_SD_DISC_CACHE_CLEAR,
/* nan time sync sub-commands */
WL_NAN_CMD_RANGE_AUTO = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x02),
WL_NAN_CMD_RANGE_RESPONSE = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x03),
WL_NAN_CMD_RANGE_CANCEL = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x04),
+ WL_NAN_CMD_RANGE_IDLE_COUNT = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x05),
+ WL_NAN_CMD_RANGE_CANCEL_EXT = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x06),
/* nan debug sub-commands */
WL_NAN_CMD_DBG_SCAN_PARAMS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x01),
WL_NAN_CMD_NSR2_MAX = WL_NAN_CMD_NSR2,
/* Host offload sub-commands */
- WL_NAN_CMD_NANHO_UPDATE = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x01),
- WL_NAN_CMD_NANHO_FRM_TPLT = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x02),
- WL_NAN_CMD_NANHO_OOB_NAF = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x03),
- WL_NAN_CMD_NANHO_MAX = WL_NAN_CMD_NANHO_OOB_NAF
+ WL_NAN_CMD_NANHO_UPDATE = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x01), /* WILL BE REMOVED */
+ WL_NAN_CMD_NANHO_INFO = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x01),
+ WL_NAN_CMD_NANHO_FRM_TPLT = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x02), /* unused */
+ WL_NAN_CMD_NANHO_OOB_NAF = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x03), /* unused */
+ WL_NAN_CMD_NANHO_LOG_CTRL = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x04),
+ WL_NAN_CMD_NANHO_VER = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x05),
+ WL_NAN_CMD_NANHO_MAX = WL_NAN_CMD_NANHO_VER,
+
+ /* Add submodules below, and update WL_NAN_CMD_MAX */
+
+ /* ROML check for this enum should use WL_NAN_CMD_MAX */
+ WL_NAN_CMD_MAX = WL_NAN_CMD_NANHO_MAX
};
/*
uint32 naf_rx; /* NAN AF rx */
uint32 sdf_tx; /* SDF tx */
uint32 sdf_rx; /* SDF rx */
+ uint32 cnt_sync_bcn_rx_tu[3]; /* delta bw */
+ uint32 cnt_bcn_tx_out_dw; /* TX sync beacon outside dw */
+ uint32 cnt_role_am_dw; /* anchor master role due to dw */
+ uint32 cnt_am_hop_err; /* wrong hopcount set for AM */
} wl_nan_mac_stats_t;
/* NAN Sched stats */
#define WL_NAN_AMR_CHANGED 1
#define WL_NAN_IMR_CHANGED 2
+/*
+ * The macro BCMUTILS_ERR_CODES is defined only
+ * when using the common header file (a new approach) bcmutils/include/bcmerror.h.
+ * Otherwise, use the error codes from this file.
+ */
+#ifndef BCMUTILS_ERR_CODES
+
/** status - TBD BCME_ vs NAN status - range reserved for BCME_ */
enum {
/* add new status here... */
+ WL_NAN_E_INVALID_TOKEN = -2135, /* invalid token or mismatch */
+ WL_NAN_E_INVALID_ATTR = -2134, /* generic invalid attr error */
+ WL_NAN_E_INVALID_NDL_ATTR = -2133, /* invalid NDL attribute */
+ WL_NAN_E_SCB_NORESOURCE = -2132, /* no more peer scb available */
WL_NAN_E_PEER_NOTAVAIL = -2131,
WL_NAN_E_SCB_EXISTS = -2130,
WL_NAN_E_INVALID_PEER_NDI = -2129,
WL_NAN_E_INVALID_MAC = -2049,
WL_NAN_E_BAD_INSTANCE = -2048,
/* NAN status code reserved from -2048 to -3071 */
+ /* Do NOT add new status below -2048 */
WL_NAN_E_ERROR = -1,
WL_NAN_E_OK = 0
};
+#endif /* BCMUTILS_ERR_CODES */
+
/* Error codes used in vendor specific attribute in Data Path Termination frames */
enum {
WL_NAN_DPEND_E_OK = 0,
WL_NAN_DPEND_E_INVALID_NDL_QOS = 15,
WL_NAN_DPEND_E_INVALID_SEC_PARAMS = 16,
WL_NAN_DPEND_E_REJECT_AVAIL = 17,
- WL_NAN_DPEND_E_REJECT_NDL = 18
+ WL_NAN_DPEND_E_REJECT_NDL = 18,
+ WL_NAN_DPEND_E_SCB_NORESOURCE = 19
};
typedef int32 wl_nan_status_t;
uint8 election_mode; /* Election mode, host or firmware */
uint8 hop_count; /* Current Hop count */
uint8 imr[8]; /* Immediate Master Rank */
- uint8 pad[4];
+ uint8 pad[4]; /* remove after precommit */
uint16 opt_tlvs_len;
uint8 opt_tlvs[];
} wl_nan_conf_status_t;
*/
typedef struct ether_addr wl_nan_cluster_id_t;
+#define NHO_SEC_NCS_SK_REPLAY_CNT_LEN 8u
+/* kck */
+#define NHO_SEC_NCS_SK_256_KCK_LEN 24u /* refer nan2 r21 7.1.4.1 */
+/* kek */
+#define NHO_SEC_NCS_SK_256_KEK_LEN 32u /* refer nan2 r21 7.1.4.1 */
+/* tk */
+#define NHO_SEC_NCS_SK_256_TK_LEN 32u /* refer nan2 r21 section 7 */
+
+#define NHO_SEC_NCS_SK_MAX_KEY_LEN (NHO_SEC_NCS_SK_256_KCK_LEN \
+ + NHO_SEC_NCS_SK_256_KEK_LEN \
+ + NHO_SEC_NCS_SK_256_TK_LEN)
+
+#define NHO_SEC_KEY_INSTALL_FLAG (1 << 0)
+#define NHO_SEC_KEY_UNINSTALL_FLAG (1 << 1)
+
+/* WL_NAN_XTLV_NANHO_SEC_SA */
+typedef struct nanho_sec_sa {
+ int8 csid; /* Cipher suite ID to identify the security type */
+ uint8 kck_len; /* KCK len in key_buf */
+ uint8 kek_len; /* KEK len in key_buf */
+ uint8 tk_len; /* Transient key len in key_buf */
+ uint16 flags;
+ uint16 pad;
+ struct ether_addr laddr; /* local mac addr */
+ struct ether_addr raddr; /* remote mac addr */
+ uint8 key_krc[NHO_SEC_NCS_SK_REPLAY_CNT_LEN]; /* Key Replay ctr */
+ uint8 key_buf[NHO_SEC_NCS_SK_MAX_KEY_LEN]; /* PTK = KCK + KEK + TK */
+} nanho_sec_sa_t;
+
/*
* WL_NAN_CMD_ELECTION_MERGE
* 0 - disable cluster merge
uint32 ambtt;
} nan_sync_master_t;
-/*
-* NAN Sync TLV(NSTLV):
-* To keep NAN/AWDL concurrency time sync.
-* It is generated at hybrid device, and propogated by AWDL only device.
-* It contains the information needed to run NAN election
-*/
-#include <packed_section_start.h>
-typedef BWL_PRE_PACKED_STRUCT struct awdl_nan_sync_tlv {
- uint16 hop_count; /* total hop_count */
- struct ether_addr src_addr; /* macaddr of the hybrid originator of nstlv */
- struct ether_addr cluster_id; /* NAN cluster ID of hybrid originator of nstlv */
- uint32 nan_tsf_h; /* NAN cluster TSF of the hybrid originator of nstlv */
- uint32 nan_tsf_l;
- uint8 master_preference;
- uint8 random_factor;
- uint8 amr[WL_NAN_MASTER_RANK_LEN];
- uint8 orig_hop_count; /* hop_count of the origin hybrid NAN device */
- uint32 ambtt; /* Anchor Master Beacon Transmission Time */
- uint8 opt_xtlv_len; /* xtlv len */
-} BWL_POST_PACKED_STRUCT awdl_nan_sync_tlv_t;
-
-typedef BWL_PRE_PACKED_STRUCT struct wl_awdl_nan_sync_tlv {
- uint8 type; /* 23 for NTLV */
- uint16 param_len;
- awdl_nan_sync_tlv_t ntlv;
-} BWL_POST_PACKED_STRUCT wl_awdl_nan_sync_tlv_t;
-#include <packed_section_end.h>
-
/* NAN advertiser structure */
/* TODO RSDB: add chspec to indicates core corresponds correct core */
typedef struct nan_adv_entry {
#define NAN_VIRTUAL_PEER_BIT 0x80
typedef enum {
- NAC_CNT_NTLV_AF_TX = 0, /* count of AWDL AF containing NTLV tx */
- NAC_CNT_NTLV_AF_RX, /* count of AWDL AF containing NTLV rx */
+ NAC_CNT_NTLV_AF_TX = 0, /* count of AF containing NTLV tx */
+ NAC_CNT_NTLV_AF_RX, /* count of AF containing NTLV rx */
NAC_CNT_NTLV_TMERR_TX, /* count of NTLV tx timing error */
NAC_CNT_NTLV_TMERR_RX, /* count of NTLV rx timing error */
NAC_CNT_NTLV_TM_MISMATCH, /* count of TopMaster mismatch in Rx NTLV processing */
uint8 opt_tlv[]; /* optional tlvs in bcm_xtlv_t type */
} wl_nan_sd_transmit_t;
+/* disc cache timeout for a cache entry */
+typedef uint16 wl_nan_disc_cache_timeout_t;
+
/*
* WL_NAN_CMD_SYNC_TSRESERVE
*/
* discovery interface event structures *
*/
+enum wl_nan_oob_af_flags {
+ WL_NAN_OOB_AF_FLAG_SEND_EVENT = 0x0001, /* send tx status event */
+ WL_NAN_OOB_AF_FLAG_FLUSH_PCB = 0x0002, /* flush PCB */
+ WL_NAN_OOB_AF_FLAG_ADD_DCAP = 0x0004, /* add dev cap attr into NAF body */
+ WL_NAN_OOB_AF_FLAG_ADD_ELMT = 0x0008, /* add elmt container attr into NAF body */
+ WL_NAN_OOB_AF_FLAG_MFP_REQUIRED = 0x0010 /* MFP required */
+};
+typedef uint16 wl_nan_oob_af_flags_t;
+
/* mandatory parameters for OOB action frame */
-/* single-shot when bitmap and offset are set to 0; periodic otherwise */
typedef struct wl_nan_oob_af_params_s
{
- /* bitmap for the 32 timeslots in 512TU dw interval */
- uint32 ts_map;
- /* offset from start of dw, in us */
- uint32 tx_offset;
+ uint8 fup_lcl_id; /* local instance ID of follow-up SDF */
+ uint8 fup_peer_id; /* peer instance ID of follow-up SDF */
+ uint8 sdf_type; /* represented by service control type NAN_SC_XXX */
+ uint8 unused_uint8;
+ uint32 unused_uint32;
struct ether_addr bssid;
struct ether_addr dest;
uint32 pkt_lifetime;
+ uint8 n2af_sub_type; /* NAN2 AF sub type */
+ uint8 retry_cnt; /* NAF tx retry (not 802.11 re-tx) */
+ uint16 token; /* NAN host seq num */
+ uint16 flags; /* wl_nan_oob_af_flags_t */
+ uint32 fsm_id; /* unique fsm id */
uint16 payload_len;
uint8 payload[1];
} wl_nan_oob_af_params_t;
WL_NAN_HAST_REASON_NONE = 0,
/* reason for host assist request */
- WL_NAN_HAST_REASON_NO_CRB = 1, /* NDL: no common NA */
- WL_NAN_HAST_REASON_NDC = 2, /* NDL: NDC not compliant */
- WL_NAN_HAST_REASON_IMMUT = 3, /* NDL: peer immutable schedule */
- WL_NAN_HAST_REASON_RNG = 4, /* NDL: ranging schedule */
- WL_NAN_HAST_REASON_QOS = 5, /* NDL: QoS not satisfied */
- WL_NAN_HAST_REASON_SVC_NDI_MISSING = 6 /* SD: NDI associated with svc is missing */
+ WL_NAN_HAST_REASON_NO_CRB = 1, /* NDL: no common NA */
+ WL_NAN_HAST_REASON_NDC = 2, /* NDL: NDC not compliant */
+ WL_NAN_HAST_REASON_IMMUT = 3, /* NDL: peer immutable schedule */
+ WL_NAN_HAST_REASON_RNG = 4, /* NDL: ranging schedule */
+ WL_NAN_HAST_REASON_QOS = 5, /* NDL: QoS not satisfied */
+ WL_NAN_HAST_REASON_SVC_NDI_MISSING = 6, /* SD: NDI associated with svc is missing */
+ WL_NAN_HAST_REASON_PEER_SCB_NORESOURCE = 7 /* NDP: no more peer scb available */
};
typedef uint8 wl_nan_host_assist_reason_t;
uint8 pad[3];
} wl_nan_fw_cap_t;
+/* WL_NAN_XTLV_GEN_FW_CAP_V2 */
+typedef struct wl_nan_fw_cap_v2 {
+ uint32 flags1; /* nan sub-features compiled in firmware */
+ uint32 flags2; /* for more sub-features in future */
+ uint8 max_svc_publishes; /* max num of service publish */
+ uint8 max_svc_subscribes; /* max num of service subscribe */
+ uint8 max_lcl_sched_maps; /* max num of local schedule map */
+ uint8 max_lcl_ndc_entries; /* max num of local NDC entry */
+ uint8 max_lcl_ndi_interfaces; /* max num of local NDI interface */
+ uint8 max_peer_entries; /* max num of peer entry */
+ uint8 max_peer_sched_maps; /* max num of peer schedule maps */
+ uint8 max_ndp_sessions; /* max num of NDP session */
+ uint32 cipher_suites_supported_mask; /* bitmask for supported cipher suites */
+ uint32 reserved_uint32_1; /* reserved for future sub-features */
+ uint32 reserved_uint32_2; /* reserved for future sub-features */
+ uint32 reserved_uint32_3; /* reserved for future sub-features */
+ uint32 reserved_uint32_4; /* reserved for future sub-features */
+} wl_nan_fw_cap_v2_t;
+
/* nan cipher suite support mask bits */
#define WL_NAN_CIPHER_SUITE_SHARED_KEY_128_MASK 0x01
#define WL_NAN_CIPHER_SUITE_SHARED_KEY_256_MASK 0x02
/* NAN2.0 Ranging definitions */
/* result indication bit map */
+#define NAN_RANGE_INDICATION_NONE 0
#define NAN_RANGE_INDICATION_CONT (1<<0)
#define NAN_RANGE_INDICATION_INGRESS (1<<1)
#define NAN_RANGE_INDICATION_EGRESS (1<<2)
#define NAN_RNG_RESP_IOV_LEN 20
+#define NAN_RNG_TERM_FLAG_IMMEDIATE (1u << 0u) /* Do not wait for TXS */
+#define NAN_RNG_TERM_FLAG_SILIENT_TEARDOWN (1u << 1u) /* Do not TX rng_term */
+#define NAN_RNG_TERM_FLAG_EVENT_HOST (1u << 2u) /* Notify event to host */
+#define NAN_RNG_TERM_FLAG_OPT_TLVS (1u << 3u) /* opt tlvs present */
+
+typedef struct wl_nan_range_cancel_ext {
+ wl_nan_range_id range_id;
+ uint8 flags;
+ uint8 pad[2];
+} wl_nan_range_cancel_ext_t;
+
+#define NAN_RNG_CANCEL_IOV_FIXED_LEN 4u
+
#define NAN_RNG_MAX_IOV_LEN 255
typedef struct wl_nan_ev_rng_req_ind {
#define NAN_RNG_RPT_IND_SIZE 12
+/* number of consecutive ranging CRBs which may remain idle
+* before the ranging session is terminated.
+* Default value is 5. Set to zero to disable the
+* idle timeout functionality.
+*/
+typedef uint8 wl_nan_range_idle_count_t;
+
/* nan ranging termination reason codes */
-#define NAN_RNG_TERM_IDLE_TIMEOUT 1 /* no ftms from peer */
-#define NAN_RNG_TERM_PEER_REQ 2
-#define NAN_RNG_TERM_USER_REQ 3
-#define NAN_RNG_TERM_RNG_RESP_TIMEOUT 4
+#define NAN_RNG_TERM_UNSPECIFIED 0
+#define NAN_RNG_TERM_IDLE_TIMEOUT 1u /* no ftms from peer */
+#define NAN_RNG_TERM_PEER_REQ 2u
+#define NAN_RNG_TERM_USER_REQ 3u
+#define NAN_RNG_TERM_RNG_RESP_TIMEOUT 4u /* On FSM Timeout, waiting for Resp from peer */
+#define NAN_RNG_TERM_RNG_RESP_REJ 5u /* On range resp, reject from peer */
+#define NAN_RNG_TERM_RNG_TXS_FAIL 6u /* On range req/resp txs fail */
typedef struct wl_nan_ev_rng_term_ind {
struct ether_addr peer_m_addr;
#define WL_NAN_FASTDISC_CFG_SIZE 1024 /* arbitrary */
-#ifdef WL_NANHO
-/* ****************** NAN Host offload specific strucures ****************** */
-
-enum wl_nan_rx_mgmt_frm_type {
- WL_NAN_RX_MGMT_FRM_BCN = 0,
- WL_NAN_RX_MGMT_FRM_SDF = 1,
- WL_NAN_RX_MGMT_FRM_NAF = 2
-};
-typedef uint8 wl_nan_rx_mgmt_frm_type_t;
-
-/* WL_NAN_EVENT_RX_MGMT_FRM */
-typedef struct wl_nan_event_rx_mgmt_frm {
- uint8 frm_type; /* wl_nan_rx_mgmt_frm_type_t */
- uint8 pad;
- uint16 frm_len;
- uint8 frm[];
-} wl_nan_event_rx_mgmt_frm_t;
-
-#define WL_NAN_NANHO_UPDATE_MAX_SIZE 2048 /* arbitrary */
-
-enum wl_nan_peer_entry_action {
- WL_NAN_PEER_ENTRY_ACT_ADD = 0, /* add peer entry */
- WL_NAN_PEER_ENTRY_ACT_REMOVE = 1 /* remove peer entry */
-};
-typedef uint8 wl_nan_peer_entry_action_t;
-
-/* WL_NAN_XTLV_NANHO_PEER_ENTRY */
-typedef struct wl_nan_peer_entry
-{
- struct ether_addr nmi; /* nmi of peer device */
- uint8 action; /* wl_nan_peer_entry_action_t */
- uint8 pad;
-} wl_nan_peer_entry_t;
-
-enum wl_nan_dcaplist_action {
- WL_NAN_DCAPLIST_ACT_UPDATE = 0, /* update or add */
- WL_NAN_DCAPLIST_ACT_REMOVE = 1 /* remove (only for peer dcap cache entry) */
-};
-typedef uint8 wl_nan_dcaplist_action_t;
-
-/* WL_NAN_XTLV_NANHO_DCAPLIST */
-typedef struct wl_nan_dev_cap_list
-{
- struct ether_addr nmi; /* null for local device */
- uint8 action; /* wl_nan_dcaplist_action_t */
- /* optional fields for WL_NAN_DCAPLIST_ACT_UPDATE */
- uint8 num_maps;
- uint8 dcap[]; /* list of nan_dev_cap_t */
-} wl_nan_dev_cap_list_t;
-
-typedef struct wl_nan_dev_chan_sched {
- uint16 num_slots; /* number of slot in schedule */
- uint16 period; /* period of channel schedule (TU) */
- uint8 slot_dur; /* slot duration (TU) */
- uint8 map_id; /* map id (TBD) */
- uint8 pad[2];
- uint8 data[];
- /* chanspec_t chan_sched[num_slot] */
- /* uint8 slot_info[num_slot] */
-} wl_nan_dev_chan_sched_t;
-
-/* WL_NAN_XTLV_NANHO_DCSLIST */
-typedef struct wl_nan_dev_chan_sched_list {
- struct ether_addr nmi; /* null for local device */
- uint8 num_maps;
- uint8 pad;
- wl_nan_dev_chan_sched_t dcs[];
-} wl_nan_dev_chan_sched_list_t;
-
-/* WL_NAN_XTLV_NANHO_BLOB */
-typedef struct wl_nan_dev_blob {
- struct ether_addr nmi; /* null for local device */
- uint16 blob_len; /* blob len in blob[] buffer */
- uint8 blob_type;
- uint8 pad[3];
- uint8 blob[];
-} wl_nan_dev_blob_t;
-
-typedef struct wl_nan_peer_ndl_state {
- struct ether_addr nmi;
- uint8 ndl_state; /* nan_peer_ndl_state_t */
- uint8 pad;
-} wl_nan_peer_ndl_state_t;
-
-enum wl_nan_ndp_state_action {
- WL_NAN_NDP_STATE_ACT_ESTABLISHED = 0,
- WL_NAN_NDP_STATE_ACT_TERMINATED = 1
-};
-typedef uint8 wl_nan_ndp_state_action_t;
-
-/* WL_NAN_XTLV_NANHO_NDP_STATE */
-typedef struct wl_nan_ndp_state {
- struct ether_addr peer_nmi;
- struct ether_addr peer_ndi;
- struct ether_addr lcl_ndi;
- uint8 action; /* wl_nan_ndp_state_action_t */
- uint8 pad;
- /* TODO: secured NDP information */
-} wl_nan_ndp_state_t;
-
-/* *************** end of NAN Host offload specific strucures ************** */
-#endif /* WL_NANHO */
-
/* ********************* end of NAN section ******************************** */
/* endif WL_NAN */
WL_WSEC_INFO_BSS_ALGO = (WL_WSEC_INFO_BSS_BASE + 4),
WL_WSEC_INFO_BSS_KEY_LEN = (WL_WSEC_INFO_BSS_BASE + 5),
WL_WSEC_INFO_BSS_ALGOS = (WL_WSEC_INFO_BSS_BASE + 6),
+ WL_WSEC_INFO_BSS_WPA_AP_RESTRICT = (WL_WSEC_INFO_BSS_BASE + 7),
/* add per-BSS properties above */
WL_WSEC_INFO_MAX = 0xffff
} wl_wsec_info_type_t;
uint8 num_tlvs;
wl_wsec_info_tlv_t tlvs[1]; /**< tlv data follows */
} wl_wsec_info_t;
+#define AP_BLOCK_NONE 0x0000 /* default: No restriction */
+#define AP_ALLOW_WPA2 0x0001 /* allow WPA2PSK AP */
+#define AP_ALLOW_TSN 0x0002 /* WPA3 transition AP */
+#define AP_ALLOW_WPA3_ONLY 0x0004 /* WPA3 only AP */
+#define AP_ALLOW_MAX (AP_ALLOW_WPA2 | AP_ALLOW_TSN | \
+ AP_ALLOW_WPA3_ONLY)
+typedef struct {
+ uint32 wpa_ap_restrict; /* set WPA2 / WPA3 AP restriction policy */
+} wl_wsec_info_wpa_ap_restrict_t;
/*
* randmac definitions
GPAIO_DAC_IQ_DC_RDBK,
GPAIO_DAC_IQ_DC_RDBK_CLEAR,
GPAIO_AFE_LDO_FOR_DAC_DC,
- GPAIO_PA5G_VCAS_SOURCE
+ GPAIO_PA5G_VCAS_SOURCE,
+ GPAIO_BIQ2_DC_MEAS,
+ GPAIO_BIQ2_DC_CLEAR,
+ GPAIO_VBATMONITOR,
+ GPAIO_PA5G_VCAS_GMDRAIN
} wl_gpaio_option_t;
/** IO Var Operations - the Value of iov_op In wlc_ap_doiovar */
}
wl_band_t;
+#define WL_ROAM_STATS_VER_1 (1u)	/**< current version of wl_roam_stats structure */
+
+/** roam statistics counters */
+typedef struct {
+ uint16 version; /**< version of the structure */
+ uint16 length; /**< length of the entire structure */
+ uint32 initial_assoc_time;
+ uint32 prev_roam_time;
+ uint32 last_roam_event_type;
+ uint32 last_roam_event_status;
+ uint32 last_roam_event_reason;
+ uint16 roam_success_cnt;
+ uint16 roam_fail_cnt;
+ uint16 roam_attempt_cnt;
+ uint16 max_roam_target_cnt;
+ uint16 min_roam_target_cnt;
+ uint16 max_cached_ch_cnt;
+ uint16 min_cached_ch_cnt;
+ uint16 partial_roam_scan_cnt;
+ uint16 full_roam_scan_cnt;
+ uint16 most_roam_reason;
+ uint16 most_roam_reason_cnt;
+} wl_roam_stats_v1_t;
+
#define WL_WLC_VERSION_T_VERSION 1 /**< current version of wlc_version structure */
/** wlc interface version */
}
wl_wlc_version_t;
+#define WL_SCAN_VERSION_T_VERSION 1 /**< current version of scan_version structure */
+/** scan interface version */
+typedef struct wl_scan_version {
+ uint16 version; /**< version of the structure */
+ uint16 length; /**< length of the entire structure */
+
+ /* scan interface version numbers */
+ uint16 scan_ver_major; /**< scan interface major version number */
+} wl_scan_version_t;
+
/* Highest version of WLC_API_VERSION supported */
#define WLC_API_VERSION_MAJOR_MAX 8
#define WLC_API_VERSION_MINOR_MAX 0
WL_PROXD_SESSION_FLAG_TX_AUTO_BURST = 0x00000200, /**< Same as proxd flags above */
WL_PROXD_SESSION_FLAG_NAN_BSS = 0x00000400, /**< Use NAN BSS, if applicable */
WL_PROXD_SESSION_FLAG_TS1 = 0x00000800, /**< e.g. FTM1 - ASAP-capable */
+ WL_PROXD_SESSION_FLAG_RANDMAC = 0x00001000, /**< use random mac */
WL_PROXD_SESSION_FLAG_REPORT_FAILURE = 0x00002000, /**< report failure to target */
WL_PROXD_SESSION_FLAG_INITIATOR_RPT = 0x00004000, /**< report distance to target */
WL_PROXD_SESSION_FLAG_NOCHANSWT = 0x00008000,
typedef uint16 wl_proxd_session_id_t;
+/* Use WL_PROXD_E_* errorcodes from this file if BCMUTILS_ERR_CODES not defined */
+#ifndef BCMUTILS_ERR_CODES
+
/** status - TBD BCME_ vs proxd status - range reserved for BCME_ */
enum {
WL_PROXD_E_LAST = -1056,
};
typedef int32 wl_proxd_status_t;
+#endif /* BCMUTILS_ERR_CODES */
+
/* proxd errors from phy */
#define PROXD_TOF_INIT_ERR_BITS 16
typedef uint32 wl_proxd_debug_mask_t;
/** tlv IDs - data length 4 bytes unless overridden by type, alignment 32 bits */
-enum {
+typedef enum {
WL_PROXD_TLV_ID_NONE = 0,
WL_PROXD_TLV_ID_METHOD = 1,
WL_PROXD_TLV_ID_FLAGS = 2,
WL_PROXD_TLV_ID_COLLECT_CHAN_DATA = 1030, /* wl_proxd_collect_data_t */
WL_PROXD_TLV_ID_MF_STATS_DATA = 1031, /* mf_stats_buffer */
+ WL_PROXD_TLV_ID_COLLECT_INLINE_HEADER = 1032,
+ WL_PROXD_TLV_ID_COLLECT_INLINE_FRAME_INFO = 1033,
+ WL_PROXD_TLV_ID_COLLECT_INLINE_FRAME_DATA = 1034,
+ WL_PROXD_TLV_ID_COLLECT_INLINE_RESULTS = 1035,
+
WL_PROXD_TLV_ID_MAX
-};
+} wl_proxd_tlv_types_t;
+
+#define TOF_COLLECT_INLINE_HEADER_INFO_VER_1 1
+
+typedef struct wl_proxd_collect_inline_header_info_v1
+{
+ uint16 version;
+ uint16 pad1;
+ uint32 ratespec; /* override */
+ chanspec_t chanspec;
+ uint16 num_ftm;
+ struct ether_addr peer_mac;
+ struct ether_addr cur_ether_addr; /* source address for Tx */
+} wl_proxd_collect_inline_header_info_v1_t;
+
+#define TOF_COLLECT_INLINE_RESULTS_VER_1 1
+typedef struct wl_proxd_collect_inline_results_info_v1
+{
+ uint16 version;
+ uint16 pad1;
+ uint32 meanrtt;
+ uint32 distance;
+ uint16 num_rtt;
+ uint16 pad2;
+ int32 status;
+ uint32 ratespec;
+} wl_proxd_collect_inline_results_info_v1_t;
+
+#define TOF_COLLECT_INLINE_FRAME_INFO_VER_1 1
+typedef struct wl_proxd_collect_inline_frame_info_v1
+{
+ uint16 version;
+ uint16 pad1;
+ int32 gd;
+ uint32 T[4];
+ uint32 prev_t1;
+ uint32 prev_t4;
+ int32 hadj;
+ int8 rssi;
+ uint8 pad[3];
+} wl_proxd_collect_inline_frame_info_v1_t;
+
+#define TOF_COLLECT_INLINE_FRAME_INFO_VER_2 2
+typedef struct wl_proxd_collect_inline_frame_info_v2
+{
+ uint16 version;
+ uint16 pad1;
+ int32 gd;
+ uint32 T[4];
+ int32 hadj;
+ int8 rssi;
+ uint8 pad[3];
+} wl_proxd_collect_inline_frame_info_v2_t;
typedef struct wl_proxd_tlv {
uint16 id;
wl_proxd_event_type_t type;
wl_proxd_method_t method;
wl_proxd_session_id_t sid;
- uint8 pad[2];
+	uint8				pad[2];	/* This field is used for fragmentation purposes */
wl_proxd_tlv_t tlvs[1]; /**< variable */
} wl_proxd_event_t;
#define WL_ESTM_ROAM_DELTA_DEFAULT 10
typedef struct wl_roam_prof_v3 {
- int8 roam_flags; /**< bit flags */
+ uint8 roam_flags; /**< bit flags */
int8 roam_trigger; /**< RSSI trigger level per profile/RSSI bracket */
int8 rssi_lower;
int8 roam_delta;
typedef enum wl_interface_type {
WL_INTERFACE_TYPE_STA = 0,
WL_INTERFACE_TYPE_AP = 1,
- WL_INTERFACE_TYPE_AWDL = 2,
WL_INTERFACE_TYPE_NAN = 3,
WL_INTERFACE_TYPE_P2P_GO = 4,
WL_INTERFACE_TYPE_P2P_GC = 5,
#define WL_INTERFACE_INFO_VER_1 1
#define WL_INTERFACE_INFO_VER_2 2
+typedef struct wl_interface_info {
+ uint16 ver; /* version of this struct */
+ struct ether_addr mac_addr; /* MAC address of the interface */
+ char ifname[BCM_MSG_IFNAME_MAX]; /* name of interface */
+ uint8 bsscfgidx; /* source bsscfg index */
+} wl_interface_info_t;
+
typedef struct wl_interface_info_v1 {
uint16 ver; /**< version of this struct */
struct ether_addr mac_addr; /**< MAC address of the interface */
#define CAP_2G_DEPON_5G (0x10) /* 2G pwr caps depend on other slice 5G subband */
#define CAP_SISO_MIMO (0x20) /* Siso/Mimo Separate Power Caps */
#define CAP_ANT_TX (0x40) /* Separate Power Caps based on cell ant tx value */
+#define CAP_LTE_PQBIT (0x100u) /* QPBit is enabled */
#define CAP_ONOFF_BODY_CCK_OFDM (CAP_ONOFF_BODY | CAP_CCK_OFDM)
#define CAP_TXPWR_ALL (CAP_ONOFF_BODY|CAP_CCK_OFDM|CAP_LTE_CELL|\
CAP_SISO_MIMO|CAP_HEAD_BODY|CAP_ANT_TX)
int8 pwrs[][TXPWRCAP_MAX_NUM_SUBGRPS][TXPWRCAP_MAX_NUM_ANTENNAS_V3];
} wl_txpwrcap_v2_t;
-#define TXPWRCAP_DUMP_VERSION_4 4
-#define TXPWRCAP_DUMP_VERSION_5 5
+#define TXPWRCAP_DUMP_VERSION_4 4u
+#define TXPWRCAP_DUMP_VERSION_5 5u
+#define TXPWRCAP_DUMP_VERSION_6 6u
typedef struct wl_txpwrcap_dump_v4 {
uint8 version;
int8 pwrcap[]; /* variable size power caps (wl_txpwrcap_v2_t) */
} wl_txpwrcap_dump_v5_t;
+typedef struct wl_txpwrcap_dump_v6 {
+ uint8 version;
+ uint8 num_pwrcap;
+ uint8 current_country[2];
+ uint8 current_channel;
+ uint8 high_cap_state_enabled;
+ uint8 reserved[2];
+ uint8 download_present;
+ uint8 num_ants; /* number antenna slice */
+ uint8 num_cc_groups; /* number cc groups */
+ uint8 current_country_cc_group_info_index;
+ uint8 ant_tx; /* current value of ant_tx */
+ uint8 cell_status; /* current value of cell status */
+ uint16 capability[TXHDR_MAX_SECTION]; /* capabilities */
+ int8 pwrcap[]; /* variable size power caps (wl_txpwrcap_v2_t) */
+} wl_txpwrcap_dump_v6_t;
+
+#define TXCAPINFO_VERSION_1 1
+typedef struct wl_txpwrcap_ccgrp_info {
+ uint8 num_cc;
+ char cc_list[1][2]; /* 2 letters for each country. At least one country */
+} wl_txpwrcap_ccgrp_info_t;
+
+typedef struct {
+ uint16 version;
+ uint16 length; /* length in bytes */
+ uint8 num_ccgrp;
+ /* followed by one or more wl_txpwrcap_ccgrp_info_t */
+ wl_txpwrcap_ccgrp_info_t ccgrp_data[1];
+} wl_txpwrcap_info_t;
+
typedef struct wl_txpwrcap_tbl {
uint8 num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES];
/* Stores values for valid antennas */
uint32 he_txtbppdu;
} wl_compact_he_cnt_wlc_v1_t;
+#define WL_PERIODIC_COMPACT_HE_CNTRS_VER_2 (2)
+typedef struct {
+ uint16 version;
+ uint16 len;
+ uint32 he_rxtrig_myaid;
+ uint32 he_rxtrig_rand;
+ uint32 he_colormiss_cnt;
+ uint32 he_txmampdu;
+ uint32 he_txmtid_back;
+ uint32 he_rxmtid_back;
+ uint32 he_rxmsta_back;
+ uint32 he_txfrag;
+ uint32 he_rxdefrag;
+ uint32 he_txtrig;
+ uint32 he_rxtrig_basic;
+ uint32 he_rxtrig_murts;
+ uint32 he_rxtrig_bsrp;
+ uint32 he_rxhemuppdu_cnt;
+ uint32 he_physu_rx;
+ uint32 he_phyru_rx;
+ uint32 he_txtbppdu;
+ uint32 he_null_tbppdu;
+ uint32 he_rxhesuppdu_cnt;
+ uint32 he_rxhesureppdu_cnt;
+ uint32 he_null_zero_agg;
+ uint32 he_null_bsrp_rsp;
+ uint32 he_null_fifo_empty;
+} wl_compact_he_cnt_wlc_v2_t;
+
+/* for future versions of this data structure, can consider wl_txbf_ecounters_t
+ * which contains the full list of txbf dump counters
+ */
typedef struct {
uint16 version;
uint16 coreup;
uint32 txdeauthivalclass;
} wl_event_based_statistics_v1_t;
+#define WL_EVENT_STATISTICS_VER_2 (2)
+/* Event based statistics ecounters */
+typedef struct {
+ uint16 version;
+ uint16 pad;
+ struct ether_addr BSSID; /* BSSID of the BSS */
+ uint32 txdeauthivalclass;
+ /* addition for v2 */
+ int32 timestamp; /* last deauth time */
+ struct ether_addr last_deauth; /* wrong deauth MAC */
+ uint16 misdeauth; /* wrong deauth count every 1sec */
+ int16 cur_rssi; /* current bss rssi */
+ int16 deauth_rssi; /* deauth pkt rssi */
+} wl_event_based_statistics_v2_t;
+
+#define WL_EVENT_STATISTICS_VER_3 (3)
+/* Event based statistics ecounters */
+typedef struct {
+ uint16 version;
+ uint16 pad;
+ struct ether_addr BSSID; /* BSSID of the BSS */
+ uint16 PAD;
+ uint32 txdeauthivalclass;
+ /* addition for v2 */
+ int32 timestamp; /* last deauth time */
+ struct ether_addr last_deauth; /* wrong deauth MAC */
+ uint16 misdeauth; /* wrong deauth count every 1sec */
+ int16 cur_rssi; /* current bss rssi */
+ int16 deauth_rssi; /* deauth pkt rssi */
+ /* addition for v3 (roam statistics) */
+ uint32 initial_assoc_time;
+ uint32 prev_roam_time;
+ uint32 last_roam_event_type;
+ uint32 last_roam_event_status;
+ uint32 last_roam_event_reason;
+ uint16 roam_success_cnt;
+ uint16 roam_fail_cnt;
+ uint16 roam_attempt_cnt;
+ uint16 max_roam_target_cnt;
+ uint16 min_roam_target_cnt;
+ uint16 max_cached_ch_cnt;
+ uint16 min_cached_ch_cnt;
+ uint16 partial_roam_scan_cnt;
+ uint16 full_roam_scan_cnt;
+ uint16 most_roam_reason;
+ uint16 most_roam_reason_cnt;
+} wl_event_based_statistics_v3_t;
+
+#define WL_EVENT_STATISTICS_VER_4 (4u)
+/* Event based statistics ecounters */
+typedef struct {
+ uint16 version;
+ uint16 pad;
+ struct ether_addr BSSID; /* BSSID of the BSS */
+ uint16 PAD;
+ uint32 txdeauthivalclass;
+ /* addition for v2 */
+ int32 timestamp; /* last deauth time */
+ struct ether_addr last_deauth; /* wrong deauth MAC */
+ uint16 misdeauth; /* wrong deauth count every 1sec */
+ int16 cur_rssi; /* current bss rssi */
+ int16 deauth_rssi; /* deauth pkt rssi */
+} wl_event_based_statistics_v4_t;
+
/* ##### Ecounters v2 section ##### */
#define ECOUNTERS_VERSION_2 2
} mu_group_t;
typedef struct mupkteng_sta {
- struct ether_addr ea;
+ struct ether_addr ea;
uint8 PAD[2];
- int32 nrxchain;
- int32 idx;
+ int32 nrxchain;
+ int32 idx;
} mupkteng_sta_t;
typedef struct mupkteng_client {
- int32 rspec;
- int32 idx;
- int32 flen;
- int32 nframes;
+ int32 rspec;
+ int32 idx;
+ int32 flen;
+ int32 nframes;
} mupkteng_client_t;
typedef struct mupkteng_tx {
- mupkteng_client_t client[8];
- int32 nclients;
- int32 ntx;
+ mupkteng_client_t client[8];
+ int32 nclients;
+ int32 ntx;
} mupkteng_tx_t;
/*
/* ifdef WL11ULB */
/* ULB Mode configured via "ulb_mode" IOVAR */
enum {
- ULB_MODE_DISABLED = 0,
- ULB_MODE_STD_ALONE_MODE = 1, /* Standalone ULB Mode */
- ULB_MODE_DYN_MODE = 2, /* Dynamic ULB Mode */
+ ULB_MODE_DISABLED = 0,
+ ULB_MODE_STD_ALONE_MODE = 1, /* Standalone ULB Mode */
+ ULB_MODE_DYN_MODE = 2, /* Dynamic ULB Mode */
/* Add all other enums before this */
- MAX_SUPP_ULB_MODES
+ MAX_SUPP_ULB_MODES
};
/* ULB BWs configured via "ulb_bw" IOVAR during Standalone Mode Only.
* 'ULB Operations' Attribute or 'ULB Mode Switch' Attribute)
*/
typedef enum {
- ULB_BW_DISABLED = 0,
- ULB_BW_10MHZ = 1, /* Standalone ULB BW in 10 MHz BW */
- ULB_BW_5MHZ = 2, /* Standalone ULB BW in 5 MHz BW */
- ULB_BW_2P5MHZ = 3, /* Standalone ULB BW in 2.5 MHz BW */
+ ULB_BW_DISABLED = 0,
+ ULB_BW_10MHZ = 1, /* Standalone ULB BW in 10 MHz BW */
+ ULB_BW_5MHZ = 2, /* Standalone ULB BW in 5 MHz BW */
+ ULB_BW_2P5MHZ = 3, /* Standalone ULB BW in 2.5 MHz BW */
/* Add all other enums before this */
- MAX_SUPP_ULB_BW
+ MAX_SUPP_ULB_BW
} ulb_bw_type_t;
/* endif WL11ULB */
#define WL_MESH_IOCTL_VERSION 1
#define MESH_IOC_BUFSZ 512 /* sufficient ioc buff size for mesh */
+
/* container for mesh iovtls & events */
typedef struct wl_mesh_ioc {
uint16 version; /* interface command or event version */
/* values for IOV_MFP arg */
enum {
- WL_MFP_NONE = 0,
- WL_MFP_CAPABLE,
- WL_MFP_REQUIRED
+ WL_MFP_NONE = 0,
+ WL_MFP_CAPABLE,
+ WL_MFP_REQUIRED
};
typedef enum {
CHANSW_IOVAR = 7, /* channel switch due to IOVAR */
CHANSW_CSA_DFS = 8, /* channel switch due to chan switch announcement from AP */
CHANSW_APCS = 9, /* Channel switch from AP channel select module */
- CHANSW_AWDL = 10, /* channel switch due to AWDL */
CHANSW_FBT = 11, /* Channel switch from FBT module for action frame response */
CHANSW_UPDBW = 12, /* channel switch at update bandwidth */
CHANSW_ULB = 13, /* channel switch at ULB */
WL_SLOTTED_BSS_CMD_CHANSEQ = 1,
WL_SLOTTED_BSS_CMD_CS_BMP = 2 /* critical slots bitmap */
};
+
typedef uint16 chan_seq_type_t;
enum chan_seq_type {
- CHAN_SEQ_TYPE_AWDL = 1,
- CHAN_SEQ_TYPE_SLICE = 2,
- CHAN_SEQ_TYPE_NAN = 3
+ CHAN_SEQ_TYPE_AWDL = 1,
+ CHAN_SEQ_TYPE_SLICE = 2,
+ CHAN_SEQ_TYPE_NAN = 3, /* NAN avail XTLV */
+ CHAN_SEQ_TYPE_NANHO = 4 /* NANHO channel schedule XTLV */
};
+
typedef uint8 sched_flag_t;
enum sched_flag {
NO_SDB_SCHED = 0x1,
#define SLOT_BSS_SLICE_TYPE_DUR_MAX_RANGE 2u
#define SLOTTED_BSS_AGGR_EN (1 << 0) /* Bitmap of mode */
#define SLOTTED_BSS_AGGR_LIMIT_DUR (1 << 1) /* Jira 49554 */
+#define SLOTTED_BSS_HE_1024_QAM_SUPPORT (1 << 2) /* MCS10-11 Support */
#define WL_SLICE_CHAN_SEQ_FIXED_LEN OFFSETOF(slice_chan_seq_t, chanspecs)
/* Definitions for slotted_bss stats */
#define WLC_UTRACE_LEN (1024u * 4u) // default length
#define WLC_UTRACE_LEN_AUX (1024u * 3u) // reduced length to fit smaller AUX BM
+#define WLC_UTRACE_LEN_SC (1024u * 3u) // reduced length to fit smaller Scan core BM
+
#define WLC_UTRACE_READ_END 0
#define WLC_UTRACE_MORE_DATA 1
+
typedef struct wl_utrace_capture_args_v1 {
uint32 length;
uint32 flag;
WL_HC_RX_XTLV_ID_VAL_STALL_THRESHOLD = 3, /* stall_threshold */
WL_HC_RX_XTLV_ID_VAL_STALL_SAMPLE_SIZE = 4, /* stall_sample_size */
WL_HC_RX_XTLV_ID_VAL_STALL_FORCE = 5, /* stall test trigger */
+ WL_HC_RX_XTLV_ID_VAL_STALL_UC_DECRYPT_FAIL = 6, /* trigger uc decrypt failures */
+ WL_HC_RX_XTLV_ID_VAL_STALL_BCMC_DECRYPT_FAIL = 7, /* trigger bcmc decrypt failures */
};
/* Health Check: Datapath SCAN IDs */
/* IDs of Health Check report structures for sub types of health checks within WL */
typedef enum wl_hc_dd_type {
- WL_HC_DD_PCIE = 0, /* PCIe */
- WL_HC_DD_RX_DMA_STALL = 1, /* RX DMA stall check */
- WL_HC_DD_RX_STALL = 2, /* RX stall check */
- WL_HC_DD_TX_STALL = 3, /* TX stall check */
- WL_HC_DD_SCAN_STALL = 4, /* SCAN stall check */
- WL_HC_DD_PHY = 5, /* PHY health check */
- WL_HC_DD_REINIT = 6, /* Reinit due to other reasons */
- WL_HC_DD_TXQ_STALL = 7, /* TXQ stall */
+ WL_HC_DD_PCIE = 0, /* PCIe */
+ WL_HC_DD_RX_DMA_STALL = 1, /* RX DMA stall check */
+ WL_HC_DD_RX_STALL = 2, /* RX stall check */
+ WL_HC_DD_TX_STALL = 3, /* TX stall check */
+ WL_HC_DD_SCAN_STALL = 4, /* SCAN stall check */
+ WL_HC_DD_PHY = 5, /* PHY health check */
+ WL_HC_DD_REINIT = 6, /* Reinit due to other reasons */
+ WL_HC_DD_TXQ_STALL = 7, /* TXQ stall */
+ WL_HC_DD_RX_STALL_V2 = 8, /* RX stall check v2 */
WL_HC_DD_MAX
} wl_hc_dd_type_t;
+/* RX stall reason codes sent with wl_rx_hc_info_v2_t */
+typedef enum bcm_rx_hc_stall_reason {
+ BCM_RX_HC_RESERVED = 0,
+ BCM_RX_HC_UNSPECIFIED = 1, /* All other. Catch all */
+ BCM_RX_HC_UNICAST_DECRYPT_FAIL = 2, /* Unicast decrypt fail */
+ BCM_RX_HC_BCMC_DECRYPT_FAIL = 3, /* BCMC decrypt fail */
+ BCM_RX_HC_UNICAST_REPLAY = 4, /* Unicast replay */
+ BCM_RX_HC_BCMC_REPLAY = 5, /* BCMC replay */
+ BCM_RX_HC_AMPDU_DUP = 6, /* AMPDU DUP */
+ BCM_RX_HC_MAX
+} bcm_rx_hc_stall_reason_t;
+
/*
* Health Check report structures for sub types of health checks within WL
*/
/* Health Check report structure for Rx dropped packet failure check */
typedef struct {
- uint16 type;
+ uint16 type; /* WL_HC_RX_DD_STALL */
uint16 length;
uint32 bsscfg_idx;
uint32 rx_hc_pkts;
uint32 rx_hc_alert_th;
} wl_rx_hc_info_t;
+/* Health Check report structure for Rx dropped packet failure check */
+typedef struct {
+ uint16 type; /* WL_HC_RX_DD_STALL_V2 */
+ uint16 length;
+ uint8 if_idx; /* interface index on which issue is reported */
+ uint8 ac; /* access category on which this problem is seen */
+ uint8 pad[2]; /* Reserved */
+ uint32 rx_hc_pkts;
+ uint32 rx_hc_dropped_all;
+ uint32 rx_hc_alert_th;
+ uint32 reason; /* refer to bcm_rx_hc_stall_reason_t above */
+ struct ether_addr peer_ea;
+} wl_rx_hc_info_v2_t;
+
/* HE top level command IDs */
enum {
- WL_HE_CMD_ENAB = 0,
- WL_HE_CMD_FEATURES = 1,
- WL_HE_CMD_TWT_SETUP = 2,
- WL_HE_CMD_TWT_TEARDOWN = 3,
- WL_HE_CMD_TWT_INFO = 4,
- WL_HE_CMD_BSSCOLOR = 5,
- WL_HE_CMD_PARTIAL_BSSCOLOR = 6,
- WL_HE_CMD_CAP = 7,
- WL_HE_CMD_STAID = 8,
- WL_HE_CMD_RTSDURTHRESH = 10,
- WL_HE_CMD_PEDURATION = 11,
- WL_HE_CMD_TESTBED_MODE = 12,
+ WL_HE_CMD_ENAB = 0u,
+ WL_HE_CMD_FEATURES = 1u,
+ WL_HE_CMD_TWT_SETUP = 2u,
+ WL_HE_CMD_TWT_TEARDOWN = 3u,
+ WL_HE_CMD_TWT_INFO = 4u,
+ WL_HE_CMD_BSSCOLOR = 5u,
+ WL_HE_CMD_PARTIAL_BSSCOLOR = 6u,
+ WL_HE_CMD_CAP = 7u,
+ WL_HE_CMD_STAID = 8u,
+ WL_HE_CMD_MUEDCA = 9u,
+ WL_HE_CMD_RTSDURTHRESH = 10u,
+ WL_HE_CMD_PEDURATION = 11u,
+ WL_HE_CMD_TESTBED_MODE = 12u,
+ WL_HE_CMD_OMI_CONFIG = 13u,
+ WL_HE_CMD_OMI_STATUS = 14u,
+ WL_HE_CMD_OMI_ULMU_THROTTLE = 15u,
+ WL_HE_CMD_ULMU_DISABLE_POLICY = 16u,
+ WL_HE_CMD_ULMU_DISABLE_STATS = 17u,
+ WL_HE_CMD_OMI_DLMU_RSD_RCM_MPF_MAP = 18u,
+ WL_HE_CMD_SR_PROHIBIT = 19u,
WL_HE_CMD_LAST
};
+enum {
+ WL_HE_MUEDCA_IE = 0,
+ WL_HE_MUEDCA_SHM = 1,
+ WL_HE_MUEDCA_LAST
+};
+
+#ifdef WL11AX
+
+/* struct for dumping MU EDCA IE/SHM parameters */
+typedef struct wl_he_muedca_ie_v1 {
+ uint16 version; /* structure version */
+ uint16 length; /* data length (starting after this field) */
+ uint8 mu_qos_info;
+ he_mu_ac_param_record_t param_ac[AC_COUNT];
+} wl_he_muedca_ie_v1_t;
+
+typedef wl_he_muedca_ie_v1_t wl_he_muedca_ie_t;
+
+#define WL_HE_MUEDCA_VER_1 1
+
+#endif /* WL11AX */
+
/* TWT top level command IDs */
enum {
- WL_TWT_CMD_ENAB = 0,
- WL_TWT_CMD_SETUP = 1,
- WL_TWT_CMD_TEARDOWN = 2,
- WL_TWT_CMD_INFO = 3,
- WL_TWT_CMD_AUTOSCHED = 4,
- WL_TWT_CMD_EARLY_TERM_TIME = 6,
+ WL_TWT_CMD_ENAB = 0,
+ WL_TWT_CMD_SETUP = 1,
+ WL_TWT_CMD_TEARDOWN = 2,
+ WL_TWT_CMD_INFO = 3,
+ WL_TWT_CMD_AUTOSCHED = 4,
+ WL_TWT_CMD_STATS = 5,
+ WL_TWT_CMD_EARLY_TERM_TIME = 6,
+ WL_TWT_CMD_RESP_CONFIG = 7,
+ WL_TWT_CMD_SPPS_ENAB = 8,
WL_TWT_CMD_LAST
};
/* HEB top level command IDs */
enum {
- WL_HEB_CMD_ENAB = 0,
- WL_HEB_CMD_NUM_HEB = 1,
- WL_HEB_CMD_COUNTERS = 2,
- WL_HEB_CMD_CLEAR_COUNTERS = 3,
- WL_HEB_CMD_CONFIG = 4,
- WL_HEB_CMD_STATUS = 5,
+ WL_HEB_CMD_ENAB = 0,
+ WL_HEB_CMD_NUM_HEB = 1,
+ WL_HEB_CMD_COUNTERS = 2,
+ WL_HEB_CMD_CLEAR_COUNTERS = 3,
+ WL_HEB_CMD_CONFIG = 4,
+ WL_HEB_CMD_STATUS = 5,
WL_HEB_CMD_LAST
};
wl_heb_reg_status_v1_t heb_status[1];
} wl_heb_status_v1_t;
+/* HWA */
+#define WL_HWA_VER_1 1
+
+/* HWA top level command IDs */
+typedef enum wl_hwa_cmd_type {
+ WL_HWA_CMD_ENAB = 0,
+ WL_HWA_CMD_CAPS = 1,
+ WL_HWA_CMD_COUNTERS = 2,
+ WL_HWA_CMD_CLRCNTS = 3,
+ WL_HWA_CMD_REGDUMP = 4,
+ WL_HWA_CMD_INDUCE_ERR = 5,
+ WL_HWA_CMD_LAST
+} wl_hwa_cmd_type_t;
+
+typedef struct wl_hwa_cnts_info_v1 {
+ uint16 cnt_rxs_filter; /* #filters added */
+ uint16 cnt_rxs_chainable; /* #rxchainable matched */
+} wl_hwa_cnts_info_v1_t;
+
+/* HWA dump info structures */
+typedef struct wl_hwa_hwcaps_info_v1 {
+	uint16 up;	/* is hwa init'd/deinit'd */
+ uint16 corerev; /* hwa core revision */
+ uint32 submodules_mask; /* mask for hwa submodules that are enabled */
+} wl_hwa_hwcaps_info_v1_t;
+
+typedef struct wl_hwa_cnts_v1 {
+ /* structure control */
+ uint16 version; /* structure version */
+ uint16 length; /* data length (starting after this field) */
+ wl_hwa_cnts_info_v1_t hwa_cnts_info[]; /* variable length array with hwa counters */
+} wl_hwa_cnts_v1_t;
+
+/* All submodules, order is important and define order of initialization. */
+/* Not use enumeration here because these defines are also used in macro */
+#define HWA_SUBMODULES_COMMON 0 /**< Common */
+#define HWA_SUBMODULES_TXPOST 1u /**< TxPost 3a */
+#define HWA_SUBMODULES_RXPOSTFILL 2u /**< RxPost and Fill 1a/1b */
+#define HWA_SUBMODULES_TXDMA 3u /**< TxDMA 3b */
+#define HWA_SUBMODULES_TXS 4u /**< TxStatus 4a */
+#define HWA_SUBMODULES_BUFMGR 5u /**< Buffer Manager, RX and TX. Do this last */
+#define HWA_SUBMODULES_CPL 6u /**< Completion 2b/4b */
+#define HWA_SUBMODULES_RXS 7u /**< RxStatus 2a */
+#define HWA_SUBMODULES_NUM 8u /**< number of submodules */
+
+#define HWA_SUBMODULES_ALL 0xFF /* Bitmaps for all submodules */
+#ifdef HWA
+#define HWA_SUBMODULE_MASK(submodule) (1u << (submodule))
+#else
+#define HWA_SUBMODULE_MASK(submodule) (0)
+#endif /* HWA */
+/*
+ * NOTES:
+ * wl_twt_sdesc_t is used to support both broadcast TWT and individual TWT.
+ * Value in bit[0:2] in 'flow_id' field is interpreted differently:
+ * - flow id for individual TWT (when WL_TWT_FLOW_FLAG_BROADCAST bit is NOT set
+ * in 'flow_flags' field)
+ * - flow id as defined in Table 8-248l1 for broadcast TWT (when
+ * WL_TWT_FLOW_FLAG_BROADCAST bit is set)
+ * In latter case other bits could be used to differentiate different flows
+ * in order to support multiple broadcast TWTs with the same flow id.
+ */
+
/* TWT Setup descriptor */
-typedef struct {
+typedef struct wl_twt_sdesc {
/* Setup Command. */
- uint8 setup_cmd; /* See TWT_SETUP_CMD_XXXX in 802.11ah.h,
- * valid when bcast_twt is FALSE.
- */
- /* Flow attributes */
- uint8 flow_flags; /* See WL_TWT_FLOW_FLAG_XXXX below */
- uint8 flow_id; /* must be between 0 and 7 */
- /* Target Wake Time */
+ uint8 setup_cmd; /* See TWT_SETUP_CMD_XXXX in 802.11ah.h */
+ uint8 flow_flags; /* Flow attributes. See WL_TWT_FLOW_FLAG_XXXX below */
+ uint8 flow_id; /* must be between 0 and 7. Set 0xFF for auto assignment */
+ uint8 bid; /* must be between 0 and 31. Set 0xFF for auto assignment */
+ uint8 channel; /* Twt channel - Not used for now */
+ uint8 negotiation_type; /* Negotiation Type: See macros TWT_NEGO_TYPE_X */
+ uint8 frame_recomm; /* frame recommendation for broadcast TWTs - Not used for now */
uint8 wake_type; /* See WL_TWT_TIME_TYPE_XXXX below */
uint32 wake_time_h; /* target wake time - BSS TSF (us) */
uint32 wake_time_l;
uint32 wake_dur; /* target wake duration in unit of microseconds */
uint32 wake_int; /* target wake interval */
+ uint32 btwt_persistence; /* Broadcast TWT Persistence */
+ uint32 wake_int_max; /* max wake interval(uS) for TWT */
+ uint8 duty_cycle_min; /* min duty cycle for TWT(Percentage) */
+ uint8 pad;
+ /* deprecated - to be removed */
+ uint16 li;
- uint16 bid; /* must be between 0 and 255. Set 0xFFFF for auto assignment */
- uint16 li; /* Listen interval: Units in number of beacon intervals */
- uint8 channel; /* twt channel */
- uint8 pad[3];
} wl_twt_sdesc_t;
/* Flow flags */
-#define WL_TWT_FLOW_FLAG_BROADCAST (1 << 0)
-#define WL_TWT_FLOW_FLAG_IMPLICIT (1 << 1)
-#define WL_TWT_FLOW_FLAG_UNANNOUNCED (1 << 2)
-#define WL_TWT_FLOW_FLAG_TRIGGER (1 << 3)
-#define WL_TWT_FLOW_FLAG_WAKE_TBTT_NEGO (1 << 4)
-#define WL_TWT_FLOW_FLAG_REQUEST (1 << 5)
+#define WL_TWT_FLOW_FLAG_UNANNOUNCED (1u << 0u)
+#define WL_TWT_FLOW_FLAG_TRIGGER (1u << 1u)
+#define WL_TWT_FLOW_FLAG_REQUEST (1u << 2u)
+#define WL_TWT_FLOW_FLAG_PROTECT (1u << 3u)
+#define WL_TWT_FLOW_FLAG_RESPONDER_PM (1u << 4u)
+#define WL_TWT_FLOW_FLAG_UNSOLICITED (1u << 5u)
+
+/* Deprecated - To be removed */
+#define WL_TWT_FLOW_FLAG_BROADCAST (1u << 5u)
+#define WL_TWT_FLOW_FLAG_WAKE_TBTT_NEGO (1u << 6u)
+#define WL_TWT_FLOW_FLAG_IMPLICIT (1u << 7u)
/* Flow id */
-#define WL_TWT_FLOW_ID_FID 0x07 /* flow id */
-#define WL_TWT_FLOW_ID_GID_MASK 0x70 /* group id - broadcast TWT only */
-#define WL_TWT_FLOW_ID_GID_SHIFT 4
+#define WL_TWT_FLOW_ID_FID 0x07u /* flow id */
+#define WL_TWT_FLOW_ID_GID_MASK 0x70u /* group id - broadcast TWT only */
+#define WL_TWT_FLOW_ID_GID_SHIFT 4u
-#define WL_TWT_INV_BCAST_ID 0xFFFFu
+#define WL_TWT_INV_BCAST_ID 0xFFu
#define WL_TWT_INV_FLOW_ID 0xFFu
-#define WL_TWT_DIALOG_TOKEN_AUTO 0xFFFF
+/* auto flow_id */
+#define WL_TWT_SETUP_FLOW_ID_AUTO 0xFFu
+/* auto broadcast ID */
+#define WL_TWT_SETUP_BCAST_ID_AUTO 0xFFu
+/* Infinite persistence for broadcast schedule */
+#define WL_TWT_INFINITE_BTWT_PERSIST 0xFFFFFFFFu
+
+/* should be larger than what chip supports */
+#define WL_TWT_STATS_MAX_BTWT 4u
+#define WL_TWT_STATS_MAX_ITWT 4u
/* Wake type */
/* TODO: not yet finalized */
-#define WL_TWT_TIME_TYPE_BSS 0 /* The time specified in wake_time_h/l is
+#define WL_TWT_TIME_TYPE_BSS 0u /* The time specified in wake_time_h/l is
* the BSS TSF time.
*/
-#define WL_TWT_TIME_TYPE_OFFSET 1 /* The time specified in wake_time_h/l is an offset
+#define WL_TWT_TIME_TYPE_OFFSET 1u /* The time specified in wake_time_h/l is an offset
* of the TSF time when the iovar is processed.
*/
+#define WL_TWT_TIME_TYPE_AUTO 2u /* The target wake time is chosen internally by the FW */
-#define WL_TWT_SETUP_VER 0
+#define WL_TWT_SETUP_VER 0u
/* HE TWT Setup command */
-typedef struct {
+typedef struct wl_twt_setup {
/* structure control */
uint16 version; /* structure version */
uint16 length; /* data length (starting after this field) */
/* peer address */
struct ether_addr peer; /* leave it all 0s' for AP */
- /* session id */
- uint16 dialog; /* an arbitrary number to identify the seesion */
+ uint8 pad[2];
/* setup descriptor */
wl_twt_sdesc_t desc;
+
+ /* deprecated - to be removed */
+ uint16 dialog;
+ uint8 pad1[2];
} wl_twt_setup_t;
-#define WL_TWT_TEARDOWN_VER 0
+/* deprecated -to be removed */
+#define WL_TWT_DIALOG_TOKEN_AUTO 0xFFFF
+
+#define WL_TWT_TEARDOWN_VER 0u
+
+/* twt teardown descriptor */
+typedef struct wl_twt_teardesc {
+ uint8 negotiation_type;
+ uint8 flow_id; /* must be between 0 and 7 */
+ uint8 bid; /* must be between 0 and 31 */
+ bool alltwt; /* all twt teardown - 0 or 1 */
+} wl_twt_teardesc_t;
/* HE TWT Teardown command */
-typedef struct {
+typedef struct wl_twt_teardown {
/* structure control */
uint16 version; /* structure version */
uint16 length; /* data length (starting after this field) */
/* peer address */
struct ether_addr peer; /* leave it all 0s' for AP */
- /* flow attributes */
- uint8 flow_flags; /* See WL_TWT_FLOW_FLAG_XXXX above.
- * (only BROADCAST) is applicable)
- */
- uint8 flow_id; /* must be between 0 and 7 */
- uint16 bid; /* must be between 0 and 255 */
+ wl_twt_teardesc_t teardesc; /* Teardown descriptor */
+
+ /* deprecated - to be removed */
+ uint8 flow_flags;
+ uint8 flow_id;
+ uint8 bid;
+ uint8 pad;
} wl_twt_teardown_t;
/* twt information descriptor */
-typedef struct {
+typedef struct wl_twt_infodesc {
uint8 flow_flags; /* See WL_TWT_INFO_FLAG_XXX below */
uint8 flow_id;
- uint8 wake_type; /* See WL_TWT_TIME_TYPE_XXXX below */
- uint8 pad[1];
+ uint8 pad[2];
uint32 next_twt_h;
uint32 next_twt_l;
-} wl_twt_idesc_t;
+ /* deprecated - to be removed */
+ uint8 wake_type;
+ uint8 pad1[3];
+} wl_twt_infodesc_t;
/* Flow flags */
+#define WL_TWT_INFO_FLAG_ALL_TWT (1u << 0u) /* All TWT */
+#define WL_TWT_INFO_FLAG_RESUME (1u << 1u) /* 1 is TWT Resume, 0 is TWT Suspend */
+
+/* deprecated - to be removed */
#define WL_TWT_INFO_FLAG_RESP_REQ (1 << 0) /* Response Requested */
#define WL_TWT_INFO_FLAG_NEXT_TWT_REQ (1 << 1) /* Next TWT Request */
#define WL_TWT_INFO_FLAG_BTWT_RESCHED (1 << 2) /* Broadcast Reschedule */
-#define WL_TWT_INFO_FLAG_RESUME (1 << 4) /* 1 is TWT Resume, 0 is TWT Suspend */
+typedef wl_twt_infodesc_t wl_twt_idesc_t;
-#define WL_TWT_INFO_VER 0
+#define WL_TWT_INFO_VER 0u
/* HE TWT Information command */
-typedef struct {
+typedef struct wl_twt_info {
/* structure control */
uint16 version; /* structure version */
uint16 length; /* data length (starting after this field) */
/* peer address */
struct ether_addr peer; /* leave it all 0s' for AP */
uint8 pad[2];
- /* Temporary change. to be removed */
+ wl_twt_infodesc_t infodesc; /* information descriptor */
+ /* deprecated - to be removed */
wl_twt_idesc_t desc;
- /* information descriptor */
- wl_twt_idesc_t idesc;
} wl_twt_info_t;
+#define WL_TWT_PEER_STATS_VERSION_1 1u
+typedef struct wl_twt_peer_stats_v1 {
+ uint16 version;
+ uint16 length;
+ struct ether_addr peer;
+ uint8 PAD[2];
+ uint8 id;
+ uint8 flow_flags;
+ uint8 PAD[2];
+ uint32 sp_seq; /* sequence number of the service period */
+ uint32 tx_ucast_pkts;
+ uint32 tx_pkts_min;
+ uint32 tx_pkts_max;
+ uint32 tx_pkts_avg;
+ uint32 tx_failures;
+ uint32 rx_ucast_pkts;
+ uint32 rx_pkts_min;
+ uint32 rx_pkts_max;
+ uint32 rx_pkts_avg;
+ uint32 rx_pkts_retried;
+} wl_twt_peer_stats_v1_t;
+
+#define WL_TWT_STATS_VERSION_1 1
+typedef struct wl_twt_stats_v1 {
+ uint16 version;
+ uint16 length;
+ uint32 num_stats; /* number of peer stats in the peer_stats_list */
+ wl_twt_peer_stats_v1_t peer_stats_list[];
+} wl_twt_stats_v1_t;
+
+#define WL_TWT_STATS_CMD_VERSION_1 1
+#define WL_TWT_STATS_CMD_FLAGS_RESET (1u << 0u)
+/* HE TWT stats command */
+typedef struct wl_twt_stats_cmd_v1 {
+ uint16 version;
+ uint16 length;
+ struct ether_addr peer;
+ uint8 PAD[2];
+ uint16 flags; /* see WL_TWT_STATS_CMD_FLAGS */
+ uint8 num_fid;
+ uint8 num_bid;
+ uint8 fid_list[WL_TWT_STATS_MAX_ITWT];
+ uint8 bid_list[WL_TWT_STATS_MAX_BTWT];
+} wl_twt_stats_cmd_v1_t;
+
+#define WL_TWT_RESP_CFG_VER 0u
+
+#define WL_TWT_CMD_RESP_CFG_TYPE_ALTERNATE 0u
+#define WL_TWT_CMD_RESP_CFG_TYPE_DICTATE 1u
+/* HE TWT resp command */
+typedef struct wl_twt_resp_cfg {
+ /* structure control */
+ uint16 version; /* Structure version */
+ uint16 length; /* Data length (starting after this field) */
+ uint8 dc_max; /* Max supported duty cycle for single TWT */
+ uint8 resp_type; /* Resp. type(Alt/dict) if duty cycle>max duty cycle */
+} wl_twt_resp_cfg_t;
+
/* Current version for wlc_clm_power_limits_req_t structure and flags */
#define WLC_CLM_POWER_LIMITS_REQ_VERSION 1
/* "clm_power_limits" iovar request structure */
WL_MBO_CMD_NBR_INFO_CACHE = 11,
WL_MBO_CMD_ANQPO_SUPPORT = 12,
WL_MBO_CMD_DBG_EVENT_CHECK = 13,
+ WL_MBO_CMD_EVENT_MASK = 14,
/* Add before this !! */
WL_MBO_CMD_LAST
};
WL_MBO_XTLV_SUB_ELEM_TYPE = 0x8,
WL_MBO_XTLV_BTQ_TRIG_START_OFFSET = 0x9,
WL_MBO_XTLV_BTQ_TRIG_RSSI_DELTA = 0xa,
- WL_MBO_XTLV_ANQP_CELL_SUPP = 0xb
+ WL_MBO_XTLV_ANQP_CELL_SUPP = 0xb,
+ WL_MBO_XTLV_BIT_MASK = 0xc
};
+/* event bit mask flags for MBO */
+#define MBO_EVT_BIT_MASK_CELLULAR_SWITCH 0x0001 /* Evt bit mask to enab cellular switch */
+#define MBO_EVT_BIT_MASK_BTM_REQ_RCVD 0x0002 /* Evt bit mask to enab BTM req rcvd */
+
typedef struct wl_mbo_counters {
/* No of transition req recvd */
uint16 trans_req_rcvd;
#define WL_LEAKED_GUARD_TIME_NONE 0 /* Not in any guard time */
#define WL_LEAKED_GUARD_TIME_FRTS (0x01 << 0) /* Normal FRTS power save */
#define WL_LEAKED_GUARD_TIME_SCAN (0x01 << 1) /* Channel switch due to scanning */
-#define WL_LEAKED_GUARD_TIME_AWDL_PSF (0x01 << 2) /* Channel switch due to AWDL PSF */
-#define WL_LEAKED_GUARD_TIME_AWDL_AW (0x01 << 3) /* Channel switch due to AWDL AW */
#define WL_LEAKED_GUARD_TIME_INFRA_STA (0x01 << 4) /* generic type infra sta channel switch */
#define WL_LEAKED_GUARD_TIME_TERMINATED (0x01 << 7) /* indicate a GT is terminated early */
} base_regs;
struct {
uint32 resetctrl;
- uint32 itopoobb;
+ uint32 extrsrcreq;
} wrapper_regs;
} arm_regs;
struct {
uint32 clockcontrolstatus_val;
} base_regs;
struct {
- uint32 itopoobb;
+ uint32 extrsrcreq;
} wrapper_regs;
} pcie_regs;
struct {
} base_regs;
struct {
uint32 resetctrl;
- uint32 itopoobb;
+ uint32 extrsrcreq;
uint32 ioctrl;
uint32 ioctrl_resetseq_val[SSSR_D11_RESET_SEQ_STEPS];
} wrapper_regs;
#define SSSR_REG_INFO_VER SSSR_REG_INFO_VER_0
#endif // endif
+/* A wrapper structure for all versions of SSSR register information structures */
+typedef union sssr_reg_info {
+ sssr_reg_info_v0_t rev0;
+ sssr_reg_info_v1_t rev1;
+ sssr_reg_info_v2_t rev2;
+} sssr_reg_info_cmn_t;
+
/* ADaptive Power Save(ADPS) structure definition */
#define WL_ADPS_IOV_MAJOR_VER 1
#define WL_ADPS_IOV_MINOR_VER 0
uint8 flag;
} wlc_btc_2gchain_dis_t;
+/* TDLS structure definition */
+#define WL_TDLS_T_VERSION_V1 1
+typedef struct wl_tdls_dump_summary_v1 {
+ uint16 version;
+ uint16 length; /* length of the entire structure */
+ uint32 txsetupreq; /* tdls setup req sent */
+ uint32 txsetupresp; /* tdls setup resp sent */
+ uint32 txsetupcfm; /* tdls setup confirm sent */
+	uint32 txteardown;		/* tdls teardown frames sent */
+ uint32 txptireq; /* tdls pti req frames sent */
+ uint32 txptiresp; /* tdls pti resp frames sent */
+ uint32 txchswreq; /* tdls chsw req frames sent */
+ uint32 txchswresp; /* tdls chsw resp frame sent */
+ uint32 rxsetupreq; /* tdls setup req rcvd */
+ uint32 rxdsetupresp; /* tdls setup resp rcvd */
+ uint32 rxsetupcfm; /* tdls setup confirm rcvd */
+ uint32 rxteardown; /* tdls teardown frames rcvd */
+ uint32 rxptireq; /* tdls pti req frames rcvd */
+ uint32 rxptiresp; /* tdls pti resp frames rcvd */
+ uint32 rxchswreq; /* tdls chsw req frames rcvd */
+ uint32 rxchswresp; /* tdls chsw resp frames rcvd */
+ uint32 discard; /* frames discarded due to full buffer */
+ uint32 ubuffered; /* frames buffered by TDLS txmod */
+ uint32 buf_reinserted; /* frames reinserted */
+ uint32 idletime; /* time since no traffic on tdls link */
+ uint32 uptime; /* time since tdls link connected */
+ uint32 tx_cnt; /* frames txed over tdls link */
+ uint32 rx_cnt; /* frames rcvd over tdls link */
+ uint32 blist_cnt; /* number of tdls black list */
+ uint32 scb_flags; /* connected tdls scb flags */
+ struct ether_addr peer_addr; /* connected peer addr */
+ uint8 padding[2];
+} wl_tdls_dump_summary_v1_t;
+
#define WLC_BTC_2GCHAIN_DIS_REASSOC 0x1
#define WLC_BTC_2GCHAIN_DIS_VER1 0x1
#define WLC_BTC_2GCHAIN_DIS_VER1_LEN 6
/* --- End BTCX WiFi Protection --- */
+/* --- BTCX ULMU disable (btc_ulmu_config iovar) --- */
+
+/* Version number */
+#define WL_BTC_ULMU_CONFIG_VER_1 1
+typedef struct wl_btc_ulmu_config_v1 {
+ uint16 version; /* btc_ulmu_config version */
+ uint16 len; /* Total length */
+	uint32 ulmu_bt_task_bm;	/* BT Task bitmap for ULMU disable */
+ uint32 ulmu_bt_period_th; /* BT period thresh for ULMU disable */
+} wl_btc_ulmu_config_v1_t;
+
+/* --- End BTCX ULMU config --- */
+
+#define RPSNOA_IOV_MAJOR_VER 1
+#define RPSNOA_IOV_MINOR_VER 1
+#define RPSNOA_IOV_MAJOR_VER_SHIFT 8
+#define RPSNOA_IOV_VERSION \
+ ((RPSNOA_IOV_MAJOR_VER << RPSNOA_IOV_MAJOR_VER_SHIFT)| RPSNOA_IOV_MINOR_VER)
+
enum wl_rpsnoa_cmd_ids {
WL_RPSNOA_CMD_ENABLE = 1,
WL_RPSNOA_CMD_STATUS,
WL_IFSTATS_XTLV_MAC_ADDR = 3,
WL_IFSTATS_XTLV_REPORT_CMD = 4, /* Comes in an iovar */
WL_IFSTATS_XTLV_BUS_PCIE = 5,
+ WL_STATS_XTLV_BUS_PCIE_TX_HISTOGRAMS = 6,
+ WL_STATS_XTLV_BUS_PCIE_TX_QUEUE_DEPTH = 7,
+ /* history of blocks freed most recently */
+ WL_STATS_XTLV_FBINFO_STATS = 8,
/* Report data across all SCBs using ecounters */
/* STA_info ecounters */
WL_IFSTATS_XTLV_SCB_ECOUNTERS = 0x103,
/* Global NAN stats */
WL_IFSTATS_XTLV_NAN_STATS = 0x104,
+ WL_IFSTATS_XTLV_CHAN_STATS = 0x105,
+ /* TDLS state */
+ WL_IFSTATS_XTLV_IF_TDLS_STATE = 0x106,
+ WL_IFSTATS_XTLV_KEY_PLUMB_INFO = 0x107,
+ /* HE TX related stats */
+ WL_IFSTATS_XTLV_HE_TXMU_STATS = 0x108,
/* Per-slice information
* Per-interface reporting could also include slice specific data
WL_SLICESTATS_XTLV_PERIODIC_STATE = 0x30D,
WL_SLICESTATS_XTLV_HIST_TX_STATS = 0x30E,
WL_SLICESTATS_XTLV_HIST_RX_STATS = 0x30F,
+ /* TX histograms */
+ WL_STATS_XTLV_WL_SLICE_TX_HISTOGRAMS = 0x310,
+ /* TX queue depth */
+ WL_STATS_XTLV_WL_SLICE_TX_QUEUE_DEPTH = 0x311,
+ /* Latency instrumentation debug */
+ WL_STATS_XTLV_WL_QUEUE_STOP = 0x312,
+ /* Beamforming counters */
+ WL_IFSTATS_XTLV_WL_SLICE_TXBF = 0x313,
+ /* Per-slice BTCOEX task duration stats */
+ WL_IFSTATS_XTLV_WL_SLICE_BTCOEX_TSKDUR_STATS = 0x314,
/* Per-interface */
/* XTLV container for reporting */
WL_IFSTATS_XTLV_IF = 0x501,
/* AMPDU stats on per-IF */
WL_IFSTATS_XTLV_AMPDU_DUMP = 0x505,
WL_IFSTATS_XTLV_IF_SPECIFIC = 0x506,
- WL_IFSTATS_XTLV_WL_PWRSTATS_AWDL = 0x507,
WL_IFSTATS_XTLV_IF_LQM = 0x508,
/* Interface specific state capture in periodic fashion */
WL_IFSTATS_XTLV_IF_PERIODIC_STATE = 0x509,
/* Event statistics on per-IF */
WL_IFSTATS_XTLV_IF_EVENT_STATS = 0x50A,
+ /* Infra HE specific */
+ WL_IFSTATS_XTLV_INFRA_SPECIFIC_HE = 0x50B,
+ /* Roam statistics */
+ WL_IFSTATS_XTLV_ROAM_STATS_PERIODIC = 0x50C,
+ WL_IFSTATS_XTLV_ROAM_STATS_EVENT = 0x50D,
/* ecounters for nan */
/* nan slot stats */
- WL_IFSTATS_XTLV_NAN_SLOT_STATS = 0x601
+ WL_IFSTATS_XTLV_NAN_SLOT_STATS = 0x601,
+ /* Ecounters for NDP session status */
+ WL_STATS_XTLV_NDP_SESSION_STATUS = 0x602,
+ /* NAN disc frame status ecounters */
+ WL_STATS_XTLV_NAN_DISC_FRM_STATUS = 0x603
};
/* current version of wl_stats_report_t structure for request */
uint32 tbtt;
} wl_if_infra_stats_t;
+#define WL_INFRA_STATS_HE_VERSION_V1 (1u)
+/* Associated stats type: WL_IFSTATS_INFRA_SPECIFIC_HE */
+typedef struct wl_infra_stats_he {
+ uint16 version; /**< version of the structure */
+ uint16 length;
+ uint32 PAD; /**< Explicit padding */
+
+ /* DL SU MPDUs and total number of bytes */
+ uint64 dlsu_mpdudata;
+ uint64 dlsu_mpdu_bytes;
+
+ /* DL MUMIMO MPDUs and total number of bytes */
+ uint64 dlmumimo_mpdudata;
+ uint64 dlmumimo_mpdu_bytes;
+
+ /* DL OFDMA MPDUs and total number of bytes */
+ uint64 dlofdma_mpdudata;
+ uint64 dlofdma_mpdu_bytes;
+
+ /* UL SU MPDUs and total number of bytes */
+ uint64 ulsu_mpdudata;
+ uint64 ulsu_mpdu_bytes;
+
+ /* ULOFDMA MPSUs and total number of bytes */
+ uint64 ulofdma_mpdudata;
+ uint64 ulofdma_mpdu_bytes;
+} wl_if_infra_stats_he_t;
+
#define LTECOEX_STATS_VER 1
typedef struct wlc_ltecoex_stats {
*/
#define WL_NAN_SLOT_ECOUNTERS_VERSION_1 1
+#define WL_NAN_SLOT_ECOUNTERS_VERSION_2 2
typedef struct wl_nan_slot_ecounters_v1 {
uint16 version; /* version field */
nan_sched_stats_t sched; /* sched stats */
wl_nan_mac_stats_t mac; /* mac stats */
} wl_nan_slot_ecounters_v1_t;
+
+typedef struct wl_nan_slot_ecounters_v2 {
+ uint16 version; /* version field */
+ uint16 length; /* struct length starting from version */
+ uint32 chan[NAN_MAX_BANDS]; /* cur nan slot chanspec of both bands */
+ uint16 cur_slot_idx; /* cur nan slot index */
+ uint16 pad;
+ nan_sched_stats_t sched; /* sched stats */
+ wl_nan_mac_stats_t mac; /* mac stats */
+ /* for v2 */
+ uint16 bcn_rx_drop_rssi; /* Beacon received but ignored due to weak rssi */
+ uint16 bcn_rx_drop_rssi_5g; /* 5G Beacon received but ignored due to weak rssi */
+ uint16 cnt_rssi_close; /* cnt of beacon rssi > rssi_close received */
+ uint16 cnt_rssi_close_5g; /* cnt of 5G beacon rssi > rssi_close received */
+ uint16 cnt_rssi_mid; /* cnt of beacon rssi > rssi_middle received */
+ uint16 cnt_rssi_mid_5g; /* cnt of 5G beacon rssi > rssi_middle received */
+ uint16 bcn_txfail; /* Beacon sending failure count */
+ uint16 bcn_txfail_5g; /* sending 5G beacon failure count */
+} wl_nan_slot_ecounters_v2_t;
+
+/* WL_STATS_XTLV_NDP_SESSION_STATUS for ecounters */
+#define WL_NAN_SESSION_STATUS_EC_VERSION_1 1
+typedef struct wl_nan_ndp_session_status_v1_s {
+ uint16 version; /* version field */
+ uint16 length; /* struct length starting from version */
+ uint8 role; /* Role of NAN device */
+ uint8 ndp_id; /* local NDP ID */
+ uint8 state; /* NDP state */
+ uint8 nan_sec_csid; /* security csid */
+ struct ether_addr lndi_addr; /* Local NDI addr */
+ struct ether_addr pnmi_addr; /* Peer NMI addr */
+ struct ether_addr pndi_addr; /* Peer NDI addr */
+ uint8 dpe_state; /* DPE state to know where timeout/dpend has come */
+ uint8 pad;
+} wl_nan_ndp_session_status_v1_t;
+
+/* WL_STATS_XTLV_NAN_DISC_FRM_STATUS for ecounters */
+#define WL_NAN_DISC_FRM_STATUS_EC_VERSION_1 1
+typedef struct wl_nan_disc_frame_status_v1_s {
+ uint16 version; /* version field */
+ uint16 length; /* struct length starting from version */
+ uint8 type; /* wl_nan_frame_type_t */
+ uint8 status; /* For TX status, success or failure */
+ uint8 reason_code; /* to identify reason when status is failure */
+ uint8 inst_id; /* Publish or subscribe instance id */
+ uint8 req_id; /* Requestor instance id */
+ uint8 pad;
+ uint16 token; /* seq num to keep track of pkts sent by host */
+} wl_nan_disc_frame_status_v1_t;
/*
* BT log definitions
*/
uint8 bssid [6]; /* padding to get 32 bits alignment */
} rmc_candidate_info_v1_t;
-#define WL_FILTER_IE_VERSION 1
+#define WL_FILTER_IE_VERSION 1 /* deprecated */
enum wl_filter_ie_options {
- WL_FILTER_IE_CLEAR = 0, /* allow element id in packet.For suboption */
- WL_FILTER_IE_SET = 1, /* filter element id in packet.For suboption */
- WL_FILTER_IE_LIST = 2, /* list element ID's.Set as option */
- WL_FILTER_IE_CLEAR_ALL = 3, /* clear all the element.Set as option */
- WL_FILTER_IE_CHECK_SUB_OPTION = 4 /* check for suboptions.Set only as option */
+ WL_FILTER_IE_CLEAR = 0, /* allow element id in packet.For suboption */
+ WL_FILTER_IE_SET = 1, /* filter element id in packet.For suboption */
+ WL_FILTER_IE_LIST = 2, /* list element ID's.Set as option */
+ WL_FILTER_IE_CLEAR_ALL = 3, /* clear all the element.Set as option */
+ WL_FILTER_IE_CHECK_SUB_OPTION = 4 /* check for suboptions.Set only as option */
};
typedef struct wl_filter_ie_tlv {
- uint16 id;
- uint16 len; /* sub option length + pattern length */
- uint8 data[]; /* sub option + pattern matching(OUI,type,sub-type) */
+	uint16 id;		/* element id [ + ext id ] */
+ uint16 len; /* sub option length + pattern length */
+ uint8 data[]; /* sub option + pattern matching(OUI,type,sub-type) */
} wl_filter_ie_tlv_t;
-typedef struct wl_filter_ie_iov {
- uint16 version; /* Structure version */
- uint16 len; /* Total length of the structure */
- uint16 fixed_length; /* Total length of fixed fields */
- uint8 option; /* Filter action - check for suboption */
- uint8 pad[1]; /* Align to 4 bytes */
- uint32 pktflag; /* frame type */
- uint8 tlvs[]; /* variable data (zero in for list ,clearall) */
+#define WL_FILTER_IE_VERSION_1 1 /* the latest version */
+typedef struct wl_filter_ie_iov_v1 {
+ uint16 version; /* Structure version */
+ uint16 len; /* Total length of the structure */
+ uint16 fixed_length; /* Total length of fixed fields */
+ uint8 option; /* Filter action - check for suboption */
+ uint8 pad[1]; /* Align to 4 bytes */
+ uint32 pktflag; /* frame type - FC_XXXX */
+ uint8 tlvs[]; /* variable data (zero in for list ,clearall) */
} wl_filter_ie_iov_v1_t;
/* Event aggregation config */
#define EVENT_AGGR_CFG_VERSION 1
-#define EVENT_AGGR_DISABLED 0x0
-#define EVENT_AGGR_ENABLED 0x1
+#define EVENT_AGGR_DISABLED 0x0
+#define EVENT_AGGR_ENABLED 0x1
#define EVENT_AGGR_BUFSIZE_MAX 1512
#define EVENT_AGGR_BUFSIZE_MIN 512
uint32 band; /* current band (2G/5G) for which the event is received */
} nan_slot_event_data_t;
+#ifndef BCMUTILS_ERR_CODES
+
/* SAE (Simultaneous Authentication of Equals) error codes.
* These error codes are local.
*/
-#define WL_SAE_E_BASE -3072
-
/* SAE status codes are reserved from -3072 to -4095 (1K) */
-enum WL_SAE_E_STATUS_CODES {
+enum wl_sae_status {
WL_SAE_E_AUTH_FAILURE = -3072,
/* Discard silently */
WL_SAE_E_AUTH_DISCARD = -3073,
WL_PMK_E_PSK_NOMEM = -5124
} wlc_pmk_psk_hash_status_t;
+#endif /* BCMUTILS_ERR_CODES */
+
/* Block Channel */
#define WL_BLOCK_CHANNEL_VER_1 1u
typedef enum sdc_trigger_types {
SDC_TYPE_STA_ONBOARD_DEBUG = 1,
+ SDC_TYPE_SCAN_DEBUG = 2,
#ifdef SDC_TEST
/*
* This is for test purpose only. Don't assign specific value.
#define WL_HIST_COMPACT_TOSS_STATS_TX_VER_1 (1u)
#define WL_HIST_COMPACT_TOSS_STATS_RX_VER_1 (1u)
-/* [see HIST_TOSS_xxxx macros]
+/* Format of running toss reasons with seq
+ * [see HIST_TOSS_xxxx macros]
* bits [7..0] : 8 bits : toss sts.
* [11..8] : cfgidx
* [15..12]: ac
#define HIST_TOSS_SEQ_POS (16u)
#define HIST_TOSS_SEQ_MASK (0xffff0000u)
-#define WLC_SDC_COMPACT_TOSS_REASON(sts, ifidx, ac, seq) \
- ((sts) | ((ifidx) << HIST_TOSS_CFGIDX_POS) | ((ac) << HIST_TOSS_AC_POS) | \
- (seq << HIST_TOSS_SEQ_POS))
+/* Format of toss reasons with count
+ * bits [15..0] : 16 bits : toss reason
+ * bits [31..16]: 16 bits : count
+ */
+#define HIST_TOSS_RC_REASON_POS (0u)
+#define HIST_TOSS_RC_REASON_MASK (0xffffu)
+#define HIST_TOSS_RC_COUNT_POS (16u)
+#define HIST_TOSS_RC_COUNT_MASK (0xffff0000u)
typedef struct {
uint16 version;
uint32 hist_toss_counts[WLC_HIST_TOSS_LEN]; /* toss counts corr to reasons */
} wl_hist_compact_toss_stats_v1_t;
+#define WL_HIST_COMPACT_TOSS_STATS_TX_VER_2 (2u)
+#define WL_HIST_COMPACT_TOSS_STATS_RX_VER_2 (2u)
+
+typedef struct {
+ uint16 version;
+ uint8 htr_type; /* from wl_slice_hist_XX_stats_xtlv_id */
+ uint8 htr_num; /* number of elements in htr_running or htr_rc */
+ uint16 htr_rnidx; /* htr_running[rnidx-1] has latest data */
+ uint16 htr_rcidx; /* htr_rc[rcidx-1] has latest data */
+ uint32 htr_running[WLC_HIST_TOSS_LEN]; /* last 8 reasons along with seq, etc as
+ * per WLC_SDC_COMPACT_TOSS_REASON() format
+ */
+ uint32 htr_rn_ts[WLC_HIST_TOSS_LEN]; /* time stamps corr to htr_running data */
+ uint32 htr_rc[WLC_HIST_TOSS_LEN]; /* last 8 toss reasons and counts in
+ * WLC_SDC_COMPACT_TOSS_RC() format
+ */
+ uint32 htr_rc_ts[WLC_HIST_TOSS_LEN]; /* time stamps corr to htr_rc */
+} wl_hist_compact_toss_stats_v2_t;
+
/* ***END of SDC_TYPE_STA_ONBOARD_DEBUG specific ******* */
#endif /* BCM_SDC */
enum wl_sc_cmd {
WL_SC_CMD_DBG = 0,
WL_SC_CMD_CNX = 1,
+ WL_SC_CMD_CAP = 2,
+ WL_SC_CMD_CONFIG = 3,
WL_SC_CMD_LAST
};
+/* WBUS sub-command IDs for unit test */
+#define WL_WBUS_INA_SLOT_START 0x01u /**< Inactive slot start sub command ID. */
+#define WL_WBUS_INA_SLOT_STOP 0x02u /**< Inactive slot stop sub command ID. */
+
+/* WBUS (WiFi BT uniform scheduler) command IDs */
+enum wl_wbus_cmd {
+ WL_WBUS_CMD_VER = 0,
+ WL_WBUS_CMD_STATS = 1,
+ WL_WBUS_CMD_UNIT_TEST = 2,
+ WL_WBUS_CMD_BT_TEST = 3,
+ WL_WBUS_CMD_CAP = 4,
+ WL_WBUS_CMD_LAST
+};
+
+#define WBUS_BT_SCHED_TEST_PARAMS_VER_1 1
+
+typedef struct wbus_bt_sched_test_params_v1 {
+ uint16 version;
+ uint16 pad;
+ uint32 flags;
+ uint32 action;
+ uint32 duration;
+ uint32 interval;
+} wbus_bt_sched_test_params_v1_t;
+
+#define WBUS_BT_SCHED_ADD 0u
+#define WBUS_BT_SCHED_REMOVE 1u
+#define WBUS_BT_SCHED_INVALID 0xFFu
+
+#define KEY_UPDATE_INFO_VER_V1 1
+typedef struct key_update_info_v1
+{
+ uint16 ver;
+ uint8 pad;
+ uint8 flags;
+ uint32 timestamp;
+ uint32 algo;
+ uint32 key_flags;
+ struct ether_addr ea;
+ struct ether_addr sa;
+} key_update_info_v1_t;
+
+/* Key update flag bit field */
+#define KEY_UPD_FLAG_ADD_KEY 0x1 /* 0 - Removal, 1 - Add key */
+
+#ifdef WLLLW
+/* LLW Session */
+#define LLW_VERSION 1
+#define LLW_STATS_VERSION 1
+
+/* LLW roles */
+#define LLW_ROLE_SCHEDULER 0
+#define LLW_ROLE_CLIENT 1
+
+/* LLW modes */
+#define LLW_MODE_GAPS 0
+#define LLW_MODE_BACK_TO_BACK 1
+
+/* LLW session max values */
+#define LLW_MAX_SESSION_ID 10
+#define LLW_MAX_FLOW_ID 40
+#define LLW_MAX_CLIENT_NUM 15
+#define LLW_MAX_GAPS_PERIOD 20
+#define LLW_MAX_GAPS_VAR 3
+#define LLW_MAX_RETX_CNT 10
+#define LLW_MAX_AIFSN EDCF_AIFSN_MAX
+#define LLW_MAX_CWMIN EDCF_ECW_MAX
+#define LLW_MAX_CWMAX EDCF_ECW_MAX
+#define LLW_MAX_PER_NUMERATOR 100
+#define LLW_MAX_PER_DENOM 10000
+#define LLW_MAX_CLIENT_ID 15
+#define LLW_MAX_PKT_SIZE 1500
+#define LLW_MAX_PKT_NUM 10
+#define LLW_MAX_MCS 9
+#define LLW_MAX_NUM_STREAMS 8
+#define LLW_MAX_IBS 32
+
+/* Per LLW session config */
+/* WL_LLW_CMD_SESSION_CREATE, WL_LLW_CMD_SESSION_UPDATE */
+typedef struct wl_llw_session_cfg {
+ uint8 session_id;
+ uint8 role;
+ uint8 mode;
+ uint8 client_id;
+ uint8 gaps_period;
+ uint8 gaps_var;
+ uint8 aifsn;
+ uint8 ecwmin; /* exponent value for minimum contention window */
+ uint8 ecwmax; /* exponent value for maximum contention window */
+ uint8 mcs;
+ uint8 num_streams;
+ uint8 ibs; /* interblock spacing in usecs, for spacing between Transaction Blocks */
+ uint16 ul_pkt_size;
+ uint16 dl_pkt_size;
+ uint16 per_denom; /* denominator for target PER */
+ uint8 per_numerator; /* this value divided by per_denom gives the target PER */
+ uint8 dl_pkt_num;
+ uint8 client_num;
+ uint8 retx_cnt;
+ uint8 pwr_save;
+ uint8 auto_ba; /* automatic RX/TX BA session setup (no negotiation needed) */
+ uint8 if_index;
+ uint8 padding[3];
+ struct ether_addr multicast_addr;
+ struct ether_addr scheduler_addr;
+} wl_llw_session_cfg_t;
+
+/* WL_LLW_CMD_SESSION_DELETE, WL_LLW_CMD_SESSION_ENABLE, WL_LLW_CMD_SESSION_DISABLE, */
+/* WL_LLW_CMD_SESSION_GET */
+typedef struct wl_llw_session_cmd {
+ uint8 session_id;
+ uint8 padding[3];
+} wl_llw_session_cmd_t;
+
+/* LLW client config */
+/* WL_LLW_CMD_CLIENT_ADD, WL_LLW_CMD_CLIENT_DELETE, WL_LLW_CMD_CLIENT_GET */
+typedef struct wl_llw_client_cfg {
+ uint8 session_id;
+ uint8 client_id;
+ struct ether_addr mac;
+} wl_llw_client_cfg_t;
+
+/* Get list of session IDs from FW */
+/* WL_LLW_CMD_SESSION_ID */
+typedef struct llw_session_id_list {
+ uint8 id_count; /* Number of session IDs */
+ uint8 list[]; /* list of session IDs */
+} llw_session_id_list_t;
+
+/* LLW XTLV structures */
+typedef struct wl_llw_iov_cmd {
+ uint16 version;
+ uint8 cmd_cnt;
+ uint8 pad;
+ uint8 cmds[];
+} wl_llw_iov_cmd_t;
+
+typedef struct wl_llw_iov_sub_cmd {
+ uint16 type;
+ uint16 len;
+ union {
+ int32 status; /* Processed status - Set by FW */
+ uint32 options; /* Command Process Options - Set by Host */
+ } u;
+ uint8 data[];
+} wl_llw_iov_sub_cmd_t;
+
+/* to be used in type field of wl_llw_iov_sub_cmd_t structure while issuing LLW commands */
+typedef enum wl_llw_sub_cmd_xtlv_id {
+ WL_LLW_CMD_SESSION_ID,
+ WL_LLW_CMD_SESSION_CREATE,
+ WL_LLW_CMD_SESSION_DELETE,
+ WL_LLW_CMD_SESSION_UPDATE,
+ WL_LLW_CMD_SESSION_ENABLE,
+ WL_LLW_CMD_SESSION_DISABLE,
+ WL_LLW_CMD_SESSION_GET,
+ WL_LLW_CMD_CLIENT_ADD,
+ WL_LLW_CMD_CLIENT_DELETE,
+ WL_LLW_CMD_CLIENT_GET,
+ WL_LLW_CMD_FLOW_ADD,
+ WL_LLW_CMD_FLOW_DELETE,
+ WL_LLW_CMD_FLOW_GET,
+ WL_LLW_CMD_STATS
+} wl_llw_sub_cmd_xtlv_id_t;
+
+/* LLW stats */
+typedef enum wl_llw_xtlv {
+ WL_LLW_XTLV_STATS
+} wl_llw_xtlv_t;
+
+typedef struct wl_llw_stats {
+ uint32 txpackets;
+ uint32 txbytes;
+ uint32 txrts;
+ uint32 txnocts;
+ uint32 txnoack;
+ uint32 txfail;
+ uint32 txretry;
+ uint32 txdropped;
+ uint32 tx_avg_q_time;
+ uint32 tx_min_q_time;
+ uint32 tx_max_q_time;
+ uint32 tx_avg_rem_lifetime;
+ uint32 tx_min_rem_lifetime;
+ uint32 tx_max_rem_lifetime;
+ uint32 rxpackets;
+ uint32 rxbytes;
+ uint32 rxfail;
+ uint32 rxretry;
+ uint32 txschedfrm;
+ uint32 retxschedfrm;
+} wl_llw_stats_t;
+
+typedef struct wl_llw_stats_hdr {
+ uint16 version;
+ uint16 stats_cnt;
+ uint32 tot_len;
+ uint8 stat_xtlvs[];
+} wl_llw_stats_hdr_t;
+
+/* WL_LLW_XTLV_STATS */
+typedef struct wl_llw_stats_xtlv {
+ uint16 type;
+ uint16 len;
+ uint8 stats[];
+} wl_llw_stats_xtlv_t;
+
+/* WL_LLW_CMD_STATS */
+typedef struct wl_llw_stats_cmd {
+ uint8 session_id;
+ uint8 client_id;
+ uint16 padding;
+} wl_llw_stats_cmd_t;
+
+/* LLW flow ring ID config */
+/* WL_LLW_CMD_FLOW_ADD, WL_LLW_CMD_FLOW_DELETE, WL_LLW_CMD_FLOW_GET */
+typedef struct wl_llw_flow_cfg {
+ uint8 session_id;
+ uint8 flow_id;
+ uint16 padding;
+} wl_llw_flow_cfg_t;
+#endif /* End of LLW Session */
+
+#define WL_OMI_CONFIG_VERSION_1 1u
+
+/* values for valid_bm */
+#define OMI_CONFIG_VALID_BMP_RXNSS 0x0001u
+#define OMI_CONFIG_VALID_BMP_BW 0x0002u
+#define OMI_CONFIG_VALID_BMP_ULMU_DISABLE 0x0004u
+#define OMI_CONFIG_VALID_BMP_TXNSTS 0x0008u
+#define OMI_CONFIG_VALID_BMP_ERSU_DISABLE 0x0010u
+#define OMI_CONFIG_VALID_BMP_DLMU_RSD_RCM 0x0020u
+#define OMI_CONFIG_VALID_BMP_ULMU_DATA_DISABLE 0x0040u
+#define OMI_CONFIG_VALID_BMP_ALL 0x0FFFu
+
+#define OMI_CONFIG_BW_MAX 3u
+
+typedef struct wl_omi_config {
+ uint16 valid_bm; /* validity bitmask for each config */
+ uint8 rxnss;
+ uint8 bw;
+ uint8 ulmu_disable;
+ uint8 txnsts;
+ uint8 ersu_disable;
+ uint8 dlmu_resound_rec;
+ uint8 ulmu_data_disable;
+ uint8 pad[3];
+} wl_omi_config_t;
+
+typedef struct wl_omi_req {
+ uint16 version;
+ uint16 len;
+ wl_omi_config_t config;
+} wl_omi_req_v1_t;
+
+/* Bits for ULMU disable reason */
+#define OMI_ULMU_DISABLED_HOST	0x01u	/* Host has disabled through the OMI */
+#define OMI_ULMU_DISABLED_NAN 0x04u /* Disabled due to NAN enabled */
+#define OMI_ULMU_DISABLED_BTCOEX 0x08u /* Disabled while in BT Coex activity */
+#define OMI_ULMU_DISABLED_LTECOEX 0x10u /* Disabled due to LTE Coex activity */
+#define OMI_ULMU_DISABLED_NON11AX_CONN 0x20u /* Disabled due to not associated to 11ax AP */
+#define OMI_ULMU_DISABLED_THROTTLE_ENABLE 0x40u /* Disabled due to throttle timer running */
+#define OMI_ULMU_DISABLED_TXCHAIN_DOWNGRADE 0x80u /* Disabled due to Txchain downgrade */
+#define OMI_ULMU_DISABLED_TX_DUTY_CYCLE 0x100u /* Disabled due to tx duty cycle */
+
+/* Bits for DLMU Resound Recommendation reason */
+#define OMI_DLMU_RSD_RCM_HOST (0x1u << 0u) /* Host directly set the bit */
+#define OMI_DLMU_RSD_RCM_MPF (0x1u << 1u) /* Set on MPF state change */
+
+#define WL_OMI_STATUS_VERSION_1 1u
+typedef struct wl_omi_status {
+ uint16 version;
+ uint16 len;
+ wl_omi_config_t omi_pending; /* OMI requests pending */
+ uint16 omi_data; /* current OM Control field for completed OMI requests */
+ uint16 ulmu_disable_reason; /* Bits representing UL OFDMA disable reasons */
+ uint32 ulmu_disable_duration; /* Duration (ms) for which UL OFDMA is disabled */
+} wl_omi_status_v1_t;
+
+#define WL_OMI_STATUS_VERSION_2 2u
+typedef struct wl_omi_status_v2 {
+ uint16 version;
+ uint16 len;
+ wl_omi_config_t omi_pending; /* OMI requests pending */
+ uint16 omi_data; /* Current OM Control field for completed OMI requests */
+ uint16 ulmu_disable_reason; /* Bits representing UL OFDMA disable reasons */
+ uint32 ulmu_disable_duration; /* Duration (ms) for which UL OFDMA is disabled */
+	uint32 dlmu_rsd_rcm_duration;	/* Dur (ms) for which ResoundRecommendation is set */
+ uint16 dlmu_rsd_rcm_mpf_state; /* The MPF state value */
+ uint16 dlmu_rsd_rcm_reason; /* DL MU-MIMO recommendation reasons bitmap */
+} wl_omi_status_v2_t;
+
+#define WL_ULMU_DISABLE_STATS_VERSION_1 1u
+typedef struct wl_ulmu_disable_stats {
+ uint16 version;
+ uint16 len;
+ uint32 ulmu_disable_ts; /* UL OFDMA disabled timestamp (ms) */
+ uint16 ulmu_disable_reason; /* Bits representing UL OFDMA disable reasons */
+ uint16 ulmu_disable_count; /* UL MU disable count during current infra association */
+ uint32 last_trig_rx_ts; /* Last trigger frame received timestamp (ms) */
+ uint16 trig_rx_count; /* No of trigger frames received after last UL OFDMA disable */
+ uint16 max_latency; /* Max latency by AP to re-act for UL OFDMA disable request (ms) */
+ uint16 min_latency; /* Min latency by AP to re-act for UL OFDMA disable request (ms) */
+ uint16 avg_latency; /* Avg latency by AP to re-act for UL OFDMA disable request (ms) */
+} wl_ulmu_disable_stats_v1_t;
+
+/* sub-xtlv IDs within WL_STATS_XTLV_WL_SLICE_TX_HISTOGRAMS */
+enum wl_tx_histogram_id {
+ WL_TX_HIST_TXQ_ID = 1,
+ WL_TX_HIST_LOW_TXQ_ID = 2,
+ WL_TX_HIST_SCBQ_ID = 3,
+ WL_TX_HIST_EXCUR_TXQ_ID = 4,
+ WL_TX_HIST_EXCUR_LOW_TXQ_ID = 5
+};
+
+/* common tx histogram structure */
+typedef struct wl_tx_hist {
+ uint16 hist_bmap; /* bit N indicates histogram follows for priority or fifo N */
+ uint16 hist_count; /* count of histograms in var len array */
+ uint32 hist[1]; /* var len array of histograms each prefix by hist length */
+} wl_tx_hist_t;
+
+#define WL_TX_HIST_FIXED_LEN (OFFSETOF(wl_tx_hist_t, hist))
+#define WL_TX_HIST_FULL_LEN(num_hist, max_hist_size) \
+ (WL_TX_HIST_FIXED_LEN + (num_hist) * \
+ (max_hist_size + 1) * sizeof(uint32))
+
+/* structure for WL_TX_HIST_TXQ, WL_TX_HIST_EXCUR_TXQ_ID */
+typedef struct wl_tx_hist_txq {
+ uint32 bsscfg_bmap; /* bitmap of bsscfg indexes associated with this queue */
+ wl_tx_hist_t tx_hist; /* tx histograms */
+} wl_tx_hist_txq_t;
+
+#define WL_TX_HIST_TXQ_FIXED_LEN \
+ (OFFSETOF(wl_tx_hist_txq_t, tx_hist) + WL_TX_HIST_FIXED_LEN)
+#define WL_TX_HIST_TXQ_FULL_LEN(num_hist, max_hist_size) \
+ (OFFSETOF(wl_tx_hist_txq_t, tx_hist) + \
+ WL_TX_HIST_FULL_LEN(num_hist, max_hist_size))
+
+/* sub-xtlv IDs within WL_STATS_XTLV_WL_SLICE_TX_HISTOGRAMS */
+enum wl_txq_stop_histogram_id {
+ WL_TXQ_STOP_HIST_SW = 1,
+ WL_TXQ_STOP_HIST_HW = 2,
+ WL_TXQ_STOP_HIST_PKTS_SW = 3,
+ WL_TXQ_STOP_HIST_PKTS_HW = 4,
+ WL_TXQ_STOP_HIST_MAX = WL_TXQ_STOP_HIST_PKTS_HW
+};
+
+/* common tx histogram structure */
+typedef struct wl_txq_stop_hist {
+ wl_tx_hist_t tx_hist; /* tx histograms */
+} wl_txq_stop_hist_t;
+
+#define WL_TXQ_STOP_HIST_FIXED_LEN \
+ (OFFSETOF(wl_txq_stop_hist_t, tx_hist) + WL_TX_HIST_FIXED_LEN)
+#define WL_TXQ_STOP_HIST_FULL_LEN(num_hist, max_hist_size) \
+ (OFFSETOF(wl_txq_stop_hist_t, tx_hist) + \
+ WL_TX_HIST_FULL_LEN(num_hist, max_hist_size))
+
+/* structure for WL_TX_HIST_LOW_TXQ, WL_TX_HIST_EXCUR_LOW_TXQ_ID */
+typedef struct wl_tx_hist_low_txq {
+ wl_tx_hist_t tx_hist; /* tx histograms */
+} wl_tx_hist_low_txq_t;
+
+#define WL_TX_HIST_LOW_TXQ_FIXED_LEN \
+ (OFFSETOF(wl_tx_hist_low_txq_t, tx_hist) + WL_TX_HIST_FIXED_LEN)
+#define WL_TX_HIST_LOW_TXQ_FULL_LEN(num_hist, max_hist_size) \
+ (OFFSETOF(wl_tx_hist_low_txq_t, tx_hist) + \
+ WL_TX_HIST_FULL_LEN(num_hist, max_hist_size))
+
+/* structure for WL_TX_HIST_SCBQ */
+typedef struct wl_tx_hist_scbq {
+ struct ether_addr ea; /* ether addr of peer */
+ uint16 bsscfg_idx; /* bsscfg index */
+ wl_tx_hist_t tx_hist; /* tx histograms */
+} wl_tx_hist_scbq_t;
+
+#define WL_TX_HIST_SCBQ_FIXED_LEN \
+ (OFFSETOF(wl_tx_hist_scbq_t, tx_hist) + WL_TX_HIST_FIXED_LEN)
+#define WL_TX_HIST_SCBQ_FULL_LEN(num_hist, max_hist_size) \
+ (OFFSETOF(wl_tx_hist_scbq_t, tx_hist) + \
+ WL_TX_HIST_FULL_LEN(num_hist, max_hist_size))
+
+/* sub-xtlv IDs within WL_STATS_XTLV_WL_SLICE_TX_QUEUE_DEPTH */
+enum wl_tx_queue_depth_id {
+ WL_TX_QUEUE_DEPTH_TXQ_ID = 1,
+ WL_TX_QUEUE_DEPTH_LOW_TXQ_ID = 2,
+ WL_TX_QUEUE_DEPTH_SCBQ_ID = 3,
+ WL_TX_QUEUE_DEPTH_EXCUR_TXQ_ID = 4,
+ WL_TX_QUEUE_DEPTH_EXCUR_LOW_TXQ_ID = 5
+};
+
+/* common tx queue depth structure */
+typedef struct wl_tx_queue_depth {
+ uint16 queue_depth_bmap; /* bitmap of queue depth in var len array */
+ uint16 queue_depth_count; /* count of queue depth in var len array */
+ uint16 queue_depth[1]; /* var len array of queue depth */
+} wl_tx_queue_depth_t;
+
+#define WL_TX_QUEUE_DEPTH_FIXED_LEN (OFFSETOF(wl_tx_queue_depth_t, queue_depth))
+#define WL_TX_QUEUE_DEPTH_FULL_LEN(num_queue_depth) \
+ (WL_TX_QUEUE_DEPTH_FIXED_LEN + (num_queue_depth) * \
+ sizeof(uint16))
+
+/* structure for WL_TX_QUEUE_DEPTH_TXQ_ID, WL_TX_QUEUE_DEPTH_EXCUR_TXQ_ID */
+typedef struct wl_tx_queue_depth_txq {
+ uint32 bsscfg_map; /* bitmap of bsscfg indexes associated with this queue */
+ wl_tx_queue_depth_t tx_queue_depth; /* queue depth */
+} wl_tx_queue_depth_txq_t;
+
+#define WL_TX_QUEUE_DEPTH_TXQ_FIXED_LEN \
+ (OFFSETOF(wl_tx_queue_depth_txq_t, tx_queue_depth) + WL_TX_QUEUE_DEPTH_FIXED_LEN)
+#define WL_TX_QUEUE_DEPTH_TXQ_FULL_LEN(num_queue_depth) \
+ (OFFSETOF(wl_tx_queue_depth_txq_t, tx_queue_depth) + \
+ WL_TX_QUEUE_DEPTH_FULL_LEN(num_queue_depth))
+
+/* structure for WL_TX_QUEUE_DEPTH_LOW_TXQ_ID, WL_TX_QUEUE_DEPTH_EXCUR_LOW_TXQ_ID */
+typedef struct wl_tx_queue_depth_low_txq {
+ wl_tx_queue_depth_t tx_queue_depth; /* queue depth */
+} wl_tx_queue_depth_low_txq_t;
+
+#define WL_TX_QUEUE_DEPTH_LOW_TXQ_FIXED_LEN \
+ (OFFSETOF(wl_tx_queue_depth_low_txq_t, tx_queue_depth) + WL_TX_QUEUE_DEPTH_FIXED_LEN)
+#define WL_TX_QUEUE_DEPTH_LOW_TXQ_FULL_LEN(num_queue_depth) \
+ (OFFSETOF(wl_tx_queue_depth_low_txq_t, tx_queue_depth) + \
+ WL_TX_QUEUE_DEPTH_FULL_LEN(num_queue_depth))
+
+/* structure for WL_TX_QUEUE_DEPTH_SCBQ_ID */
+typedef struct wl_tx_queue_depth_scbq {
+ struct ether_addr ea; /* ether addr of peer */
+ uint16 bsscfg_idx; /* bsscfg index */
+ wl_tx_queue_depth_t tx_queue_depth; /* queue depth */
+} wl_tx_queue_depth_scbq_t;
+
+#define WL_TX_QUEUE_DEPTH_SCBQ_FIXED_LEN \
+ (OFFSETOF(wl_tx_queue_depth_scbq_t, tx_queue_depth) + WL_TX_QUEUE_DEPTH_FIXED_LEN)
+#define WL_TX_QUEUE_DEPTH_SCBQ_FULL_LEN(num_queue_depth) \
+ (OFFSETOF(wl_tx_queue_depth_scbq_t, tx_queue_depth) + \
+ WL_TX_QUEUE_DEPTH_FULL_LEN(num_queue_depth))
+
+/* sub-xtlv IDs within WL_STATS_XTLV_BUS_PCIE_TX_HISTOGRAMS */
+enum wl_pcie_tx_histogram_id {
+ WL_PCIE_TX_HIST_ID = 1
+};
+
+/* structure for PCIE_TX_HIST_ID */
+typedef struct wl_pcie_tx_hist {
+ uint16 ring_id; /* PCIe ring id */
+ uint16 pad; /* 4-byte alignment */
+ wl_tx_hist_t tx_hist; /* hist_bmap:
+ * 0x1=tx histogram
+ * 0x2=tx status pending histogram
+ */
+} wl_pcie_tx_hist_t;
+
+#define WL_PCIE_TX_HIST_FIXED_LEN \
+ (OFFSETOF(wl_pcie_tx_hist_t, tx_hist) + WL_TX_HIST_FIXED_LEN)
+#define WL_PCIE_TX_HIST_FULL_LEN(num_hist, max_hist_size) \
+ (OFFSETOF(wl_pcie_tx_hist_t, tx_hist) + \
+ WL_TX_HIST_FULL_LEN(num_hist, max_hist_size))
+
+/* sub-xtlv IDs within WL_STATS_XTLV_BUS_PCIE_TX_QUEUE_DEPTH */
+enum wl_pcie_tx_queue_depth_id {
+ WL_PCIE_TX_QUEUE_DEPTH_ID = 1
+};
+
+/* structure for WL_PCIE_TX_QUEUE_DEPTH_ID */
+typedef struct wl_pcie_tx_queue_depth {
+ uint16 ring_id; /* PCIe ring id */
+ uint16 queue_depth; /* queue depth of ring id */
+ uint16 tx_status_pend; /* tx status pending of ring id */
+ uint16 pad; /* 4-byte alignment */
+} wl_pcie_tx_queue_depth_t;
+
+#define WL_PCIE_TX_QUEUE_DEPTH_FIXED_LEN sizeof(wl_pcie_tx_queue_depth_t)
+
+#define WL_WSEC_DEL_PMK_VER_V1 1u
+/* tlv ids for del pmk */
+#define WL_DEL_PMK_TLV_ID 1u
+#define WL_DEL_PMKID_TLV_ID 2u
+#define WL_DEL_PEER_ADDR_TLV_ID 3u
+typedef struct wl_wsec_del_pmk {
+ uint16 version;
+ uint16 length;
+ uint8 xtlvs[];
+} wl_wsec_del_pmk_t;
+#define WL_WSEC_DEL_PMK_FIXED_LEN_V1 OFFSETOF(wl_wsec_del_pmk_t, xtlvs)
+
+#define WLC_RC_ROAM_VER_1 1
+
+typedef struct wlc_rcroam {
+ uint16 ver;
+ uint16 len;
+ uint8 data[];
+} wlc_rcroam_t;
+
+typedef struct wlc_rcroam_info_v1 {
+	uint16 inactivity_period; /* inactivity monitor period */
+ uint16 roam_scan_timeout;
+ uint16 periodic_roam_scan_timeout;
+ uint8 roam_trig_step; /* roaming trigger step value */
+} wlc_rcroam_info_v1_t;
+
+#define WLC_RC_ROAM_CUR_VER WLC_RC_ROAM_VER_1
+#define RCROAM_HDRLEN 4u
+#define MAX_RCSCAN_TIMER 300u
+
+#define WLC_SILENT_ROAM_VER_1 1
+/* silent roam information struct */
+typedef struct wlc_sroam_info_v1 {
+ /* Silent roam Set/Get value */
+ uint8 sroam_on; /* sroam on/off */
+ int8 sroam_min_rssi; /* minimum rssi threshold to activate the feature */
+ uint8 sroam_rssi_range; /* rssi tolerance to determine stationary status */
+ uint8 sroam_score_delta; /* roam score delta value to prune candidate ap */
+ uint8 sroam_period_time; /* required monitoring period to trigger roaming scan */
+ uint8 sroam_band; /* band setting of roaming scan (all, 5g, 2g) */
+ uint8 sroam_inact_cnt; /* tx/rx frame count threshold for checking inactivity */
+ /* Silent roam monitor value */
+ int8 sroam_ref_rssi; /* reference rssi which is picked when monitoring is
+ * started. it is updated to current rssi when it's
+ * out from rssi range
+ */
+ uint8 sroam_time_since; /* elapsed time since start monitoring */
+ uint8 pad[3];
+ uint32 sroam_txfrm_prev; /* save current tx frame counts */
+ uint32 sroam_rxfrm_prev; /* save current rx frame counts */
+} wlc_sroam_info_v1_t;
+
+typedef struct wlc_sroam {
+ uint16 ver;
+ uint16 len;
+ uint8 data[];
+} wlc_sroam_t;
+
+#define WLC_SILENT_ROAM_CUR_VER WLC_SILENT_ROAM_VER_1
+#define SROAM_HDRLEN 4u
+
+#define DEF_SROAM_OFF 0
+#define DEF_SROAM_MIN_RSSI -65
+#define DEF_SROAM_RSSI_RANGE 3u
+#define DEF_SROAM_SCORE_DELTA 1u
+#define DEF_SROAM_PERIOD_TIME 10u
+#define DEF_SROAM_INACT_CNT 5u
+#define MAX_SROAM_RSSI -70
+#define MAX_SROAM_RSSI_RANGE 5u
+#define MAX_SROAM_SCORE_DELTA 10u
+#define MAX_SROAM_PERIOD_TIME 250u
+#define SROAM_BAND_AUTO 3u
+
+/* MACSMPL IOVAR parameters */
+typedef enum wl_macdbg_macsmpl_iovar_id {
+ WL_MACSMPL_START = 0,
+ WL_MACSMPL_STOP = 1,
+ WL_MACSMPL_DUMP = 2,
+ WL_MACSMPL_STATUS = 3,
+ WL_MACSMPL_SIZE = 4
+} wl_macdbg_macsmpl_iovar_id_t;
+
+/* WL_MACSMPL_STATUS values */
+typedef enum wl_macdbg_macsmpl_status {
+ WL_MACSMPL_STATUS_IDLE = 0,
+ WL_MACSMPL_STATUS_ACTIVE = 1,
+ WL_MACSMPL_STATUS_WAIT_FOR_TRIG = 2,
+ WL_MACSMPL_STATUS_TRIGGERED = 3
+} wl_macdbg_macsmpl_status_t;
+
+/* WL_MACSMPL_START_PARAM subcommand data */
+typedef struct wl_macsmpl_start_param {
+ uint32 trig_condition; /* trigger condition */
+ uint16 gpio_mux; /* MACControl1 GPIOSel field */
+ uint8 pad[2]; /* 4-byte struct alignment */
+} wl_macsmpl_param_start_t;
+
+/* MAC SC fragment request data */
+typedef struct wl_macsmpl_frag_req_param {
+ uint32 offset; /* requested MAC SC fragment offset */
+ uint32 size; /* requested MAC SC fragment size, bytes */
+} wl_macsmpl_frag_req_param_t;
+
+/* MAC SC fragment response data */
+typedef struct wl_macsmpl_frag_resp_param {
+ uint32 offset; /* MAC SC response fragment offset */
+	uint32 size;	/* MAC SC response fragment size, bytes */
+ uint8 data[]; /* MAC SC response fragment data, flexible array */
+} wl_macsmpl_frag_resp_param_t;
+
+/* MAC SC status data */
+typedef struct wl_macsmpl_status {
+ uint32 maccontrol1; /* MACControl1 register value */
+ uint32 macsc_flags; /* M_MACSC_FLAGS SHM register value */
+ uint16 sc_play_ctrl; /* TXE SampleCollectPlayCtrl register value */
+ uint16 sc_cur_ptr; /* TXE SampleCollectCurPtr register value */
+ uint16 sc_start_ptr; /* TXE SampleCollectStartPtr register value */
+ uint16 sc_stop_ptr; /* TXE SampleCollectStopPtr register value */
+} wl_macsmpl_status_t;
+
+/* WL_MACSMPL parameters data */
+typedef struct wl_macsmpl_param {
+ wl_macdbg_macsmpl_iovar_id_t subcmd_id;
+ union {
+ wl_macsmpl_param_start_t start;
+ wl_macsmpl_frag_req_param_t frag_req;
+ } u;
+} wl_macsmpl_param_t;
+
+/* High priority P2P */
+#define WL_HP2P_COUNTERS_VER 2u
+typedef struct hp2p_counters {
+ uint16 frames_queued;
+ uint16 frames_processed;
+ uint16 frames_exp;
+ uint16 frames_preempt;
+ uint16 frames_retried;
+ uint16 reserved; /* reserved, rsvd2 and rsvd3 are experimental counters */
+ uint16 rsvd2;
+ uint16 rsvd3;
+} hp2p_counters_t;
+
+typedef struct hp2p_counters_v2 {
+ uint32 frames_queued; /* Number of AMPDUs processed */
+ uint16 frames_exp; /* Number of Lifetime expiries */
+ uint16 edt_retry; /* Exceed due to - retry */
+ uint16 mpif_reconf; /* MPIF Reconfigure */
+ uint16 exceed_delay; /* Exceed delay threshold */
+ uint16 edt_nav_thresh; /* Exceed due to - NAV threshold */
+ uint16 edt_dc_def; /* Exceed due to - DC based deferral */
+ uint16 edt_tx_fifo_full; /* Exceed due to - Tx FIFO full */
+ uint16 edt_cts_thresh; /* Exceed due to - CTS threshold */
+ uint16 dbg1; /* dbgX are for internal debugging */
+ uint16 dbg2;
+ uint16 dbg3;
+ uint16 dbg4;
+ uint16 dbg5;
+ uint16 dbg6;
+ uint16 dbg7;
+ uint16 dbg8;
+ uint16 dbg9;
+ uint16 dbg10;
+} hp2p_counters_v2_t;
+
+typedef struct hp2p_counters_hdr {
+ uint16 version; /* version of hp2p_counters_t structure */
+ uint16 len;
+ uint16 slice_idx;
+ uint16 pad;
+ uint8 counters[];
+} hp2p_counters_hdr_t;
+
+/* TX enable flags */
+#define WL_HP2P_TX_AMPDU 0x0001u
+#define WL_HP2P_TX_AMSDU 0x0002u
+#define WL_HP2P_TX_RDG 0x0004u
+
+/* RX enable flags */
+#define WL_HP2P_RX_AMPDU 0x0001u
+#define WL_HP2P_RX_AMSDU 0x0002u
+#define WL_HP2P_RX_RDG 0x0004u
+#define WL_HP2P_RX_AMPDU_REORDER 0x0008u
+
+/* Max/min values for configuration parameters to check validity */
+#define WL_HP2P_MAX_RETRY_MAX 14u
+#define WL_HP2P_MAX_RETRY_MIN 6u
+#define WL_HP2P_LATENCY_TARGET_MAX 30u
+#define WL_HP2P_BURST_INTERVAL_MAX 64u
+#define WL_HP2P_MAX_FIFO 5u
+#define WL_HP2P_MAX_UCODE_LATENCY_THR 500u
+#define WL_HP2P_MAX_UCODE_RECOV_TO 500u
+#define WL_HP2P_MAX_UCODE_NAV_THR 50000u
+
+#define WL_HP2P_VERSION 1u
+typedef struct hp2p_tx_config {
+ struct ether_addr peer_addr;
+ uint16 max_burst;
+ uint16 txop; /* stored in network order (ls octet first) */
+ uint16 flags; /* flags to enable/disable AMPDU, AMSDU, RDG */
+ uint8 aci;
+ uint8 ecw;
+ uint8 fifo;
+ uint8 tid;
+ uint8 burst_interval;
+ uint8 latency_target;
+ uint8 max_retry;
+ uint8 pad;
+} hp2p_tx_config_t;
+
+typedef struct hp2p_rx_config {
+ struct ether_addr peer_addr;
+ uint16 flags; /* flags to enable/disable AMPDU, AMSDU, RDG, AMPDU Reorder */
+ uint8 tid;
+ uint8 pad[3];
+} hp2p_rx_config_t;
+
+typedef struct hp2p_udbg_config {
+ uint16 recovery_timeout; /* multiples of 256 usecs */
+ uint16 latency_thresh; /* multiples of 256 usecs */
+ uint16 enable_trap; /* trap if ucode delay exceeds latency_thresh */
+ uint16 nav_thresh; /* in usec */
+} hp2p_udbg_config_t;
+
+typedef struct hp2p_cmd {
+ uint16 type;
+ uint16 len;
+ uint8 data[];
+} hp2p_cmd_t;
+
+typedef struct hp2p_cmd_hdr {
+ uint16 version;
+ uint16 slice_idx;
+ uint8 cmd[];
+} hp2p_cmd_hdr_t;
+
+/* to be used in type field of hp2p_cmd_t structure while issuing HP2P commands */
+typedef enum hp2p_cmd_id {
+ WL_HP2P_CMD_ENABLE = 0,
+ WL_HP2P_CMD_TX_CONFIG = 1,
+ WL_HP2P_CMD_RX_CONFIG = 2,
+ WL_HP2P_CMD_COUNTERS = 3,
+ WL_HP2P_CMD_UDBG_CONFIG = 4
+} hp2p_cmd_id_t;
+
+typedef enum wl_rffe_cmd_type {
+ WL_RFFE_CMD_DEBUG_MODE = 0,
+ WL_RFFE_CMD_ELNABYP_MODE = 1,
+ WL_RFFE_CMD_REG = 2,
+ WL_RFFE_CMD_LAST
+} wl_rffe_cmd_type_t;
+
+/** RFFE struct passed through ioctl */
+typedef struct {
+ uint32 regaddr; /**< rFEM_RegAddr */
+ uint32 antnum; /**< rFEM AntNum */
+ uint32 slaveid; /**< rFEM SlaveID */
+ uint32 value; /**< read/write value */
+} rffe_reg_t;
+
+#ifndef BCMUTILS_ERR_CODES
+
+/*
+ * SOE (Security Offload Engine) status codes.
+ */
+
+/* SOE status codes are reserved from -6144 to -7167 (1K) */
+
+enum wl_soe_status {
+ /* Invalid operational context */
+ WL_SOE_E_BAD_OP_CONTEXT = -6144,
+
+ /* Invalid operational type */
+ WL_SOE_E_BAD_OP_TYPE = -6145,
+
+ /* Failure to get NAF3 encoded scalar */
+ WL_SOE_E_BN_GET_NAF3_ERROR = -6146,
+
+ /* Failure to get NAF3 params */
+ WL_SOE_E_ECG_GET_NAF3_PARAMS_ERROR = -6147,
+
+ /* FAILURE to get Montgomery params */
+ WL_SOE_E_MONT_PARAMS_GET_ERROR = -6148,
+
+ /* Invalid OSL handle */
+ WL_SOE_E_BAD_SI_OSH = -6149,
+
+ /* Invalid ECG group */
+ WL_SOE_E_BAD_ECG_GROUP = -6150,
+
+ /* Invalid BN context */
+ WL_SOE_E_BAD_BN_CTX = -6151,
+
+ /* Invalid SOE core register base address */
+ WL_SOE_E_BAD_SOE_REGBASE = -6152,
+
+ /* Invalid SOE context */
+ WL_SOE_E_BAD_SOE_CONTXT = -6153,
+
+ /* Number of words are too short (i.e., not enough
+ * room to encode the PKA sequence)
+ */
+ WL_SOE_E_PKA_SEQUENCE_WORDS_TOO_SHORT = -6154,
+
+ /* Generic bn_get error */
+ WL_SOE_E_PKA_BN_GET_ERROR = -6155,
+
+ /* Sequence buf too short for BN */
+ WL_SOE_E_PKA_BN_BUF_TOO_SHORT_BN = -6156,
+
+ /* Sequence buf too short for ECG prime */
+ WL_SOE_E_PKA_BN_BUF_TOO_SHORT_ECG_PRIME = -6157,
+
+ /* Sequence buf too short for Montgomery N' */
+ WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_MONT_PRIME = -6158,
+
+ /* Sequence buf too short for Accumulator registers */
+ WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_ACCM_REG = -6159,
+
+ /* Sequence buf too short for the point P */
+ WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_P = -6160,
+
+ /* Sequence buf too short for -P */
+ WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_PN = -6161,
+
+ /* Sequence buf too short for 3P */
+ WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_3P = -6162,
+
+ /* Sequence buf too short for -3P */
+ WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_3PN = -6163,
+
+ /* Sequence buf too short for NAF3 scalar */
+ WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_NAF3_SCALAR = -6164,
+
+ /* Sequence buf too short for load shift count */
+ WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_PRE_JMP = -6165,
+
+ /* SOE engine(SHA/PKA) failed to complete the operation */
+ WL_SOE_E_ENGINE_UNABLE_TO_COMPLETE = -6166,
+
+ /* Wrong LIR (Long Integer Register) type */
+ WL_SOE_E_PKA_BAD_LIR_TYPE = -6167,
+
+ /* Reference count has reached maximum */
+ WL_SOE_E_MAX_REF_COUNT_REACHED = -6168,
+
+ /* Failed to get the SOE context reference */
+ WL_SOE_E_GET_REF_FAILED = -6169,
+
+ /* Incoming digest length is invalid */
+ WL_SOE_E_SHA_WRONG_DIGEST_LEN = -6170
+};
+
+#endif /* BCMUTILS_ERR_CODES */
+
+#define NR5GCX_STATUS_VER_1 1
+/* NR coex status structures */
+typedef struct wlc_nr5gcx_status_v1 {
+ uint16 version; /* version info */
+ uint16 len; /* status length */
+ uint32 mode; /* NR coex status */
+ uint32 nr_req_cnt; /* NR req number since last read */
+ uint32 nr_dur; /* NR duration since last read, us */
+ uint32 nr_duty_cycle; /* NR duty cycle since last read */
+ uint32 nr_max_dur; /* NR max duration in a single request */
+ uint32 wlan_crit_cnt; /* aggregated # of WLAN critical events */
+ uint32 wlan_crit_dur; /* aggregated WLAN critical event duration, ms */
+ uint32 wlan_crit_max_dur; /* Duration of the WLAN critical events whose dur is max */
+ uint16 wlan_crit_evt_bitmap; /* WLAN critical event occurrence bitmap,
+ * 1 event per bit.
+ */
+ uint16 wlan_crit_max_evt_type; /* The event type of the WLAN critical
+ * event whose dur is max
+ */
+} wlc_nr5gcx_status_v1_t;
#endif /* _wlioctl_h_ */
*
* Definitions subject to change without notice.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wlioctl_defs.h 766704 2018-06-09 05:44:18Z $
+ * $Id: wlioctl_defs.h 826113 2019-06-18 21:04:03Z $
*/
#ifndef wlioctl_defs_h
#define D11AC_IOTYPES
#ifndef USE_NEW_RSPEC_DEFS
+/* Remove when no referencing branches exist.
+ * These macros will be used only in older branches (prior to K branch).
+ * Wl layer in newer branches and trunk use those defined in bcmwifi_rspec.h.
+ * Non-wl layer in newer branches and trunk may use these as well
+ * until they are removed.
+ */
/* WL_RSPEC defines for rate information */
#define WL_RSPEC_RATE_MASK 0x000000FF /* rate or HT MCS value */
-#define WL_RSPEC_HE_MCS_MASK 0x0000000F /* HE MCS value */
-#define WL_RSPEC_HE_NSS_MASK 0x000000F0 /* HE Nss value */
-#define WL_RSPEC_HE_NSS_SHIFT 4 /* HE Nss value shift */
#define WL_RSPEC_VHT_MCS_MASK 0x0000000F /* VHT MCS value */
#define WL_RSPEC_VHT_NSS_MASK 0x000000F0 /* VHT Nss value */
#define WL_RSPEC_VHT_NSS_SHIFT 4 /* VHT Nss value shift */
#define WL_RSPEC_ENCODE_RATE 0x00000000 /* Legacy rate is stored in RSPEC_RATE_MASK */
#define WL_RSPEC_ENCODE_HT 0x01000000 /* HT MCS is stored in RSPEC_RATE_MASK */
#define WL_RSPEC_ENCODE_VHT 0x02000000 /* VHT MCS and Nss is stored in RSPEC_RATE_MASK */
-#define WL_RSPEC_ENCODE_HE 0x03000000 /* HE MCS and Nss is stored in RSPEC_RATE_MASK */
/* WL_RSPEC_BW field defs */
#define WL_RSPEC_BW_UNSPECIFIED 0
#define WL_SCANFLAGS_CLIENT_SHIFT 8
/* Bitmask for scan_type */
+/* Reserved flag precludes the use of 0xff for scan_type which is
+ * interpreted as default for backward compatibility.
+ * Low priority scan uses currently reserved bit,
+ * this should be changed as scan_type extended.
+ * So, reserved flag definition removed.
+ */
/* Use lower 16 bit for scan flags, the upper 16 bits are for internal use */
#define WL_SCANFLAGS_PASSIVE 0x01 /* force passive scan */
#define WL_SCANFLAGS_LOW_PRIO 0x02 /* Low priority scan */
#define WL_SCANFLAGS_ROAMSCAN 0x200 /* Roam scan */
#define WL_SCANFLAGS_FWSCAN 0x400 /* Other FW scan */
#define WL_SCANFLAGS_HOSTSCAN 0x800 /* Host scan */
+#define WL_SCANFLAGS_LOW_POWER_SCAN 0x1000 /* LOW power scan, scheduled scan
+ * only on scancore
+ */
+#define WL_SCANFLAGS_HIGH_ACCURACY 0x2000 /* High accuracy scan, which needs
+ * reliable scan results
+ */
+#define WL_SCANFLAGS_LOW_SPAN 0x4000 /* LOW span scan, which expects
+ * scan to be completed ASAP
+ */
/* wl_iscan_results status values */
#define WL_SCAN_RESULTS_SUCCESS 0
#define WL_SCAN_RESULTS_ABORTED 3
#define WL_SCAN_RESULTS_NO_MEM 4
+/* Flags for parallel scan */
+/* Bitmap to enable/disable rsdb parallel scan, 5g-5g/2g-2g parallel scan
+ * SCAN_PARALLEL_PASSIVE_5G ==> 5g-5g parallel scan
+ * SCAN_PARALLEL_PASSIVE_2G ==> 2g-2g parallel scan
+ */
+#define SCAN_PARALLEL_PASSIVE_5G (0x40)
+#define SCAN_PARALLEL_PASSIVE_2G (0x80)
+
#define SCANOL_ENABLED (1 << 0)
#define SCANOL_BCAST_SSID (1 << 1)
#define SCANOL_NOTIFY_BCAST_SSID (1 << 2)
/* bit definitions for bcnflags in wl_bss_info */
#define WL_BSS_BCNFLAGS_INTERWORK_PRESENT 0x01 /* beacon had IE, accessnet valid */
#define WL_BSS_BCNFLAGS_INTERWORK_PRESENT_VALID 0x02 /* on indicates support for this API */
+#define WL_BSS_BCNFLAGS_MULTIPLE_BSSID_SET 0x4 /* this AP belongs to a multiple BSSID set */
+#define WL_BSS_BCNFLAGS_NONTRANSMITTED_BSSID 0x8 /* this AP is the transmitted BSSID */
/* bssinfo flag for nbss_cap */
#define VHT_BI_SGI_80MHZ 0x00000100
#define WL_KF_RES_5 (1 << 5) /* Reserved for backward compat */
#endif /* BCMCCX || BCMEXTCCX */
#define WL_IBSS_PEER_GROUP_KEY (1 << 6) /* Indicates a group key for a IBSS PEER */
+#define WL_LINK_KEY (1 << 7) /* For linking keys of both cores */
+#define WL_UNLINK_KEY (1 << 8) /* For unlinking keys of both cores */
/* wireless security bitvec */
#define WSEC_NONE 0x0
#define BRCM_AUTH_PSK 0x0100 /* BRCM specific PSK */
#define BRCM_AUTH_DPT 0x0200 /* DPT PSK without group keys */
#if defined(BCMWAPI_WAI) || defined(BCMWAPI_WPI)
-#define WPA_AUTH_WAPI 0x0400
+#define WPA_AUTH_WAPI 0x0400 /* deliberately shares the 0x0400 value with WAPI_AUTH_UNSPECIFIED */
#define WAPI_AUTH_NONE WPA_AUTH_NONE /* none (IBSS) */
#define WAPI_AUTH_UNSPECIFIED 0x0400 /* over AS */
#define WAPI_AUTH_PSK 0x0800 /* Pre-shared key */
#define WPA2_AUTH_FILS_SHA384 0x20000 /* FILS with SHA384 key derivation */
#define WPA2_AUTH_IS_FILS(auth) ((auth) & (WPA2_AUTH_FILS_SHA256 | WPA2_AUTH_FILS_SHA384))
#define WPA3_AUTH_SAE_PSK 0x40000 /* SAE with 4-way handshake */
-#define WPA3_AUTH_SAE_FBT 0x80000 /* SAE with FT */
#define WPA3_AUTH_OWE 0x100000 /* OWE */
#define WPA3_AUTH_1X_SUITE_B_SHA256 0x200000 /* Suite B SHA256 */
#define WPA3_AUTH_1X_SUITE_B_SHA384 0x400000 /* Suite B-192 SHA384 */
#define WPA3_AUTH_PSK_SHA384 0x800000 /* PSK with SHA384 key derivation */
-
+#define WPA3_AUTH_SAE_AP_ONLY 0x1000000 /* SAE restriction to connect to pure SAE APs */
/* WPA2_AUTH_SHA256 not used anymore. Just kept here to avoid build issue in DINGO */
#define WPA2_AUTH_SHA256 0x8000
#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */
/* pmkid */
-#define MAXPMKID 16
+#define MAXPMKID 16 /* max # PMKID cache entries NDIS */
/* SROM12 changes */
#define WLC_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */
#define WLC_GET_RSSI_QDB 321 /* qdB portion of the RSSI */
#define WLC_DUMP_RATESET 322
#define WLC_ECHO 323
-#define WLC_LAST 324
+#define WLC_LAST 324 /* The last ioctl. Also push this
+ * number when adding new ioctls
+ */
+/*
+ * Alert:
+ * Duplicate a few definitions that irelay requires from epiioctl.h here
+ * so caller doesn't have to include this file and epiioctl.h .
+ * If this grows any more, it would be time to move these irelay-specific
+ * definitions out of the epiioctl.h and into a separate driver common file.
+ */
#define WLC_SPEC_FLAG 0x80000000 /* For some special IOCTL */
#ifndef EPICTRL_COOKIE
#define EPICTRL_COOKIE 0xABADCEDE
#define TRIGGER_BADFCS 0x08
#define TRIGGER_BADPLCP 0x10
#define TRIGGER_CRSGLITCH 0x20
+#define TRIGGER_ASYNC 0x40
#define WL_SAMPLEDATA_HEADER_TYPE 1
#define WL_SAMPLEDATA_HEADER_SIZE 80 /* sample collect header size (bytes) */
#define WL_TX_POWER_F_PROP11NRATES 0x80
#define WL_TX_POWER_F_UNIT_QDBM 0x100
#define WL_TX_POWER_F_TXCAP 0x200
+#define WL_TX_POWER_F_HE 0x400
+#define WL_TX_POWER_F_RU_RATE 0x800
+
/* Message levels */
#define WL_ERROR_VAL 0x00000001
#define WL_TRACE_VAL 0x00000002
#define WL_PRUSR_VAL 0x00000200
#define WL_PS_VAL 0x00000400
#define WL_TXPWR_VAL 0x00000000 /* retired in TOT on 6/10/2009 */
-#define WL_MODE_SWITCH_VAL 0x00000800 /* Using retired TXPWR val */
+#define WL_MODE_SWITCH_VAL 0x00000800 /* Using retired TXPWR val */
#define WL_PORT_VAL 0x00001000
#define WL_DUAL_VAL 0x00002000
#define WL_WSEC_VAL 0x00004000
#define WL_NRSSI_VAL 0x00000000 /* retired in TOT on 6/10/2009 */
#define WL_BCNTRIM_VAL 0x00020000 /* Using retired NRSSI VAL */
#define WL_LOFT_VAL 0x00000000 /* retired in TOT on 6/10/2009 */
-#define WL_PFN_VAL 0x00040000 /* Using retired LOFT_VAL */
+#define WL_PFN_VAL 0x00040000 /* Using retired LOFT_VAL */
#define WL_REGULATORY_VAL 0x00080000
-#define WL_CSA_VAL 0x00080000 /* Reusing REGULATORY_VAL due to lackof bits */
+#define WL_CSA_VAL 0x00080000 /* Reusing REGULATORY_VAL due to lack of bits */
#define WL_TAF_VAL 0x00100000
#define WL_RADAR_VAL 0x00000000 /* retired in TOT on 6/10/2009 */
#define WL_WDI_VAL 0x00200000 /* Using retired WL_RADAR_VAL VAL */
#define WL_TDLS_VAL 0x00001000
#define WL_MCNX_VAL 0x00002000
#define WL_PROT_VAL 0x00004000
-#define WL_PSTA_VAL 0x00008000
#define WL_TSO_VAL 0x00010000
#define WL_TRF_MGMT_VAL 0x00020000
#define WL_LPC_VAL 0x00040000
#define WL_P2PO_VAL 0x00200000
#define WL_TBTT_VAL 0x00400000
#define WL_FBT_VAL 0x00800000
-#define WL_RRM_VAL 0x00800000 /* reuse */
+#define WL_RRM_VAL 0x00800000 /* reuse */
#define WL_MQ_VAL 0x01000000
/* This level is currently used in Phoenix2 only */
#define WL_SRSCAN_VAL 0x02000000
*/
#define WL_ASSOC_AP_VAL 0x00000001
#define WL_FILS_VAL 0x00000002
+#define WL_LATENCY_VAL 0x00000004
+#define WL_WBUS_VAL 0x00000008
/* max # of leds supported by GPIO (gpio pin# == led index#) */
#define WL_LED_NUMGPIO 32 /* gpio 0-31 */
#define WL_LED_ARADIO 4 /* 5 Ghz radio enabled */
#define WL_LED_BRADIO 5 /* 2.4Ghz radio enabled */
#define WL_LED_BGMODE 6 /* on if gmode, off if bmode */
-#define WL_LED_WI1 7
-#define WL_LED_WI2 8
-#define WL_LED_WI3 9
+#define WL_LED_WI1 7 /* wlan indicator 1 mode (legacy cust) */
+#define WL_LED_WI2 8 /* wlan indicator 2 mode (legacy cust) */
+#define WL_LED_WI3 9 /* wlan indicator 3 mode (legacy cust) */
#define WL_LED_ASSOC 10 /* associated state indicator */
#define WL_LED_INACTIVE 11 /* null behavior (clears default behavior) */
-#define WL_LED_ASSOCACT 12 /* on when associated; blink fast for activity */
-#define WL_LED_WI4 13
-#define WL_LED_WI5 14
+#define WL_LED_ASSOCACT 12 /* on associated; blink fast for activity */
+#define WL_LED_WI4 13 /* wlan indicator 4 mode (legacy cust 5G) */
+#define WL_LED_WI5 14 /* wlan indicator 5 mode (legacy cust 2.4) */
#define WL_LED_BLINKSLOW 15 /* blink slow */
#define WL_LED_BLINKMED 16 /* blink med */
#define WL_LED_BLINKFAST 17 /* blink fast */
#define WL_LED_BLINKCUSTOM 18 /* blink custom */
-#define WL_LED_BLINKPERIODIC 19 /* blink periodic (custom 1000ms / off 400ms) */
+#define WL_LED_BLINKPERIODIC 19 /* blink period (custom 1000ms / off 400ms) */
#define WL_LED_ASSOC_WITH_SEC 20 /* when connected with security */
/* keep on for 300 sec */
#define WL_LED_START_OFF 21 /* off upon boot, could be turned on later */
-#define WL_LED_WI6 22
-#define WL_LED_WI7 23
-#define WL_LED_WI8 24
+#define WL_LED_WI6 22 /* wlan indicator 6 mode legacy rtr 43526 5 */
+#define WL_LED_WI7 23 /* wlan indicator 7 mode legacy rtr 43526 2.4 */
+#define WL_LED_WI8 24 /* wlan indicator 8 mode legacy rtr 43526 */
#define WL_LED_NUMBEHAVIOR 25
/* led behavior numeric value format */
/* number of bytes needed to define a proper bit mask for MAC event reporting */
#define BCMIO_ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define BCMIO_NBBY 8
-#define WL_EVENTING_MASK_LEN (16+4)
+#define WL_EVENTING_MASK_LEN (16+4) /* Don't increase this without wl review */
#define WL_EVENTING_MASK_EXT_LEN \
MAX(WL_EVENTING_MASK_LEN, (ROUNDUP(WLC_E_LAST, NBBY)/NBBY))
/* maximum channels returned by the get valid channels iovar */
#define WL_NUMCHANNELS 64
+/* Channels break down for 2G BAND
+* 2G 20MHz = 14
+*
+* 2G 40MHz
+* 9 * 2 = 18
+*
+* 2G tot = 14 + 18 = 32
+*
+* Channels Break down for 5G BAND
+* 5G 20MHz
+* 36-48 4
+* 52-64 4
+* 100-144 12
+* 149-161 4
+* 165 1
+* 5G 20 subtot = 25
+*
+* 5G 40 12 * 2 = 24
+* 5G 80 6 * 4 = 24
+* 5G 160 2 * 8 = 16
+*
+* 5G total = 25 + 24+ 24+ 16 = 89
+*
+* TOTAL 2G and 5G
+* 2G + 5G = (32 + 89) = 121
+*
+* Channels Break down for 6G BAND
+* 20MHz = 59
+* 40MHz 29 * 2 = 58
+* 80MHz 14 * 4 = 56
+* 160MHz 7 * 8 = 56
+* 6G total = 59 + 58 + 56 + 56 = 229
+*
+* Total WL_NUMCHANSPECS 2G/5G/6G
+* total = 32 + 89 + 229 = 350
+*
+* IF 5g 80+80 is defined
+* 80MHz cf pairs are:
+* 42 106
+* 42 122
+* 42 138
+* 42 155
+* 58 106
+* 58 122
+* 58 138
+* 58 155
+* 106 138
+* 106 155
+* 122 155
+* 138 155
+*
+*
+* 12 pairs * 8 primary channels = 96
+* TOTAL 2G + 5G + 5G (80 + 80)
+* 32 + 89 + 96 = 217
+*
+* TOTAL 2G + 5G + 5G (80 + 80) + 6G (excluding 80 + 80)
+* 32 + 89 + 96 + 229 = 446
+*
+*/
+#ifdef WL_BAND6G
+/* max number of chanspecs (used by the iovar to calc. buf space) */
+#ifdef WL11AC_80P80
+#define WL_NUMCHANSPECS 446
+#else
+#define WL_NUMCHANSPECS 350
+#endif // endif
+#else
/* max number of chanspecs (used by the iovar to calc. buf space) */
#ifdef WL11AC_80P80
#define WL_NUMCHANSPECS 206
#else
#define WL_NUMCHANSPECS 110
#endif // endif
+#endif /* WL_BAND6G */
/* WDS link local endpoint WPA role */
#define WL_WDS_WPA_ROLE_AUTH 0 /* authenticator */
#define WL_P2P_SCHED_TYPE_ABS 0 /* Scheduled Absence */
#define WL_P2P_SCHED_TYPE_REQ_ABS 1 /* Requested Absence */
+/* at some point we may need bitvec here (combination of actions) */
/* schedule action during absence periods (for WL_P2P_SCHED_ABS type) */
#define WL_P2P_SCHED_ACTION_NONE 0 /* no action */
#define WL_P2P_SCHED_ACTION_DOZE 1 /* doze */
/* schedule option - WL_P2P_SCHED_TYPE_XXX */
#define WL_P2P_SCHED_ACTION_RESET 255 /* reset */
+/* at some point we may need bitvec here (combination of options) */
/* schedule option - WL_P2P_SCHED_TYPE_ABS */
#define WL_P2P_SCHED_OPTION_NORMAL 0 /* normal start/interval/duration/count */
#define WL_P2P_SCHED_OPTION_BCNPCT 1 /* percentage of beacon interval */
#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040
#define WLFEATURE_DISABLE_11N_GF 0x00000080
-/* Proxy STA modes */
-#define PSTA_MODE_DISABLED 0
-#define PSTA_MODE_PROXY 1
-#define PSTA_MODE_REPEATER 2
-
/* op code in nat_cfg */
#define NAT_OP_ENABLE 1 /* enable NAT on given interface */
#define NAT_OP_DISABLE 2 /* disable NAT on given interface */
#define WL_SWFL_ABBFL 0x0001 /* Allow Afterburner on systems w/o hardware BFL */
#define WL_SWFL_ABENCORE 0x0002 /* Allow AB on non-4318E chips */
#endif /* WLAFTERBURNER */
-#define WL_SWFL_NOHWRADIO 0x0004
+#define WL_SWFL_NOHWRADIO 0x0004 /* Disable HW Radio monitor (e.g., Cust Spec) */
#define WL_SWFL_FLOWCONTROL 0x0008 /* Enable backpressure to OS stack */
#define WL_SWFL_WLBSSSORT 0x0010 /* Per-port supports sorting of BSS */
#define TOE_ERRTEST_RX_CSUM2 0x00000004
/* ARP Offload feature flags for arp_ol iovar */
-#define ARP_OL_AGENT 0x00000001
-#define ARP_OL_SNOOP 0x00000002
-#define ARP_OL_HOST_AUTO_REPLY 0x00000004
-#define ARP_OL_PEER_AUTO_REPLY 0x00000008
+#define ARP_OL_AGENT 0x00000001
+#define ARP_OL_SNOOP 0x00000002
+#define ARP_OL_HOST_AUTO_REPLY 0x00000004
+#define ARP_OL_PEER_AUTO_REPLY 0x00000008
+#define ARP_OL_UPDATE_HOST_CACHE 0x00000010
/* ARP Offload error injection */
#define ARP_ERRTEST_REPLY_PEER 0x1
#define ETD_DATA_JOIN_INFO 0
#define ETD_DATA_VERSION_V1 1
+/* CTMODE DBG */
+/* input param: [27:16] => BYTES_THRESHOLD
+ *              [15:07] => MPDU_THRESHOLD
+ *              [06:03] => RESERVED
+ *              [02]    => enable UFP
+ *              [01]    => enable UFC
+ *              [00]    => enable CTMODE
+ */
+#define CTMODE_DBG_CTMODE_EN (0x1u)
+#define CTMODE_DBG_UFC_EN (0x2u)
+#define CTMODE_DBG_UFP_EN (0x4u)
+#define CTMODE_DBG_MPDU_THRESHOLD_SHIFT (7u)
+#define CTMODE_DBG_MPDU_THRESHOLD_MASK ((0x1FFu) << CTMODE_DBG_MPDU_THRESHOLD_SHIFT)
+#define CTMODE_DBG_BYTES_THRESHOLD_SHIFT (16u)
+#define CTMODE_DBG_BYTES_THRESHOLD_MASK ((0xFFFu) << CTMODE_DBG_BYTES_THRESHOLD_SHIFT)
+
+/* ====== SC use case configs ========= */
+/* SC user/use case request */
+#define WL_SC_REQ_SCAN 0u /* user scan */
+#define WL_SC_REQ_CNX 1u /* associated idle */
+#define WL_SC_REQ_NAN 2u /* NAN synchronization and discovery offload */
+
+/* === Per use case configuration === */
+/* scan cfgs */
+#define SC_SCAN_CFG_PASSIVE_MASK 0x01u /* Enable passive scan on sc */
+#define SC_SCAN_CFG_PASSIVE_SHIFT 0u
+#define SC_SCAN_CFG_LP_SCAN_MASK 0x02u /* Enable low prio scan on sc */
+#define SC_SCAN_CFG_LP_SCAN_SHIFT 1u
+#define SC_SCAN_CFG_REG_SCAN_MASK 0x04u /* Enable split scan using sc */
+#define SC_SCAN_CFG_REG_SCAN_SHIFT 2u
+#define SC_SCAN_CFG_FULL_SCAN_MASK 0x08u /* Enable full scan on sc */
+#define SC_SCAN_CFG_FULL_SCAN_SHIFT 3u
+/* Add get and set macros for each of the configs? */
+
+/* === Place holder for cnx and nan cfgs === */
#endif /* wlioctl_defs_h */
/*
* Custom OID/ioctl related helper functions.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Fundamental types and constants relating to WPA
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wpa.h 761317 2018-05-07 21:33:58Z $
+ * $Id: wpa.h 822438 2019-05-29 17:13:44Z $
*/
#ifndef _proto_wpa_h_
(cipher) == WPA_CIPHER_AES_CCM || \
(cipher) == WPA_CIPHER_AES_GCM || \
(cipher) == WPA_CIPHER_AES_GCM256 || \
+ (cipher) == WPA_CIPHER_CCMP_256 || \
(cipher) == WPA_CIPHER_TPK)
+#define IS_WPA_BIP_CIPHER(cipher) ((cipher) == WPA_CIPHER_BIP || \
+ (cipher) == WPA_CIPHER_BIP_GMAC_128 || \
+ (cipher) == WPA_CIPHER_BIP_GMAC_256 || \
+ (cipher) == WPA_CIPHER_BIP_CMAC_256)
+
#ifdef BCMWAPI_WAI
#define IS_WAPI_CIPHER(cipher) ((cipher) == WAPI_CIPHER_NONE || \
(cipher) == WAPI_CSE_WPI_SMS4)
(cipher) == WPA_CIPHER_BIP_GMAC_128 || \
(cipher) == WPA_CIPHER_BIP_GMAC_256 || \
(cipher) == WPA_CIPHER_BIP_CMAC_256)
+
+#define WPA_IS_FT_AKM(akm) ((akm) == RSN_AKM_FBT_SHA256 || \
+ (akm) == RSN_AKM_FBT_SHA384)
+
+#define WPA_IS_FILS_AKM(akm) ((akm) == RSN_AKM_FILS_SHA256 || \
+ (akm) == RSN_AKM_FILS_SHA384)
+
+#define WPA_IS_FILS_FT_AKM(akm) ((akm) == RSN_AKM_FBT_SHA256_FILS || \
+ (akm) == RSN_AKM_FBT_SHA384_FILS)
+
/* WPA TKIP countermeasures parameters */
#define WPA_TKIP_CM_DETECT 60 /* multiple MIC failure window (seconds) */
#define WPA_TKIP_CM_BLOCK 60 /* countermeasures active window (seconds) */
typedef uint8 rsn_akm_t;
typedef uint8 auth_ie_type_mask_t;
+/* Old location for this structure. Moved to bcmwpa.h */
+#ifndef RSN_IE_INFO_STRUCT_RELOCATED
typedef struct rsn_ie_info {
uint8 version;
rsn_cipher_t g_cipher;
uint8 kek_len; /* EAPOL KEK */
uint8 tk_len; /* EAPOL TK */
uint8 ptk_len; /* EAPOL PTK */
+ uint8 kck2_len; /* EAPOL KCK2 */
+ uint8 kek2_len; /* EAPOL KEK2 */
} rsn_ie_info_t;
+#endif /* RSN_IE_INFO_STRUCT_RELOCATED */
#ifdef BCMWAPI_WAI
#define WAPI_CAP_PREAUTH RSN_CAP_PREAUTH
/*
* WPS IE definitions
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Linux OS Independent Layer
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: linux_osl.c 767848 2018-06-15 09:33:44Z $
+ * $Id: linux_osl.c 815919 2019-04-22 09:06:50Z $
*/
#define LINUX_PORT
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <pcicfg.h>
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 8, 0))
+#include <asm-generic/pci-dma-compat.h>
+#endif
#ifdef BCM_SECURE_DMA
#include <linux/module.h>
static void osl_dma_lock(osl_t *osh);
static void osl_dma_unlock(osl_t *osh);
static void osl_dma_lock_init(osl_t *osh);
+
#define DMA_LOCK(osh) osl_dma_lock(osh)
#define DMA_UNLOCK(osh) osl_dma_unlock(osh)
#define DMA_LOCK_INIT(osh) osl_dma_lock_init(osh);
#ifdef DHD_MAP_LOGGING
#define DHD_MAP_LOG_SIZE 2048
+typedef struct dhd_map_item {
+ dmaaddr_t pa; /* DMA address (physical) */
+ uint64 ts_nsec; /* timestamp: nsec */
+ uint32 size; /* mapping size */
+ uint8 rsvd[4]; /* reserved for future use */
+} dhd_map_item_t;
+
typedef struct dhd_map_record {
- dma_addr_t addr;
- uint64 time;
+ uint32 items; /* number of total items */
+ uint32 idx; /* current index of metadata */
+ dhd_map_item_t map[0]; /* metadata storage */
} dhd_map_log_t;
-dhd_map_log_t *dhd_map_log = NULL, *dhd_unmap_log = NULL;
-uint32 map_idx = 0, unmap_idx = 0;
-
void
-osl_dma_map_dump(void)
+osl_dma_map_dump(osl_t *osh)
+{
+ dhd_map_log_t *map_log, *unmap_log;
+ uint64 ts_sec, ts_usec;
+
+ map_log = (dhd_map_log_t *)(osh->dhd_map_log);
+ unmap_log = (dhd_map_log_t *)(osh->dhd_unmap_log);
+ osl_get_localtime(&ts_sec, &ts_usec);
+
+ if (map_log && unmap_log) {
+ printk("%s: map_idx=%d unmap_idx=%d "
+ "current time=[%5lu.%06lu]\n", __FUNCTION__,
+ map_log->idx, unmap_log->idx, (unsigned long)ts_sec,
+ (unsigned long)ts_usec);
+ printk("%s: dhd_map_log(pa)=0x%llx size=%d,"
+ " dma_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
+ (uint64)__virt_to_phys((ulong)(map_log->map)),
+ (uint32)(sizeof(dhd_map_item_t) * map_log->items),
+ (uint64)__virt_to_phys((ulong)(unmap_log->map)),
+ (uint32)(sizeof(dhd_map_item_t) * unmap_log->items));
+ }
+}
+
+static void *
+osl_dma_map_log_init(uint32 item_len)
{
- printk("%s: map_idx=%d unmap_idx=%d current time=%llu\n",
- __FUNCTION__, map_idx, unmap_idx, OSL_SYSUPTIME_US());
- if (dhd_map_log && dhd_unmap_log) {
- printk("%s: dhd_map_log(pa)=%llx size=%d, dma_unmap_log(pa)=%llx size=%d\n",
- __FUNCTION__, (uint64)__virt_to_phys((ulong)dhd_map_log),
- (uint32)(sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE),
- (uint64)__virt_to_phys((ulong)dhd_unmap_log),
- (uint32)(sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE));
+ dhd_map_log_t *map_log;
+ gfp_t flags;
+ uint32 alloc_size = (uint32)(sizeof(dhd_map_log_t) +
+ (item_len * sizeof(dhd_map_item_t)));
+
+ flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
+ map_log = (dhd_map_log_t *)kmalloc(alloc_size, flags);
+ if (map_log) {
+ memset(map_log, 0, alloc_size);
+ map_log->items = item_len;
+ map_log->idx = 0;
}
+
+ return (void *)map_log;
+}
+
+static void
+osl_dma_map_log_deinit(osl_t *osh)
+{
+ if (osh->dhd_map_log) {
+ kfree(osh->dhd_map_log);
+ osh->dhd_map_log = NULL;
+ }
+
+ if (osh->dhd_unmap_log) {
+ kfree(osh->dhd_unmap_log);
+ osh->dhd_unmap_log = NULL;
+ }
+}
+
+static void
+osl_dma_map_logging(osl_t *osh, void *handle, dmaaddr_t pa, uint32 len)
+{
+ dhd_map_log_t *log = (dhd_map_log_t *)handle;
+ uint32 idx;
+
+ if (log == NULL) {
+ printk("%s: log is NULL\n", __FUNCTION__);
+ return;
+ }
+
+ idx = log->idx;
+ log->map[idx].ts_nsec = osl_localtime_ns();
+ log->map[idx].pa = pa;
+ log->map[idx].size = len;
+ log->idx = (idx + 1) % log->items;
}
#endif /* DHD_MAP_LOGGING */
/* Array bounds covered by ASSERT in osl_attach */
return linuxbcmerrormap[-bcmerror];
}
-
osl_t *
osl_attach(void *pdev, uint bustype, bool pkttag)
{
DMA_LOCK_INIT(osh);
#ifdef DHD_MAP_LOGGING
- dhd_map_log = kmalloc(sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE, flags);
- if (dhd_map_log) {
- memset(dhd_map_log, 0, sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE);
+ osh->dhd_map_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE);
+ if (osh->dhd_map_log == NULL) {
+ printk("%s: Failed to alloc dhd_map_log\n", __FUNCTION__);
}
- dhd_unmap_log = kmalloc(sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE, flags);
- if (dhd_unmap_log) {
- memset(dhd_unmap_log, 0, sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE);
+
+ osh->dhd_unmap_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE);
+ if (osh->dhd_unmap_log == NULL) {
+ printk("%s: Failed to alloc dhd_unmap_log\n", __FUNCTION__);
}
#endif /* DHD_MAP_LOGGING */
bcm_object_trace_deinit();
#ifdef DHD_MAP_LOGGING
- if (dhd_map_log) {
- kfree(dhd_map_log);
- }
- if (dhd_unmap_log) {
- kfree(dhd_unmap_log);
- }
+	/* osl_dma_map_log_deinit() takes the osl handle itself and frees
+	 * both osh->dhd_map_log and osh->dhd_unmap_log internally (see its
+	 * definition). Passing the log pointers here would reinterpret a
+	 * dhd_map_log_t * as an osl_t * and kfree() a garbage pointer.
+	 */
+	osl_dma_map_log_deinit(osh);
#endif /* DHD_MAP_LOGGING */
ASSERT(osh->magic == OS_HANDLE_MAGIC);
{
ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
-#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
+#if defined(__ARM_ARCH_7A__)
return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
#else
return ((struct pci_dev *)osh->pdev)->bus->number;
{
ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
-#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
+#if defined(__ARM_ARCH_7A__)
return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
#else
return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
return (void *)(uintptr)virt_to_phys(va);
}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
#include <asm/cacheflush.h>
void BCMFASTPATH
osl_dma_flush(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
return;
}
-#endif /* LINUX_VERSION_CODE >= 2.6.36 */
dmaaddr_t BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
map_addr = pci_map_single(osh->pdev, va, size, dir);
#endif /* ! STB_SOC_WIFI */
-#ifdef DHD_MAP_LOGGING
- if (dhd_map_log) {
- dhd_map_log[map_idx].addr = map_addr;
- dhd_map_log[map_idx].time = OSL_SYSUPTIME_US();
- map_idx++;
- map_idx = map_idx % DHD_MAP_LOG_SIZE;
- }
-#endif /* DHD_MAP_LOGGING */
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
ret = pci_dma_mapping_error(osh->pdev, map_addr);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 5))
- ret = pci_dma_mapping_error(map_addr);
-#else
- ret = 0;
-#endif // endif
+
if (ret) {
printk("%s: Failed to map memory\n", __FUNCTION__);
PHYSADDRLOSET(ret_addr, 0);
PHYSADDRHISET(ret_addr, (map_addr >> 32) & 0xffffffff);
}
+#ifdef DHD_MAP_LOGGING
+ osl_dma_map_logging(osh, osh->dhd_map_log, ret_addr, size);
+#endif /* DHD_MAP_LOGGING */
+
DMA_UNLOCK(osh);
return ret_addr;
dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
-#ifdef BCMDMA64OSL
- PHYSADDRTOULONG(pa, paddr);
#ifdef DHD_MAP_LOGGING
- if (dhd_unmap_log) {
- dhd_unmap_log[unmap_idx].addr = paddr;
- dhd_unmap_log[unmap_idx].time = OSL_SYSUPTIME_US();
- unmap_idx++;
- unmap_idx = unmap_idx % DHD_MAP_LOG_SIZE;
- }
+ osl_dma_map_logging(osh, osh->dhd_unmap_log, pa, size);
#endif /* DHD_MAP_LOGGING */
+#ifdef BCMDMA64OSL
+ PHYSADDRTOULONG(pa, paddr);
pci_unmap_single(osh->pdev, paddr, size, dir);
#else /* BCMDMA64OSL */
dma_unmap_single(osh->pdev, (uintptr)pa, size, dir);
#endif /* (__LINUX_ARM_ARCH__ == 8) */
#else /* STB_SOC_WIFI */
-#ifdef DHD_MAP_LOGGING
- if (dhd_unmap_log) {
- dhd_unmap_log[unmap_idx].addr = pa;
- dhd_unmap_log[unmap_idx].time = OSL_SYSUPTIME_US();
- unmap_idx++;
- unmap_idx = unmap_idx % DHD_MAP_LOG_SIZE;
- }
-#endif /* DHD_MAP_LOGGING */
-
pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
#endif /* STB_SOC_WIFI */
#endif /* BCMDMA64OSL */
+
DMA_UNLOCK(osh);
}
void
osl_sleep(uint ms)
{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
if (ms < 20)
usleep_range(ms*1000, ms*1000 + 1000);
else
-#endif // endif
- msleep(ms);
+ msleep(ms);
}
uint64
osl_sysuptime_us(void)
{
- struct timeval tv;
+ struct osl_timespec tv;
uint64 usec;
- do_gettimeofday(&tv);
+ osl_do_gettimeofday(&tv);
/* tv_usec content is fraction of a second */
usec = (uint64)tv.tv_sec * 1000000ul + tv.tv_usec;
return usec;
}
+uint64
+osl_localtime_ns(void)
+{
+ uint64 ts_nsec = 0;
+
+ ts_nsec = local_clock();
+
+ return ts_nsec;
+}
+
+void
+osl_get_localtime(uint64 *sec, uint64 *usec)
+{
+ uint64 ts_nsec = 0;
+ unsigned long rem_nsec = 0;
+
+ ts_nsec = local_clock();
+ rem_nsec = do_div(ts_nsec, NSEC_PER_SEC);
+ *sec = (uint64)ts_nsec;
+ *usec = (uint64)(rem_nsec / MSEC_PER_SEC);
+}
+
+uint64
+osl_systztime_us(void)
+{
+ struct timeval tv;
+ uint64 tzusec;
+
+ do_gettimeofday(&tv);
+ /* apply timezone */
+ tzusec = (uint64)((tv.tv_sec - (sys_tz.tz_minuteswest * 60)) *
+ USEC_PER_SEC);
+ tzusec += tv.tv_usec;
+
+ return tzusec;
+}
+
/*
* OSLREGOPS specifies the use of osl_XXX routines to be used for register access
*/
/* timer apis */
/* Note: All timer api's are thread unsafe and should be protected with locks by caller */
-#ifdef REPORT_FATAL_TIMEOUTS
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+void
+timer_cb_compat(struct timer_list *tl)
+{
+ timer_list_compat_t *t = container_of(tl, timer_list_compat_t, timer);
+ t->callback((ulong)t->arg);
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) */
+
osl_timer_t *
osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg)
{
MFREE(NULL, t, sizeof(osl_timer_t));
return (NULL);
}
- t->timer->data = (ulong)arg;
- t->timer->function = (linux_timer_fn)fn;
t->set = TRUE;
- init_timer(t->timer);
+ init_timer_compat(t->timer, (linux_timer_fn)fn, arg);
return (t);
}
if (periodic) {
printf("Periodic timers are not supported by Linux timer apis\n");
}
- t->timer->expires = jiffies + ms*HZ/1000;
+ timer_expires(t->timer) = jiffies + ms*HZ/1000;
add_timer(t->timer);
printf("Periodic timers are not supported by Linux timer apis\n");
}
t->set = TRUE;
- t->timer->expires = jiffies + ms*HZ/1000;
+ timer_expires(t->timer) = jiffies + ms*HZ/1000;
- mod_timer(t->timer, t->timer->expires);
+ mod_timer(t->timer, timer_expires(t->timer));
return;
}
}
return (TRUE);
}
-#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
+int
+kernel_read_compat(struct file *file, loff_t offset, char *addr, unsigned long count)
+{
+ return (int)kernel_read(file, addr, (size_t)count, &offset);
+}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */
+
+void *
+osl_spin_lock_init(osl_t *osh)
+{
+ /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
+ /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
+ /* and this results in kernel asserts in internal builds */
+ spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
+ if (lock)
+ spin_lock_init(lock);
+ return ((void *)lock);
+}
+
+void
+osl_spin_lock_deinit(osl_t *osh, void *lock)
+{
+ if (lock)
+ MFREE(osh, lock, sizeof(spinlock_t) + 4);
+}
+
+unsigned long
+osl_spin_lock(void *lock)
+{
+ unsigned long flags = 0;
+
+ if (lock)
+ spin_lock_irqsave((spinlock_t *)lock, flags);
+
+ return flags;
+}
+
+void
+osl_spin_unlock(void *lock, unsigned long flags)
+{
+ if (lock)
+ spin_unlock_irqrestore((spinlock_t *)lock, flags);
+}
#ifdef USE_DMA_LOCK
static void
osl_dma_lock(osl_t *osh)
{
- /* XXX: The conditional check is to avoid the scheduling bug.
- * If the spin_lock_bh is used under the spin_lock_irqsave,
- * Kernel triggered the warning message as the spin_lock_irqsave
- * disables the interrupt and the spin_lock_bh doesn't use in case
- * interrupt is disabled.
- * Please refer to the __local_bh_enable_ip() function
- * in kernel/softirq.c to understand the condtion.
- */
if (likely(in_irq() || irqs_disabled())) {
spin_lock(&osh->dma_lock);
} else {
spin_lock_init(&osh->dma_lock);
osh->dma_lock_bh = FALSE;
}
-#endif /* USE_DMA_LOCK */
\ No newline at end of file
+#endif /* USE_DMA_LOCK */
+
+void
+osl_do_gettimeofday(struct osl_timespec *ts)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
+ struct timespec curtime;
+#else
+ struct timeval curtime;
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
+ getnstimeofday(&curtime);
+ ts->tv_nsec = curtime.tv_nsec;
+ ts->tv_usec = curtime.tv_nsec / 1000;
+#else
+ do_gettimeofday(&curtime);
+ ts->tv_usec = curtime.tv_usec;
+ ts->tv_nsec = curtime.tv_usec * 1000;
+#endif
+ ts->tv_sec = curtime.tv_sec;
+}
+
+void
+osl_get_monotonic_boottime(struct osl_timespec *ts)
+{
+ struct timespec curtime;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+ curtime = ktime_to_timespec(ktime_get_boottime());
+#else
+ get_monotonic_boottime(&curtime);
+#endif
+ ts->tv_sec = curtime.tv_sec;
+ ts->tv_nsec = curtime.tv_nsec;
+ ts->tv_usec = curtime.tv_nsec / 1000;
+}
\ No newline at end of file
/*
* Private header file for Linux OS Independent Layer
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: linux_osl_priv.h 737887 2017-12-23 12:15:26Z $
+ * $Id: linux_osl_priv.h 794159 2018-12-12 07:41:14Z $
*/
#ifndef _LINUX_OSL_PRIV_H_
spinlock_t dma_lock;
bool dma_lock_bh;
#endif /* USE_DMA_LOCK */
+#ifdef DHD_MAP_LOGGING
+ void *dhd_map_log;
+ void *dhd_unmap_log;
+#endif /* DHD_MAP_LOGGING */
};
#endif /* _LINUX_OSL_PRIV_H_ */
/*
* Linux Packet (skb) interface
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
#include <osl.h>
#include <bcmutils.h>
#include <pcicfg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
#include <linux/fs.h>
#include "linux_osl_priv.h"
#ifdef CONFIG_DHD_USE_STATIC_BUF
if (!bcm_static_buf && adapter) {
if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter,
- 3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
+ DHD_PREALLOC_OSL_BUF, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
printk("can not alloc static buf!\n");
bcm_static_skb = NULL;
ASSERT(osh->magic == OS_HANDLE_MAGIC);
int i;
void *skb_buff_ptr = 0;
bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
- skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0);
+ skb_buff_ptr = wifi_platform_prealloc(adapter, DHD_PREALLOC_SKB_BUF, 0);
if (!skb_buff_ptr) {
printk("cannot alloc static buf!\n");
bcm_static_buf = NULL;
--- /dev/null
+/*
+ * otpdefs.h SROM/OTP definitions.
+ *
+ * Copyright (C) 1999-2019, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef _OTPDEFS_H_
+#define _OTPDEFS_H_
+
+/* SFLASH */
+#define SFLASH_ADDRESS_OFFSET_4368 0x1C000000u
+#define SFLASH_SKU_OFFSET_4368 0xEu
+#define SFLASH_MACADDR_OFFSET_4368 0x4u
+/*
+ * In sflash based chips, first word in sflash says the length.
+ * So only default value is defined here. Actual length is read
+ * from sflash in dhdpcie_srom_sflash_health_chk
+ * 0x0521 * 2 .x2 since length says number of words.
+ */
+#define SFLASH_LEN_4368 0xA42u
+
+#define SROM_ADDRESS_OFFSET_4355 0x0800u
+#define SROM_ADDRESS_OFFSET_4364 0xA000u
+#define SROM_ADDRESS_OFFSET_4377 0x0800u
+#define SROM_ADDRESS(sih, offset) (SI_ENUM_BASE(sih) + (offset))
+#define SROM_MACADDR_OFFSET_4355 0x84u
+#define SROM_MACADDR_OFFSET_4364 0x82u
+#define SROM_MACADDR_OFFSET_4377 0xE2u
+#define SROM_SKU_OFFSET_4355 0x8Au
+#define SROM_SKU_OFFSET_4364 0x8Cu
+#define SROM_SKU_OFFSET_4377 0xECu
+#define SROM_CAL_SIG1_OFFSET_4355 0xB8u
+#define SROM_CAL_SIG2_OFFSET_4355 0xBAu
+#define SROM_CAL_SIG1_OFFSET_4364 0xA0u
+#define SROM_CAL_SIG2_OFFSET_4364 0xA2u
+#define SROM_CAL_SIG1 0x4c42u
+#define SROM_CAL_SIG2 0x424fu
+#define SROM_LEN_4355 512u
+#define SROM_LEN_4364 2048u
+#define SROM_LEN_4377 2048u
+
+#define OTP_USER_AREA_OFFSET_4355 0xC0u
+#define OTP_USER_AREA_OFFSET_4364 0xC0u
+#define OTP_USER_AREA_OFFSET_4368 0x120u
+#define OTP_USER_AREA_OFFSET_4377 0x120u
+#define OTP_OFFSET_4368 0x5000u
+#define OTP_OFFSET_4377 0x11000u
+#define OTP_CTRL1_VAL 0xFA0000
+#define OTP_ADDRESS(sih, offset) (SI_ENUM_BASE(sih) + (offset))
+#define OTP_VERSION_TUPLE_ID 0x15
+#define OTP_VENDOR_TUPLE_ID 0x80
+#define OTP_CIS_REGION_END_TUPLE_ID 0XFF
+
+#define PCIE_CTRL_REG_ADDR(sih) (SI_ENUM_BASE(sih) + 0x3000)
+#define SPROM_CTRL_REG_ADDR(sih) (SI_ENUM_BASE(sih) + CC_SROM_CTRL)
+#define SPROM_CTRL_OPCODE_READ_MASK 0x9FFFFFFF
+#define SPROM_CTRL_START_BUSY_MASK 0x80000000
+#define SPROM_ADDR(sih) (SI_ENUM_BASE(sih) + CC_SROM_ADDRESS)
+#define SPROM_DATA(sih) (SI_ENUM_BASE(sih) + CC_SROM_DATA)
+#define OTP_CTRL1_REG_ADDR(sih) (SI_ENUM_BASE(sih) + 0xF4)
+#define PMU_MINRESMASK_REG_ADDR(sih) (SI_ENUM_BASE(sih) + MINRESMASKREG)
+#define CHIP_COMMON_STATUS_REG_ADDR(sih) (SI_ENUM_BASE(sih) + CC_CHIPST)
+#define CHIP_COMMON_CLKDIV2_ADDR(sih) (SI_ENUM_BASE(sih) + CC_CLKDIV2)
+
+#define CC_CLKDIV2_SPROMDIV_MASK 0x7u
+#define CC_CLKDIV2_SPROMDIV_VAL 0X4u
+#define CC_CHIPSTATUS_STRAP_BTUART_MASK 0x40u
+#define PMU_OTP_PWR_ON_MASK 0xC47
+#define PMU_PWRUP_DELAY 500 /* in us */
+#define DONGLE_TREFUP_PROGRAM_DELAY 5000 /* 5ms in us */
+#define SPROM_BUSY_POLL_DELAY 5 /* 5us */
+
+typedef enum {
+ BCM4355_IDX = 0,
+ BCM4364_IDX,
+ BCM4368_IDX,
+ BCM4377_IDX,
+ BCMMAX_IDX
+} chip_idx_t;
+
+typedef enum {
+ BCM4368_BTOP_IDX,
+ BCM4377_BTOP_IDX,
+ BCMMAX_BTOP_IDX
+} chip_idx_btop_t;
+
+typedef enum {
+ BCM4368_SFLASH_IDX,
+ BCMMAX_SFLASH_IDX
+} chip_idx_sflash_t;
+
+extern uint32 otp_addr_offsets[];
+extern uint32 otp_usrarea_offsets[];
+extern uint32 sku_offsets[];
+extern uint32 srf_addr_offsets[];
+extern uint32 supported_chips[];
+
+char *dhd_get_plat_sku(void);
+#endif /* _OTPDEFS_H_ */
* Contains PCIe related functions that are shared between different driver models (e.g. firmware
* builds, DHD builds, BMAC builds), in order to avoid code duplication.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* Misc utility routines for accessing chip-specific features
* of the SiliconBackplane-based Broadcom chips.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* Misc utility routines for accessing chip-specific features
* of the SiliconBackplane-based Broadcom chips.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: siutils.c 769534 2018-06-26 21:19:11Z $
+ * $Id: siutils.c 798061 2019-01-04 23:27:15Z $
*/
#include <bcm_cfg.h>
ASSERT(0);
}
+/*
+ * si_oobr_baseaddr: return the OOB (out-of-band) router base address for
+ * AXI-style interconnects (SOCI_AI / SOCI_DVTBUS / SOCI_NAI). 'second'
+ * selects the second router (sii->oob_router1) over the first
+ * (sii->oob_router). SB interconnects have no OOB router (returns 0);
+ * any other interconnect type asserts and returns 0.
+ */
+uint32
+si_oobr_baseaddr(si_t *sih, bool second)
+{
+	si_info_t *sii = SI_INFO(sih);
+
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return 0;
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
+		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
+		(CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return (second ? sii->oob_router1 : sii->oob_router);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
uint
si_coreid(si_t *sih)
{
else if (CST4369_CHIPMODE_PCIE(sih->chipst))
hosti = CHIP_HOSTIF_PCIEMODE;
break;
+
case BCM4350_CHIP_ID:
case BCM4354_CHIP_ID:
case BCM43556_CHIP_ID:
bool wasup;
uint32 corecap;
uint memsize = 0;
+ uint banku_size = 0;
uint32 nab = 0;
uint32 nbb = 0;
uint32 totb = 0;
W_REG(sii->osh, arm_bidx, idx);
bxinfo = R_REG(sii->osh, arm_binfo);
- memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT;
+ if (bxinfo & ARMCR4_BUNITSZ_MASK) {
+ banku_size = ARMCR4_BSZ_1K;
+ } else {
+ banku_size = ARMCR4_BSZ_8K;
+ }
+ memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * banku_size;
}
/* Return to previous state and core */
bool _bcmsrpwr = FALSE;
#endif // endif
-#ifndef BCMSDIO
-#define PWRREQ_OFFSET(sih) DAR_PCIE_PWR_CTRL((sih)->buscorerev)
-#else
#define PWRREQ_OFFSET(sih) OFFSETOF(chipcregs_t, powerctl)
-#endif // endif
static void
si_corereg_pciefast_write(si_t *sih, uint regoff, uint val)
ASSERT((BUSTYPE(sih->bustype) == PCI_BUS));
-#ifndef BCMSDIO
- if (PCIECOREREV(sih->buscorerev) == 66) {
- si_corereg_pciefast_write(sih, OFFSETOF(sbpcieregs_t, u1.dar_64.dar_ctrl), 0);
- }
-#endif // endif
-
r = (volatile uint32 *)((volatile char *)sii->curmap +
PCI_16KB0_PCIREGS_OFFSET + regoff);
ASSERT(r == val);
}
- r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK;
+ r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK(sih);
return r;
}
r = si_corereg_pciefast_read(sih, offset);
}
- r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK;
+ r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK(sih);
return r;
}
r = si_corereg_pciefast_read(sih, offset);
}
- r = (r >> SRPWR_DMN_SHIFT) & SRPWR_DMN_ALL_MASK;
+ r = (r >> SRPWR_DMN_ID_SHIFT) & SRPWR_DMN_ID_MASK;
return r;
}
+/*
+ * si_srpwr_domain_all_mask: build the mask of all save/restore power
+ * domains for this chip: PCIe (DMN0), ARM/BP/SD (DMN1), MAC aux (DMN2)
+ * and MAC main (DMN3), plus the MAC scan domain (DMN4) only when
+ * si_scan_core_present() reports a dedicated scan core.
+ */
+uint32
+si_srpwr_domain_all_mask(si_t *sih)
+{
+	uint32 mask = SRPWR_DMN0_PCIE_MASK |
+		SRPWR_DMN1_ARMBPSD_MASK |
+		SRPWR_DMN2_MACAUX_MASK |
+		SRPWR_DMN3_MACMAIN_MASK;
+
+	if (si_scan_core_present(sih)) {
+		mask |= SRPWR_DMN4_MACSCAN_MASK;
+	}
+
+	return mask;
+}
+
/* Utility API to read/write the raw registers with absolute address.
* This function can be invoked from either FW or host driver.
*/
si_info_t *sii = SI_INFO(sih);
return sii->lhl_ps_mode;
}
+
+/*
+ * si_scan_core_present: core-count heuristic for a dedicated scan core —
+ * true when the chip has at least two D11 (MAC) cores and more than four
+ * SR cores. NOTE(review): the BCMRAMFN() wrapper presumably forces RAM
+ * (non-ROM) placement in ROM-offload builds — confirm against rte defs.
+ */
+bool
+BCMRAMFN(si_scan_core_present)(si_t *sih)
+{
+	return ((si_numcoreunits(sih, D11_CORE_ID) >= 2) &&
+		(si_numcoreunits(sih, SR_CORE_ID) > 4));
+}
/*
* Include file private to the SOC Interconnect support files.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: siutils_priv.h 769534 2018-06-26 21:19:11Z $
+ * $Id: siutils_priv.h 795345 2018-12-18 16:52:03Z $
*/
#ifndef _siutils_priv_h_
#if defined(SI_ERROR_ENFORCE)
#define SI_ERROR(args) printf args
#else
-#define SI_ERROR(args) printf args
+#define SI_ERROR(args)
#endif // endif
#if defined(ENABLE_CORECAPTURE)
void *curwrap; /**< current wrapper va */
uint32 oob_router; /**< oob router registers for axi */
+	uint32 oob_router1; /**< second oob router registers for axi */
si_cores_info_t *cores_info;
gci_gpio_item_t *gci_gpio_head; /**< gci gpio interrupts head */
/*
* Linux cfg80211 driver - Android related functions
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_android.c 771907 2018-07-12 11:19:34Z $
+ * $Id: wl_android.c 825470 2019-06-14 09:08:11Z $
*/
#include <linux/module.h>
#endif // endif
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
+#include <wl_cfgscan.h>
#endif // endif
#ifdef WL_NAN
#include <wl_cfgnan.h>
#include <bcmwifi_rspec.h>
#include <dhd_linux.h>
#include <bcmiov.h>
-#ifdef WL_MBO
-#include <mbo.h>
-#endif /* WL_MBO */
#ifdef WL_BCNRECV
#include <wl_cfgvendor.h>
+#include <brcm_nl80211.h>
#endif /* WL_BCNRECV */
+#ifdef WL_MBO
+#include <mbo.h>
+#endif /* WL_MBO */
+#ifdef RTT_SUPPORT
+#include <dhd_rtt.h>
+#endif /* RTT_SUPPORT */
+#ifdef WL_ESCAN
+#include <wl_escan.h>
+#endif
#ifdef WL_STATIC_IF
#define WL_BSSIDX_MAX 16
#endif /* WL_STATIC_IF */
-#ifndef WL_CFG80211
-#define htod32(i) i
-#define htod16(i) i
-#define dtoh32(i) i
-#define dtoh16(i) i
-#define htodchanspec(i) i
-#define dtohchanspec(i) i
-#endif
-
-uint android_msg_level = ANDROID_ERROR_LEVEL;
+/* Run-time log mask for the Android private-command layer; each
+ * ANDROID_*_LEVEL bit gates the corresponding print macro below.
+ */
+uint android_msg_level = ANDROID_ERROR_LEVEL | ANDROID_MSG_LEVEL;
+
+/* Leveled logging helpers: print only when the matching bit is set in
+ * android_msg_level. Errors go to KERN_ERR, trace/info to KERN_INFO.
+ * The ANDROID_ERROR/TRACE/INFO wrappers take a double-parenthesized
+ * printf-style argument list, e.g. ANDROID_ERROR(("fmt %d\n", v)).
+ */
+#define ANDROID_ERROR_MSG(x, args...) \
+	do { \
+		if (android_msg_level & ANDROID_ERROR_LEVEL) { \
+			printk(KERN_ERR "[dhd] ANDROID-ERROR) " x, ## args); \
+		} \
+	} while (0)
+#define ANDROID_TRACE_MSG(x, args...) \
+	do { \
+		if (android_msg_level & ANDROID_TRACE_LEVEL) { \
+			printk(KERN_INFO "[dhd] ANDROID-TRACE) " x, ## args); \
+		} \
+	} while (0)
+#define ANDROID_INFO_MSG(x, args...) \
+	do { \
+		if (android_msg_level & ANDROID_INFO_LEVEL) { \
+			printk(KERN_INFO "[dhd] ANDROID-INFO) " x, ## args); \
+		} \
+	} while (0)
+#define ANDROID_ERROR(x) ANDROID_ERROR_MSG x
+#define ANDROID_TRACE(x) ANDROID_TRACE_MSG x
+#define ANDROID_INFO(x) ANDROID_INFO_MSG x
/*
* Android private command strings, PLEASE define new private commands here
#define CMD_BTCOEXMODE "BTCOEXMODE"
#define CMD_SETSUSPENDOPT "SETSUSPENDOPT"
#define CMD_SETSUSPENDMODE "SETSUSPENDMODE"
+#define CMD_SETDTIM_IN_SUSPEND "SET_DTIM_IN_SUSPEND"
#define CMD_MAXDTIM_IN_SUSPEND "MAX_DTIM_IN_SUSPEND"
+#define CMD_DISDTIM_IN_SUSPEND "DISABLE_DTIM_IN_SUSPEND"
#define CMD_P2P_DEV_ADDR "P2P_DEV_ADDR"
#define CMD_SETFWPATH "SETFWPATH"
#define CMD_SETBAND "SETBAND"
#endif /* WL_SUPPORT_AUTO_CHANNEL */
#define CMD_KEEP_ALIVE "KEEPALIVE"
-#ifdef BCMCCX
-/* CCX Private Commands */
-#define CMD_GETCCKM_RN "get cckm_rn"
-#define CMD_SETCCKM_KRK "set cckm_krk"
-#define CMD_GET_ASSOC_RES_IES "get assoc_res_ies"
-
-#define CCKM_KRK_LEN 16
-#define CCKM_BTK_LEN 32
-#endif // endif
-
#ifdef PNO_SUPPORT
#define CMD_PNOSSIDCLR_SET "PNOSSIDCLR"
#define CMD_PNOSETUP_SET "PNOSETUP "
#define CMD_WBTEXT_BTM_DELTA "WBTEXT_BTM_DELTA"
#define CMD_WBTEXT_ESTM_ENABLE "WBTEXT_ESTM_ENABLE"
+/* Scratch buffer sizing for parsing small numeric command arguments.
+ * NOTE(review): BUFSZN expands unparenthesized (BUFSZ + 1); use with
+ * care inside larger expressions.
+ */
+#define BUFSZ 8
+#define BUFSZN BUFSZ + 1
+
+/* Two-step stringification: S(x) expands x first, then stringifies it. */
+#define _S(x) #x
+#define S(x) _S(x)
+
+#define MAXBANDS 2 /**< Maximum #of bands */
+#define BAND_2G_INDEX 0
+/* NOTE(review): BAND_5G_INDEX equals BAND_2G_INDEX (both 0) — confirm
+ * this is intentional and not meant to be 1 for per-band array indexing.
+ */
+#define BAND_5G_INDEX 0
+
+/* Version-agnostic view of a per-band roam profile (v1/v2/v3 layouts). */
+typedef union {
+	wl_roam_prof_band_v1_t v1;
+	wl_roam_prof_band_v2_t v2;
+	wl_roam_prof_band_v3_t v3;
+} wl_roamprof_band_t;
+
#ifdef WLWFDS
#define CMD_ADD_WFDS_HASH "ADD_WFDS_HASH"
#define CMD_DEL_WFDS_HASH "DEL_WFDS_HASH"
#define CMD_SET_AP_RPS_PARAMS "SET_AP_RPS_PARAMS"
#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+#ifdef SUPPORT_AP_SUSPEND
+#define CMD_SET_AP_SUSPEND "SET_AP_SUSPEND"
+#endif /* SUPPORT_AP_SUSPEND */
+
+#ifdef SUPPORT_AP_BWCTRL
+#define CMD_SET_AP_BW "SET_AP_BW"
+#define CMD_GET_AP_BW "GET_AP_BW"
+#endif /* SUPPORT_AP_BWCTRL */
+
/* miracast related definition */
#define MIRACAST_MODE_OFF 0
#define MIRACAST_MODE_SOURCE 1
static LIST_HEAD(miracast_resume_list);
#ifdef WL_CFG80211
static u8 miracast_cur_mode;
-#endif
+#endif /* WL_CFG80211 */
#ifdef DHD_LOG_DUMP
#define CMD_NEW_DEBUG_PRINT_DUMP "DEBUG_DUMP"
void dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd);
#endif /* DHD_LOG_DUMP */
+#ifdef DHD_STATUS_LOGGING
+#define CMD_DUMP_STATUS_LOG "DUMP_STAT_LOG"
+#define CMD_QUERY_STATUS_LOG "QUERY_STAT_LOG"
+#endif /* DHD_STATUS_LOGGING */
+
#ifdef DHD_DEBUG_UART
extern bool dhd_debug_uart_is_running(struct net_device *dev);
#endif /* DHD_DEBUG_UART */
+#ifdef RTT_GEOFENCE_INTERVAL
+#if defined(RTT_SUPPORT) && defined(WL_NAN)
+#define CMD_GEOFENCE_INTERVAL "GEOFENCE_INT"
+#endif /* RTT_SUPPORT && WL_NAN */
+#endif /* RTT_GEOFENCE_INTERVAL */
+
struct io_cfg {
s8 *iovar;
s32 param;
struct list_head list;
};
+/* SAR (specific absorption rate) TX-power backoff modes: enable/disable
+ * pairs per scenario (head, grip) and NR band class (mmWave, Sub6), plus
+ * disable-all. NOTE(review): presumably selected via a SAR private
+ * command — verify against the command handler.
+ */
+typedef enum {
+	HEAD_SAR_BACKOFF_DISABLE = -1,
+	HEAD_SAR_BACKOFF_ENABLE = 0,
+	GRIP_SAR_BACKOFF_DISABLE,
+	GRIP_SAR_BACKOFF_ENABLE,
+	NR_mmWave_SAR_BACKOFF_DISABLE,
+	NR_mmWave_SAR_BACKOFF_ENABLE,
+	NR_Sub6_SAR_BACKOFF_DISABLE,
+	NR_Sub6_SAR_BACKOFF_ENABLE,
+	SAR_BACKOFF_DISABLE_ALL
+} sar_modes;
+
#if defined(BCMFW_ROAM_ENABLE)
#define CMD_SET_ROAMPREF "SET_ROAMPREF"
#ifdef WL_BCNRECV
#define CMD_BEACON_RECV "BEACON_RECV"
#endif /* WL_BCNRECV */
+#ifdef WL_CAC_TS
+#define CMD_CAC_TSPEC "CAC_TSPEC"
+#endif /* WL_CAC_TS */
+#ifdef WL_CHAN_UTIL
+#define CMD_GET_CHAN_UTIL "GET_CU"
+#endif /* WL_CHAN_UTIL */
+
+#ifdef SUPPORT_SOFTAP_ELNA_BYPASS
+#define CMD_SET_SOFTAP_ELNA_BYPASS "SET_SOFTAP_ELNA_BYPASS"
+#define CMD_GET_SOFTAP_ELNA_BYPASS "GET_SOFTAP_ELNA_BYPASS"
+#endif /* SUPPORT_SOFTAP_ELNA_BYPASS */
+
+#ifdef WL_NAN
+#define CMD_GET_NAN_STATUS "GET_NAN_STATUS"
+#endif /* WL_NAN */
/* drv command info structure */
typedef struct wl_drv_cmd_info {
#define LQCM_RX_INDEX_SHIFT 16 /* LQCM rx index shift */
#endif /* SUPPORT_LQCM */
+#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
+#define NUMBER_SEQUENTIAL_PRIVCMD_ERRORS 7
+static int priv_cmd_errors = 0;
+#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
+
/**
* Extern function declarations (TODO: move them to dhd_linux.h)
*/
extern bool g_pm_control;
#endif /* DHD_PM_CONTROL_FROM_FILE */
+/* private command support for restoring roam/scan parameters */
+#ifdef SUPPORT_RESTORE_SCAN_PARAMS
+#define CMD_RESTORE_SCAN_PARAMS "RESTORE_SCAN_PARAMS"
+
+typedef int (*PRIV_CMD_HANDLER) (struct net_device *dev, char *command);
+typedef int (*PRIV_CMD_HANDLER_WITH_LEN) (struct net_device *dev, char *command, int total_len);
+
+enum {
+ RESTORE_TYPE_UNSPECIFIED = 0,
+ RESTORE_TYPE_PRIV_CMD = 1,
+ RESTORE_TYPE_PRIV_CMD_WITH_LEN = 2
+};
+
+typedef struct android_restore_scan_params {
+ char command[64];
+ int parameter;
+ int cmd_type;
+ union {
+ PRIV_CMD_HANDLER cmd_handler;
+ PRIV_CMD_HANDLER_WITH_LEN cmd_handler_w_len;
+ };
+} android_restore_scan_params_t;
+
+/* function prototypes of private command handler */
+static int wl_android_set_roam_trigger(struct net_device *dev, char* command);
+int wl_android_set_roam_delta(struct net_device *dev, char* command);
+int wl_android_set_roam_scan_period(struct net_device *dev, char* command);
+int wl_android_set_full_roam_scan_period(struct net_device *dev, char* command, int total_len);
+int wl_android_set_roam_scan_control(struct net_device *dev, char *command);
+int wl_android_set_scan_channel_time(struct net_device *dev, char *command);
+int wl_android_set_scan_home_time(struct net_device *dev, char *command);
+int wl_android_set_scan_home_away_time(struct net_device *dev, char *command);
+int wl_android_set_scan_nprobes(struct net_device *dev, char *command);
+static int wl_android_set_band(struct net_device *dev, char *command);
+int wl_android_set_scan_dfs_channel_mode(struct net_device *dev, char *command);
+int wl_android_set_wes_mode(struct net_device *dev, char *command);
+int wl_android_set_okc_mode(struct net_device *dev, char *command);
+
+/* default values */
+#ifdef ROAM_API
+#define DEFAULT_ROAM_TIRGGER -75
+#define DEFAULT_ROAM_DELTA 10
+#define DEFAULT_ROAMSCANPERIOD 10
+#define DEFAULT_FULLROAMSCANPERIOD_SET 120
+#endif /* ROAM_API */
+#define DEFAULT_BAND 0
+
+/* restoring parameter list, please don't change order */
+static android_restore_scan_params_t restore_params[] =
+{
+/* wbtext need to be disabled while updating roam/scan parameters */
+#ifdef ROAM_API
+ { CMD_ROAMTRIGGER_SET, DEFAULT_ROAM_TIRGGER,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_roam_trigger},
+ { CMD_ROAMDELTA_SET, DEFAULT_ROAM_DELTA,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_roam_delta},
+ { CMD_ROAMSCANPERIOD_SET, DEFAULT_ROAMSCANPERIOD,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_roam_scan_period},
+ { CMD_FULLROAMSCANPERIOD_SET, DEFAULT_FULLROAMSCANPERIOD_SET,
+ RESTORE_TYPE_PRIV_CMD_WITH_LEN,
+ .cmd_handler_w_len = wl_android_set_full_roam_scan_period},
+#endif /* ROAM_API */
+ { CMD_SETBAND, DEFAULT_BAND,
+ RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_band},
+ { "\0", 0, RESTORE_TYPE_UNSPECIFIED, .cmd_handler = NULL}
+};
+#endif /* SUPPORT_RESTORE_SCAN_PARAMS */
+
/**
* Local (static) functions and variables
*/
smbuf = (char *)MALLOC(cfg->osh, WLC_IOCTL_MAXLEN);
if (smbuf == NULL) {
- ANDROID_ERROR(("%s: failed to allocated memory %d bytes\n",
- __FUNCTION__, WLC_IOCTL_MAXLEN));
+ ANDROID_ERROR(("wl_android_set_wfds_hash: failed to allocated memory %d bytes\n",
+ WLC_IOCTL_MAXLEN));
return -ENOMEM;
}
}
if (error) {
- ANDROID_ERROR(("%s: failed to %s, error=%d\n", __FUNCTION__, command, error));
+ ANDROID_ERROR(("wl_android_set_wfds_hash: failed to %s, error=%d\n", command, error));
}
if (smbuf) {
/* Convert Kbps to Android Mbps */
link_speed = link_speed / 1000;
bytes_written = snprintf(command, total_len, "LinkSpeed %d", link_speed);
- ANDROID_INFO(("%s: command result is %s\n", __FUNCTION__, command));
+ ANDROID_INFO(("wl_android_get_link_speed: command result is %s\n", command));
return bytes_written;
}
/* Ap/GO mode
* driver rssi <sta_mac_addr>
*/
- ANDROID_TRACE(("%s: cmd:%s\n", __FUNCTION__, delim));
+ ANDROID_TRACE(("wl_android_get_rssi: cmd:%s\n", delim));
/* skip space from delim after finding char */
delim++;
if (!(bcm_ether_atoe((delim), &scbval.ea))) {
- ANDROID_ERROR(("%s:address err\n", __FUNCTION__));
+ ANDROID_ERROR(("wl_android_get_rssi: address err\n"));
return -1;
}
scbval.val = htod32(0);
- ANDROID_TRACE(("%s: address:"MACDBG, __FUNCTION__, MAC2STRDBG(scbval.ea.octet)));
+ ANDROID_TRACE(("wl_android_get_rssi: address:"MACDBG, MAC2STRDBG(scbval.ea.octet)));
#ifdef WL_VIRTUAL_APSTA
/* RSDB AP may have another virtual interface
* In this case, format of private command is as following,
}
else {
/* STA/GC mode */
- memset(&scbval, 0, sizeof(scb_val_t));
+ bzero(&scbval, sizeof(scb_val_t));
}
error = wldev_get_rssi(target_ndev, &scbval);
if (error)
return -1;
if ((ssid.SSID_len == 0) || (ssid.SSID_len > DOT11_MAX_SSID_LEN)) {
- ANDROID_ERROR(("%s: wldev_get_ssid failed\n", __FUNCTION__));
+ ANDROID_ERROR(("wl_android_get_rssi: wldev_get_ssid failed\n"));
} else if (total_len <= ssid.SSID_len) {
return -ENOMEM;
} else {
" rssi %d", scbval.val);
command[bytes_written] = '\0';
- ANDROID_TRACE(("%s: command result is %s (%d)\n", __FUNCTION__, command, bytes_written));
+ ANDROID_TRACE(("wl_android_get_rssi: command result is %s (%d)\n", command, bytes_written));
return bytes_written;
}
if (ret_now != suspend_flag) {
if (!(ret = net_os_set_suspend(dev, ret_now, 1))) {
- ANDROID_INFO(("%s: Suspend Flag %d -> %d\n",
- __FUNCTION__, ret_now, suspend_flag));
+ ANDROID_INFO(("wl_android_set_suspendopt: Suspend Flag %d -> %d\n",
+ ret_now, suspend_flag));
} else {
- ANDROID_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+ ANDROID_ERROR(("wl_android_set_suspendopt: failed %d\n", ret));
}
}
suspend_flag = 1;
if (!(ret = net_os_set_suspend(dev, suspend_flag, 0)))
- ANDROID_INFO(("%s: Suspend Mode %d\n", __FUNCTION__, suspend_flag));
+ ANDROID_INFO(("wl_android_set_suspendmode: Suspend Mode %d\n", suspend_flag));
else
- ANDROID_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+ ANDROID_ERROR(("wl_android_set_suspendmode: failed %d\n", ret));
#endif // endif
return ret;
if (error)
return -1;
- ANDROID_INFO(("%s: mode:%s\n", __FUNCTION__, mode));
+ ANDROID_INFO(("wl_android_get_80211_mode: mode:%s\n", mode));
bytes_written = snprintf(command, total_len, "%s %s", CMD_80211_MODE, mode);
- ANDROID_INFO(("%s: command:%s EXIT\n", __FUNCTION__, command));
+ ANDROID_INFO(("wl_android_get_80211_mode: command:%s EXIT\n", command));
return bytes_written;
}
return -1;
chanspec = wl_chspec_driver_to_host(chsp);
- ANDROID_INFO(("%s:return value of chanspec:%x\n", __FUNCTION__, chanspec));
+ ANDROID_INFO(("wl_android_get_80211_mode: return value of chanspec:%x\n", chanspec));
channel = chanspec & WL_CHANSPEC_CHAN_MASK;
band = chanspec & WL_CHANSPEC_BAND_MASK;
bw = chanspec & WL_CHANSPEC_BW_MASK;
- ANDROID_INFO(("%s:channel:%d band:%d bandwidth:%d\n", __FUNCTION__, channel, band, bw));
+ ANDROID_INFO(("wl_android_get_80211_mode: channel:%d band:%d bandwidth:%d\n",
+ channel, band, bw));
if (bw == WL_CHANSPEC_BW_80)
bw = WL_CH_BANDWIDTH_80MHZ;
bytes_written = snprintf(command, total_len, "%s channel %d band %s bw %d", CMD_CHANSPEC,
channel, band == WL_CHANSPEC_BAND_5G ? "5G":"2G", bw);
- ANDROID_INFO(("%s: command:%s EXIT\n", __FUNCTION__, command));
+ ANDROID_INFO(("wl_android_get_chanspec: command:%s EXIT\n", command));
return bytes_written;
}
-#endif
+#endif /* WL_CFG80211 */
/* returns current datarate datarate returned from firmware are in 500kbps */
int wl_android_get_datarate(struct net_device *dev, char *command, int total_len)
if (error)
return -1;
- ANDROID_INFO(("%s:datarate:%d\n", __FUNCTION__, datarate));
+ ANDROID_INFO(("wl_android_get_datarate: datarate:%d\n", datarate));
bytes_written = snprintf(command, total_len, "%s %d", CMD_DATARATE, (datarate/2));
return bytes_written;
sizeof(struct ether_addr) + sizeof(uint)] = {0};
struct maclist *assoc_maclist = (struct maclist *)mac_buf;
- ANDROID_TRACE(("%s: ENTER\n", __FUNCTION__));
+ ANDROID_TRACE(("wl_android_get_assoclist: ENTER\n"));
assoc_maclist->count = htod32(MAX_NUM_OF_ASSOCLIST);
if ((len > 0) && (len < (total_len - bytes_written))) {
bytes_written += len;
} else {
- ANDROID_ERROR(("%s: Insufficient buffer %d, bytes_written %d\n",
- __FUNCTION__, total_len, bytes_written));
+ ANDROID_ERROR(("wl_android_get_assoclist: Insufficient buffer %d,"
+ " bytes_written %d\n",
+ total_len, bytes_written));
bytes_written = -1;
break;
}
u32 chnsp = 0;
int err = 0;
- ANDROID_INFO(("%s: command:%s\n", __FUNCTION__, command));
+ ANDROID_INFO(("wl_android_set_csa: command:%s\n", command));
command = (command + strlen(CMD_SET_CSA));
/* Order is mode, count channel */
if (!*++command) {
- ANDROID_ERROR(("%s:error missing arguments\n", __FUNCTION__));
+ ANDROID_ERROR(("wl_android_set_csa:error missing arguments\n"));
return -1;
}
csa_arg.mode = bcm_atoi(command);
}
if (!*++command) {
- ANDROID_ERROR(("%s:error missing count\n", __FUNCTION__));
+ ANDROID_ERROR(("wl_android_set_csa: error missing count\n"));
return -1;
}
command++;
csa_arg.chspec = 0;
command += 2;
if (!*command) {
- ANDROID_ERROR(("%s:error missing channel\n", __FUNCTION__));
+ ANDROID_ERROR(("wl_android_set_csa: error missing channel\n"));
return -1;
}
chnsp = wf_chspec_aton(command);
if (chnsp == 0) {
- ANDROID_ERROR(("%s:chsp is not correct\n", __FUNCTION__));
+ ANDROID_ERROR(("wl_android_set_csa:chsp is not correct\n"));
return -1;
}
chnsp = wl_chspec_host_to_driver(chnsp);
error = wldev_iovar_setbuf(dev, "csa", &csa_arg, sizeof(csa_arg),
smbuf, sizeof(smbuf), NULL);
if (error) {
- ANDROID_ERROR(("%s:set csa failed:%d\n", __FUNCTION__, error));
+ ANDROID_ERROR(("wl_android_set_csa:set csa failed:%d\n", error));
return -1;
}
return 0;
}
-#endif
+#endif /* WL_CFG80211 */
+
+/*
+ * wl_android_set_bcn_li_dtim: handle the "SET_DTIM_IN_SUSPEND <d>"
+ * private command. Parses the single ASCII digit following the command
+ * name, rejects values above
+ * MAX_DTIM_ALLOWED_INTERVAL / MAX_DTIM_SKIP_BEACON_INTERVAL, and applies
+ * the DTIM-skip count via net_os_set_suspend_bcn_li_dtim().
+ * Returns 0 on success, BCME_ERROR for an out-of-range value, or the
+ * OS-layer error code. NOTE(review): only the upper bound is checked — a
+ * non-digit argument yields a negative 'dtim' that passes validation.
+ */
+static int
+wl_android_set_bcn_li_dtim(struct net_device *dev, char *command)
+{
+	int ret = 0;
+	int dtim;
+
+	dtim = *(command + strlen(CMD_SETDTIM_IN_SUSPEND) + 1) - '0';
+
+	if (dtim > (MAX_DTIM_ALLOWED_INTERVAL / MAX_DTIM_SKIP_BEACON_INTERVAL)) {
+		ANDROID_ERROR(("%s: failed, invalid dtim %d\n",
+			__FUNCTION__, dtim));
+		return BCME_ERROR;
+	}
+
+	if (!(ret = net_os_set_suspend_bcn_li_dtim(dev, dtim))) {
+		ANDROID_TRACE(("%s: SET bcn_li_dtim in suspend %d\n",
+			__FUNCTION__, dtim));
+	} else {
+		ANDROID_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+	}
+
+	return ret;
+}
static int
wl_android_set_max_dtim(struct net_device *dev, char *command)
dtim_flag = *(command + strlen(CMD_MAXDTIM_IN_SUSPEND) + 1) - '0';
if (!(ret = net_os_set_max_dtim_enable(dev, dtim_flag))) {
- ANDROID_TRACE(("%s: use Max bcn_li_dtim in suspend %s\n",
- __FUNCTION__, (dtim_flag ? "Enable" : "Disable")));
+ ANDROID_TRACE(("wl_android_set_max_dtim: use Max bcn_li_dtim in suspend %s\n",
+ (dtim_flag ? "Enable" : "Disable")));
} else {
- ANDROID_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+ ANDROID_ERROR(("wl_android_set_max_dtim: failed %d\n", ret));
+ }
+
+ return ret;
+}
+
+#ifdef DISABLE_DTIM_IN_SUSPEND
+/*
+ * wl_android_set_disable_dtim_in_suspend: handle the
+ * "DISABLE_DTIM_IN_SUSPEND <0|1>" private command. Parses the single
+ * ASCII digit after the command name as an on/off flag and forwards it
+ * to net_os_set_disable_dtim_in_suspend(). Returns 0 on success or the
+ * OS-layer error code.
+ */
+static int
+wl_android_set_disable_dtim_in_suspend(struct net_device *dev, char *command)
+{
+	int ret = 0;
+	int dtim_flag;
+
+	dtim_flag = *(command + strlen(CMD_DISDTIM_IN_SUSPEND) + 1) - '0';
+
+	if (!(ret = net_os_set_disable_dtim_in_suspend(dev, dtim_flag))) {
+		ANDROID_TRACE(("wl_android_set_disable_dtim_in_suspend: "
+			"use Disable bcn_li_dtim in suspend %s\n",
+			(dtim_flag ? "Enable" : "Disable")));
+	} else {
+		ANDROID_ERROR(("wl_android_set_disable_dtim_in_suspend: failed %d\n", ret));
+	}
+
+	return ret;
+}
+#endif /* DISABLE_DTIM_IN_SUSPEND */
static int wl_android_get_band(struct net_device *dev, char *command, int total_len)
{
return bytes_written;
}
+#ifdef WL_CFG80211
+/*
+ * wl_android_set_band: handle the "SETBAND <b>" private command.
+ * Parses the single ASCII digit after the command name as the band.
+ * With WL_HOST_BAND_MGMT: try wl_cfg80211_set_band() first; if the
+ * firmware lacks the roam_band iovar (BCME_UNSUPPORTED) or the band is
+ * WLC_BAND_AUTO, fall back to wldev_set_band(). Without it, delegate to
+ * wl_cfg80211_set_if_band(). With ROAM_CHANNEL_CACHE, also refresh the
+ * roam-scan cache for the new band. Returns 0 on success, -1 on failure.
+ */
+static int
+wl_android_set_band(struct net_device *dev, char *command)
+{
+	int error = 0;
+	uint band = *(command + strlen(CMD_SETBAND) + 1) - '0';
+#ifdef WL_HOST_BAND_MGMT
+	int ret = 0;
+	if ((ret = wl_cfg80211_set_band(dev, band)) < 0) {
+		if (ret == BCME_UNSUPPORTED) {
+			/* If roam_var is unsupported, fallback to the original method */
+			ANDROID_ERROR(("WL_HOST_BAND_MGMT defined, "
+				"but roam_band iovar unsupported in the firmware\n"));
+		} else {
+			error = -1;
+		}
+	}
+	if (((ret == 0) && (band == WLC_BAND_AUTO)) || (ret == BCME_UNSUPPORTED)) {
+		/* Apply if roam_band iovar is not supported or band setting is AUTO */
+		error = wldev_set_band(dev, band);
+	}
+#else
+	error = wl_cfg80211_set_if_band(dev, band);
+#endif /* WL_HOST_BAND_MGMT */
+#ifdef ROAM_CHANNEL_CACHE
+	wl_update_roamscan_cache_by_band(dev, band);
+#endif /* ROAM_CHANNEL_CACHE */
+	return error;
+}
+#endif /* WL_CFG80211 */
+
#ifdef PNO_SUPPORT
#define PNO_PARAM_SIZE 50
#define VALUE_SIZE 50
char param[PNO_PARAM_SIZE+1], value[VALUE_SIZE+1];
struct dhd_pno_batch_params batch_params;
- ANDROID_INFO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+ ANDROID_INFO(("wls_parse_batching_cmd: command=%s, len=%d\n", command, total_len));
len_remain = total_len;
if (len_remain > (strlen(CMD_WLS_BATCHING) + 1)) {
pos = command + strlen(CMD_WLS_BATCHING) + 1;
len_remain -= strlen(CMD_WLS_BATCHING) + 1;
} else {
- ANDROID_ERROR(("%s: No arguments, total_len %d\n", __FUNCTION__, total_len));
+ ANDROID_ERROR(("wls_parse_batching_cmd: No arguments, total_len %d\n", total_len));
err = BCME_ERROR;
goto exit;
}
- memset(&batch_params, 0, sizeof(struct dhd_pno_batch_params));
+ bzero(&batch_params, sizeof(struct dhd_pno_batch_params));
if (!strncmp(pos, PNO_BATCHING_SET, strlen(PNO_BATCHING_SET))) {
if (len_remain > (strlen(PNO_BATCHING_SET) + 1)) {
pos += strlen(PNO_BATCHING_SET) + 1;
} else {
- ANDROID_ERROR(("%s: %s missing arguments, total_len %d\n",
- __FUNCTION__, PNO_BATCHING_SET, total_len));
+ ANDROID_ERROR(("wls_parse_batching_cmd: %s missing arguments, total_len %d\n",
+ PNO_BATCHING_SET, total_len));
err = BCME_ERROR;
goto exit;
}
while ((token = strsep(&pos, PNO_PARAMS_DELIMETER)) != NULL) {
- memset(param, 0, sizeof(param));
- memset(value, 0, sizeof(value));
+ bzero(param, sizeof(param));
+ bzero(value, sizeof(value));
if (token == NULL || !*token)
break;
if (*token == '\0')
tokens = sscanf(value, "<%s>", value);
if (tokens != 1) {
err = BCME_ERROR;
- ANDROID_ERROR(("%s : invalid format for channel"
- " <> params\n", __FUNCTION__));
+ ANDROID_ERROR(("wls_parse_batching_cmd: invalid format"
+ " for channel"
+ " <> params\n"));
goto exit;
}
while ((token2 = strsep(&pos2,
batch_params.rtt = simple_strtol(value, NULL, 0);
ANDROID_INFO(("rtt : %d\n", batch_params.rtt));
} else {
- ANDROID_ERROR(("%s : unknown param: %s\n", __FUNCTION__, param));
+ ANDROID_ERROR(("wls_parse_batching_cmd : unknown param: %s\n", param));
err = BCME_ERROR;
goto exit;
}
if (err < 0) {
ANDROID_ERROR(("failed to configure batch scan\n"));
} else {
- memset(command, 0, total_len);
+ bzero(command, total_len);
err = snprintf(command, total_len, "%d", err);
}
} else if (!strncmp(pos, PNO_BATCHING_GET, strlen(PNO_BATCHING_GET))) {
if (err < 0) {
ANDROID_ERROR(("failed to stop batching scan\n"));
} else {
- memset(command, 0, total_len);
+ bzero(command, total_len);
err = snprintf(command, total_len, "OK");
}
} else {
- ANDROID_ERROR(("%s : unknown command\n", __FUNCTION__));
+ ANDROID_ERROR(("wls_parse_batching_cmd : unknown command\n"));
err = BCME_ERROR;
goto exit;
}
0x00
};
#endif /* PNO_SET_DEBUG */
- ANDROID_INFO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+ ANDROID_INFO(("wl_android_set_pno_setup: command=%s, len=%d\n", command, total_len));
if (total_len < (strlen(CMD_PNOSETUP_SET) + sizeof(cmd_tlv_t))) {
- ANDROID_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len));
+ ANDROID_ERROR(("wl_android_set_pno_setup: argument=%d less min size\n", total_len));
goto exit_proc;
}
#ifdef PNO_SET_DEBUG
tlv_size_left = total_len - strlen(CMD_PNOSETUP_SET);
cmd_tlv_temp = (cmd_tlv_t *)str_ptr;
- memset(ssids_local, 0, sizeof(ssids_local));
+ bzero(ssids_local, sizeof(ssids_local));
if ((cmd_tlv_temp->prefix == PNO_TLV_PREFIX) &&
(cmd_tlv_temp->version == PNO_TLV_VERSION) &&
goto exit_proc;
} else {
if ((str_ptr[0] != PNO_TLV_TYPE_TIME) || (tlv_size_left <= 1)) {
- ANDROID_ERROR(("%s scan duration corrupted field size %d\n",
- __FUNCTION__, tlv_size_left));
+ ANDROID_ERROR(("wl_android_set_pno_setup: scan duration corrupted"
+ " field size %d\n",
+ tlv_size_left));
goto exit_proc;
}
str_ptr++;
pno_time = simple_strtoul(str_ptr, &str_ptr, 16);
- ANDROID_INFO(("%s: pno_time=%d\n", __FUNCTION__, pno_time));
+ ANDROID_INFO(("wl_android_set_pno_setup: pno_time=%d\n", pno_time));
if (str_ptr[0] != 0) {
if ((str_ptr[0] != PNO_TLV_FREQ_REPEAT)) {
- ANDROID_ERROR(("%s pno repeat : corrupted field\n",
- __FUNCTION__));
+ ANDROID_ERROR(("wl_android_set_pno_setup: pno repeat:"
+ " corrupted field\n"));
goto exit_proc;
}
str_ptr++;
pno_repeat = simple_strtoul(str_ptr, &str_ptr, 16);
- ANDROID_INFO(("%s :got pno_repeat=%d\n", __FUNCTION__, pno_repeat));
+ ANDROID_INFO(("wl_android_set_pno_setup: got pno_repeat=%d\n",
+ pno_repeat));
if (str_ptr[0] != PNO_TLV_FREQ_EXPO_MAX) {
- ANDROID_ERROR(("%s FREQ_EXPO_MAX corrupted field size\n",
- __FUNCTION__));
+ ANDROID_ERROR(("wl_android_set_pno_setup: FREQ_EXPO_MAX"
+ " corrupted field size\n"));
goto exit_proc;
}
str_ptr++;
pno_freq_expo_max = simple_strtoul(str_ptr, &str_ptr, 16);
- ANDROID_INFO(("%s: pno_freq_expo_max=%d\n",
- __FUNCTION__, pno_freq_expo_max));
+ ANDROID_INFO(("wl_android_set_pno_setup: pno_freq_expo_max=%d\n",
+ pno_freq_expo_max));
}
}
} else {
- ANDROID_ERROR(("%s get wrong TLV command\n", __FUNCTION__));
+ ANDROID_ERROR(("wl_android_set_pno_setup: get wrong TLV command\n"));
goto exit_proc;
}
#define MAC_ADDR_STR_LEN 18
if (total_len < MAC_ADDR_STR_LEN) {
- ANDROID_ERROR(("%s: buflen %d is less than p2p dev addr\n",
- __FUNCTION__, total_len));
+ ANDROID_ERROR(("wl_android_get_p2p_dev_addr: buflen %d is less than p2p dev addr\n",
+ total_len));
return -1;
}
ret = wl_cfg80211_get_p2p_dev_addr(ndev, &p2pdev_addr);
if (ret) {
- ANDROID_ERROR(("%s Failed to get p2p dev addr\n", __FUNCTION__));
+ ANDROID_ERROR(("wl_android_get_p2p_dev_addr: Failed to get p2p dev addr\n"));
return -1;
}
return (snprintf(command, total_len, MACF, ETHERP_TO_MACF(&p2pdev_addr)));
}
-#ifdef BCMCCX
-static int wl_android_get_cckm_rn(struct net_device *dev, char *command)
-{
- int error, rn;
-
- WL_TRACE(("%s:wl_android_get_cckm_rn\n", dev->name));
-
- error = wldev_iovar_getint(dev, "cckm_rn", &rn);
- if (unlikely(error)) {
- ANDROID_ERROR(("wl_android_get_cckm_rn error (%d)\n", error));
- return -1;
- }
- memcpy(command, &rn, sizeof(int));
-
- return sizeof(int);
-}
-
-static int
-wl_android_set_cckm_krk(struct net_device *dev, char *command, int total_len)
-{
- int error, key_len, skip_len;
- unsigned char key[CCKM_KRK_LEN + CCKM_BTK_LEN];
- char iovar_buf[WLC_IOCTL_SMLEN];
-
- WL_TRACE(("%s: wl_iw_set_cckm_krk\n", dev->name));
-
- skip_len = strlen("set cckm_krk")+1;
-
- if (total_len < (skip_len + CCKM_KRK_LEN)) {
- return BCME_BADLEN;
- }
-
- if (total_len >= skip_len + CCKM_KRK_LEN + CCKM_BTK_LEN) {
- key_len = CCKM_KRK_LEN + CCKM_BTK_LEN;
- } else {
- key_len = CCKM_KRK_LEN;
- }
-
- memset(iovar_buf, 0, sizeof(iovar_buf));
- memcpy(key, command+skip_len, key_len);
-
- ANDROID_INFO(("CCKM KRK-BTK (%d/%d) :\n", key_len, total_len));
- if (wl_dbg_level & WL_DBG_DBG) {
- prhex(NULL, key, key_len);
- }
-
- error = wldev_iovar_setbuf(dev, "cckm_krk", key, key_len,
- iovar_buf, WLC_IOCTL_SMLEN, NULL);
- if (unlikely(error)) {
- ANDROID_ERROR((" cckm_krk set error (%d)\n", error));
- return -1;
- }
- return 0;
-}
-
-static int wl_android_get_assoc_res_ies(struct net_device *dev, char *command, int total_len)
-{
- int error;
- u8 buf[WL_ASSOC_INFO_MAX];
- wl_assoc_info_t assoc_info;
- u32 resp_ies_len = 0;
- int bytes_written = 0;
-
- WL_TRACE(("%s: wl_iw_get_assoc_res_ies\n", dev->name));
-
- error = wldev_iovar_getbuf(dev, "assoc_info", NULL, 0, buf, WL_ASSOC_INFO_MAX, NULL);
- if (unlikely(error)) {
- ANDROID_ERROR(("could not get assoc info (%d)\n", error));
- return -1;
- }
-
- memcpy(&assoc_info, buf, sizeof(wl_assoc_info_t));
- assoc_info.req_len = htod32(assoc_info.req_len);
- assoc_info.resp_len = htod32(assoc_info.resp_len);
- assoc_info.flags = htod32(assoc_info.flags);
-
- if (assoc_info.resp_len) {
- resp_ies_len = assoc_info.resp_len - sizeof(struct dot11_assoc_resp);
- }
-
- if (total_len < (sizeof(u32) + resp_ies_len)) {
- ANDROID_ERROR(("%s: Insufficient memory, %d bytes\n",
- __FUNCTION__, total_len));
- return -1;
- }
- /* first 4 bytes are ie len */
- memcpy(command, &resp_ies_len, sizeof(u32));
- bytes_written = sizeof(u32);
-
- /* get the association resp IE's if there are any */
- if (resp_ies_len) {
- error = wldev_iovar_getbuf(dev, "assoc_resp_ies", NULL, 0,
- buf, WL_ASSOC_INFO_MAX, NULL);
- if (unlikely(error)) {
- ANDROID_ERROR(("could not get assoc resp_ies (%d)\n", error));
- return -1;
- }
-
- memcpy(command+sizeof(u32), buf, resp_ies_len);
- bytes_written += resp_ies_len;
- }
- return bytes_written;
-}
-
-#endif /* BCMCCX */
-
int
wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist)
{
/* set filtering mode */
if ((ret = wldev_ioctl_set(dev, WLC_SET_MACMODE, &macmode, sizeof(macmode)) != 0)) {
- ANDROID_ERROR(("%s : WLC_SET_MACMODE error=%d\n", __FUNCTION__, ret));
+ ANDROID_ERROR(("wl_android_set_ap_mac_list : WLC_SET_MACMODE error=%d\n", ret));
return ret;
}
if (macmode != MACLIST_MODE_DISABLED) {
/* set the MAC filter list */
if ((ret = wldev_ioctl_set(dev, WLC_SET_MACLIST, maclist,
sizeof(int) + sizeof(struct ether_addr) * maclist->count)) != 0) {
- ANDROID_ERROR(("%s : WLC_SET_MACLIST error=%d\n", __FUNCTION__, ret));
+ ANDROID_ERROR(("wl_android_set_ap_mac_list : WLC_SET_MACLIST error=%d\n", ret));
return ret;
}
/* get the current list of associated STAs */
assoc_maclist->count = MAX_NUM_OF_ASSOCLIST;
if ((ret = wldev_ioctl_get(dev, WLC_GET_ASSOCLIST, assoc_maclist,
sizeof(mac_buf))) != 0) {
- ANDROID_ERROR(("%s : WLC_GET_ASSOCLIST error=%d\n", __FUNCTION__, ret));
+ ANDROID_ERROR(("wl_android_set_ap_mac_list: WLC_GET_ASSOCLIST error=%d\n",
+ ret));
return ret;
}
/* do we have any STA associated? */
match = 0;
/* compare with each entry */
for (j = 0; j < maclist->count; j++) {
- ANDROID_INFO(("%s : associated="MACDBG " list="MACDBG "\n",
- __FUNCTION__, MAC2STRDBG(assoc_maclist->ea[i].octet),
+ ANDROID_INFO(("wl_android_set_ap_mac_list: associated="MACDBG
+ "list = "MACDBG "\n",
+ MAC2STRDBG(assoc_maclist->ea[i].octet),
MAC2STRDBG(maclist->ea[j].octet)));
if (memcmp(assoc_maclist->ea[i].octet,
maclist->ea[j].octet, ETHER_ADDR_LEN) == 0) {
if ((ret = wldev_ioctl_set(dev,
WLC_SCB_DEAUTHENTICATE_FOR_REASON,
&scbval, sizeof(scb_val_t))) != 0)
- ANDROID_ERROR(("%s WLC_SCB_DEAUTHENTICATE error=%d\n",
- __FUNCTION__, ret));
+ ANDROID_ERROR(("wl_android_set_ap_mac_list:"
+ " WLC_SCB_DEAUTHENTICATE"
+ " error=%d\n",
+ ret));
}
}
}
macmode = bcm_atoi(token);
if (macmode < MACLIST_MODE_DISABLED || macmode > MACLIST_MODE_ALLOW) {
- ANDROID_ERROR(("%s : invalid macmode %d\n", __FUNCTION__, macmode));
+ ANDROID_ERROR(("wl_android_set_mac_address_filter: invalid macmode %d\n", macmode));
return -1;
}
}
macnum = bcm_atoi(token);
if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) {
- ANDROID_ERROR(("%s : invalid number of MAC address entries %d\n",
- __FUNCTION__, macnum));
+ ANDROID_ERROR(("wl_android_set_mac_address_filter: invalid number of MAC"
+ " address entries %d\n",
+ macnum));
return -1;
}
/* allocate memory for the MAC list */
list = (struct maclist*) MALLOCZ(dhd->osh, sizeof(int) +
sizeof(struct ether_addr) * macnum);
if (!list) {
- ANDROID_ERROR(("%s : failed to allocate memory\n", __FUNCTION__));
+ ANDROID_ERROR(("wl_android_set_mac_address_filter : failed to allocate memory\n"));
return -1;
}
/* prepare the MAC list */
for (i = 0; i < list->count; i++) {
token = strsep((char**)&str, " ");
if (token == NULL) {
- ANDROID_ERROR(("%s : No mac address present\n", __FUNCTION__));
+ ANDROID_ERROR(("wl_android_set_mac_address_filter : No mac address present\n"));
ret = -EINVAL;
goto exit;
}
- strncpy(eabuf, token, ETHER_ADDR_STR_LEN - 1);
+ strlcpy(eabuf, token, sizeof(eabuf));
if (!(ret = bcm_ether_atoe(eabuf, &list->ea[i]))) {
- ANDROID_ERROR(("%s : mac parsing err index=%d, addr=%s\n",
- __FUNCTION__, i, eabuf));
+ ANDROID_ERROR(("wl_android_set_mac_address_filter : mac parsing err index=%d,"
+ " addr=%s\n",
+ i, eabuf));
list->count = i;
break;
}
- ANDROID_INFO(("%s : %d/%d MACADDR=%s", __FUNCTION__, i, list->count, eabuf));
+ ANDROID_INFO(("wl_android_set_mac_address_filter : %d/%d MACADDR=%s",
+ i, list->count, eabuf));
}
if (i == 0)
goto exit;
/* set the list */
if ((ret = wl_android_set_ap_mac_list(dev, macmode, list)) != 0)
- ANDROID_ERROR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
+ ANDROID_ERROR(("wl_android_set_mac_address_filter: Setting MAC list failed error=%d\n",
+ ret));
exit:
MFREE(dhd->osh, list, sizeof(int) + sizeof(struct ether_addr) * macnum);
int retry = POWERUP_MAX_RETRY;
if (!dev) {
- ANDROID_ERROR(("%s: dev is null\n", __FUNCTION__));
+ ANDROID_ERROR(("wl_android_wifi_on: dev is null\n"));
return -EINVAL;
}
- printf("%s in 1\n", __FUNCTION__);
dhd_net_if_lock(dev);
- printf("%s in 2: g_wifi_on=%d\n", __FUNCTION__, g_wifi_on);
+ WL_MSG(dev->name, "in g_wifi_on=%d\n", g_wifi_on);
if (!g_wifi_on) {
do {
dhd_net_wifi_platform_set_power(dev, TRUE, WIFI_TURNON_DELAY);
} while (retry-- > 0);
if (ret != 0) {
ANDROID_ERROR(("\nfailed to power up wifi chip, max retry reached **\n\n"));
+#ifdef BCM_DETECT_TURN_ON_FAILURE
+ BUG_ON(1);
+#endif /* BCM_DETECT_TURN_ON_FAILURE */
goto exit;
}
#if defined(BCMSDIO) || defined(BCMDBUS)
}
exit:
- printf("%s: Success\n", __FUNCTION__);
+ WL_MSG(dev->name, "Success\n");
dhd_net_if_unlock(dev);
return ret;
dhd_net_bus_suspend(dev);
#endif /* BCMSDIO */
dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY);
- printf("%s: Failed\n", __FUNCTION__);
+ WL_MSG(dev->name, "Failed\n");
dhd_net_if_unlock(dev);
return ret;
#endif /* BCMSDIO || BCMDBUS */
return -EINVAL;
}
- printf("%s in 1\n", __FUNCTION__);
#if defined(BCMPCIE) && defined(DHD_DEBUG_UART)
ret = dhd_debug_uart_is_running(dev);
if (ret) {
- ANDROID_ERROR(("%s - Debug UART App is running\n", __FUNCTION__));
+ ANDROID_ERROR(("wl_android_wifi_off: - Debug UART App is running\n"));
return -EBUSY;
}
#endif /* BCMPCIE && DHD_DEBUG_UART */
dhd_net_if_lock(dev);
- printf("%s in 2: g_wifi_on=%d, on_failure=%d\n", __FUNCTION__, g_wifi_on, on_failure);
+ WL_MSG(dev->name, "in g_wifi_on=%d, on_failure=%d\n", g_wifi_on, on_failure);
if (g_wifi_on || on_failure) {
#if defined(BCMSDIO) || defined(BCMPCIE) || defined(BCMDBUS)
ret = dhd_net_bus_devreset(dev, TRUE);
-#if defined(BCMSDIO)
+#ifdef BCMSDIO
dhd_net_bus_suspend(dev);
#endif /* BCMSDIO */
#endif /* BCMSDIO || BCMPCIE || BCMDBUS */
dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY);
g_wifi_on = FALSE;
}
- printf("%s out\n", __FUNCTION__);
+ WL_MSG(dev->name, "out\n");
dhd_net_if_unlock(dev);
return ret;
u8 result[WLC_IOCTL_SMLEN];
chanim_stats_t *stats;
- memset(¶m, 0, sizeof(param));
+ bzero(¶m, sizeof(param));
param.buflen = htod32(sizeof(wl_chanim_stats_t));
param.count = htod32(WL_CHANIM_COUNT_ONE);
#ifndef DISABLE_IF_COUNTERS
wl_if_stats_t* if_stats = NULL;
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
#endif /* DISABLE_IF_COUNTERS */
int link_speed = 0;
int bytes_written = -1;
int ret = 0;
- ANDROID_INFO(("%s: enter Get Connection Stats\n", __FUNCTION__));
+ ANDROID_INFO(("wl_android_get_connection_stats: enter Get Connection Stats\n"));
if (total_len <= 0) {
- ANDROID_ERROR(("%s: invalid buffer size %d\n", __FUNCTION__, total_len));
+ ANDROID_ERROR(("wl_android_get_connection_stats: invalid buffer size %d\n", total_len));
goto error;
}
bufsize = total_len;
if (bufsize < sizeof(struct connection_stats)) {
- ANDROID_ERROR(("%s: not enough buffer size, provided=%u, requires=%zu\n",
- __FUNCTION__, bufsize,
+ ANDROID_ERROR(("wl_android_get_connection_stats: not enough buffer size, provided=%u,"
+ " requires=%zu\n",
+ bufsize,
sizeof(struct connection_stats)));
goto error;
}
#ifndef DISABLE_IF_COUNTERS
if_stats = (wl_if_stats_t *)MALLOCZ(cfg->osh, sizeof(*if_stats));
if (if_stats == NULL) {
- ANDROID_ERROR(("%s(%d): MALLOCZ failed\n", __FUNCTION__, __LINE__));
+ ANDROID_ERROR(("wl_android_get_connection_stats: MALLOCZ failed\n"));
goto error;
}
- memset(if_stats, 0, sizeof(*if_stats));
+ bzero(if_stats, sizeof(*if_stats));
if (FW_SUPPORTED(dhdp, ifst)) {
ret = wl_cfg80211_ifstats_counters(dev, if_stats);
- } else {
+ } else
+ {
ret = wldev_iovar_getbuf(dev, "if_counters", NULL, 0,
(char *)if_stats, sizeof(*if_stats), NULL);
}
+ ret = wldev_iovar_getbuf(dev, "if_counters", NULL, 0,
+ (char *)if_stats, sizeof(*if_stats), NULL);
if (ret) {
- ANDROID_ERROR(("%s: if_counters not supported ret=%d\n",
- __FUNCTION__, ret));
+ ANDROID_ERROR(("wl_android_get_connection_stats: if_counters not supported ret=%d\n",
+ ret));
/* In case if_stats IOVAR is not supported, get information from counters. */
#endif /* DISABLE_IF_COUNTERS */
}
ret = wl_cntbuf_to_xtlv_format(NULL, iovar_buf, WL_CNTBUF_MAX_SIZE, 0);
if (ret != BCME_OK) {
- ANDROID_ERROR(("%s wl_cntbuf_to_xtlv_format ERR %d\n",
- __FUNCTION__, ret));
+ ANDROID_ERROR(("wl_android_get_connection_stats:"
+ " wl_cntbuf_to_xtlv_format ERR %d\n",
+ ret));
goto error;
}
if (!(wlc_cnt = GET_WLCCNT_FROM_CNTBUF(iovar_buf))) {
- ANDROID_ERROR(("%s wlc_cnt NULL!\n", __FUNCTION__));
+ ANDROID_ERROR(("wl_android_get_connection_stats: wlc_cnt NULL!\n"));
goto error;
}
} else {
/* Populate from if_stats. */
if (dtoh16(if_stats->version) > WL_IF_STATS_T_VERSION) {
- ANDROID_ERROR(("%s: incorrect version of wl_if_stats_t, expected=%u got=%u\n",
- __FUNCTION__, WL_IF_STATS_T_VERSION, if_stats->version));
+ ANDROID_ERROR(("wl_android_get_connection_stats: incorrect version of"
+ " wl_if_stats_t,"
+ " expected=%u got=%u\n",
+ WL_IF_STATS_T_VERSION, if_stats->version));
goto error;
}
/* link_speed is in kbps */
ret = wldev_get_link_speed(dev, &link_speed);
if (ret || link_speed < 0) {
- ANDROID_ERROR(("%s: wldev_get_link_speed() failed, ret=%d, speed=%d\n",
- __FUNCTION__, ret, link_speed));
+ ANDROID_ERROR(("wl_android_get_connection_stats: wldev_get_link_speed()"
+ " failed, ret=%d, speed=%d\n",
+ ret, link_speed));
goto error;
}
/* If natoe subcmd name is not provided, return error */
if (*pcmd == '\0') {
- ANDROID_ERROR(("natoe subcmd not provided %s\n", __FUNCTION__));
+ ANDROID_ERROR(("natoe subcmd not provided wl_android_process_natoe_cmd\n"));
ret = -EINVAL;
return ret;
}
}
else
{
- ANDROID_ERROR(("%s: get command failed code %d\n", __FUNCTION__, res));
+ ANDROID_ERROR(("wl_natoe_get_ioctl: get command failed code %d\n", res));
res = BCME_ERROR;
}
ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
WLC_IOCTL_MEDLEN, cmd_info);
if (ret != BCME_OK) {
- ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__));
+ ANDROID_ERROR(("Fail to get iovar wl_android_natoe_subcmd_enable\n"));
ret = -EINVAL;
}
} else { /* set */
ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
WLC_IOCTL_MEDLEN, cmd_info);
if (ret != BCME_OK) {
- ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__));
+ ANDROID_ERROR(("Fail to get iovar wl_android_natoe_subcmd_config_ips\n"));
ret = -EINVAL;
}
} else { /* set */
/* save buflen at start */
uint16 buflen_at_start = buflen;
- memset(&config_ips, 0, sizeof(config_ips));
+ bzero(&config_ips, sizeof(config_ips));
str = bcmstrtok(&pcmd, " ", NULL);
if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.sta_ip)) {
ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
WLC_IOCTL_MEDLEN, cmd_info);
if (ret != BCME_OK) {
- ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__));
+ ANDROID_ERROR(("Fail to get iovar wl_android_natoe_subcmd_config_ports\n"));
ret = -EINVAL;
}
} else { /* set */
/* save buflen at start */
uint16 buflen_at_start = buflen;
- memset(&ports_config, 0, sizeof(ports_config));
+ bzero(&ports_config, sizeof(ports_config));
str = bcmstrtok(&pcmd, " ", NULL);
if (!str) {
ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
WLC_IOCTL_MAXLEN, cmd_info);
if (ret != BCME_OK) {
- ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__));
+ ANDROID_ERROR(("Fail to get iovar wl_android_natoe_subcmd_dbg_stats\n"));
ret = -EINVAL;
}
} else { /* set */
ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
WLC_IOCTL_MEDLEN, cmd_info);
if (ret != BCME_OK) {
- ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__));
+ ANDROID_ERROR(("Fail to get iovar wl_android_natoe_subcmd_tbl_cnt\n"));
ret = -EINVAL;
}
} else { /* set */
ret = -EINVAL;
goto exit;
}
+ /* Skip for CUSTOMER_HW4 - WNM notification
+ * for cellular data capability is handled by host
+ */
/* send a WNM notification request to associated AP */
if (wl_get_drv_status(cfg, CONNECTED, dev)) {
ANDROID_INFO(("Sending WNM Notif\n"));
}
#endif /* WL_MBO */
+#if defined(CONFIG_WLAN_BEYONDX) || defined(CONFIG_SEC_5GMODEL)
+extern int wl_cfg80211_send_msg_to_ril(void);
+extern void wl_cfg80211_register_dev_ril_bridge_event_notifier(void);
+extern void wl_cfg80211_unregister_dev_ril_bridge_event_notifier(void);
+extern int g_mhs_chan_for_cpcoex;
+#endif /* CONFIG_WLAN_BEYONDX || defined(CONFIG_SEC_5GMODEL) */
+
#if defined(WL_SUPPORT_AUTO_CHANNEL)
/* SoftAP feature */
#define APCS_BAND_2G_LEGACY1 20
#define APCS_MAX_RETRY 10
#define APCS_DEFAULT_2G_CH 1
#define APCS_DEFAULT_5G_CH 149
+
+#ifdef WL_ESCAN
+/* Run an escan-based auto channel selection for SoftAP.
+ * band selects which result is reported (WLC_BAND_5G -> best_5g_ch,
+ * anything else -> best_2g_ch).
+ * Returns the selected channel number, or 0 on failure/timeout.
+ */
+static int
+wl_android_escan_autochannel(struct net_device *dev, uint32 band)
+{
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	wlc_ssid_t ssid;
+	struct wl_escan_info *escan = NULL;
+	int ret = 0, retry = 0, retry_max, retry_interval = 250, channel = 0, up = 1;
+#ifdef WL_CFG80211
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#endif
+
+	if (dhd) {
+		/* Fetch escan state only after the NULL check (previously
+		 * dhd was dereferenced before being checked), and bail out
+		 * if escan state was never allocated.
+		 */
+		escan = dhd->escan;
+		if (escan == NULL)
+			goto done;
+		retry_max = WL_ESCAN_TIMER_INTERVAL_MS/retry_interval;
+		/* bzero for consistency with the rest of this change */
+		bzero(&ssid, sizeof(ssid));
+		ret = wldev_ioctl_get(dev, WLC_GET_UP, &up, sizeof(s32));
+		if (ret < 0 || up == 0) {
+			/* interface must be up before a scan can be issued */
+			ret = wldev_ioctl_set(dev, WLC_UP, &up, sizeof(s32));
+		}
+		/* Phase 1: wait until no scan is in flight, then kick off
+		 * an escan with autochannel evaluation enabled.
+		 */
+		retry = retry_max;
+		while (retry--) {
+#ifdef WL_CFG80211
+			if (wl_get_drv_status_all(cfg, SCANNING) ||
+					escan->escan_state == ESCAN_STATE_SCANING)
+#else
+			if (escan->escan_state == ESCAN_STATE_SCANING)
+#endif
+			{
+				ANDROID_INFO(("Scanning %d tried, ret = %d\n",
+					(retry_max - retry), ret));
+			} else {
+				escan->autochannel = 1;
+				ret = wl_escan_set_scan(dev, dhd, &ssid, TRUE);
+				if (!ret)
+					break;
+			}
+			OSL_SLEEP(retry_interval);
+		}
+		if ((retry == 0) || (ret < 0))
+			goto done;
+		/* Phase 2: poll until the escan completes (state back to
+		 * IDLE) and pick the best channel for the requested band.
+		 */
+		retry = retry_max;
+		while (retry--) {
+			if (escan->escan_state == ESCAN_STATE_IDLE) {
+				if (band == WLC_BAND_5G)
+					channel = escan->best_5g_ch;
+				else
+					channel = escan->best_2g_ch;
+				WL_MSG(dev->name, "selected channel = %d\n", channel);
+				goto done;
+			}
+			ANDROID_INFO(("escan_state=%d, %d tried, ret = %d\n",
+				escan->escan_state, (retry_max - retry), ret));
+			OSL_SLEEP(retry_interval);
+		}
+		if ((retry == 0) || (ret < 0))
+			goto done;
+	}
+
+done:
+	/* always clear the autochannel flag so later scans behave normally */
+	if (escan)
+		escan->autochannel = 0;
+
+	return channel;
+}
+#endif /* WL_ESCAN */
+
static int
wl_android_set_auto_channel(struct net_device *dev, const char* cmd_str,
char* command, int total_len)
int ret = 0;
int spect = 0;
u8 *reqbuf = NULL;
- uint32 band = WLC_BAND_2G;
+ uint32 band = WLC_BAND_2G, sta_band = WLC_BAND_2G;
uint32 buf_size;
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- char *pos = command;
- int band_new, band_cur;
if (cmd_str) {
ANDROID_INFO(("Command: %s len:%d \n", cmd_str, (int)strlen(cmd_str)));
- if (strncmp(cmd_str, APCS_BAND_AUTO, strlen(APCS_BAND_AUTO)) == 0) {
+ if (strnicmp(cmd_str, APCS_BAND_AUTO, strlen(APCS_BAND_AUTO)) == 0) {
band = WLC_BAND_AUTO;
- } else if (strncmp(cmd_str, APCS_BAND_5G, strlen(APCS_BAND_5G)) == 0) {
+ } else if (strnicmp(cmd_str, APCS_BAND_5G, strlen(APCS_BAND_5G)) == 0) {
band = WLC_BAND_5G;
- } else if (strncmp(cmd_str, APCS_BAND_2G, strlen(APCS_BAND_2G)) == 0) {
+ } else if (strnicmp(cmd_str, APCS_BAND_2G, strlen(APCS_BAND_2G)) == 0) {
band = WLC_BAND_2G;
} else {
/*
(channel == APCS_BAND_2G_LEGACY2)) {
band = WLC_BAND_2G;
} else {
- ANDROID_ERROR(("%s: Invalid argument\n", __FUNCTION__));
+ ANDROID_ERROR(("Invalid argument\n"));
return -EINVAL;
}
}
} else {
/* If no argument is provided, default to 2G */
- ANDROID_ERROR(("%s: No argument given default to 2.4G scan\n", __FUNCTION__));
+ ANDROID_ERROR(("No argument given default to 2.4G scan\n"));
band = WLC_BAND_2G;
}
- ANDROID_INFO(("%s : HAPD_AUTO_CHANNEL = %d, band=%d \n", __FUNCTION__, channel, band));
+ ANDROID_INFO(("HAPD_AUTO_CHANNEL = %d, band=%d \n", channel, band));
+
+#if defined(CONFIG_WLAN_BEYONDX) || defined(CONFIG_SEC_5GMODEL)
+ wl_cfg80211_register_dev_ril_bridge_event_notifier();
+ if (band == WLC_BAND_2G) {
+ wl_cfg80211_send_msg_to_ril();
- ret = wldev_ioctl_set(dev, WLC_GET_BAND, &band_cur, sizeof(band_cur));
+ if (g_mhs_chan_for_cpcoex) {
+ channel = g_mhs_chan_for_cpcoex;
+ g_mhs_chan_for_cpcoex = 0;
+ goto done2;
+ }
+ }
+ wl_cfg80211_unregister_dev_ril_bridge_event_notifier();
+#endif /* CONFIG_WLAN_BEYONDX || defined(CONFIG_SEC_5GMODEL) */
/* If STA is connected, return is STA channel, else ACS can be issued,
* set spect to 0 and proceed with ACS
*/
channel = wl_cfg80211_get_sta_channel(cfg);
if (channel) {
- channel = (channel <= CH_MAX_2G_CHANNEL) ?
- channel : APCS_DEFAULT_2G_CH;
+ sta_band = WL_GET_BAND(channel);
+ switch (sta_band) {
+ case (WL_CHANSPEC_BAND_5G): {
+ if (band == WLC_BAND_2G || band == WLC_BAND_AUTO) {
+ channel = APCS_DEFAULT_2G_CH;
+ }
+ break;
+ }
+ case (WL_CHANSPEC_BAND_2G): {
+ if (band == WLC_BAND_5G) {
+ channel = APCS_DEFAULT_5G_CH;
+ }
+ break;
+ }
+ default:
+ /* Intentional fall through to use same sta channel for softap */
+ break;
+ }
+ WL_MSG(dev->name, "band=%d, sta_band=%d, channel=%d\n", band, sta_band, channel);
goto done2;
}
+#ifdef WL_ESCAN
+ channel = wl_android_escan_autochannel(dev, band);
+ if (channel)
+ goto done2;
+ else
+ goto done;
+#endif /* WL_ESCAN */
+
ret = wldev_ioctl_get(dev, WLC_GET_SPECT_MANAGMENT, &spect, sizeof(spect));
if (ret) {
- ANDROID_ERROR(("%s: ACS: error getting the spect, ret=%d\n", __FUNCTION__, ret));
+ ANDROID_ERROR(("ACS: error getting the spect, ret=%d\n", ret));
goto done;
}
if (spect > 0) {
ret = wl_cfg80211_set_spect(dev, 0);
if (ret < 0) {
- ANDROID_ERROR(("%s: ACS: error while setting spect, ret=%d\n", __FUNCTION__, ret));
+ ANDROID_ERROR(("ACS: error while setting spect, ret=%d\n", ret));
goto done;
}
}
reqbuf = (u8 *)MALLOCZ(cfg->osh, CHANSPEC_BUF_SIZE);
if (reqbuf == NULL) {
- ANDROID_ERROR(("%s: failed to allocate chanspec buffer\n", __FUNCTION__));
+ ANDROID_ERROR(("failed to allocate chanspec buffer\n"));
return -ENOMEM;
}
if (band == WLC_BAND_AUTO) {
- ANDROID_INFO(("%s: ACS full channel scan \n", __FUNCTION__));
+ ANDROID_INFO(("ACS full channel scan \n"));
reqbuf[0] = htod32(0);
} else if (band == WLC_BAND_5G) {
- band_new = band_cur==WLC_BAND_2G ? band_cur : WLC_BAND_5G;
- ret = wldev_ioctl_set(dev, WLC_SET_BAND, &band_new, sizeof(band_new));
- if (ret < 0)
- WL_ERR(("WLC_SET_BAND error %d\n", ret));
- ANDROID_INFO(("%s: ACS 5G band scan \n", __FUNCTION__));
+ ANDROID_INFO(("ACS 5G band scan \n"));
if ((ret = wl_cfg80211_get_chanspecs_5g(dev, reqbuf, CHANSPEC_BUF_SIZE)) < 0) {
ANDROID_ERROR(("ACS 5g chanspec retreival failed! \n"));
goto done;
* If channel argument is not provided/ argument 20 is provided,
* Restrict channel to 2GHz, 20MHz BW, No SB
*/
- ANDROID_INFO(("%s: ACS 2G band scan \n", __FUNCTION__));
+ ANDROID_INFO(("ACS 2G band scan \n"));
if ((ret = wl_cfg80211_get_chanspecs_2g(dev, reqbuf, CHANSPEC_BUF_SIZE)) < 0) {
ANDROID_ERROR(("ACS 2g chanspec retreival failed! \n"));
goto done;
goto done2;
}
- buf_size = CHANSPEC_BUF_SIZE;
+ buf_size = (band == WLC_BAND_AUTO) ? sizeof(int) : CHANSPEC_BUF_SIZE;
ret = wldev_ioctl_set(dev, WLC_START_CHANNEL_SEL, (void *)reqbuf,
buf_size);
if (ret < 0) {
- ANDROID_ERROR(("%s: can't start auto channel scan, err = %d\n",
- __FUNCTION__, ret));
+ ANDROID_ERROR(("can't start auto channel scan, err = %d\n", ret));
channel = 0;
goto done;
}
chosen = dtoh32(chosen);
}
- if ((ret == 0) && (dtoh32(chosen) != 0)) {
- uint chip;
- chip = dhd_conf_get_chip(dhd_get_pub(dev));
- if (chip != BCM43143_CHIP_ID) {
- u32 chanspec = 0;
- chanspec = wl_chspec_driver_to_host(chosen);
- ANDROID_INFO(("%s: selected chanspec = 0x%x\n", __FUNCTION__, chanspec));
- chosen = wf_chspec_ctlchan(chanspec);
- ANDROID_INFO(("%s: selected chosen = 0x%x\n", __FUNCTION__, chosen));
- }
- }
-
if (chosen) {
int chosen_band;
int apcs_band;
#endif /* D11AC_IOTYPES */
apcs_band = (band == WLC_BAND_AUTO) ? WLC_BAND_2G : band;
chosen_band = (channel <= CH_MAX_2G_CHANNEL) ? WLC_BAND_2G : WLC_BAND_5G;
- if (band == WLC_BAND_AUTO) {
- printf("%s: selected channel = %d\n", __FUNCTION__, channel);
- break;
- } else if (apcs_band == chosen_band) {
- printf("%s: selected channel = %d\n", __FUNCTION__, channel);
+ if (apcs_band == chosen_band) {
+ WL_MSG(dev->name, "selected channel = %d\n", channel);
break;
}
}
- ANDROID_INFO(("%s: %d tried, ret = %d, chosen = 0x%x\n", __FUNCTION__,
+ ANDROID_INFO(("%d tried, ret = %d, chosen = 0x%x\n",
(APCS_MAX_RETRY - retry), ret, chosen));
OSL_SLEEP(250);
}
} else {
channel = APCS_DEFAULT_2G_CH;
}
- ANDROID_ERROR(("%s: ACS failed."
- " Fall back to default channel (%d) \n", __FUNCTION__, channel));
+ ANDROID_ERROR(("ACS failed. Fall back to default channel (%d) \n", channel));
}
done2:
- ret = wldev_ioctl_set(dev, WLC_SET_BAND, &band_cur, sizeof(band_cur));
- if (ret < 0)
- WL_ERR(("WLC_SET_BAND error %d\n", ret));
if (spect > 0) {
if ((ret = wl_cfg80211_set_spect(dev, spect) < 0)) {
- ANDROID_ERROR(("%s: ACS: error while setting spect\n", __FUNCTION__));
+ ANDROID_ERROR(("ACS: error while setting spect\n"));
}
}
}
if (channel) {
- if (channel < 15)
- pos += snprintf(pos, total_len, "2g=");
- else
- pos += snprintf(pos, total_len, "5g=");
- pos += snprintf(pos, total_len, "%d", channel);
- ANDROID_INFO(("%s: command result is %s \n", __FUNCTION__, command));
- return strlen(command);
- } else {
- return ret;
+ ret = snprintf(command, total_len, "%d", channel);
+ ANDROID_INFO(("command result is %s \n", command));
}
+
+ return ret;
}
#endif /* WL_SUPPORT_AUTO_CHANNEL */
return -ENOMEM;
}
/* Copy the vndr_ie SET command ("add"/"del") to the buffer */
- strncpy(vndr_ie->cmd, "add", VNDR_IE_CMD_LEN - 1);
- vndr_ie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+ strlcpy(vndr_ie->cmd, "add", sizeof(vndr_ie->cmd));
/* Set the IE count - the buffer contains only 1 IE */
iecount = htod32(1);
}
return -ENOMEM;
}
- memset(ioctl_buf, 0, WLC_IOCTL_MEDLEN); /* init the buffer */
+ bzero(ioctl_buf, WLC_IOCTL_MEDLEN); /* init the buffer */
if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
ANDROID_ERROR(("Find index failed\n"));
err = BCME_ERROR;
return err;
}
-#endif
+#endif /* WL_CFG80211 */
#if defined(BCMFW_ROAM_ENABLE)
static int
if (total_len_left < (num_akm_suites * WIDTH_AKM_SUITE))
return -1;
- memset(buf, 0, sizeof(buf));
- memset(akm_suites, 0, sizeof(akm_suites));
- memset(ucipher_suites, 0, sizeof(ucipher_suites));
+ bzero(buf, sizeof(buf));
+ bzero(akm_suites, sizeof(akm_suites));
+ bzero(ucipher_suites, sizeof(ucipher_suites));
/* Save the AKM suites passed in the command */
for (i = 0; i < num_akm_suites; i++) {
memcpy(pref, (uint8 *)&ucipher_suites[i], WPA_SUITE_LEN);
pref += WPA_SUITE_LEN;
/* Set to 0 to match any available multicast cipher */
- memset(pref, 0, WPA_SUITE_LEN);
+ bzero(pref, WPA_SUITE_LEN);
pref += WPA_SUITE_LEN;
}
}
struct list_head *cur, *q;
s32 ret = 0;
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
-
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_safe(cur, q, head) {
config = list_entry(cur, struct io_cfg, list);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
if (config->iovar) {
if (!ret)
ret = wldev_iovar_setint(dev, config->iovar,
wl_android_iolist_resume(dev, &miracast_resume_list);
miracast_cur_mode = MIRACAST_MODE_OFF;
- memset((void *)&config, 0, sizeof(config));
+
+ bzero((void *)&config, sizeof(config));
switch (mode) {
case MIRACAST_MODE_SOURCE:
#ifdef MIRACAST_MCHAN_ALGO
wl_android_iolist_resume(dev, &miracast_resume_list);
return ret;
}
-#endif
+#endif /* WL_CFG80211 */
+#ifdef WL_RELMCAST
#define NETLINK_OXYGEN 30
#define AIBSS_BEACON_TIMEOUT 10
nlmsg_failure:
return ret;
}
+#endif /* WL_RELMCAST */
int wl_keep_alive_set(struct net_device *dev, char* extra)
{
}
ANDROID_ERROR(("%s: period_msec is %d\n", __FUNCTION__, period_msec));
- memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
+ bzero(&mkeep_alive_pkt, sizeof(wl_mkeep_alive_pkt_t));
mkeep_alive_pkt.period_msec = period_msec;
mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
int err = BCME_OK;
char buf[WLC_IOCTL_SMLEN];
tbow_setup_netinfo_t netinfo;
- memset(&netinfo, 0, sizeof(netinfo));
+ bzero(&netinfo, sizeof(netinfo));
netinfo.opmode = TBOW_HO_MODE_TEARDOWN;
err = wldev_iovar_setbuf_bsscfg(dev, "tbow_doho", &netinfo,
uint encode, txexp;
wl_bss_info_t *bi;
int datalen = sizeof(uint32) + sizeof(wl_bss_info_t);
- char buf[datalen];
+ char buf[WLC_IOCTL_SMLEN];
+
+ if (datalen > WLC_IOCTL_SMLEN) {
+ ANDROID_ERROR(("data too big\n"));
+ return -1;
+ }
- memset(buf, 0, datalen);
+ bzero(buf, datalen);
/* get BSS information */
*(u32 *) buf = htod32(datalen);
error = wldev_ioctl_get(dev, WLC_GET_BSS_INFO, (void *)buf, datalen);
s32 bssidx;
int ret = 0;
int p2plo_pause = 0;
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ dhd_pub_t *dhd = NULL;
if (!cfg || !cfg->p2p) {
ANDROID_ERROR(("Wl %p or cfg->p2p %p is null\n",
cfg, cfg ? cfg->p2p : 0));
return 0;
}
+
+ dhd = (dhd_pub_t *)(cfg->pub);
if (!dhd->up) {
ANDROID_ERROR(("bus is already down\n"));
return ret;
}
return ret;
}
+/* Cancel an in-progress P2P listen-offload (p2plo) discovery session.
+ * If discovery offload is marked in progress, clears the
+ * DISC_IN_PROGRESS status bit, notifies cfg80211 that the
+ * remain-on-channel period expired (so user space stops waiting),
+ * and tears down the p2plo state. Safe to call with a NULL cfg.
+ */
+void
+wl_cfg80211_cancel_p2plo(struct bcm_cfg80211 *cfg)
+{
+	struct wireless_dev *wdev;
+	if (!cfg) {
+		return;
+	}
+
+	wdev = bcmcfg_to_p2p_wdev(cfg);
+
+	if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+		WL_INFORM_MEM(("P2P_FIND: Discovery offload is already in progress."
+			"it aborted\n"));
+		wl_clr_p2p_status(cfg, DISC_IN_PROGRESS);
+		if (wdev != NULL) {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+			/* newer cfg80211: wdev-based, 4-argument expiry call */
+			cfg80211_remain_on_channel_expired(wdev,
+				cfg->last_roc_id,
+				&cfg->remain_on_chan, GFP_KERNEL);
+#else
+			/* older cfg80211: channel-type argument still required */
+			cfg80211_remain_on_channel_expired(wdev,
+				cfg->last_roc_id,
+				&cfg->remain_on_chan,
+				cfg->remain_on_chan_type, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+		}
+		wl_cfg80211_p2plo_deinit(cfg);
+	}
+}
#endif /* P2P_LISTEN_OFFLOADING */
#ifdef WL_MURX
}
/* If successful intiate a reassoc */
- memset(&bssid, 0, ETHER_ADDR_LEN);
+ bzero(&bssid, ETHER_ADDR_LEN);
if ((err = wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN)) < 0) {
ANDROID_ERROR(("Failed to get bssid, error=%d\n", err));
return err;
int bytes_written = 0;
bool mimo_rssi = FALSE;
- memset(&rssi_ant_mimo, 0, sizeof(wl_rssi_ant_mimo_t));
+ bzero(&rssi_ant_mimo, sizeof(wl_rssi_ant_mimo_t));
/*
* STA I/F: DRIVER GET_RSSI_PER_ANT <ifname> <mimo>
* AP/GO I/F: DRIVER GET_RSSI_PER_ANT <ifname> <Peer MAC addr> <mimo>
char *pos, *token;
int err = BCME_OK;
- memset(&set_param, 0, sizeof(rssilog_set_param_t));
+ bzero(&set_param, sizeof(rssilog_set_param_t));
/*
* DRIVER SET_RSSI_LOGGING <enable/disable> <RSSI Threshold> <Time Threshold>
*/
#ifdef SET_PCIE_IRQ_CPU_CORE
void
-wl_android_set_irq_cpucore(struct net_device *net, int set)
+wl_android_set_irq_cpucore(struct net_device *net, int affinity_cmd)
{
dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(net);
if (!dhdp) {
ANDROID_ERROR(("dhd is NULL\n"));
return;
}
- dhd_set_irq_cpucore(dhdp, set);
+
+ if (affinity_cmd < PCIE_IRQ_AFFINITY_OFF || affinity_cmd > PCIE_IRQ_AFFINITY_LAST) {
+ ANDROID_ERROR(("Wrong Affinity cmds:%d, %s\n", affinity_cmd, __FUNCTION__));
+ return;
+ }
+
+ dhd_set_irq_cpucore(dhdp, affinity_cmd);
}
#endif /* SET_PCIE_IRQ_CPU_CORE */
tx_lqcm_idx = (lqcm_report & LQCM_TX_INDEX_MASK) >> LQCM_TX_INDEX_SHIFT;
rx_lqcm_idx = (lqcm_report & LQCM_RX_INDEX_MASK) >> LQCM_RX_INDEX_SHIFT;
- ANDROID_ERROR(("lqcm report EN:%d, TX:%d, RX:%d\n", lqcm_enable, tx_lqcm_idx, rx_lqcm_idx));
+ ANDROID_INFO(("lqcm report EN:%d, TX:%d, RX:%d\n", lqcm_enable, tx_lqcm_idx, rx_lqcm_idx));
bytes_written = snprintf(command, total_len, "%s %d",
CMD_GET_LQCM_REPORT, lqcm_report);
return -EINVAL;
ifname = token;
- strncpy(name, ifname, IFNAMSIZ);
- name[IFNAMSIZ-1] = '\0';
+ strlcpy(name, ifname, sizeof(name));
ANDROID_INFO(("ifacename %s\n", name));
bytes_written = wl_get_ap_rps(dev, command, name, total_len);
return -EINVAL;
ifname = token;
- strncpy(name, ifname, IFNAMSIZ);
- name[IFNAMSIZ-1] = '\0';
+ strlcpy(name, ifname, sizeof(name));
ANDROID_INFO(("enable %d, ifacename %s\n", enable, name));
err = wl_set_ap_rps(dev, enable? TRUE: FALSE, name);
int err = BCME_OK;
char name[IFNAMSIZ];
- memset(&rps, 0, sizeof(rps));
+ bzero(&rps, sizeof(rps));
/*
* DRIVER SET_AP_RPS_PARAMS <pps> <level> <quiettime> <assoccheck> <ifname>
*/
if (!token)
return -EINVAL;
ifname = token;
- strncpy(name, ifname, IFNAMSIZ);
- name[IFNAMSIZ-1] = '\0';
+ strlcpy(name, ifname, sizeof(name));
ANDROID_INFO(("pps %d, level %d, quiettime %d, sta_assoc_check %d, "
"ifacename %s\n", rps.pps, rps.level, rps.quiet_time,
}
#endif /* SUPPORT_AP_RADIO_PWRSAVE */
-int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr)
+#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
+static void
+wl_android_check_priv_cmd_errors(struct net_device *dev)
{
-#define PRIVATE_COMMAND_MAX_LEN 8192
-#define PRIVATE_COMMAND_DEF_LEN 4096
- int ret = 0;
- char *command = NULL;
- int bytes_written = 0;
- android_wifi_priv_cmd priv_cmd;
- int buf_size = 0;
- dhd_pub_t *dhd = dhd_get_pub(net);
-
- net_os_wake_lock(net);
+ dhd_pub_t *dhdp;
+ int memdump_mode;
- if (!capable(CAP_NET_ADMIN)) {
- ret = -EPERM;
- goto exit;
+ if (!dev) {
+ ANDROID_ERROR(("dev is NULL\n"));
+ return;
}
- if (!ifr->ifr_data) {
- ret = -EINVAL;
- goto exit;
+ dhdp = wl_cfg80211_get_dhdp(dev);
+ if (!dhdp) {
+ ANDROID_ERROR(("dhdp is NULL\n"));
+ return;
}
-#ifdef CONFIG_COMPAT
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
- if (in_compat_syscall())
+#ifdef DHD_FW_COREDUMP
+ memdump_mode = dhdp->memdump_enabled;
#else
- if (is_compat_task())
-#endif
- {
- compat_android_wifi_priv_cmd compat_priv_cmd;
- if (copy_from_user(&compat_priv_cmd, ifr->ifr_data,
- sizeof(compat_android_wifi_priv_cmd))) {
- ret = -EFAULT;
+ /* Default enable if DHD doesn't support SOCRAM dump */
+ memdump_mode = 1;
+#endif /* DHD_FW_COREDUMP */
+
+ if (report_hang_privcmd_err) {
+ priv_cmd_errors++;
+ } else {
+ priv_cmd_errors = 0;
+ }
+
+ /* Trigger HANG event only if memdump mode is enabled
+ * due to customer's request
+ */
+ if (memdump_mode == DUMP_MEMFILE_BUGON &&
+ (priv_cmd_errors > NUMBER_SEQUENTIAL_PRIVCMD_ERRORS)) {
+ ANDROID_ERROR(("Send HANG event due to sequential private cmd errors\n"));
+ priv_cmd_errors = 0;
+#ifdef DHD_FW_COREDUMP
+ /* Take a SOCRAM dump */
+ dhdp->memdump_type = DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR;
+ dhd_common_socram_dump(dhdp);
+#endif /* DHD_FW_COREDUMP */
+ /* Send the HANG event to upper layer */
+ dhdp->hang_reason = HANG_REASON_SEQUENTIAL_PRIVCMD_ERROR;
+ dhd_os_check_hang(dhdp, 0, -EREMOTEIO);
+ }
+}
+#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
+
+#ifdef SUPPORT_AP_SUSPEND
+int
+wl_android_set_ap_suspend(struct net_device *dev, char *command, int total_len)
+{
+ int suspend = 0;
+ char *pos, *token;
+ char *ifname = NULL;
+ int err = BCME_OK;
+ char name[IFNAMSIZ];
+
+ /*
+ * DRIVER SET_AP_SUSPEND <0/1> <ifname>
+ */
+ pos = command;
+
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+
+ /* Enable */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ return -EINVAL;
+ }
+ suspend = bcm_atoi(token);
+
+ /* get the interface name */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ return -EINVAL;
+ }
+ ifname = token;
+
+ strlcpy(name, ifname, sizeof(name));
+ ANDROID_INFO(("suspend %d, ifacename %s\n", suspend, name));
+
+ err = wl_set_ap_suspend(dev, suspend? TRUE: FALSE, name);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("Failed to set suspend, suspend %d, error = %d\n", suspend, err));
+ }
+
+ return err;
+}
+#endif /* SUPPORT_AP_SUSPEND */
+
+#ifdef SUPPORT_AP_BWCTRL
+int
+wl_android_set_ap_bw(struct net_device *dev, char *command, int total_len)
+{
+ int bw = DOT11_OPER_MODE_20MHZ;
+ char *pos, *token;
+ char *ifname = NULL;
+ int err = BCME_OK;
+ char name[IFNAMSIZ];
+
+ /*
+ * DRIVER SET_AP_BW <0/1/2> <ifname>
+ * 0 : 20MHz, 1 : 40MHz, 2 : 80MHz 3: 80+80 or 160MHz
+ * This is from operating mode field
+ * in 8.4.1.50 of 802.11ac-2013
+ */
+ pos = command;
+
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+
+ /* BW */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ return -EINVAL;
+ }
+ bw = bcm_atoi(token);
+
+ /* get the interface name */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ return -EINVAL;
+ }
+ ifname = token;
+
+ strlcpy(name, ifname, sizeof(name));
+ ANDROID_INFO(("bw %d, ifacename %s\n", bw, name));
+
+ err = wl_set_ap_bw(dev, bw, name);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("Failed to set bw, bw %d, error = %d\n", bw, err));
+ }
+
+ return err;
+}
+
+int
+wl_android_get_ap_bw(struct net_device *dev, char *command, int total_len)
+{
+ char *pos, *token;
+ char *ifname = NULL;
+ int bytes_written = 0;
+ char name[IFNAMSIZ];
+
+ /*
+ * DRIVER GET_AP_BW <ifname>
+ * returns 0 : 20MHz, 1 : 40MHz, 2 : 80MHz 3: 80+80 or 160MHz
+ * This is from operating mode field
+ * in 8.4.1.50 of 802.11ac-2013
+ */
+ pos = command;
+
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+
+ /* get the interface name */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ return -EINVAL;
+ }
+ ifname = token;
+
+ strlcpy(name, ifname, sizeof(name));
+ ANDROID_INFO(("ifacename %s\n", name));
+
+ bytes_written = wl_get_ap_bw(dev, command, name, total_len);
+ if (bytes_written < 1) {
+ ANDROID_ERROR(("Failed to get bw, error = %d\n", bytes_written));
+ return -EPROTO;
+ }
+
+ return bytes_written;
+
+}
+#endif /* SUPPORT_AP_BWCTRL */
+
+int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr)
+{
+#define PRIVATE_COMMAND_MAX_LEN 8192
+#define PRIVATE_COMMAND_DEF_LEN 4096
+ int ret = 0;
+ char *command = NULL;
+ int bytes_written = 0;
+ android_wifi_priv_cmd priv_cmd;
+ int buf_size = 0;
+ dhd_pub_t *dhd = dhd_get_pub(net);
+
+ net_os_wake_lock(net);
+
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ goto exit;
+ }
+
+ if (!ifr->ifr_data) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+#ifdef CONFIG_COMPAT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
+ if (in_compat_syscall())
+#else
+ if (is_compat_task())
+#endif
+ {
+ compat_android_wifi_priv_cmd compat_priv_cmd;
+ if (copy_from_user(&compat_priv_cmd, ifr->ifr_data,
+ sizeof(compat_android_wifi_priv_cmd))) {
+ ret = -EFAULT;
goto exit;
}
}
exit:
+#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
+ if (ret) {
+ /* Avoid incrementing priv_cmd_errors in case of unsupported feature */
+ if (ret != BCME_UNSUPPORTED) {
+ wl_android_check_priv_cmd_errors(net);
+ }
+ } else {
+ priv_cmd_errors = 0;
+ }
+#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
net_os_wake_unlock(net);
MFREE(dhd->osh, command, (buf_size + 1));
return ret;
}
#ifdef WL_BCNRECV
+#define BCNRECV_ATTR_HDR_LEN 30
+int
+wl_android_bcnrecv_event(struct net_device *ndev, uint attr_type,
+ uint status, uint reason, uint8 *data, uint data_len)
+{
+ s32 err = BCME_OK;
+ struct sk_buff *skb;
+ gfp_t kflags;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ uint len;
+
+ len = BCNRECV_ATTR_HDR_LEN + data_len;
+
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ skb = CFG80211_VENDOR_EVENT_ALLOC(wiphy, ndev_to_wdev(ndev), len,
+ BRCM_VENDOR_EVENT_BEACON_RECV, kflags);
+ if (!skb) {
+ ANDROID_ERROR(("skb alloc failed"));
+ return -ENOMEM;
+ }
+ if ((attr_type == BCNRECV_ATTR_BCNINFO) && (data)) {
+ /* send bcn info to upper layer */
+ nla_put(skb, BCNRECV_ATTR_BCNINFO, data_len, data);
+ } else if (attr_type == BCNRECV_ATTR_STATUS) {
+ nla_put_u32(skb, BCNRECV_ATTR_STATUS, status);
+ if (reason) {
+ nla_put_u32(skb, BCNRECV_ATTR_REASON, reason);
+ }
+ } else {
+ ANDROID_ERROR(("UNKNOWN ATTR_TYPE. attr_type:%d\n", attr_type));
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+ cfg80211_vendor_event(skb, kflags);
+ return err;
+}
+
static int
_wl_android_bcnrecv_start(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool user_trigger)
{
s32 err = BCME_OK;
/* check any scan is in progress before beacon recv scan trigger IOVAR */
- if (wl_get_drv_status(cfg, SCANNING, ndev) ||
- wl_get_p2p_status(cfg, SCANNING) ||
- wl_get_drv_status(cfg, REMAINING_ON_CHANNEL, ndev)) {
- ANDROID_ERROR(("Scan in progress, Aborting the scan error:%d\n", err));
- err = BCME_BUSY;
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ err = BCME_UNSUPPORTED;
+ ANDROID_ERROR(("Scan in progress, Aborting beacon recv start, "
+ "error:%d\n", err));
goto exit;
}
- /* Beacon recv required wlan0 interface state, it won't be p2p connected state */
- if (wl_cfgp2p_vif_created(cfg)) {
- ANDROID_ERROR(("Beacon recv required wlan0 interface error:%d\n", err));
+ if (wl_get_p2p_status(cfg, SCANNING)) {
err = BCME_UNSUPPORTED;
+ ANDROID_ERROR(("P2P Scan in progress, Aborting beacon recv start, "
+ "error:%d\n", err));
+ goto exit;
+ }
+
+ if (wl_get_drv_status(cfg, REMAINING_ON_CHANNEL, ndev)) {
+ err = BCME_UNSUPPORTED;
+ ANDROID_ERROR(("P2P remain on channel, Aborting beacon recv start, "
+ "error:%d\n", err));
goto exit;
}
* else exit from beacon recv scan
*/
if (!wl_get_drv_status(cfg, CONNECTED, ndev)) {
+ err = BCME_UNSUPPORTED;
ANDROID_ERROR(("STA is in not associated state error:%d\n", err));
- err = BCME_NOTASSOCIATED;
goto exit;
}
+#ifdef WL_NAN
+ /* Check NAN is enabled, if enabled exit else continue */
+ if (wl_cfgnan_check_state(cfg)) {
+ err = BCME_UNSUPPORTED;
+ ANDROID_ERROR(("Nan is enabled, NAN+STA+FAKEAP concurrency is not supported\n"));
+ goto exit;
+ }
+#endif /* WL_NAN */
+
/* Triggering an sendup_bcn iovar */
err = wldev_iovar_setint(ndev, "sendup_bcn", 1);
if (unlikely(err)) {
ANDROID_ERROR(("sendup_bcn failed to set, error:%d\n", err));
} else {
cfg->bcnrecv_info.bcnrecv_state = BEACON_RECV_STARTED;
- WL_INFORM_MEM(("bcnrecv started\n"));
+ WL_INFORM_MEM(("bcnrecv started. user_trigger:%d\n", user_trigger));
if (user_trigger) {
- WL_INFORM_MEM(("BCN-RECV-STARTED"));
+ if ((err = wl_android_bcnrecv_event(ndev, BCNRECV_ATTR_STATUS,
+ WL_BCNRECV_STARTED, 0, NULL, 0)) != BCME_OK) {
+ ANDROID_ERROR(("failed to send bcnrecv event, error:%d\n", err));
+ }
}
}
exit:
+ /*
+ * BCNRECV start request can be rejected from dongle
+ * in various conditions.
+	 * Error code needs to be overridden to BCME_UNSUPPORTED
+	 * to avoid a hang event from continuous private
+	 * command errors.
+ */
+ if (err) {
+ err = BCME_UNSUPPORTED;
+ }
return err;
}
_wl_android_bcnrecv_stop(struct bcm_cfg80211 *cfg, struct net_device *ndev, uint reason)
{
s32 err = BCME_OK;
+ u32 status;
- /* Triggering an sendup_bcn iovar */
- err = wldev_iovar_setint(ndev, "sendup_bcn", 0);
- if (unlikely(err)) {
- ANDROID_ERROR(("sendup_bcn failed to set error:%d\n", err));
- goto exit;
+	/* Send sendup_bcn iovar for all cases except WL_BCNRECV_ROAMABORT reason -
+ * fw generates roam abort event after aborting the bcnrecv.
+ */
+ if (reason != WL_BCNRECV_ROAMABORT) {
+ /* Triggering an sendup_bcn iovar */
+ err = wldev_iovar_setint(ndev, "sendup_bcn", 0);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("sendup_bcn failed to set error:%d\n", err));
+ goto exit;
+ }
}
- /* Send notification for all cases other than suspend */
+ /* Send notification for all cases */
if (reason == WL_BCNRECV_SUSPEND) {
cfg->bcnrecv_info.bcnrecv_state = BEACON_RECV_SUSPENDED;
+ status = WL_BCNRECV_SUSPENDED;
} else {
cfg->bcnrecv_info.bcnrecv_state = BEACON_RECV_STOPPED;
WL_INFORM_MEM(("bcnrecv stopped\n"));
if (reason == WL_BCNRECV_USER_TRIGGER) {
- WL_INFORM_MEM(("BCN-RECV-STOPPED"));
+ status = WL_BCNRECV_STOPPED;
} else {
- SUPP_EVENT(("CTRL-EVENT-BCN-RECV-ABORTED", "Reason=%d\n", reason));
+ status = WL_BCNRECV_ABORTED;
}
}
+ if ((err = wl_android_bcnrecv_event(ndev, BCNRECV_ATTR_STATUS, status,
+ reason, NULL, 0)) != BCME_OK) {
+ ANDROID_ERROR(("failed to send bcnrecv event, error:%d\n", err));
+ }
exit:
return err;
}
{
s32 err = BCME_OK;
+ /* Adding scan_sync mutex to avoid race condition in b/w scan_req and bcn recv */
+ mutex_lock(&cfg->scan_sync);
mutex_lock(&cfg->bcn_sync);
err = _wl_android_bcnrecv_start(cfg, ndev, true);
mutex_unlock(&cfg->bcn_sync);
+ mutex_unlock(&cfg->scan_sync);
return err;
}
s32 ret = BCME_OK;
struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ /* Adding scan_sync mutex to avoid race condition in b/w scan_req and bcn recv */
+ mutex_lock(&cfg->scan_sync);
mutex_lock(&cfg->bcn_sync);
if (cfg->bcnrecv_info.bcnrecv_state == BEACON_RECV_SUSPENDED) {
WL_INFORM_MEM(("bcnrecv resume\n"));
ret = _wl_android_bcnrecv_start(cfg, ndev, false);
}
mutex_unlock(&cfg->bcn_sync);
+ mutex_unlock(&cfg->scan_sync);
return ret;
}
int
wl_android_bcnrecv_config(struct net_device *ndev, char *cmd_argv, int total_len)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ struct bcm_cfg80211 *cfg = NULL;
uint err = BCME_OK;
- if (!ndev || !cfg) {
- ANDROID_ERROR(("ndev/cfg is NULL\n"));
+ if (!ndev) {
+ ANDROID_ERROR(("ndev is NULL\n"));
+ return -EINVAL;
+ }
+
+ cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ ANDROID_ERROR(("cfg is NULL\n"));
return -EINVAL;
}
}
#endif /* WL_BCNRECV */
+#ifdef WL_CAC_TS
+/* CAC TSPEC functionality code implementation */
+static void
+wl_android_update_tsinfo(uint8 access_category, tspec_arg_t *tspec_arg)
+{
+ uint8 tspec_id;
+ /* Using direction as bidirectional by default */
+ uint8 direction = TSPEC_BI_DIRECTION;
+ /* Using U-APSD as the default power save mode */
+ uint8 user_psb = TSPEC_UAPSD_PSB;
+ uint8 ADDTS_AC2PRIO[4] = {PRIO_8021D_BE, PRIO_8021D_BK, PRIO_8021D_VI, PRIO_8021D_VO};
+
+ /* Map tspec_id from access category */
+ tspec_id = ADDTS_AC2PRIO[access_category];
+
+ /* Update the tsinfo */
+ tspec_arg->tsinfo.octets[0] = (uint8)(TSPEC_EDCA_ACCESS | direction |
+ (tspec_id << TSPEC_TSINFO_TID_SHIFT));
+ tspec_arg->tsinfo.octets[1] = (uint8)((tspec_id << TSPEC_TSINFO_PRIO_SHIFT) |
+ user_psb);
+ tspec_arg->tsinfo.octets[2] = 0x00;
+}
+
+static s32
+wl_android_handle_cac_action(struct bcm_cfg80211 * cfg, struct net_device * ndev, char * argv)
+{
+ tspec_arg_t tspec_arg;
+ s32 err = BCME_ERROR;
+ u8 ts_cmd[12] = "cac_addts";
+ uint8 access_category;
+ s32 bssidx;
+
+ /* Following handling is done only for the primary interface */
+ memset_s(&tspec_arg, sizeof(tspec_arg), 0, sizeof(tspec_arg));
+ if (strncmp(argv, "addts", strlen("addts")) == 0) {
+ tspec_arg.version = TSPEC_ARG_VERSION;
+ tspec_arg.length = sizeof(tspec_arg_t) - (2 * sizeof(uint16));
+ /* Read the params passed */
+ sscanf(argv, "%*s %hhu %hu %hu", &access_category,
+ &tspec_arg.nom_msdu_size, &tspec_arg.surplus_bw);
+ if ((access_category > TSPEC_MAX_ACCESS_CATEGORY) ||
+ ((tspec_arg.surplus_bw < TSPEC_MIN_SURPLUS_BW) ||
+ (tspec_arg.surplus_bw > TSPEC_MAX_SURPLUS_BW)) ||
+ (tspec_arg.nom_msdu_size > TSPEC_MAX_MSDU_SIZE)) {
+ ANDROID_ERROR(("Invalid params access_category %hhu nom_msdu_size %hu"
+ " surplus BW %hu\n", access_category, tspec_arg.nom_msdu_size,
+ tspec_arg.surplus_bw));
+ return BCME_USAGE_ERROR;
+ }
+
+ /* Update tsinfo */
+ wl_android_update_tsinfo(access_category, &tspec_arg);
+ /* Update other tspec parameters */
+ tspec_arg.dialog_token = TSPEC_DEF_DIALOG_TOKEN;
+ tspec_arg.mean_data_rate = TSPEC_DEF_MEAN_DATA_RATE;
+ tspec_arg.min_phy_rate = TSPEC_DEF_MIN_PHY_RATE;
+ } else if (strncmp(argv, "delts", strlen("delts")) == 0) {
+ snprintf(ts_cmd, sizeof(ts_cmd), "cac_delts");
+ tspec_arg.length = sizeof(tspec_arg_t) - (2 * sizeof(uint16));
+ tspec_arg.version = TSPEC_ARG_VERSION;
+ /* Read the params passed */
+ sscanf(argv, "%*s %hhu", &access_category);
+
+ if (access_category > TSPEC_MAX_ACCESS_CATEGORY) {
+ WL_INFORM_MEM(("Invalide param, access_category %hhu\n", access_category));
+ return BCME_USAGE_ERROR;
+ }
+ /* Update tsinfo */
+ wl_android_update_tsinfo(access_category, &tspec_arg);
+ }
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr)) < 0) {
+ ANDROID_ERROR(("Find index failed\n"));
+ err = BCME_ERROR;
+ return err;
+ }
+ err = wldev_iovar_setbuf_bsscfg(ndev, ts_cmd, &tspec_arg, sizeof(tspec_arg),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("%s error (%d)\n", ts_cmd, err));
+ }
+
+ return err;
+}
+
+static s32
+wl_android_cac_ts_config(struct net_device *ndev, char *cmd_argv, int total_len)
+{
+ struct bcm_cfg80211 *cfg = NULL;
+ s32 err = BCME_OK;
+
+ if (!ndev) {
+ ANDROID_ERROR(("ndev is NULL\n"));
+ return -EINVAL;
+ }
+
+ cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ ANDROID_ERROR(("cfg is NULL\n"));
+ return -EINVAL;
+ }
+
+ /* Request supported only for primary interface */
+ if (ndev != bcmcfg_to_prmry_ndev(cfg)) {
+ ANDROID_ERROR(("Request on non-primary interface\n"));
+ return -1;
+ }
+
+ /* sync commands from user space */
+ mutex_lock(&cfg->usr_sync);
+ err = wl_android_handle_cac_action(cfg, ndev, cmd_argv);
+ mutex_unlock(&cfg->usr_sync);
+
+ return err;
+}
+#endif /* WL_CAC_TS */
+
+#ifdef WL_GET_CU
+/* Implementation to get channel usage from framework */
+static s32
+wl_android_get_channel_util(struct net_device *ndev, char *command, int total_len)
+{
+ s32 bytes_written, err = 0;
+ wl_bssload_t bssload;
+ u8 smbuf[WLC_IOCTL_SMLEN];
+ u8 chan_use_percentage = 0;
+
+ if ((err = wldev_iovar_getbuf(ndev, "bssload_report", NULL,
+ 0, smbuf, WLC_IOCTL_SMLEN, NULL))) {
+ ANDROID_ERROR(("Getting bssload report failed with err=%d \n", err));
+ return err;
+ }
+
+ (void)memcpy_s(&bssload, sizeof(wl_bssload_t), smbuf, sizeof(wl_bssload_t));
+ /* Convert channel usage to percentage value */
+ chan_use_percentage = (bssload.chan_util * 100) / 255;
+
+ bytes_written = snprintf(command, total_len, "CU %hhu",
+ chan_use_percentage);
+ ANDROID_INFO(("Channel Utilization %u %u\n", bssload.chan_util, chan_use_percentage));
+
+ return bytes_written;
+}
+#endif /* WL_GET_CU */
+
+#ifdef RTT_GEOFENCE_INTERVAL
+#if defined(RTT_SUPPORT) && defined(WL_NAN)
+static void
+wl_android_set_rtt_geofence_interval(struct net_device *ndev, char *command)
+{
+ int rtt_interval = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
+ char *rtt_intp = command + strlen(CMD_GEOFENCE_INTERVAL) + 1;
+
+ rtt_interval = bcm_atoi(rtt_intp);
+ dhd_rtt_set_geofence_rtt_interval(dhdp, rtt_interval);
+}
+#endif /* RTT_SUPPORT && WL_NAN */
+#endif /* RTT_GEOFENCE_INTERVAL */
+
+#ifdef SUPPORT_SOFTAP_ELNA_BYPASS
+int
+wl_android_set_softap_elna_bypass(struct net_device *dev, char *command, int total_len)
+{
+ char *ifname = NULL;
+ char *pos, *token;
+ int err = BCME_OK;
+ int enable = FALSE;
+
+ /*
+ * STA/AP/GO I/F: DRIVER SET_SOFTAP_ELNA_BYPASS <ifname> <enable/disable>
+ * the enable/disable format follows Samsung specific rules as following
+ * Enable : 0
+ * Disable :-1
+ */
+ pos = command;
+
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+
+ /* get the interface name */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ ANDROID_ERROR(("%s: Invalid arguments about interface name\n", __FUNCTION__));
+ return -EINVAL;
+ }
+ ifname = token;
+
+ /* get enable/disable flag */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ ANDROID_ERROR(("%s: Invalid arguments about Enable/Disable\n", __FUNCTION__));
+ return -EINVAL;
+ }
+ enable = bcm_atoi(token);
+
+ CUSTOMER_HW4_EN_CONVERT(enable);
+ err = wl_set_softap_elna_bypass(dev, ifname, enable);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("%s: Failed to set ELNA Bypass of SoftAP mode, err=%d\n",
+ __FUNCTION__, err));
+ return err;
+ }
+
+ return err;
+}
+
+int
+wl_android_get_softap_elna_bypass(struct net_device *dev, char *command, int total_len)
+{
+ char *ifname = NULL;
+ char *pos, *token;
+ int err = BCME_OK;
+ int bytes_written = 0;
+ int softap_elnabypass = 0;
+
+ /*
+ * STA/AP/GO I/F: DRIVER GET_SOFTAP_ELNA_BYPASS <ifname>
+ */
+ pos = command;
+
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+
+ /* get the interface name */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token) {
+ ANDROID_ERROR(("%s: Invalid arguments about interface name\n", __FUNCTION__));
+ return -EINVAL;
+ }
+ ifname = token;
+
+ err = wl_get_softap_elna_bypass(dev, ifname, &softap_elnabypass);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("%s: Failed to get ELNA Bypass of SoftAP mode, err=%d\n",
+ __FUNCTION__, err));
+ return err;
+ } else {
+ softap_elnabypass--; //Convert format to Customer HW4
+ ANDROID_INFO(("%s: eLNA Bypass feature enable status is %d\n",
+ __FUNCTION__, softap_elnabypass));
+ bytes_written = snprintf(command, total_len, "%s %d",
+ CMD_GET_SOFTAP_ELNA_BYPASS, softap_elnabypass);
+ }
+
+ return bytes_written;
+}
+#endif /* SUPPORT_SOFTAP_ELNA_BYPASS */
+
+#ifdef WL_NAN
+int
+wl_android_get_nan_status(struct net_device *dev, char *command, int total_len)
+{
+ int bytes_written = 0;
+ int error = BCME_OK;
+ wl_nan_conf_status_t nstatus;
+
+ error = wl_cfgnan_get_status(dev, &nstatus);
+ if (error) {
+ ANDROID_ERROR(("Failed to get nan status (%d)\n", error));
+ return error;
+ }
+
+ bytes_written = snprintf(command, total_len,
+ "EN:%d Role:%d EM:%d CID:"MACF" NMI:"MACF" SC(2G):%d SC(5G):%d "
+ "MR:"NMRSTR" AMR:"NMRSTR" IMR:"NMRSTR
+ "HC:%d AMBTT:%04x TSF[%04x:%04x]\n",
+ nstatus.enabled,
+ nstatus.role,
+ nstatus.election_mode,
+ ETHERP_TO_MACF(&(nstatus.cid)),
+ ETHERP_TO_MACF(&(nstatus.nmi)),
+ nstatus.social_chans[0],
+ nstatus.social_chans[1],
+ NMR2STR(nstatus.mr),
+ NMR2STR(nstatus.amr),
+ NMR2STR(nstatus.imr),
+ nstatus.hop_count,
+ nstatus.ambtt,
+ nstatus.cluster_tsf_h,
+ nstatus.cluster_tsf_l);
+ return bytes_written;
+}
+#endif /* WL_NAN */
+
+#ifdef SUPPORT_NAN_RANGING_TEST_BW
+enum {
+ NAN_RANGING_5G_BW20 = 1,
+ NAN_RANGING_5G_BW40,
+ NAN_RANGING_5G_BW80
+};
+
+int
+wl_nan_ranging_bw(struct net_device *net, int bw, char *command)
+{
+ int bytes_written, err = BCME_OK;
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+ s32 val = 1;
+ struct {
+ u32 band;
+ u32 bw_cap;
+ } param = {0, 0};
+
+ if (bw < NAN_RANGING_5G_BW20 || bw > NAN_RANGING_5G_BW80) {
+ ANDROID_ERROR(("Wrong BW cmd:%d, %s\n", bw, __FUNCTION__));
+ bytes_written = scnprintf(command, sizeof("FAIL"), "FAIL");
+ return bytes_written;
+ }
+
+ switch (bw) {
+ case NAN_RANGING_5G_BW20:
+ ANDROID_ERROR(("NAN_RANGING 5G/BW20\n"));
+ param.band = WLC_BAND_5G;
+ param.bw_cap = 0x1;
+ break;
+ case NAN_RANGING_5G_BW40:
+ ANDROID_ERROR(("NAN_RANGING 5G/BW40\n"));
+ param.band = WLC_BAND_5G;
+ param.bw_cap = 0x3;
+ break;
+ case NAN_RANGING_5G_BW80:
+ ANDROID_ERROR(("NAN_RANGING 5G/BW80\n"));
+ param.band = WLC_BAND_5G;
+ param.bw_cap = 0x7;
+ break;
+ }
+
+ err = wldev_ioctl_set(net, WLC_DOWN, &val, sizeof(s32));
+ if (err) {
+ ANDROID_ERROR(("WLC_DOWN error %d\n", err));
+ bytes_written = scnprintf(command, sizeof("FAIL"), "FAIL");
+ } else {
+		err = wldev_iovar_setbuf(net, "bw_cap", &param, sizeof(param),
+ ioctl_buf, sizeof(ioctl_buf), NULL);
+
+ if (err) {
+ ANDROID_ERROR(("BW set failed\n"));
+ bytes_written = scnprintf(command, sizeof("FAIL"), "FAIL");
+ } else {
+ ANDROID_ERROR(("BW set done\n"));
+ bytes_written = scnprintf(command, sizeof("OK"), "OK");
+ }
+
+ err = wldev_ioctl_set(net, WLC_UP, &val, sizeof(s32));
+ if (err < 0) {
+ ANDROID_ERROR(("WLC_UP error %d\n", err));
+ bytes_written = scnprintf(command, sizeof("FAIL"), "FAIL");
+ }
+ }
+ return bytes_written;
+}
+#endif /* SUPPORT_NAN_RANGING_TEST_BW */
+
int
wl_handle_private_cmd(struct net_device *net, char *command, u32 cmd_len)
{
else if (strnicmp(command, CMD_SCAN_PASSIVE, strlen(CMD_SCAN_PASSIVE)) == 0) {
wl_cfg80211_set_passive_scan(net, command);
}
-#endif
+#endif /* WL_CFG80211 */
else if (strnicmp(command, CMD_RSSI, strlen(CMD_RSSI)) == 0) {
bytes_written = wl_android_get_rssi(net, command, priv_cmd.total_len);
}
else if (strnicmp(command, CMD_SETSUSPENDMODE, strlen(CMD_SETSUSPENDMODE)) == 0) {
bytes_written = wl_android_set_suspendmode(net, command);
}
+ else if (strnicmp(command, CMD_SETDTIM_IN_SUSPEND, strlen(CMD_SETDTIM_IN_SUSPEND)) == 0) {
+ bytes_written = wl_android_set_bcn_li_dtim(net, command);
+ }
else if (strnicmp(command, CMD_MAXDTIM_IN_SUSPEND, strlen(CMD_MAXDTIM_IN_SUSPEND)) == 0) {
bytes_written = wl_android_set_max_dtim(net, command);
}
+#ifdef DISABLE_DTIM_IN_SUSPEND
+ else if (strnicmp(command, CMD_DISDTIM_IN_SUSPEND, strlen(CMD_DISDTIM_IN_SUSPEND)) == 0) {
+ bytes_written = wl_android_set_disable_dtim_in_suspend(net, command);
+ }
+#endif /* DISABLE_DTIM_IN_SUSPEND */
#ifdef WL_CFG80211
else if (strnicmp(command, CMD_SETBAND, strlen(CMD_SETBAND)) == 0) {
- uint band = *(command + strlen(CMD_SETBAND) + 1) - '0';
-#ifdef WL_HOST_BAND_MGMT
- s32 ret = 0;
- if ((ret = wl_cfg80211_set_band(net, band)) < 0) {
- if (ret == BCME_UNSUPPORTED) {
- /* If roam_var is unsupported, fallback to the original method */
- ANDROID_ERROR(("WL_HOST_BAND_MGMT defined, "
- "but roam_band iovar unsupported in the firmware\n"));
- } else {
- bytes_written = -1;
- }
- }
- if (((ret == 0) && (band == WLC_BAND_AUTO)) || (ret == BCME_UNSUPPORTED)) {
- /* Apply if roam_band iovar is not supported or band setting is AUTO */
- bytes_written = wldev_set_band(net, band);
- }
-#else
- bytes_written = wl_cfg80211_set_if_band(net, band);
-#endif /* WL_HOST_BAND_MGMT */
-#ifdef ROAM_CHANNEL_CACHE
- wl_update_roamscan_cache_by_band(net, band);
-#endif /* ROAM_CHANNEL_CACHE */
+ bytes_written = wl_android_set_band(net, command);
}
-#endif
+#endif /* WL_CFG80211 */
else if (strnicmp(command, CMD_GETBAND, strlen(CMD_GETBAND)) == 0) {
bytes_written = wl_android_get_band(net, command, priv_cmd.total_len);
}
}
#endif /* WLFBT */
#endif /* WL_CFG80211 */
-#ifdef BCMCCX
- else if (strnicmp(command, CMD_GETCCKM_RN, strlen(CMD_GETCCKM_RN)) == 0) {
- bytes_written = wl_android_get_cckm_rn(net, command);
- }
- else if (strnicmp(command, CMD_SETCCKM_KRK, strlen(CMD_SETCCKM_KRK)) == 0) {
- bytes_written = wl_android_set_cckm_krk(net, command, priv_cmd.total_len);
- }
- else if (strnicmp(command, CMD_GET_ASSOC_RES_IES, strlen(CMD_GET_ASSOC_RES_IES)) == 0) {
- bytes_written = wl_android_get_assoc_res_ies(net, command, priv_cmd.total_len);
- }
-#endif /* BCMCCX */
#if defined(WL_SUPPORT_AUTO_CHANNEL)
else if (strnicmp(command, CMD_GET_BEST_CHANNELS,
strlen(CMD_GET_BEST_CHANNELS)) == 0) {
else if (strnicmp(command, CMD_SETIBSSBEACONOUIDATA, strlen(CMD_SETIBSSBEACONOUIDATA)) == 0)
bytes_written = wl_android_set_ibss_beacon_ouidata(net,
command, priv_cmd.total_len);
-#endif
+#endif /* WL_CFG80211 */
else if (strnicmp(command, CMD_KEEP_ALIVE, strlen(CMD_KEEP_ALIVE)) == 0) {
int skip = strlen(CMD_KEEP_ALIVE) + 1;
bytes_written = wl_keep_alive_set(net, command + skip);
ANDROID_INFO(("Deleteing %s interface\n", name));
bytes_written = wl_cfg80211_del_if(wl_get_cfg(net), net, NULL, name);
}
-#endif
+#endif /* WL_CFG80211 */
else if (strnicmp(command, CMD_GET_LINK_STATUS, strlen(CMD_GET_LINK_STATUS)) == 0) {
bytes_written = wl_android_get_link_status(net, command, priv_cmd.total_len);
}
char *data = (command + strlen(CMD_DFS_AP_MOVE) +1);
bytes_written = wl_cfg80211_dfs_ap_move(net, data, command, priv_cmd.total_len);
}
-#endif
+#endif /* WL_CFG80211 */
#ifdef SET_RPS_CPUS
else if (strnicmp(command, CMD_RPSMODE, strlen(CMD_RPSMODE)) == 0) {
bytes_written = wl_android_set_rps_cpus(net, command);
bytes_written = wl_android_get_ap_rps(net, command, priv_cmd.total_len);
}
#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+#ifdef SUPPORT_AP_SUSPEND
+ else if (strnicmp(command, CMD_SET_AP_SUSPEND, strlen(CMD_SET_AP_SUSPEND)) == 0) {
+ bytes_written = wl_android_set_ap_suspend(net, command, priv_cmd.total_len);
+ }
+#endif /* SUPPORT_AP_SUSPEND */
+#ifdef SUPPORT_AP_BWCTRL
+ else if (strnicmp(command, CMD_SET_AP_BW, strlen(CMD_SET_AP_BW)) == 0) {
+ bytes_written = wl_android_set_ap_bw(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_GET_AP_BW, strlen(CMD_GET_AP_BW)) == 0) {
+ bytes_written = wl_android_get_ap_bw(net, command, priv_cmd.total_len);
+ }
+#endif /* SUPPORT_AP_BWCTRL */
#ifdef SUPPORT_RSSI_SUM_REPORT
else if (strnicmp(command, CMD_SET_RSSI_LOGGING, strlen(CMD_SET_RSSI_LOGGING)) == 0) {
bytes_written = wl_android_set_rssi_logging(net, command, priv_cmd.total_len);
if (strnicmp(command + strlen(CMD_NEW_DEBUG_PRINT_DUMP), " ", 1) == 0) {
/* compare unwanted/disconnected command */
if (strnicmp(command + strlen(CMD_NEW_DEBUG_PRINT_DUMP) + 1,
- SUBCMD_UNWANTED, strlen(SUBCMD_UNWANTED)) == 0) {
+ SUBCMD_UNWANTED, strlen(SUBCMD_UNWANTED)) == 0) {
dhd_log_dump_trigger(dhdp, CMD_UNWANTED);
} else if (strnicmp(command + strlen(CMD_NEW_DEBUG_PRINT_DUMP) + 1,
- SUBCMD_DISCONNECTED, strlen(SUBCMD_DISCONNECTED)) == 0) {
+ SUBCMD_DISCONNECTED, strlen(SUBCMD_DISCONNECTED)) == 0) {
dhd_log_dump_trigger(dhdp, CMD_DISCONNECTED);
} else {
dhd_log_dump_trigger(dhdp, CMD_DEFAULT);
}
}
#endif /* DHD_LOG_DUMP */
+#ifdef DHD_STATUS_LOGGING
+ else if (strnicmp(command, CMD_DUMP_STATUS_LOG, strlen(CMD_DUMP_STATUS_LOG)) == 0) {
+ dhd_statlog_dump_scr(wl_cfg80211_get_dhdp(net));
+ }
+ else if (strnicmp(command, CMD_QUERY_STATUS_LOG, strlen(CMD_QUERY_STATUS_LOG)) == 0) {
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(net);
+ bytes_written = dhd_statlog_query(dhdp, command, priv_cmd.total_len);
+ }
+#endif /* DHD_STATUS_LOGGING */
#ifdef SET_PCIE_IRQ_CPU_CORE
else if (strnicmp(command, CMD_PCIE_IRQ_CORE, strlen(CMD_PCIE_IRQ_CORE)) == 0) {
- int set = *(command + strlen(CMD_PCIE_IRQ_CORE) + 1) - '0';
- wl_android_set_irq_cpucore(net, set);
+ int affinity_cmd = *(command + strlen(CMD_PCIE_IRQ_CORE) + 1) - '0';
+ wl_android_set_irq_cpucore(net, affinity_cmd);
}
#endif /* SET_PCIE_IRQ_CPU_CORE */
#ifdef SUPPORT_LQCM
int verbose_level = *(command + strlen(CMD_DEBUG_VERBOSE) + 1) - '0';
bytes_written = wl_cfg80211_set_dbg_verbose(net, verbose_level);
}
-#endif
-#ifdef WL_MBO
- else if (strnicmp(command, CMD_MBO, strlen(CMD_MBO)) == 0) {
- bytes_written = wl_android_process_mbo_cmd(net, command,
- priv_cmd.total_len);
- }
-#endif /* WL_MBO */
+#endif /* WL_CFG80211 */
#ifdef WL_BCNRECV
else if (strnicmp(command, CMD_BEACON_RECV,
strlen(CMD_BEACON_RECV)) == 0) {
data, priv_cmd.total_len);
}
#endif /* WL_BCNRECV */
+#ifdef WL_MBO
+ else if (strnicmp(command, CMD_MBO, strlen(CMD_MBO)) == 0) {
+ bytes_written = wl_android_process_mbo_cmd(net, command,
+ priv_cmd.total_len);
+ }
+#endif /* WL_MBO */
+#ifdef WL_CAC_TS
+ else if (strnicmp(command, CMD_CAC_TSPEC,
+ strlen(CMD_CAC_TSPEC)) == 0) {
+ char *data = (command + strlen(CMD_CAC_TSPEC) + 1);
+ bytes_written = wl_android_cac_ts_config(net,
+ data, priv_cmd.total_len);
+ }
+#endif /* WL_CAC_TS */
+#ifdef WL_GET_CU
+ else if (strnicmp(command, CMD_GET_CHAN_UTIL,
+ strlen(CMD_GET_CHAN_UTIL)) == 0) {
+ bytes_written = wl_android_get_channel_util(net,
+ command, priv_cmd.total_len);
+ }
+#endif /* WL_GET_CU */
+#ifdef RTT_GEOFENCE_INTERVAL
+#if defined(RTT_SUPPORT) && defined(WL_NAN)
+ else if (strnicmp(command, CMD_GEOFENCE_INTERVAL,
+ strlen(CMD_GEOFENCE_INTERVAL)) == 0) {
+ (void)wl_android_set_rtt_geofence_interval(net, command);
+ }
+#endif /* RTT_SUPPORT && WL_NAN */
+#endif /* RTT_GEOFENCE_INTERVAL */
+#ifdef SUPPORT_SOFTAP_ELNA_BYPASS
+ else if (strnicmp(command, CMD_SET_SOFTAP_ELNA_BYPASS,
+ strlen(CMD_SET_SOFTAP_ELNA_BYPASS)) == 0) {
+ bytes_written =
+ wl_android_set_softap_elna_bypass(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_GET_SOFTAP_ELNA_BYPASS,
+ strlen(CMD_GET_SOFTAP_ELNA_BYPASS)) == 0) {
+ bytes_written =
+ wl_android_get_softap_elna_bypass(net, command, priv_cmd.total_len);
+ }
+#endif /* SUPPORT_SOFTAP_ELNA_BYPASS */
+#ifdef WL_NAN
+ else if (strnicmp(command, CMD_GET_NAN_STATUS,
+ strlen(CMD_GET_NAN_STATUS)) == 0) {
+ bytes_written =
+ wl_android_get_nan_status(net, command, priv_cmd.total_len);
+ }
+#endif /* WL_NAN */
+#if defined(SUPPORT_NAN_RANGING_TEST_BW)
+ else if (strnicmp(command, CMD_NAN_RANGING_SET_BW, strlen(CMD_NAN_RANGING_SET_BW)) == 0) {
+ int bw_cmd = *(command + strlen(CMD_NAN_RANGING_SET_BW) + 1) - '0';
+ bytes_written = wl_nan_ranging_bw(net, bw_cmd, command);
+ }
+#endif /* SUPPORT_NAN_RANGING_TEST_BW */
else if (wl_android_ext_priv_cmd(net, command, priv_cmd.total_len, &bytes_written) == 0) {
}
else {
dhd_download_fw_on_driverload = FALSE;
#endif /* ENABLE_INSMOD_NO_FW_LOAD */
if (!iface_name[0]) {
- memset(iface_name, 0, IFNAMSIZ);
+ bzero(iface_name, IFNAMSIZ);
bcm_strncpy_s(iface_name, IFNAMSIZ, "wlan", IFNAMSIZ);
}
#ifdef WL_GENL
wl_genl_init();
#endif // endif
+#ifdef WL_RELMCAST
wl_netlink_init();
+#endif /* WL_RELMCAST */
return ret;
}
#ifdef WL_GENL
wl_genl_deinit();
#endif /* WL_GENL */
+#ifdef WL_RELMCAST
wl_netlink_deinit();
+#endif /* WL_RELMCAST */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(cur, q, &miracast_resume_list, list) {
+ GCC_DIAGNOSTIC_POP();
list_del(&cur->list);
kfree(cur);
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
return ret;
}
struct net_device *ndev;
struct wireless_dev *wdev = NULL;
int ifidx = WL_STATIC_IFIDX; /* Register ndev with a reserved ifidx */
- s32 mode;
+ u8 mac_addr[ETH_ALEN];
+ struct net_device *primary_ndev;
+#ifdef DHD_USE_RANDMAC
+ struct ether_addr ea_addr;
+#endif /* DHD_USE_RANDMAC */
WL_INFORM_MEM(("[STATIC_IF] Enter (%s) iftype:%d\n", ifname, iftype));
- ndev = wl_cfg80211_allocate_if(cfg, ifidx, ifname, NULL,
+ if (!cfg) {
+ ANDROID_ERROR(("cfg null\n"));
+ return NULL;
+ }
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+#ifdef DHD_USE_RANDMAC
+ dhd_generate_mac_addr(&ea_addr);
+ (void)memcpy_s(mac_addr, ETH_ALEN, ea_addr.octet, ETH_ALEN);
+#else
+ /* Use primary mac with locally admin bit set */
+ (void)memcpy_s(mac_addr, ETH_ALEN, primary_ndev->dev_addr, ETH_ALEN);
+ mac_addr[0] |= 0x02;
+#endif /* DHD_USE_RANDMAC */
+
+ ndev = wl_cfg80211_allocate_if(cfg, ifidx, ifname, mac_addr,
WL_BSSIDX_MAX, NULL);
if (unlikely(!ndev)) {
ANDROID_ERROR(("Failed to allocate static_if\n"));
}
wdev->wiphy = cfg->wdev->wiphy;
-
- mode = wl_iftype_to_mode(iftype);
- wdev->iftype = wl_mode_to_nl80211_iftype(mode);
+ wdev->iftype = iftype;
ndev->ieee80211_ptr = wdev;
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
struct wireless_dev *wdev = NULL;
struct bcm_cfg80211 *cfg = wl_get_cfg(net);
struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ u16 iftype = net->ieee80211_ptr ? net->ieee80211_ptr->iftype : 0;
+ u16 wl_iftype, wl_mode;
WL_INFORM_MEM(("[STATIC_IF] dev_open ndev %p and wdev %p\n", net, net->ieee80211_ptr));
ASSERT(cfg->static_ndev == net);
+ if (cfg80211_to_wl_iftype(iftype, &wl_iftype, &wl_mode) < 0) {
+ return BCME_ERROR;
+ }
if (cfg->static_ndev_state != NDEV_STATE_FW_IF_CREATED) {
- wdev = wl_cfg80211_add_if(cfg, primary_ndev, WL_IF_TYPE_AP, net->name, NULL);
- ASSERT(wdev == net->ieee80211_ptr);
+#ifdef DHD_USE_RANDMAC
+ wdev = wl_cfg80211_add_if(cfg, primary_ndev, wl_iftype, net->name, net->dev_addr);
+#else
+ wdev = wl_cfg80211_add_if(cfg, primary_ndev, wl_iftype, net->name, NULL);
+#endif // endif
+ if (!wdev) {
+ ANDROID_ERROR(("[STATIC_IF] wdev is NULL, can't proceed"));
+ return BCME_ERROR;
+ }
} else {
WL_INFORM_MEM(("Fw IF for static netdev already created\n"));
}
struct bcm_cfg80211 *cfg = wl_get_cfg(net);
struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg);
- WL_INFORM_MEM(("[STATIC_IF] dev_close\n"));
if (cfg->static_ndev_state == NDEV_STATE_FW_IF_CREATED) {
- ret = wl_cfg80211_del_if(cfg, primary_ndev, net->ieee80211_ptr, net->name);
+ if (mutex_is_locked(&cfg->if_sync) == TRUE) {
+ ret = _wl_cfg80211_del_if(cfg, primary_ndev, net->ieee80211_ptr, net->name);
+ } else {
+ ret = wl_cfg80211_del_if(cfg, primary_ndev, net->ieee80211_ptr, net->name);
+ }
+
if (unlikely(ret)) {
ANDROID_ERROR(("Del iface failed for static_if %d\n", ret));
}
wdev = new_ndev->ieee80211_ptr;
ASSERT(wdev);
wdev->iftype = iface_type;
- memcpy_s(new_ndev->dev_addr, ETH_ALEN, addr, ETH_ALEN);
+ (void)memcpy_s(new_ndev->dev_addr, ETH_ALEN, addr, ETH_ALEN);
}
cfg->static_ndev_state = NDEV_STATE_FW_IF_CREATED;
/*
* Linux cfg80211 driver - Android related functions
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_android.h 768773 2018-06-21 08:38:23Z $
+ * $Id: wl_android.h 794110 2018-12-12 05:03:21Z $
*/
#ifndef _wl_android_
*/
/* message levels */
-#define ANDROID_ERROR_LEVEL 0x0001
-#define ANDROID_TRACE_LEVEL 0x0002
-#define ANDROID_INFO_LEVEL 0x0004
-#define ANDROID_EVENT_LEVEL 0x0008
-
-#define ANDROID_MSG(x) \
- do { \
- if (android_msg_level & ANDROID_ERROR_LEVEL) { \
- printk(KERN_ERR "ANDROID-MSG) "); \
- printk x; \
- } \
- } while (0)
-#define ANDROID_ERROR(x) \
- do { \
- if (android_msg_level & ANDROID_ERROR_LEVEL) { \
- printk(KERN_ERR "ANDROID-ERROR) "); \
- printk x; \
- } \
- } while (0)
-#define ANDROID_TRACE(x) \
- do { \
- if (android_msg_level & ANDROID_TRACE_LEVEL) { \
- printk(KERN_ERR "ANDROID-TRACE) "); \
- printk x; \
- } \
- } while (0)
-#define ANDROID_INFO(x) \
- do { \
- if (android_msg_level & ANDROID_INFO_LEVEL) { \
- printk(KERN_ERR "ANDROID-INFO) "); \
- printk x; \
- } \
- } while (0)
-#define ANDROID_EVENT(x) \
+#define ANDROID_ERROR_LEVEL (1 << 0)
+#define ANDROID_TRACE_LEVEL (1 << 1)
+#define ANDROID_INFO_LEVEL (1 << 2)
+#define ANDROID_SCAN_LEVEL (1 << 3)
+#define ANDROID_DBG_LEVEL (1 << 4)
+#define ANDROID_MSG_LEVEL (1 << 0)
+
+#define WL_MSG(name, arg1, args...) \
do { \
- if (android_msg_level & ANDROID_EVENT_LEVEL) { \
- printk(KERN_ERR "ANDROID-EVENT) "); \
- printk x; \
+ if (android_msg_level & ANDROID_MSG_LEVEL) { \
+ printk(KERN_ERR "[dhd-%s] %s : " arg1, name, __func__, ## args); \
} \
} while (0)
int wl_ext_iapsta_attach_netdev(struct net_device *net, int ifidx, uint8 bssidx);
int wl_ext_iapsta_attach_name(struct net_device *net, int ifidx);
int wl_ext_iapsta_dettach_netdev(struct net_device *net, int ifidx);
-u32 wl_ext_iapsta_update_channel(struct net_device *dev, u32 channel);
+u32 wl_ext_iapsta_update_channel(dhd_pub_t *dhd, struct net_device *dev, u32 channel);
int wl_ext_iapsta_alive_preinit(struct net_device *dev);
int wl_ext_iapsta_alive_postinit(struct net_device *dev);
-int wl_ext_iapsta_event(struct net_device *dev, wl_event_msg_t *e, void* data);
int wl_ext_iapsta_attach(dhd_pub_t *pub);
void wl_ext_iapsta_dettach(dhd_pub_t *pub);
bool wl_ext_check_mesh_creating(struct net_device *net);
+#ifdef WL_CFG80211
+void wl_ext_iapsta_update_iftype(struct net_device *net, int ifidx, int wl_iftype);
+#endif
+#ifdef WL_STATIC_IF
+void wl_ext_iapsta_ifadding(struct net_device *net, int ifidx);
+#endif
extern int op_mode;
#endif
typedef struct bcol_gtk_para {
char ptk[64];
char replay[8];
} bcol_gtk_para_t;
+#if defined(WL_EXT_IAPSTA) || defined(USE_IW)
+typedef enum WL_EVENT_PRIO {
+ PRIO_EVENT_IAPSTA,
+ PRIO_EVENT_ESCAN,
+ PRIO_EVENT_WEXT
+}wl_event_prio_t;
+s32 wl_ext_event_attach(struct net_device *dev, dhd_pub_t *dhdp);
+void wl_ext_event_dettach(dhd_pub_t *dhdp);
+int wl_ext_event_attach_netdev(struct net_device *net, int ifidx, uint8 bssidx);
+int wl_ext_event_dettach_netdev(struct net_device *net, int ifidx);
+int wl_ext_event_register(struct net_device *dev, dhd_pub_t *dhd,
+ uint32 event, void *cb_func, void *data, wl_event_prio_t prio);
+void wl_ext_event_deregister(struct net_device *dev, dhd_pub_t *dhd,
+ uint32 event, void *cb_func);
+void wl_ext_event_send(void *params, const wl_event_msg_t * e, void *data);
+#endif
int wl_android_ext_priv_cmd(struct net_device *net, char *command, int total_len,
int *bytes_written);
+void wl_ext_get_sec(struct net_device *dev, int ifmode, char *sec, int total_len);
enum wl_ext_status {
WL_EXT_STATUS_DISCONNECTING = 0,
WL_EXT_STATUS_DISCONNECTED,
WL_EXT_STATUS_AP_DISABLED
};
typedef struct wl_conn_info {
- struct net_device *dev;
- dhd_pub_t *dhd;
uint8 bssidx;
wlc_ssid_t ssid;
struct ether_addr bssid;
uint16 channel;
- struct delayed_work pm_enable_work;
- struct mutex pm_sync;
} wl_conn_info_t;
#if defined(WL_WIRELESS_EXT)
s32 wl_ext_connect(struct net_device *dev, wl_conn_info_t *conn_info);
-void wl_ext_pm_work_handler(struct work_struct * work);
-void wl_ext_add_remove_pm_enable_work(struct wl_conn_info *conn_info, bool add);
#endif /* defined(WL_WIRELESS_EXT) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
#define strnicmp(str1, str2, len) strncasecmp((str1), (str2), (len))
* restrict max number to 10 as maximum cmd string size is 255
*/
#define MAX_NUM_MAC_FILT 10
+#define WL_GET_BAND(ch) (((uint)(ch) <= CH_MAX_2G_CHANNEL) ? \
+ WLC_BAND_2G : WLC_BAND_5G)
int wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist);
#ifdef WL_BCNRECV
extern int wl_android_bcnrecv_stop(struct net_device *ndev, uint reason);
extern int wl_android_bcnrecv_resume(struct net_device *ndev);
extern int wl_android_bcnrecv_suspend(struct net_device *ndev);
+extern int wl_android_bcnrecv_event(struct net_device *ndev,
+ uint attr_type, uint status, uint reason, uint8 *data, uint data_len);
#endif /* WL_BCNRECV */
+#ifdef WL_CAC_TS
+#define TSPEC_UPLINK_DIRECTION (0 << 5) /* uplink direction traffic stream */
+#define TSPEC_DOWNLINK_DIRECTION (1 << 5) /* downlink direction traffic stream */
+#define TSPEC_BI_DIRECTION (3 << 5) /* bi direction traffic stream */
+#define TSPEC_EDCA_ACCESS (1 << 7) /* EDCA access policy */
+#define TSPEC_UAPSD_PSB (1 << 2) /* U-APSD power saving behavior */
+#define TSPEC_TSINFO_TID_SHIFT 1 /* TID Shift */
+#define TSPEC_TSINFO_PRIO_SHIFT 3 /* PRIO Shift */
+#define TSPEC_MAX_ACCESS_CATEGORY 3
+#define TSPEC_MAX_USER_PRIO 7
+#define TSPEC_MAX_DIALOG_TOKEN 255
+#define TSPEC_MAX_SURPLUS_BW 12410
+#define TSPEC_MIN_SURPLUS_BW 11210
+#define TSPEC_MAX_MSDU_SIZE 1520
+#define TSPEC_DEF_MEAN_DATA_RATE 120000
+#define TSPEC_DEF_MIN_PHY_RATE 6000000
+#define TSPEC_DEF_DIALOG_TOKEN 7
+#endif /* WL_CAC_TS */
/* terence:
* BSSCACHE: Cache bss list
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
#endif
-#ifdef WL_ESCAN
-#include <wl_escan.h>
-#endif
-#if defined(WL_WIRELESS_EXT)
-#define WL_PM_ENABLE_TIMEOUT 10000
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
-_Pragma("GCC diagnostic push") \
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
-entry = container_of((ptr), type, member); \
-_Pragma("GCC diagnostic pop")
-#else
-#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
-entry = container_of((ptr), type, member);
-#endif /* STRICT_GCC_WARNINGS */
-#endif /* defined(WL_WIRELESS_EXT) */
+#define AEXT_ERROR(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_ERROR_LEVEL) { \
+ printk(KERN_ERR "[dhd-%s] AEXT-ERROR) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+#define AEXT_TRACE(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_TRACE_LEVEL) { \
+ printk(KERN_ERR "[dhd-%s] AEXT-TRACE) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+#define AEXT_INFO(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_INFO_LEVEL) { \
+ printk(KERN_ERR "[dhd-%s] AEXT-INFO) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+#define AEXT_DBG(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_DBG_LEVEL) { \
+ printk(KERN_ERR "[dhd-%s] AEXT-DBG) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
#ifndef WL_CFG80211
#define htod32(i) i
#define CMD_CHANNEL "CHANNEL"
#define CMD_CHANNELS "CHANNELS"
#define CMD_ROAM_TRIGGER "ROAM_TRIGGER"
-#define CMD_KEEP_ALIVE "KEEP_ALIVE"
#define CMD_PM "PM"
#define CMD_MONITOR "MONITOR"
#define CMD_SET_SUSPEND_BCN_LI_DTIM "SET_SUSPEND_BCN_LI_DTIM"
#endif /* PROP_TXSTATUS_VSDB */
#endif
#endif
-#ifdef IDHCP
-#define CMD_DHCPC_ENABLE "DHCPC_ENABLE"
-#define CMD_DHCPC_DUMP "DHCPC_DUMP"
-#endif
#define CMD_AUTOCHANNEL "AUTOCHANNEL"
#define CMD_WL "WL"
-int wl_ext_ioctl(struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set)
-{
- int ret;
-
- ret = wldev_ioctl(dev, cmd, arg, len, set);
- if (ret)
- ANDROID_ERROR(("%s: cmd=%d ret=%d\n", __FUNCTION__, cmd, ret));
- return ret;
-}
-
-int wl_ext_iovar_getint(struct net_device *dev, s8 *iovar, s32 *val)
-{
- int ret;
-
- ret = wldev_iovar_getint(dev, iovar, val);
- if (ret)
- ANDROID_ERROR(("%s: iovar=%s, ret=%d\n", __FUNCTION__, iovar, ret));
-
- return ret;
-}
-
-int wl_ext_iovar_setint(struct net_device *dev, s8 *iovar, s32 val)
-{
- int ret;
-
- ret = wldev_iovar_setint(dev, iovar, val);
- if (ret)
- ANDROID_ERROR(("%s: iovar=%s, ret=%d\n", __FUNCTION__, iovar, ret));
-
- return ret;
-}
-
-int wl_ext_iovar_getbuf(struct net_device *dev, s8 *iovar_name,
- void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
-{
- int ret;
-
- ret = wldev_iovar_getbuf(dev, iovar_name, param, paramlen, buf, buflen, buf_sync);
- if (ret != 0)
- ANDROID_ERROR(("%s: iovar=%s, ret=%d\n", __FUNCTION__, iovar_name, ret));
-
- return ret;
-}
-
-int wl_ext_iovar_setbuf(struct net_device *dev, s8 *iovar_name,
- void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
-{
- int ret;
-
- ret = wldev_iovar_setbuf(dev, iovar_name, param, paramlen, buf, buflen, buf_sync);
- if (ret != 0)
- ANDROID_ERROR(("%s: iovar=%s, ret=%d\n", __FUNCTION__, iovar_name, ret));
-
- return ret;
-}
-
-static int wl_ext_iovar_setbuf_bsscfg(struct net_device *dev, s8 *iovar_name,
- void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx,
- struct mutex* buf_sync)
-{
- int ret;
-
- ret = wldev_iovar_setbuf_bsscfg(dev, iovar_name, param, paramlen,
- buf, buflen, bsscfg_idx, buf_sync);
- if (ret < 0)
- ANDROID_ERROR(("%s: iovar_name=%s ret=%d\n", __FUNCTION__, iovar_name, ret));
-
- return ret;
-}
-
#ifdef WL_EXT_IAPSTA
-typedef enum IF_STATE {
- IF_STATE_INIT = 1,
- IF_STATE_DISALBE,
- IF_STATE_ENABLE
-} if_state_t;
-
typedef enum APSTAMODE {
ISTAONLY_MODE = 1,
- IAPONLY_MODE,
- IAPSTA_MODE,
- IDUALAP_MODE,
- ISTAAPAP_MODE,
- IMESHONLY_MODE,
- IMESHSTA_MODE,
- IMESHAP_MODE,
- IMESHAPSTA_MODE,
- IMESHAPAP_MODE,
- IGOSTA_MODE
+ IAPONLY_MODE = 2,
+ ISTAAP_MODE = 3,
+ ISTAGO_MODE = 4,
+ ISTASTA_MODE = 5,
+ IDUALAP_MODE = 6,
+ ISTAAPAP_MODE = 7,
+ IMESHONLY_MODE = 8,
+ ISTAMESH_MODE = 9,
+ IMESHAP_MODE = 10,
+ ISTAAPMESH_MODE = 11,
+ IMESHAPAP_MODE = 12
} apstamode_t;
typedef enum IFMODE {
PRIO_AP,
PRIO_MESH,
PRIO_STA
-}wl_prio_t;
+} wl_prio_t;
typedef struct wl_if_info {
struct net_device *dev;
- if_state_t ifstate;
ifmode_t ifmode;
+ unsigned long status;
char prefix;
wl_prio_t prio;
int ifidx;
bool vsdb;
uint csa;
apstamode_t apstamode;
- bool netif_change;
- bool mesh_creating;
wait_queue_head_t netif_change_event;
+ struct mutex usr_sync;
} wl_apsta_params_t;
+#define MAX_AP_LINK_WAIT_TIME 3000
+#define MAX_STA_LINK_WAIT_TIME 15000
+enum wifi_isam_status {
+ ISAM_STATUS_IF_ADDING = 0,
+ ISAM_STATUS_IF_READY,
+ ISAM_STATUS_STA_CONNECTING,
+ ISAM_STATUS_STA_CONNECTED,
+ ISAM_STATUS_AP_CREATING,
+ ISAM_STATUS_AP_CREATED
+};
+
+#define wl_get_isam_status(cur_if, stat) \
+ (test_bit(ISAM_STATUS_ ## stat, &(cur_if)->status))
+#define wl_set_isam_status(cur_if, stat) \
+ (set_bit(ISAM_STATUS_ ## stat, &(cur_if)->status))
+#define wl_clr_isam_status(cur_if, stat) \
+ (clear_bit(ISAM_STATUS_ ## stat, &(cur_if)->status))
+#define wl_chg_isam_status(cur_if, stat) \
+ (change_bit(ISAM_STATUS_ ## stat, &(cur_if)->status))
+
static int wl_ext_enable_iface(struct net_device *dev, char *ifname);
#endif
-/* Return a legacy chanspec given a new chanspec
- * Returns INVCHANSPEC on error
- */
+#ifdef IDHCP
+typedef struct dhcpc_parameter {
+ uint32 ip_addr;
+ uint32 ip_serv;
+ uint32 lease_time;
+} dhcpc_para_t;
+#endif
+
+#ifdef WL_EXT_WOWL
+#define WL_WOWL_TCPFIN (1 << 26)
+typedef struct wl_wowl_pattern2 {
+ char cmd[4];
+ wl_wowl_pattern_t wowl_pattern;
+} wl_wowl_pattern2_t;
+#endif
+
+#ifdef WL_EXT_TCPKA
+typedef struct tcpka_conn {
+ uint32 sess_id;
+ struct ether_addr dst_mac; /* Destinition Mac */
+ struct ipv4_addr src_ip; /* Sorce IP */
+ struct ipv4_addr dst_ip; /* Destinition IP */
+ uint16 ipid; /* Ip Identification */
+ uint16 srcport; /* Source Port Address */
+ uint16 dstport; /* Destination Port Address */
+ uint32 seq; /* TCP Sequence Number */
+ uint32 ack; /* TCP Ack Number */
+ uint16 tcpwin; /* TCP window */
+ uint32 tsval; /* Timestamp Value */
+ uint32 tsecr; /* Timestamp Echo Reply */
+ uint32 len; /* last packet payload len */
+ uint32 ka_payload_len; /* keep alive payload length */
+ uint8 ka_payload[1]; /* keep alive payload */
+} tcpka_conn_t;
+
+typedef struct tcpka_conn_sess {
+ uint32 sess_id; /* session id */
+ uint32 flag; /* enable/disable flag */
+ wl_mtcpkeep_alive_timers_pkt_t tcpka_timers;
+} tcpka_conn_sess_t;
+
+typedef struct tcpka_conn_info {
+ uint32 ipid;
+ uint32 seq;
+ uint32 ack;
+} tcpka_conn_sess_info_t;
+#endif
+
+static int wl_ext_wl_iovar(struct net_device *dev, char *command, int total_len);
+
+static int
+wl_ext_ioctl(struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set)
+{
+ int ret;
+
+ ret = wldev_ioctl(dev, cmd, arg, len, set);
+ if (ret)
+ AEXT_ERROR(dev->name, "cmd=%d, ret=%d\n", cmd, ret);
+ return ret;
+}
+
+static int
+wl_ext_iovar_getint(struct net_device *dev, s8 *iovar, s32 *val)
+{
+ int ret;
+
+ ret = wldev_iovar_getint(dev, iovar, val);
+ if (ret)
+ AEXT_ERROR(dev->name, "iovar=%s, ret=%d\n", iovar, ret);
+
+ return ret;
+}
+
+static int
+wl_ext_iovar_setint(struct net_device *dev, s8 *iovar, s32 val)
+{
+ int ret;
+
+ ret = wldev_iovar_setint(dev, iovar, val);
+ if (ret)
+ AEXT_ERROR(dev->name, "iovar=%s, ret=%d\n", iovar, ret);
+
+ return ret;
+}
+
+static int
+wl_ext_iovar_getbuf(struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
+{
+ int ret;
+
+ ret = wldev_iovar_getbuf(dev, iovar_name, param, paramlen, buf, buflen, buf_sync);
+ if (ret != 0)
+ AEXT_ERROR(dev->name, "iovar=%s, ret=%d\n", iovar_name, ret);
+
+ return ret;
+}
+
+static int
+wl_ext_iovar_setbuf(struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
+{
+ int ret;
+
+ ret = wldev_iovar_setbuf(dev, iovar_name, param, paramlen, buf, buflen, buf_sync);
+ if (ret != 0)
+ AEXT_ERROR(dev->name, "iovar=%s, ret=%d\n", iovar_name, ret);
+
+ return ret;
+}
+
+static int
+wl_ext_iovar_setbuf_bsscfg(struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx,
+ struct mutex* buf_sync)
+{
+ int ret;
+
+ ret = wldev_iovar_setbuf_bsscfg(dev, iovar_name, param, paramlen,
+ buf, buflen, bsscfg_idx, buf_sync);
+ if (ret < 0)
+ AEXT_ERROR(dev->name, "iovar=%s, ret=%d\n", iovar_name, ret);
+
+ return ret;
+}
+
static chanspec_t
wl_ext_chspec_to_legacy(chanspec_t chspec)
{
chanspec_t lchspec;
if (wf_chspec_malformed(chspec)) {
- ANDROID_ERROR(("wl_ext_chspec_to_legacy: input chanspec (0x%04X) malformed\n",
- chspec));
+ AEXT_ERROR("wlan", "input chanspec (0x%04X) malformed\n", chspec);
return INVCHANSPEC;
}
} else {
/* cannot express the bandwidth */
char chanbuf[CHANSPEC_STR_LEN];
- ANDROID_ERROR((
- "wl_ext_chspec_to_legacy: unable to convert chanspec %s (0x%04X) "
- "to pre-11ac format\n",
- wf_chspec_ntoa(chspec, chanbuf), chspec));
+ AEXT_ERROR("wlan", "unable to convert chanspec %s (0x%04X) "
+ "to pre-11ac format\n",
+ wf_chspec_ntoa(chspec, chanbuf), chspec);
return INVCHANSPEC;
}
return lchspec;
}
-/* given a chanspec value, do the endian and chanspec version conversion to
- * a chanspec_t value
- * Returns INVCHANSPEC on error
- */
static chanspec_t
wl_ext_chspec_host_to_driver(int ioctl_ver, chanspec_t chanspec)
{
join_params->params.chanspec_num =
htod32(join_params->params.chanspec_num);
- ANDROID_TRACE(("join_params->params.chanspec_list[0]= %X, %d channels\n",
+ AEXT_ERROR("wlan", "join_params->params.chanspec_list[0]= %X, %d channels\n",
join_params->params.chanspec_list[0],
- join_params->params.chanspec_num));
+ join_params->params.chanspec_num);
}
}
}
if (wf_chspec_malformed(chspec)) {
- ANDROID_ERROR(("wl_ext_chspec_from_legacy: output chanspec (0x%04X) malformed\n",
- chspec));
+ AEXT_ERROR("wlan", "output chanspec (0x%04X) malformed\n", chspec);
return INVCHANSPEC;
}
}
val = dtoh32(val);
if (val != WLC_IOCTL_VERSION && val != 1) {
- ANDROID_ERROR(("Version mismatch, please upgrade. Got %d, expected %d or 1\n",
- val, WLC_IOCTL_VERSION));
+ AEXT_ERROR(dev->name, "Version mismatch, please upgrade. Got %d, expected %d or 1\n",
+ val, WLC_IOCTL_VERSION);
return BCME_VERSION;
}
*ioctl_ver = val;
iovar_buf, WLC_IOCTL_SMLEN, NULL);
if (err) {
if (err != BCME_UNSUPPORTED) {
- ANDROID_ERROR(("bw_cap failed, %d\n", err));
+ AEXT_ERROR(dev->name, "bw_cap failed, %d\n", err);
return err;
} else {
err = wl_ext_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
if (bw == WL_CHANSPEC_BW_80)
goto change_bw;
wl_ext_ioctl(dev, WLC_SET_CHANNEL, &_chan, sizeof(_chan), 1);
- ANDROID_MSG(("%s: channel %d\n", __FUNCTION__, _chan));
+ WL_MSG(dev->name, "channel %d\n", _chan);
} else if (err) {
- ANDROID_ERROR(("%s: failed to set chanspec error %d\n",
- __FUNCTION__, err));
+ AEXT_ERROR(dev->name, "failed to set chanspec error %d\n", err);
} else
- ANDROID_MSG(("%s: %s channel %d, 0x%x\n", __FUNCTION__,
- dev->name, channel, chspec));
+ WL_MSG(dev->name, "channel %d, 0x%x\n", channel, chspec);
} else {
- ANDROID_ERROR(("%s: failed to convert host chanspec to fw chanspec\n",
- __FUNCTION__));
+ AEXT_ERROR(dev->name, "failed to convert host chanspec to fw chanspec\n");
err = BCME_ERROR;
}
} else {
bw = 0;
if (bw)
goto set_channel;
- ANDROID_ERROR(("%s: Invalid chanspec 0x%x\n", __FUNCTION__, chspec));
+ AEXT_ERROR(dev->name, "Invalid chanspec 0x%x\n", chspec);
err = BCME_ERROR;
}
*ret_chspec = fw_chspec;
return err;
}
-int
+static int
wl_ext_channel(struct net_device *dev, char* command, int total_len)
{
int ret;
chanspec_t fw_chspec;
int ioctl_ver = 0;
- ANDROID_TRACE(("%s: cmd %s\n", __FUNCTION__, command));
+ AEXT_TRACE(dev->name, "cmd %s", command);
sscanf(command, "%*s %d", &channel);
} else {
if (!(ret = wl_ext_ioctl(dev, WLC_GET_CHANNEL, &ci,
sizeof(channel_info_t), FALSE))) {
- ANDROID_TRACE(("hw_channel %d\n", ci.hw_channel));
- ANDROID_TRACE(("target_channel %d\n", ci.target_channel));
- ANDROID_TRACE(("scan_channel %d\n", ci.scan_channel));
+ AEXT_TRACE(dev->name, "hw_channel %d\n", ci.hw_channel);
+ AEXT_TRACE(dev->name, "target_channel %d\n", ci.target_channel);
+ AEXT_TRACE(dev->name, "scan_channel %d\n", ci.scan_channel);
bytes_written = snprintf(command, sizeof(channel_info_t)+2,
"channel %d", ci.hw_channel);
- ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
+ AEXT_TRACE(dev->name, "command result is %s\n", command);
ret = bytes_written;
}
}
return ret;
}
-int
+static int
wl_ext_channels(struct net_device *dev, char* command, int total_len)
{
int ret, i;
u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)];
wl_uint32_list_t *list;
- ANDROID_TRACE(("%s: cmd %s\n", __FUNCTION__, command));
+ AEXT_TRACE(dev->name, "cmd %s", command);
memset(valid_chan_list, 0, sizeof(valid_chan_list));
list = (wl_uint32_list_t *)(void *) valid_chan_list;
ret = wl_ext_ioctl(dev, WLC_GET_VALID_CHANNELS, valid_chan_list,
sizeof(valid_chan_list), 0);
if (ret<0) {
- ANDROID_ERROR(("%s: get channels failed with %d\n", __FUNCTION__, ret));
+ AEXT_ERROR(dev->name, "get channels failed with %d\n", ret);
} else {
bytes_written = snprintf(command, total_len, "channels");
for (i = 0; i < dtoh32(list->count); i++) {
bytes_written += snprintf(command+bytes_written, total_len, " %d",
dtoh32(list->element[i]));
}
- ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
+ AEXT_TRACE(dev->name, "command result is %s\n", command);
ret = bytes_written;
}
return ret;
}
-int
+static int
wl_ext_roam_trigger(struct net_device *dev, char* command, int total_len)
{
int ret = 0;
if (!ret)
trigger[1] = roam_trigger[0];
- ANDROID_TRACE(("roam_trigger %d %d\n", trigger[0], trigger[1]));
+ AEXT_TRACE(dev->name, "roam_trigger %d %d\n", trigger[0], trigger[1]);
bytes_written = snprintf(command, total_len, "%d %d", trigger[0], trigger[1]);
ret = bytes_written;
}
}
static int
-wl_ext_pattern_atoh(char *src, char *dst)
-{
- int i;
- if (strncmp(src, "0x", 2) != 0 &&
- strncmp(src, "0X", 2) != 0) {
- ANDROID_ERROR(("Mask invalid format. Needs to start with 0x\n"));
- return -1;
- }
- src = src + 2; /* Skip past 0x */
- if (strlen(src) % 2 != 0) {
- DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
- return -1;
- }
- for (i = 0; *src != '\0'; i++) {
- char num[3];
- bcm_strncpy_s(num, sizeof(num), src, 2);
- num[2] = '\0';
- dst[i] = (uint8)strtoul(num, NULL, 16);
- src += 2;
- }
- return i;
-}
-
-int
-wl_ext_keep_alive(struct net_device *dev, char *command, int total_len)
-{
- wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
- int ret = -1, i;
- int id, period=-1, len_bytes=0, buf_len=0;
- char data[200]="\0";
- char buf[WLC_IOCTL_SMLEN]="\0", iovar_buf[WLC_IOCTL_SMLEN]="\0";
- int bytes_written = -1;
-
- ANDROID_TRACE(("%s: command = %s\n", __FUNCTION__, command));
- sscanf(command, "%*s %d %d %s", &id, &period, data);
- ANDROID_TRACE(("%s: id=%d, period=%d, data=%s\n", __FUNCTION__, id, period, data));
-
- if (period >= 0) {
- mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *)buf;
- mkeep_alive_pktp->version = htod16(WL_MKEEP_ALIVE_VERSION);
- mkeep_alive_pktp->length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
- mkeep_alive_pktp->keep_alive_id = id;
- buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
- mkeep_alive_pktp->period_msec = period;
- if (strlen(data)) {
- len_bytes = wl_ext_pattern_atoh(data, (char *) mkeep_alive_pktp->data);
- buf_len += len_bytes;
- }
- mkeep_alive_pktp->len_bytes = htod16(len_bytes);
-
- ret = wl_ext_iovar_setbuf(dev, "mkeep_alive", buf, buf_len,
- iovar_buf, sizeof(iovar_buf), NULL);
- } else {
- if (id < 0)
- id = 0;
- ret = wl_ext_iovar_getbuf(dev, "mkeep_alive", &id, sizeof(id), buf,
- sizeof(buf), NULL);
- if (ret) {
- goto exit;
- } else {
- mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) buf;
- if (android_msg_level & ANDROID_INFO_LEVEL) {
- printf("Id :%d\n"
- "Period (msec) :%d\n"
- "Length :%d\n"
- "Packet :0x",
- mkeep_alive_pktp->keep_alive_id,
- dtoh32(mkeep_alive_pktp->period_msec),
- dtoh16(mkeep_alive_pktp->len_bytes));
- for (i=0; i<mkeep_alive_pktp->len_bytes; i++) {
- printf("%02x", mkeep_alive_pktp->data[i]);
- }
- printf("\n");
- }
- }
- bytes_written = snprintf(command, total_len, "mkeep_alive_period_msec %d ",
- dtoh32(mkeep_alive_pktp->period_msec));
- bytes_written += snprintf(command+bytes_written, total_len, "0x");
- for (i=0; i<mkeep_alive_pktp->len_bytes; i++) {
- bytes_written += snprintf(command+bytes_written, total_len, "%02x",
- mkeep_alive_pktp->data[i]);
- }
- ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
- ret = bytes_written;
- }
-
-exit:
- return ret;
-}
-
-int
wl_ext_pm(struct net_device *dev, char *command, int total_len)
{
int pm=-1, ret = -1;
char *pm_local;
int bytes_written=-1;
- ANDROID_TRACE(("%s: cmd %s\n", __FUNCTION__, command));
+ AEXT_TRACE(dev->name, "cmd %s", command);
sscanf(command, "%*s %d", &pm);
} else {
ret = wl_ext_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm), 0);
if (!ret) {
- ANDROID_TRACE(("%s: PM = %d\n", __func__, pm));
+ AEXT_TRACE(dev->name, "PM = %d", pm);
if (pm == PM_OFF)
pm_local = "PM_OFF";
else if(pm == PM_MAX)
pm_local = "Invalid";
}
bytes_written = snprintf(command, total_len, "PM %s", pm_local);
- ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
+ AEXT_TRACE(dev->name, "command result is %s\n", command);
ret = bytes_written;
}
}
static int
wl_ext_monitor(struct net_device *dev, char *command, int total_len)
{
- int val, ret = -1;
+ int val = -1, ret = -1;
int bytes_written=-1;
sscanf(command, "%*s %d", &val);
} else {
ret = wl_ext_ioctl(dev, WLC_GET_MONITOR, &val, sizeof(val), 0);
if (!ret) {
- ANDROID_TRACE(("%s: monitor = %d\n", __FUNCTION__, val));
+ AEXT_TRACE(dev->name, "monitor = %d\n", val);
bytes_written = snprintf(command, total_len, "monitor %d", val);
- ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
+ AEXT_TRACE(dev->name, "command result is %s\n", command);
ret = bytes_written;
}
}
s32
wl_ext_connect(struct net_device *dev, struct wl_conn_info *conn_info)
{
- wl_extjoin_params_t *ext_join_params;
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ wl_extjoin_params_t *ext_join_params = NULL;
struct wl_join_params join_params;
size_t join_params_size;
s32 err = 0;
u32 chan_cnt = 0;
- s8 iovar_buf[WLC_IOCTL_SMLEN];
+ s8 *iovar_buf = NULL;
int ioctl_ver = 0;
+ char sec[32];
+
+ if (dhd->conf->chip == BCM43362_CHIP_ID)
+ goto set_ssid;
if (conn_info->channel) {
chan_cnt = 1;
}
+ iovar_buf = kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+ if (iovar_buf == NULL) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
/*
* Join with specific BSSID and cached SSID
* If SSID is zero join based on BSSID only
}
ext_join_params->assoc.chanspec_num = htod32(ext_join_params->assoc.chanspec_num);
+ wl_ext_get_sec(dev, 0, sec, sizeof(sec));
+ WL_MSG(dev->name,
+ "Connecting with %pM channel (%d) ssid \"%s\", len (%d), sec=%s\n\n",
+ &ext_join_params->assoc.bssid, conn_info->channel,
+ ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len, sec);
err = wl_ext_iovar_setbuf_bsscfg(dev, "join", ext_join_params,
- join_params_size, iovar_buf, WLC_IOCTL_SMLEN, conn_info->bssidx, NULL);
+ join_params_size, iovar_buf, WLC_IOCTL_MAXLEN, conn_info->bssidx, NULL);
- ANDROID_MSG(("Connecting with " MACDBG " channel (%d) ssid \"%s\", len (%d)\n\n",
- MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)), conn_info->channel,
- ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len));
-
- kfree(ext_join_params);
if (err) {
if (err == BCME_UNSUPPORTED) {
- ANDROID_TRACE(("join iovar is not supported\n"));
+ AEXT_TRACE(dev->name, "join iovar is not supported\n");
goto set_ssid;
} else {
- ANDROID_ERROR(("error (%d)\n", err));
+ AEXT_ERROR(dev->name, "error (%d)\n", err);
goto exit;
}
} else
memcpy(&join_params.params.bssid, ðer_bcast, ETH_ALEN);
wl_ext_ch_to_chanspec(ioctl_ver, conn_info->channel, &join_params, &join_params_size);
- ANDROID_TRACE(("join_param_size %zu\n", join_params_size));
+ AEXT_TRACE(dev->name, "join_param_size %zu\n", join_params_size);
if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
- ANDROID_INFO(("ssid \"%s\", len (%d)\n", join_params.ssid.SSID,
- join_params.ssid.SSID_len));
- }
+ AEXT_INFO(dev->name, "ssid \"%s\", len (%d)\n", join_params.ssid.SSID,
+ join_params.ssid.SSID_len);
+ }
+ wl_ext_get_sec(dev, 0, sec, sizeof(sec));
+ WL_MSG(dev->name,
+ "Connecting with %pM channel (%d) ssid \"%s\", len (%d), sec=%s\n\n",
+ &join_params.params.bssid, conn_info->channel,
+ join_params.ssid.SSID, join_params.ssid.SSID_len, sec);
err = wl_ext_ioctl(dev, WLC_SET_SSID, &join_params,join_params_size, 1);
exit:
+ if (iovar_buf)
+ kfree(iovar_buf);
+ if (ext_join_params)
+ kfree(ext_join_params);
return err;
}
-#if defined(WL_WIRELESS_EXT)
-void wl_ext_pm_work_handler(struct work_struct * work)
-{
- struct wl_conn_info *conn_info;
- s32 pm = PM_FAST;
- dhd_pub_t *dhd;
- BCM_SET_CONTAINER_OF(conn_info, work, struct wl_conn_info, pm_enable_work.work);
-
- ANDROID_TRACE(("%s: Enter\n", __FUNCTION__));
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic push")
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#endif
-
- dhd = (dhd_pub_t *)(conn_info->dhd);
- if (!dhd || !conn_info->dhd->up) {
- ANDROID_TRACE(("%s: dhd is null or not up\n", __FUNCTION__));
- return;
- }
- if (dhd_conf_get_pm(dhd) >= 0)
- pm = dhd_conf_get_pm(dhd);
- wl_ext_ioctl(conn_info->dev, WLC_SET_PM, &pm, sizeof(pm), 1);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic pop")
-#endif
- DHD_PM_WAKE_UNLOCK(conn_info->dhd);
-
-}
-
-void wl_ext_add_remove_pm_enable_work(struct wl_conn_info *conn_info,
- bool add)
+void
+wl_ext_get_sec(struct net_device *dev, int ifmode, char *sec, int total_len)
{
- u16 wq_duration = 0;
- s32 pm = PM_OFF;
-
- if (conn_info == NULL || conn_info->dhd == NULL)
- return;
-
- mutex_lock(&conn_info->pm_sync);
- /*
- * Make cancel and schedule work part mutually exclusive
- * so that while cancelling, we are sure that there is no
- * work getting scheduled.
- */
+ int auth=-1, wpa_auth=-1, wsec=0;
+ int bytes_written=0;
- if (delayed_work_pending(&conn_info->pm_enable_work)) {
- cancel_delayed_work_sync(&conn_info->pm_enable_work);
- DHD_PM_WAKE_UNLOCK(conn_info->dhd);
- }
+ memset(sec, 0, total_len);
+ wl_ext_iovar_getint(dev, "auth", &auth);
+ wl_ext_iovar_getint(dev, "wpa_auth", &wpa_auth);
+ wl_ext_iovar_getint(dev, "wsec", &wsec);
- if (add) {
- wq_duration = (WL_PM_ENABLE_TIMEOUT);
+#if defined(WL_EXT_IAPSTA) && defined(WLMESH)
+ if (ifmode == IMESH_MODE) {
+ if (auth == 0 && wpa_auth == 0) {
+ bytes_written += snprintf(sec+bytes_written, total_len, "open");
+ } else if (auth == 0 && wpa_auth == 128) {
+ bytes_written += snprintf(sec+bytes_written, total_len, "sae");
+ } else {
+ bytes_written += snprintf(sec+bytes_written, total_len, "unknown");
+ }
+ } else
+#endif
+ if (auth == 0 && wpa_auth == 0) {
+ bytes_written += snprintf(sec+bytes_written, total_len, "open");
+ } else if (auth == 1 && wpa_auth == 0) {
+ bytes_written += snprintf(sec+bytes_written, total_len, "shared");
+ } else if (auth == 0 && wpa_auth == 4) {
+ bytes_written += snprintf(sec+bytes_written, total_len, "wpapsk");
+ } else if (auth == 0 && wpa_auth == 128) {
+ bytes_written += snprintf(sec+bytes_written, total_len, "wpa2psk");
+ } else if (auth == 0 && wpa_auth == 132) {
+ bytes_written += snprintf(sec+bytes_written, total_len, "wpawpa2psk");
+ } else {
+ bytes_written += snprintf(sec+bytes_written, total_len, "(%d/%d)",
+ auth, wpa_auth);
}
- /* It should schedule work item only if driver is up */
- if (wq_duration && conn_info->dhd->up) {
- if (dhd_conf_get_pm(conn_info->dhd) >= 0)
- pm = dhd_conf_get_pm(conn_info->dhd);
- wl_ext_ioctl(conn_info->dev, WLC_SET_PM, &pm, sizeof(pm), 1);
- if (schedule_delayed_work(&conn_info->pm_enable_work,
- msecs_to_jiffies((const unsigned int)wq_duration))) {
- DHD_PM_WAKE_LOCK_TIMEOUT(conn_info->dhd, wq_duration);
+#if defined(WL_EXT_IAPSTA) && defined(WLMESH)
+ if (ifmode == IMESH_MODE) {
+ if (wsec == 0) {
+ bytes_written += snprintf(sec+bytes_written, total_len, "/none");
} else {
- ANDROID_ERROR(("%s: Can't schedule pm work handler\n", __FUNCTION__));
+ bytes_written += snprintf(sec+bytes_written, total_len, "/sae");
}
+ } else
+#endif
+ if (wsec == 0) {
+ bytes_written += snprintf(sec+bytes_written, total_len, "/none");
+ } else if (wsec == 1) {
+ bytes_written += snprintf(sec+bytes_written, total_len, "/wep");
+ } else if (wsec == 2 || wsec == 10) {
+ bytes_written += snprintf(sec+bytes_written, total_len, "/tkip");
+ } else if (wsec == 4 || wsec == 12) {
+ bytes_written += snprintf(sec+bytes_written, total_len, "/aes");
+ } else if (wsec == 6 || wsec == 14) {
+ bytes_written += snprintf(sec+bytes_written, total_len, "/tkipaes");
+ } else {
+ bytes_written += snprintf(sec+bytes_written, total_len, "/%d", wsec);
}
- mutex_unlock(&conn_info->pm_sync);
-
}
-#endif /* defined(WL_WIRELESS_EXT) */
#ifdef WL_EXT_IAPSTA
static int
wl_ext_iovar_setint(dev, "nmode", 0);
val = 0;
wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1);
- ANDROID_TRACE(("%s: Network mode: B only\n", __FUNCTION__));
+ AEXT_TRACE(dev->name, "Network mode: B only\n");
} else if (bgnmode == IEEE80211G) {
wl_ext_iovar_setint(dev, "nmode", 0);
val = 2;
wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1);
- ANDROID_TRACE(("%s: Network mode: G only\n", __FUNCTION__));
+ AEXT_TRACE(dev->name, "Network mode: G only\n");
} else if (bgnmode == IEEE80211BG) {
wl_ext_iovar_setint(dev, "nmode", 0);
val = 1;
wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1);
- ANDROID_TRACE(("%s: Network mode: B/G mixed\n", __FUNCTION__));
+ AEXT_TRACE(dev->name, "Network mode: B/G mixed\n");
} else if (bgnmode == IEEE80211BGN) {
wl_ext_iovar_setint(dev, "nmode", 0);
wl_ext_iovar_setint(dev, "nmode", 1);
wl_ext_iovar_setint(dev, "vhtmode", 0);
val = 1;
wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1);
- ANDROID_TRACE(("%s: Network mode: B/G/N mixed\n", __FUNCTION__));
+ AEXT_TRACE(dev->name, "Network mode: B/G/N mixed\n");
} else if (bgnmode == IEEE80211BGNAC) {
wl_ext_iovar_setint(dev, "nmode", 0);
wl_ext_iovar_setint(dev, "nmode", 1);
wl_ext_iovar_setint(dev, "vhtmode", 1);
val = 1;
wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1);
- ANDROID_TRACE(("%s: Network mode: B/G/N/AC mixed\n", __FUNCTION__));
+ AEXT_TRACE(dev->name, "Network mode: B/G/N/AC mixed\n");
}
wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
return 0;
}
-static void
-wl_ext_get_amode(struct wl_if_info *cur_if, char *amode)
+static int
+wl_ext_set_amode(struct wl_if_info *cur_if)
{
struct net_device *dev = cur_if->dev;
- int auth=-1, wpa_auth=-1;
-
- wl_ext_iovar_getint(dev, "auth", &auth);
- wl_ext_iovar_getint(dev, "wpa_auth", &wpa_auth);
+ authmode_t amode = cur_if->amode;
+ int auth=0, wpa_auth=0;
#ifdef WLMESH
if (cur_if->ifmode == IMESH_MODE) {
- if (auth == 0 && wpa_auth == 0) {
- strcpy(amode, "open");
- } else if (auth == 0 && wpa_auth == 128) {
- strcpy(amode, "sae");
- }
- } else
-#endif
- if (auth == 0 && wpa_auth == 0) {
- strcpy(amode, "open");
- } else if (auth == 1 && wpa_auth == 0) {
- strcpy(amode, "shared");
- } else if (auth == 0 && wpa_auth == 4) {
- strcpy(amode, "wpapsk");
- } else if (auth == 0 && wpa_auth == 128) {
- strcpy(amode, "wpa2psk");
- } else if (auth == 0 && wpa_auth == 132) {
- strcpy(amode, "wpawpa2psk");
- }
-}
-
-static void
-wl_ext_get_emode(struct wl_if_info *cur_if, char *emode)
-{
- struct net_device *dev = cur_if->dev;
- int wsec=0;
-
- wl_ext_iovar_getint(dev, "wsec", &wsec);
-
-#ifdef WLMESH
- if (cur_if->ifmode == IMESH_MODE) {
- if (wsec == 0) {
- strcpy(emode, "none");
- } else {
- strcpy(emode, "sae");
- }
- } else
-#endif
- if (wsec == 0) {
- strcpy(emode, "none");
- } else if (wsec == 1) {
- strcpy(emode, "wep");
- } else if (wsec == 2 || wsec == 10) {
- strcpy(emode, "tkip");
- } else if (wsec == 4 || wsec == 12) {
- strcpy(emode, "aes");
- } else if (wsec == 6 || wsec == 14) {
- strcpy(emode, "tkipaes");
- }
-}
-
-static int
-wl_ext_set_amode(struct wl_if_info *cur_if)
-{
- struct net_device *dev = cur_if->dev;
- authmode_t amode = cur_if->amode;
- int auth=0, wpa_auth=0;
-
-#ifdef WLMESH
- if (cur_if->ifmode == IMESH_MODE) {
- if (amode == AUTH_SAE) {
- auth = 0;
- wpa_auth = 128;
- ANDROID_INFO(("%s: Authentication: SAE\n", __FUNCTION__));
- } else {
- auth = 0;
- wpa_auth = 0;
- ANDROID_INFO(("%s: Authentication: Open System\n", __FUNCTION__));
+ if (amode == AUTH_SAE) {
+ auth = 0;
+ wpa_auth = 128;
+ AEXT_INFO(dev->name, "SAE\n");
+ } else {
+ auth = 0;
+ wpa_auth = 0;
+ AEXT_INFO(dev->name, "Open System\n");
}
} else
#endif
if (amode == AUTH_OPEN) {
auth = 0;
wpa_auth = 0;
- ANDROID_INFO(("%s: Authentication: Open System\n", __FUNCTION__));
+ AEXT_INFO(dev->name, "Open System\n");
} else if (amode == AUTH_SHARED) {
auth = 1;
wpa_auth = 0;
- ANDROID_INFO(("%s: Authentication: Shared Key\n", __FUNCTION__));
+ AEXT_INFO(dev->name, "Shared Key\n");
} else if (amode == AUTH_WPAPSK) {
auth = 0;
wpa_auth = 4;
- ANDROID_INFO(("%s: Authentication: WPA-PSK\n", __FUNCTION__));
+ AEXT_INFO(dev->name, "WPA-PSK\n");
} else if (amode == AUTH_WPA2PSK) {
auth = 0;
wpa_auth = 128;
- ANDROID_INFO(("%s: Authentication: WPA2-PSK\n", __FUNCTION__));
+ AEXT_INFO(dev->name, "WPA2-PSK\n");
} else if (amode == AUTH_WPAWPA2PSK) {
auth = 0;
wpa_auth = 132;
- ANDROID_INFO(("%s: Authentication: WPA/WPA2-PSK\n", __FUNCTION__));
+ AEXT_INFO(dev->name, "WPA/WPA2-PSK\n");
}
#ifdef WLMESH
if (cur_if->ifmode == IMESH_MODE) {
}
static int
-wl_ext_set_emode(struct wl_apsta_params *apsta_params, struct wl_if_info *cur_if)
+wl_ext_set_emode(struct wl_apsta_params *apsta_params,
+ struct wl_if_info *cur_if)
{
struct net_device *dev = cur_if->dev;
int wsec=0;
authmode_t amode = cur_if->amode;
encmode_t emode = cur_if->emode;
char *key = cur_if->key;
- struct dhd_pub *dhd = dhd_get_pub(dev);
+ struct dhd_pub *dhd = apsta_params->dhd;
memset(&wsec_key, 0, sizeof(wsec_key));
memset(&psk, 0, sizeof(psk));
if (cur_if->ifmode == IMESH_MODE) {
if (amode == AUTH_SAE) {
wsec = 4;
- ANDROID_INFO(("%s: Encryption: AES\n", __FUNCTION__));
} else {
wsec = 0;
- ANDROID_INFO(("%s: Encryption: No securiy\n", __FUNCTION__));
}
} else
#endif
if (emode == ENC_NONE) {
wsec = 0;
- ANDROID_INFO(("%s: Encryption: No securiy\n", __FUNCTION__));
+ AEXT_INFO(dev->name, "No securiy\n");
} else if (emode == ENC_WEP) {
wsec = 1;
wl_ext_parse_wep(key, &wsec_key);
- ANDROID_INFO(("%s: Encryption: WEP\n", __FUNCTION__));
- ANDROID_INFO(("%s: Key: \"%s\"\n", __FUNCTION__, wsec_key.data));
+ AEXT_INFO(dev->name, "WEP key \"%s\"\n", wsec_key.data);
} else if (emode == ENC_TKIP) {
wsec = 2;
psk.key_len = strlen(key);
psk.flags = WSEC_PASSPHRASE;
memcpy(psk.key, key, strlen(key));
- ANDROID_INFO(("%s: Encryption: TKIP\n", __FUNCTION__));
- ANDROID_INFO(("%s: Key: \"%s\"\n", __FUNCTION__, psk.key));
+ AEXT_INFO(dev->name, "TKIP key \"%s\"\n", psk.key);
} else if (emode == ENC_AES || amode == AUTH_SAE) {
wsec = 4;
psk.key_len = strlen(key);
psk.flags = WSEC_PASSPHRASE;
memcpy(psk.key, key, strlen(key));
- ANDROID_INFO(("%s: Encryption: AES\n", __FUNCTION__));
- ANDROID_INFO(("%s: Key: \"%s\"\n", __FUNCTION__, psk.key));
+ AEXT_INFO(dev->name, "AES key \"%s\"\n", psk.key);
} else if (emode == ENC_TKIPAES) {
wsec = 6;
psk.key_len = strlen(key);
psk.flags = WSEC_PASSPHRASE;
memcpy(psk.key, key, strlen(key));
- ANDROID_INFO(("%s: Encryption: TKIP/AES\n", __FUNCTION__));
- ANDROID_INFO(("%s: Key: \"%s\"\n", __FUNCTION__, psk.key));
+ AEXT_INFO(dev->name, "TKIP/AES key \"%s\"\n", psk.key);
}
if (dhd->conf->chip == BCM43430_CHIP_ID && cur_if->ifidx > 0 && wsec >= 2 &&
- apsta_params->apstamode == IAPSTA_MODE) {
+ apsta_params->apstamode == ISTAAP_MODE) {
wsec |= 0x8; // terence 20180628: fix me, this is a workaround
}
if (cur_if->ifmode == IMESH_MODE) {
if (amode == AUTH_SAE) {
s8 iovar_buf[WLC_IOCTL_SMLEN];
- ANDROID_INFO(("%s: Key: \"%s\"\n", __FUNCTION__, key));
+ AEXT_INFO(dev->name, "SAE key \"%s\"\n", key);
wl_ext_iovar_setint(dev, "mesh_auth_proto", 1);
wl_ext_iovar_setint(dev, "mfp", WL_MFP_REQUIRED);
wl_ext_iovar_setbuf(dev, "sae_password", key, strlen(key),
iovar_buf, WLC_IOCTL_SMLEN, NULL);
} else {
+ AEXT_INFO(dev->name, "No securiy\n");
wl_ext_iovar_setint(dev, "mesh_auth_proto", 0);
wl_ext_iovar_setint(dev, "mfp", WL_MFP_NONE);
}
if (emode == ENC_WEP) {
wl_ext_ioctl(dev, WLC_SET_KEY, &wsec_key, sizeof(wsec_key), 1);
} else if (emode == ENC_TKIP || emode == ENC_AES || emode == ENC_TKIPAES) {
- if (dev) {
- if (cur_if->ifmode == ISTA_MODE)
- wl_ext_iovar_setint(dev, "sup_wpa", 1);
- wl_ext_ioctl(dev, WLC_SET_WSEC_PMK, &psk, sizeof(psk), 1);
- } else {
- ANDROID_ERROR(("%s: apdev is null\n", __FUNCTION__));
+ if (cur_if->ifmode == ISTA_MODE)
+ wl_ext_iovar_setint(dev, "sup_wpa", 1);
+ wl_ext_ioctl(dev, WLC_SET_WSEC_PMK, &psk, sizeof(psk), 1);
+ }
+
+ return 0;
+}
+
+static u32
+wl_ext_get_chanspec(struct wl_apsta_params *apsta_params,
+ struct net_device *dev)
+{
+ int ret = 0;
+ struct ether_addr bssid;
+ u32 chanspec = 0;
+
+ ret = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, sizeof(bssid), 0);
+	if (ret != BCME_NOTASSOCIATED && memcmp(&ether_null, &bssid, ETHER_ADDR_LEN)) {
+ if (wl_ext_iovar_getint(dev, "chanspec", (s32 *)&chanspec) == BCME_OK) {
+ chanspec = wl_ext_chspec_driver_to_host(apsta_params->ioctl_ver, chanspec);
+ return chanspec;
}
}
}
static chanspec_t
-wl_ext_get_chanspec(struct wl_apsta_params *apsta_params,
+wl_ext_chan_to_chanspec(struct wl_apsta_params *apsta_params,
struct net_device *dev, uint16 channel)
{
s32 _chan = channel;
iovar_buf, WLC_IOCTL_SMLEN, NULL);
if (err) {
if (err != BCME_UNSUPPORTED) {
- ANDROID_ERROR(("bw_cap failed, %d\n", err));
+ AEXT_ERROR(dev->name, "bw_cap failed, %d\n", err);
return err;
} else {
err = wl_ext_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
if (wf_chspec_valid(chspec)) {
fw_chspec = wl_ext_chspec_host_to_driver(apsta_params->ioctl_ver, chspec);
if (fw_chspec == INVCHANSPEC) {
- ANDROID_ERROR(("%s: failed to convert host chanspec to fw chanspec\n",
- __FUNCTION__));
+ AEXT_ERROR(dev->name, "failed to convert host chanspec to fw chanspec\n");
fw_chspec = 0;
}
} else {
bw = 0;
if (bw)
goto set_channel;
- ANDROID_ERROR(("%s: Invalid chanspec 0x%x\n", __FUNCTION__, chspec));
+ AEXT_ERROR(dev->name, "Invalid chanspec 0x%x\n", chspec);
err = BCME_ERROR;
}
return fw_chspec;
}
+static bool
+wl_ext_dfs_chan(struct wl_apsta_params *apsta_params, struct net_device *dev,
+ uint16 chan)
+{
+ u32 channel = chan;
+ s32 err = BCME_OK;
+ bool dfs = FALSE;
+
+ if (chan <= CH_MAX_2G_CHANNEL)
+ return FALSE;
+
+ channel |= WL_CHANSPEC_BAND_5G;
+ channel |= WL_CHANSPEC_BW_20;
+ channel = wl_ext_chspec_host_to_driver(apsta_params->ioctl_ver, channel);
+ err = wl_ext_iovar_getint(dev, "per_chan_info", &channel);
+ if (!err) {
+ if (channel & WL_CHAN_PASSIVE) {
+ dfs = TRUE;
+ }
+ }
+
+ return dfs;
+}
+
static void
wl_ext_wait_netif_change(struct wl_apsta_params *apsta_params,
- bool need_rtnl_unlock)
+ struct wl_if_info *cur_if)
{
- if (need_rtnl_unlock)
- rtnl_unlock();
+ rtnl_unlock();
wait_event_interruptible_timeout(apsta_params->netif_change_event,
- apsta_params->netif_change, msecs_to_jiffies(1500));
- if (need_rtnl_unlock)
- rtnl_lock();
+ wl_get_isam_status(cur_if, IF_READY),
+ msecs_to_jiffies(MAX_AP_LINK_WAIT_TIME));
+ rtnl_lock();
}
bool
{
struct dhd_pub *dhd = dhd_get_pub(net);
struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *cur_if;
+ int i;
- if (apsta_params)
- return apsta_params->mesh_creating;
+ if (apsta_params) {
+ for (i=0; i<MAX_IF_NUM; i++) {
+ cur_if = &apsta_params->if_info[i];
+ if (cur_if->ifmode==IMESH_MODE && wl_get_isam_status(cur_if, IF_ADDING))
+ return TRUE;
+ }
+ }
return FALSE;
}
+bool
+wl_ext_check_other_enabling(struct wl_apsta_params *apsta_params,
+ struct wl_if_info *cur_if)
+{
+ struct wl_if_info *tmp_if;
+ bool enabling = FALSE;
+ u32 timeout = 1;
+ int i;
+
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if->dev && tmp_if->dev != cur_if->dev) {
+ if (tmp_if->ifmode == ISTA_MODE)
+ enabling = wl_get_isam_status(tmp_if, STA_CONNECTING);
+ else if (tmp_if->ifmode == IAP_MODE || tmp_if->ifmode == IMESH_MODE)
+ enabling = wl_get_isam_status(tmp_if, AP_CREATING);
+ if (enabling)
+ WL_MSG(cur_if->ifname, "waiting for %s[%c] enabling...\n",
+ tmp_if->ifname, tmp_if->prefix);
+ if (enabling && tmp_if->ifmode == ISTA_MODE) {
+ timeout = wait_event_interruptible_timeout(
+ apsta_params->netif_change_event,
+ !wl_get_isam_status(tmp_if, STA_CONNECTING),
+ msecs_to_jiffies(MAX_STA_LINK_WAIT_TIME));
+ } else if (enabling &&
+ (tmp_if->ifmode == IAP_MODE || tmp_if->ifmode == IMESH_MODE)) {
+ timeout = wait_event_interruptible_timeout(
+ apsta_params->netif_change_event,
+ !wl_get_isam_status(tmp_if, AP_CREATING),
+ msecs_to_jiffies(MAX_STA_LINK_WAIT_TIME));
+ }
+ if (tmp_if->ifmode == ISTA_MODE)
+ enabling = wl_get_isam_status(tmp_if, STA_CONNECTING);
+ else if (tmp_if->ifmode == IAP_MODE || tmp_if->ifmode == IMESH_MODE)
+ enabling = wl_get_isam_status(tmp_if, AP_CREATING);
+ if (timeout <= 0 || enabling) {
+ WL_MSG(cur_if->ifname, "%s[%c] is still enabling...\n",
+ tmp_if->ifname, tmp_if->prefix);
+ }
+ }
+ }
+
+ return enabling;
+}
+
static void
wl_ext_iapsta_preinit(struct net_device *dev, struct wl_apsta_params *apsta_params)
{
struct dhd_pub *dhd;
apstamode_t apstamode = apsta_params->apstamode;
- wl_interface_create_t iface;
struct wl_if_info *cur_if;
- wlc_ssid_t ssid = { 0, {0} };
s8 iovar_buf[WLC_IOCTL_SMLEN];
- wl_country_t cspec = {{0}, 0, {0}};
- wl_p2p_if_t ifreq;
s32 val = 0;
- int i, dfs = 1;
+ int i, dfs = 1, disable_5g_band = 0;
dhd = dhd_get_pub(dev);
for (i=0; i<MAX_IF_NUM; i++) {
cur_if = &apsta_params->if_info[i];
- if (i == 1 && !strlen(cur_if->ifname))
- strcpy(cur_if->ifname, "wlan1");
- if (i == 2 && !strlen(cur_if->ifname))
- strcpy(cur_if->ifname, "wlan2");
+ if (i >= 1 && !strlen(cur_if->ifname))
+ snprintf(cur_if->ifname, IFNAMSIZ, "wlan%d", i);
if (cur_if->ifmode == ISTA_MODE) {
cur_if->channel = 0;
cur_if->maxassoc = -1;
- cur_if->ifstate = IF_STATE_INIT;
cur_if->prio = PRIO_STA;
cur_if->prefix = 'S';
snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_sta");
} else if (cur_if->ifmode == IAP_MODE) {
cur_if->channel = 1;
cur_if->maxassoc = -1;
- cur_if->ifstate = IF_STATE_INIT;
cur_if->prio = PRIO_AP;
cur_if->prefix = 'A';
snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_ap");
} else if (cur_if->ifmode == IMESH_MODE) {
cur_if->channel = 1;
cur_if->maxassoc = -1;
- cur_if->ifstate = IF_STATE_INIT;
cur_if->prio = PRIO_MESH;
cur_if->prefix = 'M';
snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_mesh");
}
if (!dfs && !apsta_params->vsdb) {
- dhd_conf_get_country(dhd, &cspec);
- if (!dhd_conf_map_country_list(dhd, &cspec)) {
- dhd_conf_set_country(dhd, &cspec);
- dhd_bus_country_set(dev, &cspec, TRUE);
- }
wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
- wl_ext_iovar_setint(dev, "dfs_chan_disable", 1);
+ wl_ext_iovar_getint(dev, "disable_5g_band", &disable_5g_band);
+ disable_5g_band |= 0x6;
+ wl_ext_iovar_setint(dev, "disable_5g_band", disable_5g_band);
wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+#ifdef WL_CFG80211
+ if (dhd->up) {
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ wl_update_wiphybands(cfg, true);
+ }
+#endif
}
if (FW_SUPPORTED(dhd, rsdb)) {
apsta_params->rsdb = TRUE;
else if (apstamode == ISTAAPAP_MODE)
apsta_params->rsdb = FALSE;
- if (apstamode == IDUALAP_MODE || apstamode == ISTAAPAP_MODE ||
- apstamode == IMESHONLY_MODE || apstamode == IMESHSTA_MODE ||
- apstamode == IMESHAP_MODE || apstamode == IMESHAPSTA_MODE ||
- apstamode == IMESHAPAP_MODE) {
+ if (apstamode == ISTAAP_MODE || apstamode == ISTAAPAP_MODE ||
+ apstamode == IMESHONLY_MODE || apstamode == ISTAMESH_MODE ||
+ apstamode == IMESHAP_MODE || apstamode == ISTAAPMESH_MODE ||
+ apstamode == IMESHAPAP_MODE) {
wl_config_t rsdb_mode_cfg = {0, 0};
if (apsta_params->rsdb)
rsdb_mode_cfg.config = 1;
- ANDROID_MSG(("%s: set rsdb_mode %d\n", __FUNCTION__, rsdb_mode_cfg.config));
+ AEXT_INFO(dev->name, "set rsdb_mode %d\n", rsdb_mode_cfg.config);
wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
wl_ext_iovar_setbuf(dev, "rsdb_mode", &rsdb_mode_cfg,
sizeof(rsdb_mode_cfg), iovar_buf, sizeof(iovar_buf), NULL);
wl_ext_ioctl(dev, WLC_SET_AP, &val, sizeof(val), 1);
#ifdef PROP_TXSTATUS_VSDB
#if defined(BCMSDIO)
- if (!apsta_params->rsdb && !disable_proptx) {
+ if (!(FW_SUPPORTED(dhd, rsdb)) && !disable_proptx) {
bool enabled;
dhd_wlfc_get_enable(dhd, &enabled);
if (!enabled) {
#endif
#endif /* PROP_TXSTATUS_VSDB */
}
- else if (apstamode == IAPSTA_MODE) {
+ else if (apstamode == ISTAAP_MODE) {
wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
wl_ext_iovar_setint(dev, "mpc", 0);
wl_ext_iovar_setint(dev, "apsta", 1);
wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
- apsta_params->netif_change = FALSE;
- if (apsta_params->rsdb) {
- bzero(&iface, sizeof(wl_interface_create_t));
- iface.ver = WL_INTERFACE_CREATE_VER;
- iface.flags = WL_INTERFACE_CREATE_AP;
- wl_ext_iovar_getbuf(dev, "interface_create", &iface,
- sizeof(iface), iovar_buf, WLC_IOCTL_SMLEN, NULL);
- } else {
- wl_ext_iovar_setbuf_bsscfg(dev, "ssid", &ssid, sizeof(ssid),
- iovar_buf, WLC_IOCTL_SMLEN, 1, NULL);
- }
- wl_ext_wait_netif_change(apsta_params, TRUE);
+ }
+ else if (apstamode == ISTAGO_MODE) {
+ wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+ wl_ext_iovar_setint(dev, "apsta", 1);
+ wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+ }
+ else if (apstamode == ISTASTA_MODE) {
}
else if (apstamode == IDUALAP_MODE) {
wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
val = 1;
wl_ext_ioctl(dev, WLC_SET_AP, &val, sizeof(val), 1);
- bzero(&iface, sizeof(wl_interface_create_t));
- iface.ver = WL_INTERFACE_CREATE_VER;
- iface.flags = WL_INTERFACE_CREATE_AP;
- apsta_params->netif_change = FALSE;
- wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
- iovar_buf, WLC_IOCTL_SMLEN, NULL);
- wl_ext_wait_netif_change(apsta_params, TRUE);
}
else if (apstamode == ISTAAPAP_MODE) {
- u8 rand_bytes[2] = {0, };
- get_random_bytes(&rand_bytes, sizeof(rand_bytes));
wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
wl_ext_iovar_setint(dev, "mpc", 0);
wl_ext_iovar_setint(dev, "mbss", 1);
wl_ext_iovar_setint(dev, "apsta", 1); // keep 1 as we set in dhd_preinit_ioctls
wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
// don't set WLC_SET_AP to 0, some parameters will be reset, such as bcn_timeout and roam_off
- bzero(&iface, sizeof(wl_interface_create_t));
- iface.ver = WL_INTERFACE_CREATE_VER;
- iface.flags = WL_INTERFACE_CREATE_AP | WL_INTERFACE_MAC_USE;
- memcpy(&iface.mac_addr, dev->dev_addr, ETHER_ADDR_LEN);
- iface.mac_addr.octet[0] |= 0x02;
- iface.mac_addr.octet[5] += 0x01;
- memcpy(&iface.mac_addr.octet[3], rand_bytes, sizeof(rand_bytes));
- apsta_params->netif_change = FALSE;
- wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
- iovar_buf, WLC_IOCTL_SMLEN, NULL);
- wl_ext_wait_netif_change(apsta_params, TRUE);
- bzero(&iface, sizeof(wl_interface_create_t));
- iface.ver = WL_INTERFACE_CREATE_VER;
- iface.flags = WL_INTERFACE_CREATE_AP | WL_INTERFACE_MAC_USE;
- memcpy(&iface.mac_addr, dev->dev_addr, ETHER_ADDR_LEN);
- iface.mac_addr.octet[0] |= 0x02;
- iface.mac_addr.octet[5] += 0x02;
- memcpy(&iface.mac_addr.octet[3], rand_bytes, sizeof(rand_bytes));
- apsta_params->netif_change = FALSE;
- wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
- iovar_buf, WLC_IOCTL_SMLEN, NULL);
- wl_ext_wait_netif_change(apsta_params, TRUE);
}
#ifdef WLMESH
- else if (apstamode == IMESHONLY_MODE) {
+ else if (apstamode == IMESHONLY_MODE || apstamode == ISTAMESH_MODE ||
+ apstamode == IMESHAP_MODE || apstamode == ISTAAPMESH_MODE ||
+ apstamode == IMESHAPAP_MODE) {
int pm = 0;
wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
wl_ext_iovar_setint(dev, "mpc", 0);
+ if (apstamode == IMESHONLY_MODE)
+ wl_ext_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), 1);
+ else
+ wl_ext_iovar_setint(dev, "mbcn", 1);
wl_ext_iovar_setint(dev, "apsta", 1); // keep 1 as we set in dhd_preinit_ioctls
- wl_ext_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), 1);
wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
// don't set WLC_SET_AP to 0, some parameters will be reset, such as bcn_timeout and roam_off
}
- else if (apstamode == IMESHSTA_MODE) {
- wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
- wl_ext_iovar_setint(dev, "mpc", 0);
- wl_ext_iovar_setint(dev, "mbcn", 1);
- wl_ext_iovar_setint(dev, "apsta", 1);
- wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
- bzero(&iface, sizeof(wl_interface_create_t));
- iface.ver = WL_INTERFACE_CREATE_VER;
- iface.flags = WL_INTERFACE_CREATE_STA;
- apsta_params->netif_change = FALSE;
- apsta_params->mesh_creating = TRUE;
- wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
- iovar_buf, WLC_IOCTL_SMLEN, NULL);
- wl_ext_wait_netif_change(apsta_params, TRUE);
- apsta_params->mesh_creating = FALSE;
- }
- else if (apstamode == IMESHAP_MODE) {
- wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
- wl_ext_iovar_setint(dev, "mpc", 0);
- wl_ext_iovar_setint(dev, "mbcn", 1);
- wl_ext_iovar_setint(dev, "apsta", 1); // keep 1 as we set in dhd_preinit_ioctls
- wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
- // don't set WLC_SET_AP to 0, some parameters will be reset, such as bcn_timeout and roam_off
- bzero(&iface, sizeof(wl_interface_create_t));
- iface.ver = WL_INTERFACE_CREATE_VER;
- iface.flags = WL_INTERFACE_CREATE_STA;
- apsta_params->netif_change = FALSE;
- apsta_params->mesh_creating = TRUE;
- wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
- iovar_buf, WLC_IOCTL_SMLEN, NULL);
- wl_ext_wait_netif_change(apsta_params, TRUE);
- apsta_params->mesh_creating = FALSE;
+#endif
+
+ wl_ext_get_ioctl_ver(dev, &apsta_params->ioctl_ver);
+ apsta_params->init = TRUE;
+
+ WL_MSG(dev->name, "apstamode=%d\n", apstamode);
+}
+
+static void
+wl_ext_interface_create(struct net_device *dev, struct wl_apsta_params *apsta_params,
+ struct wl_if_info *cur_if, int iftype, u8 *addr)
+{
+ wl_interface_create_t iface;
+ u8 iovar_buf[WLC_IOCTL_SMLEN];
+
+ bzero(&iface, sizeof(iface));
+ if (addr) {
+ iftype |= WL_INTERFACE_MAC_USE;
}
- else if (apstamode == IMESHAPSTA_MODE) {
- wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
- wl_ext_iovar_setint(dev, "mpc", 0);
- wl_ext_iovar_setint(dev, "mbcn", 1);
- wl_ext_iovar_setint(dev, "apsta", 1); // keep 1 as we set in dhd_preinit_ioctls
- wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
- // don't set WLC_SET_AP to 0, some parameters will be reset, such as bcn_timeout and roam_off
- bzero(&iface, sizeof(wl_interface_create_t));
- iface.ver = WL_INTERFACE_CREATE_VER;
- iface.flags = WL_INTERFACE_CREATE_AP;
- apsta_params->netif_change = FALSE;
- wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
- iovar_buf, WLC_IOCTL_SMLEN, NULL);
- wl_ext_wait_netif_change(apsta_params, TRUE);
- bzero(&iface, sizeof(wl_interface_create_t));
- iface.ver = WL_INTERFACE_CREATE_VER;
- iface.flags = WL_INTERFACE_CREATE_STA;
- apsta_params->netif_change = FALSE;
- apsta_params->mesh_creating = TRUE;
- wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
- iovar_buf, WLC_IOCTL_SMLEN, NULL);
- wl_ext_wait_netif_change(apsta_params, TRUE);
- apsta_params->mesh_creating = FALSE;
+ iface.ver = WL_INTERFACE_CREATE_VER;
+ iface.flags = iftype;
+ if (addr) {
+ memcpy(&iface.mac_addr.octet, addr, ETH_ALEN);
}
- else if (apstamode == IMESHAPAP_MODE) {
- wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
- wl_ext_iovar_setint(dev, "mpc", 0);
- wl_ext_iovar_setint(dev, "mbcn", 1);
- wl_ext_iovar_setint(dev, "apsta", 1); // keep 1 as we set in dhd_preinit_ioctls
- wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
- // don't set WLC_SET_AP to 0, some parameters will be reset, such as bcn_timeout and roam_off
- bzero(&iface, sizeof(wl_interface_create_t));
- iface.ver = WL_INTERFACE_CREATE_VER;
- iface.flags = WL_INTERFACE_CREATE_AP;
- apsta_params->netif_change = FALSE;
- wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
- iovar_buf, WLC_IOCTL_SMLEN, NULL);
- wl_ext_wait_netif_change(apsta_params, TRUE);
- bzero(&iface, sizeof(wl_interface_create_t));
- iface.ver = WL_INTERFACE_CREATE_VER;
- iface.flags = WL_INTERFACE_CREATE_AP;
- apsta_params->netif_change = FALSE;
- wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
- iovar_buf, WLC_IOCTL_SMLEN, NULL);
- wl_ext_wait_netif_change(apsta_params, TRUE);
+ wl_set_isam_status(cur_if, IF_ADDING);
+ wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
+ iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ wl_ext_wait_netif_change(apsta_params, cur_if);
+}
+
+static void
+wl_ext_iapsta_intf_add(struct net_device *dev, struct wl_apsta_params *apsta_params)
+{
+ struct dhd_pub *dhd;
+ apstamode_t apstamode = apsta_params->apstamode;
+ struct wl_if_info *cur_if;
+ wlc_ssid_t ssid = { 0, {0} };
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ wl_p2p_if_t ifreq;
+ struct ether_addr mac_addr;
+
+ dhd = dhd_get_pub(dev);
+ bzero(&mac_addr, sizeof(mac_addr));
+
+ if (apstamode == ISTAAP_MODE) {
+ cur_if = &apsta_params->if_info[IF_VIF];
+ if (FW_SUPPORTED(dhd, rsdb)) {
+ wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_CREATE_AP, NULL);
+ } else {
+ wl_set_isam_status(cur_if, IF_ADDING);
+ wl_ext_iovar_setbuf_bsscfg(dev, "ssid", &ssid, sizeof(ssid),
+ iovar_buf, WLC_IOCTL_SMLEN, 1, NULL);
+ wl_ext_wait_netif_change(apsta_params, cur_if);
+ }
}
-#endif
- else if (apstamode == IGOSTA_MODE) {
- wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
- wl_ext_iovar_setint(dev, "apsta", 1);
- wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+ else if (apstamode == ISTAGO_MODE) {
bzero(&ifreq, sizeof(wl_p2p_if_t));
ifreq.type = htod32(WL_P2P_IF_GO);
- apsta_params->netif_change = FALSE;
+ cur_if = &apsta_params->if_info[IF_VIF];
+ wl_set_isam_status(cur_if, IF_ADDING);
wl_ext_iovar_setbuf(dev, "p2p_ifadd", &ifreq, sizeof(ifreq),
iovar_buf, WLC_IOCTL_SMLEN, NULL);
- wl_ext_wait_netif_change(apsta_params, TRUE);
+ wl_ext_wait_netif_change(apsta_params, cur_if);
}
+ else if (apstamode == ISTASTA_MODE) {
+ cur_if = &apsta_params->if_info[IF_VIF];
+ memcpy(&mac_addr, dev->dev_addr, ETHER_ADDR_LEN);
+ mac_addr.octet[0] |= 0x02;
+ wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_CREATE_STA,
+ (u8*)&mac_addr);
+ }
+ else if (apstamode == IDUALAP_MODE) {
+ cur_if = &apsta_params->if_info[IF_VIF];
+ wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_CREATE_AP, NULL);
+ }
+ else if (apstamode == ISTAAPAP_MODE) {
+ u8 rand_bytes[2] = {0, };
+ get_random_bytes(&rand_bytes, sizeof(rand_bytes));
+ cur_if = &apsta_params->if_info[IF_VIF];
+ memcpy(&mac_addr, dev->dev_addr, ETHER_ADDR_LEN);
+ mac_addr.octet[0] |= 0x02;
+ mac_addr.octet[5] += 0x01;
+ memcpy(&mac_addr.octet[3], rand_bytes, sizeof(rand_bytes));
+ wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_CREATE_AP,
+ (u8*)&mac_addr);
+ cur_if = &apsta_params->if_info[IF_VIF2];
+ memcpy(&mac_addr, dev->dev_addr, ETHER_ADDR_LEN);
+ mac_addr.octet[0] |= 0x02;
+ mac_addr.octet[5] += 0x02;
+ memcpy(&mac_addr.octet[3], rand_bytes, sizeof(rand_bytes));
+ wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_CREATE_AP,
+ (u8*)&mac_addr);
+ }
+#ifdef WLMESH
+ else if (apstamode == ISTAMESH_MODE) {
+ cur_if = &apsta_params->if_info[IF_VIF];
+ wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_CREATE_STA, NULL);
+ }
+ else if (apstamode == IMESHAP_MODE) {
+ cur_if = &apsta_params->if_info[IF_VIF];
+ wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_CREATE_AP, NULL);
+ }
+ else if (apstamode == ISTAAPMESH_MODE) {
+ cur_if = &apsta_params->if_info[IF_VIF];
+ wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_CREATE_AP, NULL);
+ cur_if = &apsta_params->if_info[IF_VIF2];
+ wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_CREATE_STA, NULL);
+ }
+ else if (apstamode == IMESHAPAP_MODE) {
+ cur_if = &apsta_params->if_info[IF_VIF];
+ wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_CREATE_AP, NULL);
+ cur_if = &apsta_params->if_info[IF_VIF2];
+ wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_CREATE_AP, NULL);
+ }
+#endif
- wl_ext_get_ioctl_ver(dev, &apsta_params->ioctl_ver);
- apsta_params->init = TRUE;
-
- ANDROID_MSG(("%s: apstamode=%d\n", __FUNCTION__, apstamode));
}
static int
char *pch, *pick_tmp, *pick_tmp2, *param;
struct wl_apsta_params *apsta_params = dhd->iapsta_params;
int i;
+
+#ifdef WL_STATIC_IF
+ AEXT_ERROR(dev->name, "please remove WL_STATIC_IF from Makefile\n");
+ return -1;
+#endif
if (apsta_params->init) {
- ANDROID_ERROR(("%s: don't init twice\n", __FUNCTION__));
+ AEXT_ERROR(dev->name, "don't init twice\n");
return -1;
}
-
- ANDROID_TRACE(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+ AEXT_TRACE(dev->name, "command=%s, len=%d\n", command, total_len);
pick_tmp = command;
param = bcmstrtok(&pick_tmp, " ", 0); // skip iapsta_init
} else if (!strcmp(pick_tmp2, "ap")) {
apsta_params->apstamode = IAPONLY_MODE;
} else if (!strcmp(pick_tmp2, "sta-ap")) {
- apsta_params->apstamode = IAPSTA_MODE;
+ apsta_params->apstamode = ISTAAP_MODE;
+ } else if (!strcmp(pick_tmp2, "sta-sta")) {
+ apsta_params->apstamode = ISTASTA_MODE;
+ apsta_params->vsdb = TRUE;
} else if (!strcmp(pick_tmp2, "ap-ap")) {
apsta_params->apstamode = IDUALAP_MODE;
} else if (!strcmp(pick_tmp2, "sta-ap-ap")) {
apsta_params->apstamode = ISTAAPAP_MODE;
-#ifdef WLMESH
- } else if (!strcmp(pick_tmp2, "mesh")) {
- apsta_params->apstamode = IMESHONLY_MODE;
- } else if (!strcmp(pick_tmp2, "mesh-sta") ||
- !strcmp(pick_tmp2, "sta-mesh")) {
- apsta_params->apstamode = IMESHSTA_MODE;
- } else if (!strcmp(pick_tmp2, "mesh-ap") ||
- !strcmp(pick_tmp2, "ap-mesh")) {
- apsta_params->apstamode = IMESHAP_MODE;
- } else if (!strcmp(pick_tmp2, "mesh-ap-sta") ||
- !strcmp(pick_tmp2, "sta-ap-mesh") ||
- !strcmp(pick_tmp2, "sta-mesh-ap")) {
- apsta_params->apstamode = IMESHAPSTA_MODE;
- } else if (!strcmp(pick_tmp2, "mesh-ap-ap") ||
- !strcmp(pick_tmp2, "ap-ap-mesh")) {
- apsta_params->apstamode = IMESHAPAP_MODE;
-#endif
} else if (!strcmp(pick_tmp2, "apsta")) {
- apsta_params->apstamode = IAPSTA_MODE;
+ apsta_params->apstamode = ISTAAP_MODE;
apsta_params->if_info[IF_PIF].ifmode = ISTA_MODE;
apsta_params->if_info[IF_VIF].ifmode = IAP_MODE;
} else if (!strcmp(pick_tmp2, "dualap")) {
apsta_params->apstamode = IDUALAP_MODE;
apsta_params->if_info[IF_PIF].ifmode = IAP_MODE;
apsta_params->if_info[IF_VIF].ifmode = IAP_MODE;
- } else if (!strcmp(pick_tmp2, "gosta")) {
+ } else if (!strcmp(pick_tmp2, "sta-go") ||
+ !strcmp(pick_tmp2, "gosta")) {
if (!FW_SUPPORTED(dhd, p2p)) {
return -1;
}
- apsta_params->apstamode = IGOSTA_MODE;
+ apsta_params->apstamode = ISTAGO_MODE;
apsta_params->if_info[IF_PIF].ifmode = ISTA_MODE;
apsta_params->if_info[IF_VIF].ifmode = IAP_MODE;
+#ifdef WLMESH
+ } else if (!strcmp(pick_tmp2, "mesh")) {
+ apsta_params->apstamode = IMESHONLY_MODE;
+ } else if (!strcmp(pick_tmp2, "sta-mesh")) {
+ apsta_params->apstamode = ISTAMESH_MODE;
+ } else if (!strcmp(pick_tmp2, "sta-ap-mesh")) {
+ apsta_params->apstamode = ISTAAPMESH_MODE;
+ } else if (!strcmp(pick_tmp2, "mesh-ap")) {
+ apsta_params->apstamode = IMESHAP_MODE;
+ } else if (!strcmp(pick_tmp2, "mesh-ap-ap")) {
+ apsta_params->apstamode = IMESHAPAP_MODE;
+#endif
} else {
- ANDROID_ERROR(("%s: mode [sta|ap|sta-ap|ap-ap]\n", __FUNCTION__));
+ AEXT_ERROR(dev->name, "mode [sta|ap|sta-ap|ap-ap]\n");
return -1;
}
pch = bcmstrtok(&pick_tmp2, " -", 0);
#ifdef WLMESH
else if (!strcmp(pch, "mesh")) {
if (dhd->conf->fw_type != FW_TYPE_MESH) {
- ANDROID_ERROR(("%s: wrong fw type\n", __FUNCTION__));
+ AEXT_ERROR(dev->name, "wrong fw type\n");
return -1;
}
apsta_params->if_info[i].ifmode = IMESH_MODE;
} else if (!strcmp(pch, "n")) {
apsta_params->rsdb = FALSE;
} else {
- ANDROID_ERROR(("%s: rsdb [y|n]\n", __FUNCTION__));
+ AEXT_ERROR(dev->name, "rsdb [y|n]\n");
return -1;
}
}
} else if (!strcmp(pch, "n")) {
apsta_params->vsdb = FALSE;
} else {
- ANDROID_ERROR(("%s: vsdb [y|n]\n", __FUNCTION__));
+ AEXT_ERROR(dev->name, "vsdb [y|n]\n");
return -1;
}
}
if (pch)
strcpy(apsta_params->if_info[IF_VIF].ifname, pch);
else {
- ANDROID_ERROR(("%s: vifname [wlan1]\n", __FUNCTION__));
+ AEXT_ERROR(dev->name, "vifname [wlan1]\n");
return -1;
}
}
}
if (apsta_params->apstamode == 0) {
- ANDROID_ERROR(("%s: mode [sta|ap|sta-ap|ap-ap]\n", __FUNCTION__));
+ AEXT_ERROR(dev->name, "mode [sta|ap|sta-ap|ap-ap]\n");
return -1;
}
wl_ext_iapsta_preinit(dev, apsta_params);
+ wl_ext_iapsta_intf_add(dev, apsta_params);
return 0;
}
else if (!strcmp(pick_tmp, "bgnac"))
cur_if->bgnmode = IEEE80211BGNAC;
else {
- ANDROID_ERROR(("%s: bgnmode [b|g|bg|bgn|bgnac]\n", __FUNCTION__));
+ AEXT_ERROR(cur_if->dev->name, "bgnmode [b|g|bg|bgn|bgnac]\n");
return -1;
}
} else if (!strcmp(row->name, " hidden ")) {
else if (!strcmp(pick_tmp, "y"))
cur_if->hidden = 1;
else {
- ANDROID_ERROR(("%s: hidden [y|n]\n", __FUNCTION__));
+ AEXT_ERROR(cur_if->dev->name, "hidden [y|n]\n");
return -1;
}
} else if (!strcmp(row->name, " maxassoc ")) {
else if (!strcmp(pick_tmp, "sae"))
cur_if->amode = AUTH_SAE;
else {
- ANDROID_ERROR(("%s: amode [open|shared|wpapsk|wpa2psk|wpawpa2psk]\n",
- __FUNCTION__));
+ AEXT_ERROR(cur_if->dev->name, "amode [open|shared|wpapsk|wpa2psk|wpawpa2psk]\n");
return -1;
}
} else if (!strcmp(row->name, " emode ")) {
else if (!strcmp(pick_tmp, "tkipaes"))
cur_if->emode = ENC_TKIPAES;
else {
- ANDROID_ERROR(("%s: emode [none|wep|tkip|aes|tkipaes]\n",
- __FUNCTION__));
+ AEXT_ERROR(cur_if->dev->name, "emode [none|wep|tkip|aes|tkipaes]\n");
return -1;
}
} else if (!strcmp(row->name, " key ")) {
char *pch, *pch2, *pick_tmp, *pick_next=NULL, *param;
struct wl_apsta_params *apsta_params = dhd->iapsta_params;
char ifname[IFNAMSIZ+1];
- struct wl_if_info *cur_if = NULL;
+ struct wl_if_info *cur_if = NULL, *tmp_if = NULL;
if (!apsta_params->init) {
- ANDROID_ERROR(("%s: please init first\n", __FUNCTION__));
+ AEXT_ERROR(dev->name, "please init first\n");
return -1;
}
- ANDROID_TRACE(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+ AEXT_TRACE(dev->name, "command=%s, len=%d\n", command, total_len);
pick_tmp = command;
param = bcmstrtok(&pick_tmp, " ", 0); // skip iapsta_config
+ mutex_lock(&apsta_params->usr_sync);
+
while (pick_tmp != NULL) {
memset(ifname, 0, IFNAMSIZ+1);
if (!strncmp(pick_tmp, "ifname ", strlen("ifname "))) {
if (pch && pch2) {
strncpy(ifname, pch, pch2-pch);
} else {
- ANDROID_ERROR(("%s: ifname [wlanX]\n", __FUNCTION__));
- return -1;
+ AEXT_ERROR(dev->name, "ifname [wlanX]\n");
+ ret = -1;
+ break;
}
for (i=0; i<MAX_IF_NUM; i++) {
- if (apsta_params->if_info[i].dev &&
- !strcmp(apsta_params->if_info[i].dev->name, ifname)) {
- cur_if = &apsta_params->if_info[i];
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if->dev && !strcmp(tmp_if->dev->name, ifname)) {
+ cur_if = tmp_if;
break;
}
}
if (!cur_if) {
- ANDROID_ERROR(("%s: wrong ifname=%s in apstamode=%d\n",
- __FUNCTION__, ifname, apsta_params->apstamode));
- return -1;
+ AEXT_ERROR(dev->name, "wrong ifname=%s in apstamode=%d\n",
+ ifname, apsta_params->apstamode);
+ ret = -1;
+ break;
}
ret = wl_ext_parse_config(cur_if, pick_tmp, &pick_next);
if (ret)
- return -1;
+ break;
pick_tmp = pick_next;
} else {
- ANDROID_ERROR(("%s: first arg must be ifname\n", __FUNCTION__));
- return -1;
+ AEXT_ERROR(dev->name, "first arg must be ifname\n");
+ ret = -1;
+ break;
}
}
- return 0;
+ mutex_unlock(&apsta_params->usr_sync);
+
+ return ret;
}
static int
-wl_ext_isam_status(struct net_device *dev, char *command, int total_len)
+wl_ext_assoclist(struct net_device *dev, char *data, char *command,
+ int total_len)
{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
- int i;
- bool now_if;
- struct wl_if_info *tmp_if;
- uint16 chan = 0;
- wlc_ssid_t ssid = { 0, {0} };
- char amode[16], emode[16];
- int bytes_written=0;
-
- if (apsta_params->init == FALSE) {
- return 0;
- }
+ int ret = 0, i, maxassoc = 0, bytes_written = 0;
+ char mac_buf[MAX_NUM_OF_ASSOCLIST *
+ sizeof(struct ether_addr) + sizeof(uint)] = {0};
+ struct maclist *assoc_maclist = (struct maclist *)mac_buf;
- ANDROID_INFO(("****************************\n"));
- ANDROID_INFO(("%s: apstamode=%d\n", __FUNCTION__, apsta_params->apstamode));
- for (i=0; i<MAX_IF_NUM; i++) {
- now_if = FALSE;
- memset(&ssid, 0, sizeof(ssid));
- memset(amode, 0, sizeof(amode));
- memset(emode, 0, sizeof(emode));
- tmp_if = &apsta_params->if_info[i];
- if (dev == tmp_if->dev)
- now_if = TRUE;
- if (tmp_if->dev) {
- chan = wl_ext_get_chan(apsta_params, tmp_if->dev);
- if (chan) {
- wl_ext_ioctl(tmp_if->dev, WLC_GET_SSID, &ssid, sizeof(ssid), 0);
- wl_ext_get_amode(tmp_if, amode);
- wl_ext_get_emode(tmp_if, emode);
- ANDROID_INFO(("%s[%c-%c%s]: chan %3d, amode %s, emode %s, SSID \"%s\"\n",
- tmp_if->ifname, tmp_if->prefix, chan?'E':'D',
- now_if?"*":" ", chan, amode, emode, ssid.SSID));
- if (command)
- bytes_written += snprintf(command+bytes_written, total_len,
- "%s[%c-%c%s]: chan %3d, amode %s, emode %s, SSID \"%s\"\n",
- tmp_if->ifname, tmp_if->prefix, chan?'E':'D',
- now_if?"*":" ", chan, amode, emode, ssid.SSID);
- } else {
- ANDROID_INFO(("%s[%c-%c%s]:\n",
- tmp_if->ifname, tmp_if->prefix, chan?'E':'D', now_if?"*":" "));
- if (command)
- bytes_written += snprintf(command+bytes_written, total_len, "%s[%c-%c%s]:\n",
- tmp_if->ifname, tmp_if->prefix, chan?'E':'D', now_if?"*":" ");
- }
- }
+ assoc_maclist->count = htod32(MAX_NUM_OF_ASSOCLIST);
+ ret = wl_ext_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, sizeof(mac_buf), 0);
+ if (ret)
+ return -1;
+ maxassoc = dtoh32(assoc_maclist->count);
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "%2s: %12s",
+ "no", "------addr------");
+ for (i=0; i<maxassoc; i++) {
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "\n%2d: %pM", i, &assoc_maclist->ea[i]);
}
- ANDROID_INFO(("****************************\n"));
return bytes_written;
}
" %5s : %4s : %4s : %11s : %4s",
"no", "------addr------ ", "l.aid", "state", "p.aid",
"mppid", "llid", "plid", "entry_state", "rssi");
- for (count = 0; count < peer_results_count; count++) {
+ for (count=0; count < peer_results_count; count++) {
if (mpi_ext->entry_state != MESH_SELF_PEER_ENTRY_STATE_TIMEDOUT) {
bytes_written += snprintf(command+bytes_written, total_len,
"\n%2d: %pM : 0x%4x : %6s : 0x%4x :"
}
mpi_ext++;
}
- ANDROID_INFO(("\n%s\n", command));
return bytes_written;
}
static int
-wl_ext_mesh_peer_status(struct net_device *dev, char *data, char *command,
- int total_len)
+wl_mesh_get_peer_results(struct net_device *dev, char *buf, int len)
{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
- int i;
- struct wl_if_info *cur_if;
- int bytes_written = -1;
int indata, inlen;
- char *dump_buf;
mesh_peer_info_dump_t *peer_results;
- mesh_peer_info_ext_t *mpi_ext;
+ int ret;
- if (apsta_params->init == FALSE) {
- ANDROID_ERROR(("%s: please init first\n", __FUNCTION__));
- return -1;
+ memset(buf, 0, len);
+ peer_results = (mesh_peer_info_dump_t *)buf;
+ indata = htod32(len);
+ inlen = 4;
+ ret = wl_ext_iovar_getbuf(dev, "mesh_peer_status", &indata, inlen, buf, len, NULL);
+ if (!ret) {
+ peer_results = (mesh_peer_info_dump_t *)buf;
+ ret = peer_results->count;
}
+ return ret;
+}
+
+static int
+wl_ext_mesh_peer_status(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ int i;
+ struct wl_if_info *cur_if;
+ mesh_peer_info_dump_t *peer_results;
+ mesh_peer_info_ext_t *mpi_ext;
+ char *peer_buf = NULL;
+ int peer_len = WLC_IOCTL_MAXLEN;
+ int dump_written = 0, ret;
+
if (!data) {
- dump_buf = kmalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
- if (dump_buf == NULL) {
- ANDROID_ERROR(("Failed to allocate buffer of %d bytes\n", WLC_IOCTL_MAXLEN));
+ peer_buf = kmalloc(peer_len, GFP_KERNEL);
+ if (peer_buf == NULL) {
+ AEXT_ERROR(dev->name, "Failed to allocate buffer of %d bytes\n",
+ peer_len);
return -1;
}
- memset(dump_buf, 0, WLC_IOCTL_MAXLEN);
-
for (i=0; i<MAX_IF_NUM; i++) {
cur_if = &apsta_params->if_info[i];
- if (dev == cur_if->dev && cur_if->ifmode == IMESH_MODE) {
- peer_results = (mesh_peer_info_dump_t *)dump_buf;
- indata = htod32(WLC_IOCTL_MAXLEN);
- inlen = 4;
- bytes_written = wl_ext_iovar_getbuf(dev, "mesh_peer_status",
- &indata, inlen, dump_buf, WLC_IOCTL_MAXLEN, NULL);
- if (!bytes_written) {
- peer_results = (mesh_peer_info_dump_t *)dump_buf;
+ if (cur_if && dev == cur_if->dev && cur_if->ifmode == IMESH_MODE) {
+ memset(peer_buf, 0, peer_len);
+ ret = wl_mesh_get_peer_results(dev, peer_buf, peer_len);
+ if (ret >= 0) {
+ peer_results = (mesh_peer_info_dump_t *)peer_buf;
mpi_ext = (mesh_peer_info_ext_t *)peer_results->mpi_ext;
- bytes_written = wl_mesh_print_peer_info(mpi_ext, peer_results->count,
- command, total_len);
+ dump_written += wl_mesh_print_peer_info(mpi_ext,
+ peer_results->count, command+dump_written,
+ total_len-dump_written);
}
- } else if (dev == cur_if->dev) {
- ANDROID_ERROR(("%s: %s[%c] is not mesh interface\n",
- __FUNCTION__, cur_if->ifname, cur_if->prefix));
+ } else if (cur_if && dev == cur_if->dev) {
+ AEXT_ERROR(dev->name, "[%s][%c] is not mesh interface\n",
+ cur_if->ifname, cur_if->prefix);
}
}
- kfree(dump_buf);
}
-
- return bytes_written;
+
+ if (peer_buf)
+ kfree(peer_buf);
+ return dump_written;
}
#endif
+static int
+wl_ext_isam_status(struct net_device *dev, char *command, int total_len)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ int i;
+ bool now_if;
+ struct wl_if_info *tmp_if;
+ uint16 chan = 0;
+ wlc_ssid_t ssid = { 0, {0} };
+ struct ether_addr bssid;
+ scb_val_t scb_val;
+ char sec[32];
+ u32 chanspec = 0;
+ char *dump_buf = NULL;
+ int dump_len = WLC_IOCTL_MEDLEN;
+ int dump_written = 0;
+
+ if (command || android_msg_level & ANDROID_INFO_LEVEL) {
+ if (command) {
+ dump_buf = command;
+ dump_len = total_len;
+ } else {
+ dump_buf = kmalloc(dump_len, GFP_KERNEL);
+ if (dump_buf == NULL) {
+ AEXT_ERROR(dev->name, "Failed to allocate buffer of %d bytes\n",
+ dump_len);
+ return -1;
+ }
+ }
+ dump_written += snprintf(dump_buf+dump_written, dump_len,
+ "apstamode=%d", apsta_params->apstamode);
+ for (i=0; i<MAX_IF_NUM; i++) {
+ now_if = FALSE;
+ memset(&ssid, 0, sizeof(ssid));
+ memset(&bssid, 0, sizeof(bssid));
+ memset(&scb_val, 0, sizeof(scb_val));
+ tmp_if = &apsta_params->if_info[i];
+ if (dev == tmp_if->dev)
+ now_if = TRUE;
+ if (tmp_if->dev) {
+ chan = wl_ext_get_chan(apsta_params, tmp_if->dev);
+ if (chan) {
+ wl_ext_ioctl(tmp_if->dev, WLC_GET_SSID, &ssid, sizeof(ssid), 0);
+ wldev_ioctl(tmp_if->dev, WLC_GET_BSSID, &bssid, sizeof(bssid), 0);
+ wldev_ioctl(tmp_if->dev, WLC_GET_RSSI, &scb_val,
+ sizeof(scb_val_t), 0);
+ chanspec = wl_ext_get_chanspec(apsta_params, tmp_if->dev);
+ wl_ext_get_sec(tmp_if->dev, tmp_if->ifmode, sec, sizeof(sec));
+ dump_written += snprintf(dump_buf+dump_written, dump_len,
+ "\n[dhd-%s][%c-%c%s]: bssid=%pM, chan=%3d(0x%x), "
+ "rssi=%3d, sec=%-15s, SSID=\"%s\"",
+ tmp_if->ifname, tmp_if->prefix, chan?'E':'D',
+ now_if?"*":" ", &bssid, chan, chanspec,
+ dtoh32(scb_val.val), sec, ssid.SSID);
+ if (tmp_if->ifmode == IAP_MODE) {
+ dump_written += snprintf(dump_buf+dump_written, dump_len, "\n");
+ dump_written += wl_ext_assoclist(tmp_if->dev, NULL,
+ dump_buf+dump_written, dump_len-dump_written);
+ }
+#ifdef WLMESH
+ else if (tmp_if->ifmode == IMESH_MODE) {
+ dump_written += snprintf(dump_buf+dump_written, dump_len, "\n");
+ dump_written += wl_ext_mesh_peer_status(tmp_if->dev, NULL,
+ dump_buf+dump_written, dump_len-dump_written);
+ }
+#endif
+ } else {
+ dump_written += snprintf(dump_buf+dump_written, dump_len,
+ "\n[dhd-%s][%c-%c%s]:",
+ tmp_if->ifname, tmp_if->prefix, chan?'E':'D',
+ now_if?"*":" ");
+ }
+ }
+ }
+ AEXT_INFO(dev->name, "%s\n", dump_buf);
+ }
+
+ if (!command && dump_buf)
+ kfree(dump_buf);
+ return dump_written;
+}
+
static int
wl_ext_if_down(struct wl_apsta_params *apsta_params, struct wl_if_info *cur_if)
{
} bss_setbuf;
apstamode_t apstamode = apsta_params->apstamode;
- ANDROID_MSG(("%s: %s[%c] Turning off\n", __FUNCTION__,
- cur_if->ifname, cur_if->prefix));
+ WL_MSG(cur_if->ifname, "[%c] Turning off...\n", cur_if->prefix);
if (cur_if->ifmode == ISTA_MODE) {
wl_ext_ioctl(cur_if->dev, WLC_DISASSOC, NULL, 0, 1);
wl_ext_iovar_setbuf(cur_if->dev, "bss", &bss_setbuf, sizeof(bss_setbuf),
iovar_buf, WLC_IOCTL_SMLEN, NULL);
}
+ wl_clr_isam_status(cur_if, AP_CREATED);
return 0;
}
} bss_setbuf;
apstamode_t apstamode = apsta_params->apstamode;
chanspec_t fw_chspec;
+ u32 timeout;
+ wlc_ssid_t ssid = { 0, {0} };
+ uint16 chan = 0;
if (cur_if->ifmode != IAP_MODE) {
- ANDROID_ERROR(("%s: Wrong ifmode on %s[%c]\n", __FUNCTION__,
- cur_if->ifname, cur_if->prefix));
+ AEXT_ERROR(cur_if->ifname, "Wrong ifmode\n");
return 0;
}
- if (cur_if->channel >= 52 && cur_if->channel <= 148) {
- ANDROID_MSG(("%s: %s[%c] skip DFS channel %d\n", __FUNCTION__,
- cur_if->ifname, cur_if->prefix, cur_if->channel));
+ if (wl_ext_dfs_chan(apsta_params, cur_if->dev, cur_if->channel)) {
+ WL_MSG(cur_if->ifname, "[%c] skip DFS channel %d\n",
+ cur_if->prefix, cur_if->channel);
return 0;
}
- ANDROID_MSG(("%s: %s[%c] Turning on\n", __FUNCTION__,
- cur_if->ifname, cur_if->prefix));
+ WL_MSG(cur_if->ifname, "[%c] Turning on...\n", cur_if->prefix);
wl_ext_set_chanspec(cur_if->dev, apsta_params->ioctl_ver, cur_if->channel,
&fw_chspec);
+ wl_clr_isam_status(cur_if, AP_CREATED);
+ wl_set_isam_status(cur_if, AP_CREATING);
if (apstamode == IAPONLY_MODE) {
wl_ext_ioctl(cur_if->dev, WLC_UP, NULL, 0, 1);
} else {
sizeof(bss_setbuf), iovar_buf, WLC_IOCTL_SMLEN, NULL);
}
- OSL_SLEEP(500);
+ timeout = wait_event_interruptible_timeout(apsta_params->netif_change_event,
+ wl_get_isam_status(cur_if, AP_CREATED),
+ msecs_to_jiffies(MAX_AP_LINK_WAIT_TIME));
+ if (timeout <= 0 || !wl_get_isam_status(cur_if, AP_CREATED)) {
+ wl_ext_if_down(apsta_params, cur_if);
+ WL_MSG(cur_if->ifname, "[%c] failed to up with SSID: \"%s\"\n",
+ cur_if->prefix, cur_if->ssid);
+ } else {
+ wl_ext_ioctl(cur_if->dev, WLC_GET_SSID, &ssid, sizeof(ssid), 0);
+ chan = wl_ext_get_chan(apsta_params, cur_if->dev);
+ WL_MSG(cur_if->ifname, "[%c] enabled with SSID: \"%s\" on channel %d\n",
+ cur_if->prefix, ssid.SSID, chan);
+ }
+ wl_clr_isam_status(cur_if, AP_CREATING);
+
wl_ext_isam_status(cur_if->dev, NULL, 0);
return 0;
}
static int
-wl_ext_iapsta_disable(struct net_device *dev, char *command, int total_len)
+wl_ext_disable_iface(struct net_device *dev, char *ifname)
{
struct dhd_pub *dhd = dhd_get_pub(dev);
- char *pch, *pick_tmp, *param;
+ int i;
s8 iovar_buf[WLC_IOCTL_SMLEN];
wlc_ssid_t ssid = { 0, {0} };
scb_val_t scbval;
} bss_setbuf;
struct wl_apsta_params *apsta_params = dhd->iapsta_params;
apstamode_t apstamode = apsta_params->apstamode;
- char ifname[IFNAMSIZ+1];
- struct wl_if_info *cur_if = NULL;
- int i;
-
- if (!apsta_params->init) {
- ANDROID_ERROR(("%s: please init first\n", __FUNCTION__));
- return -1;
- }
-
- ANDROID_TRACE(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
-
- pick_tmp = command;
- param = bcmstrtok(&pick_tmp, " ", 0); // skip iapsta_disable
- param = bcmstrtok(&pick_tmp, " ", 0);
- while (param != NULL) {
- if (!strcmp(param, "ifname")) {
- pch = bcmstrtok(&pick_tmp, " ", 0);
- if (pch)
- strcpy(ifname, pch);
- else {
- ANDROID_ERROR(("%s: ifname [wlanX]\n", __FUNCTION__));
- return -1;
- }
- }
- param = bcmstrtok(&pick_tmp, " ", 0);
- }
+ struct wl_if_info *cur_if = NULL, *tmp_if = NULL;
for (i=0; i<MAX_IF_NUM; i++) {
- if (apsta_params->if_info[i].dev &&
- !strcmp(apsta_params->if_info[i].dev->name, ifname)) {
- cur_if = &apsta_params->if_info[i];
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if->dev && !strcmp(tmp_if->dev->name, ifname)) {
+ cur_if = tmp_if;
break;
}
}
if (!cur_if) {
- ANDROID_ERROR(("%s: wrong ifname=%s or dev not ready\n", __FUNCTION__, ifname));
+ AEXT_ERROR(dev->name, "wrong ifname=%s or dev not ready\n", ifname);
return -1;
}
- ANDROID_MSG(("%s: %s[%c] Disabling\n", __FUNCTION__, ifname, cur_if->prefix));
+ mutex_lock(&apsta_params->usr_sync);
+ WL_MSG(ifname, "[%c] Disabling...\n", cur_if->prefix);
if (cur_if->ifmode == ISTA_MODE) {
wl_ext_ioctl(cur_if->dev, WLC_DISASSOC, NULL, 0, 1);
wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
wl_ext_ioctl(dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1); // reset ssid
wl_ext_iovar_setint(dev, "mpc", 1);
- } else if ((apstamode==IAPSTA_MODE || apstamode==IGOSTA_MODE) &&
+ } else if ((apstamode==ISTAAP_MODE || apstamode==ISTAGO_MODE) &&
cur_if->ifmode == IAP_MODE) {
bss_setbuf.cfg = 0xffffffff;
bss_setbuf.val = htod32(0);
wl_ext_iovar_setbuf(cur_if->dev, "bss", &bss_setbuf, sizeof(bss_setbuf),
iovar_buf, WLC_IOCTL_SMLEN, NULL);
#ifdef WLMESH
- } else if (apstamode == IMESHSTA_MODE || apstamode == IMESHAP_MODE ||
- apstamode == IMESHAPSTA_MODE || apstamode == IMESHAPAP_MODE ||
+ } else if (apstamode == ISTAMESH_MODE || apstamode == IMESHAP_MODE ||
+ apstamode == ISTAAPMESH_MODE || apstamode == IMESHAPAP_MODE ||
apstamode == ISTAAPAP_MODE) {
bss_setbuf.cfg = 0xffffffff;
bss_setbuf.val = htod32(0);
}
#ifdef WLMESH
if ((cur_if->ifmode == IMESH_MODE) &&
- (apstamode == IMESHSTA_MODE || apstamode == IMESHAP_MODE ||
- apstamode == IMESHAPSTA_MODE || apstamode == IMESHAPAP_MODE)) {
+ (apstamode == ISTAMESH_MODE || apstamode == IMESHAP_MODE ||
+ apstamode == ISTAAPMESH_MODE || apstamode == IMESHAPAP_MODE)) {
int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
for (i=0; i<MAX_IF_NUM; i++) {
- if (apsta_params->if_info[i].dev &&
- apsta_params->if_info[i].ifmode == ISTA_MODE) {
- struct wl_if_info *tmp_if = &apsta_params->if_info[i];
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if->dev && tmp_if->ifmode == ISTA_MODE) {
wl_ext_ioctl(tmp_if->dev, WLC_SET_SCAN_CHANNEL_TIME,
&scan_assoc_time, sizeof(scan_assoc_time), 1);
}
}
#endif
- cur_if->ifstate = IF_STATE_DISALBE;
-
- ANDROID_MSG(("%s: %s[%c] disabled\n", __FUNCTION__, ifname, cur_if->prefix));
+ wl_clr_isam_status(cur_if, AP_CREATED);
+ WL_MSG(ifname, "[%c] Exit\n", cur_if->prefix);
+ mutex_unlock(&apsta_params->usr_sync);
return 0;
}
+static int
+wl_ext_iapsta_disable(struct net_device *dev, char *command, int total_len)
+{
+ int ret = 0;
+ char *pch, *pick_tmp, *param;
+ char ifname[IFNAMSIZ+1];
+
+ AEXT_TRACE(dev->name, "command=%s, len=%d\n", command, total_len);
+
+ pick_tmp = command;
+ param = bcmstrtok(&pick_tmp, " ", 0); // skip iapsta_disable
+ param = bcmstrtok(&pick_tmp, " ", 0);
+ while (param != NULL) {
+ if (!strcmp(param, "ifname")) {
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ if (pch) {
+ strcpy(ifname, pch);
+ ret = wl_ext_disable_iface(dev, ifname);
+ if (ret)
+ return ret;
+ }
+ else {
+ AEXT_ERROR(dev->name, "ifname [wlanX]\n");
+ return -1;
+ }
+ }
+ param = bcmstrtok(&pick_tmp, " ", 0);
+ }
+
+ return ret;
+}
+
static bool
wl_ext_diff_band(uint16 chan1, uint16 chan2)
{
target_chan = wl_ext_get_chan(apsta_params, target_if->dev);
if (target_chan) {
- ANDROID_INFO(("%s: cur_chan=%d, target_chan=%d\n", __FUNCTION__,
- cur_chan, target_chan));
+ AEXT_INFO(cur_if->ifname, "cur_chan=%d, target_chan=%d\n",
+ cur_chan, target_chan);
if (wl_ext_diff_band(cur_chan, target_chan)) {
if (!apsta_params->rsdb)
return target_chan;
} else {
- if (target_chan != cur_chan)
+ if (cur_chan != target_chan)
return target_chan;
}
}
}
static int
-wl_ext_triger_csa(struct wl_apsta_params *apsta_params, struct wl_if_info *cur_if)
+wl_ext_rsdb_core_conflict(struct wl_apsta_params *apsta_params,
+ struct wl_if_info *cur_if)
+{
+ struct wl_if_info *tmp_if;
+ uint16 cur_chan, tmp_chan;
+ int i;
+
+ if (apsta_params->rsdb) {
+ cur_chan = wl_ext_get_chan(apsta_params, cur_if->dev);
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if != cur_if && wl_get_isam_status(tmp_if, IF_READY) &&
+ tmp_if->prio > cur_if->prio) {
+ tmp_chan = wl_ext_get_chan(apsta_params, tmp_if->dev);
+ if (!tmp_chan)
+ continue;
+ if (wl_ext_diff_band(cur_chan, tmp_chan) &&
+ wl_ext_diff_band(cur_chan, cur_if->channel))
+ return TRUE;
+ else if (!wl_ext_diff_band(cur_chan, tmp_chan) &&
+ wl_ext_diff_band(cur_chan, cur_if->channel))
+ return TRUE;
+ }
+ }
+ }
+ return FALSE;
+}
+
+static int
+wl_ext_trigger_csa(struct wl_apsta_params *apsta_params, struct wl_if_info *cur_if)
{
s8 iovar_buf[WLC_IOCTL_SMLEN];
+ bool core_conflict = FALSE;
if (apsta_params->csa & CSA_DRV_BIT &&
(cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE)) {
if (!cur_if->channel) {
- ANDROID_MSG(("%s: %s[%c] skip channel %d\n", __FUNCTION__,
- cur_if->ifname, cur_if->prefix, cur_if->channel));
- } else if (cur_if->channel >= 52 && cur_if->channel <= 148) {
- ANDROID_MSG(("%s: %s[%c] skip DFS channel %d\n", __FUNCTION__,
- cur_if->ifname, cur_if->prefix, cur_if->channel));
+ WL_MSG(cur_if->ifname, "[%c] skip channel %d\n",
+ cur_if->prefix, cur_if->channel);
+ } else if (wl_ext_dfs_chan(apsta_params, cur_if->dev, cur_if->channel)) {
+ WL_MSG(cur_if->ifname, "[%c] skip DFS channel %d\n",
+ cur_if->prefix, cur_if->channel);
wl_ext_if_down(apsta_params, cur_if);
} else {
wl_chan_switch_t csa_arg;
memset(&csa_arg, 0, sizeof(csa_arg));
csa_arg.mode = 1;
csa_arg.count = 3;
- csa_arg.chspec = wl_ext_get_chanspec(apsta_params, cur_if->dev,
+ csa_arg.chspec = wl_ext_chan_to_chanspec(apsta_params, cur_if->dev,
cur_if->channel);
- if (csa_arg.chspec) {
- ANDROID_MSG(("%s: Trigger CSA to channel %d(0x%x)\n", __FUNCTION__,
- cur_if->channel, csa_arg.chspec));
+ core_conflict = wl_ext_rsdb_core_conflict(apsta_params, cur_if);
+ if (core_conflict) {
+ WL_MSG(cur_if->ifname, "[%c] Skip CSA due to rsdb core conflict\n",
+ cur_if->prefix);
+ } else if (csa_arg.chspec) {
+ WL_MSG(cur_if->ifname, "[%c] Trigger CSA to channel %d(0x%x)\n",
+ cur_if->prefix, cur_if->channel, csa_arg.chspec);
+ wl_set_isam_status(cur_if, AP_CREATING);
wl_ext_iovar_setbuf(cur_if->dev, "csa", &csa_arg, sizeof(csa_arg),
iovar_buf, sizeof(iovar_buf), NULL);
OSL_SLEEP(500);
+ wl_clr_isam_status(cur_if, AP_CREATING);
wl_ext_isam_status(cur_if->dev, NULL, 0);
} else {
- ANDROID_ERROR(("%s: fail to get chanspec\n", __FUNCTION__));
+ AEXT_ERROR(cur_if->ifname, "fail to get chanspec\n");
}
}
}
max_prio = cur_if->prio;
for (i=0; i<MAX_IF_NUM; i++) {
tmp_if = &apsta_params->if_info[i];
- if (tmp_if->ifstate >= IF_STATE_INIT && cur_if != tmp_if &&
+ if (cur_if != tmp_if && wl_get_isam_status(tmp_if, IF_READY) &&
tmp_if->prio > max_prio) {
tmp_chan = wl_ext_get_vsdb_chan(apsta_params, cur_if, tmp_if);
if (tmp_chan) {
tmp_chan = wl_ext_get_chan(apsta_params, cur_if->dev);
if (apsta_params->rsdb && tmp_chan &&
wl_ext_diff_band(tmp_chan, target_chan)) {
- ANDROID_MSG(("%s: %s[%c] keep on current channel %d\n", __FUNCTION__,
- cur_if->ifname, cur_if->prefix, tmp_chan));
+ WL_MSG(cur_if->ifname, "[%c] keep on current channel %d\n",
+ cur_if->prefix, tmp_chan);
cur_if->channel = 0;
} else {
- ANDROID_MSG(("%s: %s[%c] channel=%d => %s[%c] channel=%d\n", __FUNCTION__,
- cur_if->ifname, cur_if->prefix, cur_if->channel,
- target_if->ifname, target_if->prefix, target_chan));
+ WL_MSG(cur_if->ifname, "[%c] channel=%d => %s[%c] channel=%d\n",
+ cur_if->prefix, cur_if->channel,
+ target_if->ifname, target_if->prefix, target_chan);
cur_if->channel = target_chan;
}
}
exit:
if ((cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) &&
- (cur_if->channel >= 52 && cur_if->channel <= 148)) {
- ANDROID_MSG(("%s: %s[%c] skip DFS channel %d\n", __FUNCTION__,
- cur_if->ifname, cur_if->prefix, cur_if->channel));
+ wl_ext_dfs_chan(apsta_params, cur_if->dev, cur_if->channel)) {
+ WL_MSG(cur_if->ifname, "[%c] skip DFS channel %d\n",
+ cur_if->prefix, cur_if->channel);
cur_if->channel = 0;
}
cur_prio = cur_if->prio;
for (i=0; i<MAX_IF_NUM; i++) {
tmp_if = &apsta_params->if_info[i];
- if (tmp_if->ifstate >= IF_STATE_INIT && cur_if != tmp_if &&
+ if (cur_if != tmp_if && wl_get_isam_status(tmp_if, IF_READY) &&
tmp_if->prio >= max_prio && tmp_if->prio <= cur_prio) {
tmp_chan = wl_ext_get_vsdb_chan(apsta_params, cur_if, tmp_if);
if (tmp_chan) {
}
if (target_if) {
- ANDROID_MSG(("%s: %s channel=%d => %s channel=%d\n", __FUNCTION__,
- target_if->ifname, target_chan, cur_if->ifname, cur_if->channel));
+ WL_MSG(target_if->ifname, "channel=%d => %s channel=%d\n",
+ target_chan, cur_if->ifname, cur_if->channel);
target_if->channel = cur_if->channel;
if (apsta_params->csa == 0) {
wl_ext_if_down(apsta_params, target_if);
wl_ext_if_up(apsta_params, target_if);
}
} else {
- wl_ext_triger_csa(apsta_params, target_if);
+ wl_ext_trigger_csa(apsta_params, target_if);
}
}
wl_ext_enable_iface(struct net_device *dev, char *ifname)
{
struct dhd_pub *dhd = dhd_get_pub(dev);
- int i;
+ int i, ret = 0;
s8 iovar_buf[WLC_IOCTL_SMLEN];
wlc_ssid_t ssid = { 0, {0} };
chanspec_t fw_chspec;
} bss_setbuf;
struct wl_apsta_params *apsta_params = dhd->iapsta_params;
apstamode_t apstamode = apsta_params->apstamode;
- struct wl_if_info *cur_if = NULL;
+ struct wl_if_info *cur_if = NULL, *tmp_if = NULL;
uint16 cur_chan;
struct wl_conn_info conn_info;
+ u32 timeout;
for (i=0; i<MAX_IF_NUM; i++) {
- if (apsta_params->if_info[i].dev &&
- !strcmp(apsta_params->if_info[i].dev->name, ifname)) {
- cur_if = &apsta_params->if_info[i];
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if->dev && !strcmp(tmp_if->dev->name, ifname)) {
+ cur_if = tmp_if;
break;
}
}
if (!cur_if) {
- ANDROID_ERROR(("%s: wrong ifname=%s or dev not ready\n", __FUNCTION__, ifname));
+ AEXT_ERROR(dev->name, "wrong ifname=%s or dev not ready\n", ifname);
return -1;
}
+ mutex_lock(&apsta_params->usr_sync);
+
+ if (cur_if->ifmode == ISTA_MODE) {
+ wl_set_isam_status(cur_if, STA_CONNECTING);
+ } else if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
+ wl_set_isam_status(cur_if, AP_CREATING);
+ }
+
wl_ext_isam_status(cur_if->dev, NULL, 0);
- ANDROID_MSG(("%s: %s[%c] Enabling\n", __FUNCTION__, ifname, cur_if->prefix));
+ WL_MSG(ifname, "[%c] Enabling...\n", cur_if->prefix);
+
+ wl_ext_check_other_enabling(apsta_params, cur_if);
wl_ext_move_cur_channel(apsta_params, dev, cur_if);
if (!cur_if->channel && cur_if->ifmode != ISTA_MODE) {
- return 0;
+ goto exit;
}
cur_chan = wl_ext_get_chan(apsta_params, cur_if->dev);
if (cur_chan) {
- ANDROID_INFO(("%s: Associated!\n", __FUNCTION__));
+ AEXT_INFO(cur_if->ifname, "Associated\n");
if (cur_chan != cur_if->channel) {
- wl_ext_triger_csa(apsta_params, cur_if);
+ wl_ext_trigger_csa(apsta_params, cur_if);
}
- return 0;
+ goto exit;
+ }
+ if (cur_if->ifmode == ISTA_MODE) {
+ wl_clr_isam_status(cur_if, STA_CONNECTED);
+ } else if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
+ wl_clr_isam_status(cur_if, AP_CREATED);
}
wl_ext_move_other_channel(apsta_params, dev, cur_if);
wl_ext_iovar_setint(dev, "mpc", 0);
if (apstamode == IAPONLY_MODE || apstamode == IMESHONLY_MODE) {
wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
- } else if (apstamode==IAPSTA_MODE || apstamode==IGOSTA_MODE) {
+ } else if (apstamode==ISTAAP_MODE || apstamode==ISTAGO_MODE) {
wl_ext_iovar_setbuf_bsscfg(cur_if->dev, "ssid", &ssid, sizeof(ssid),
iovar_buf, WLC_IOCTL_SMLEN, cur_if->bssidx, NULL);
}
if (cur_if->hidden > 0) {
wl_ext_ioctl(cur_if->dev, WLC_SET_CLOSED, &cur_if->hidden,
sizeof(cur_if->hidden), 1);
- ANDROID_MSG(("%s: Broadcast SSID: %s\n", __FUNCTION__,
- cur_if->hidden ? "OFF":"ON"));
+ WL_MSG(ifname, "[%c] Broadcast SSID: %s\n",
+ cur_if->prefix, cur_if->hidden ? "OFF":"ON");
}
}
} else if (apstamode == IAPONLY_MODE) {
wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1);
wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
- } else if (apstamode == IAPSTA_MODE || apstamode == IGOSTA_MODE) {
+ } else if (apstamode == ISTAAP_MODE || apstamode == ISTAGO_MODE) {
if (cur_if->ifmode == ISTA_MODE) {
wl_ext_connect(cur_if->dev, &conn_info);
} else {
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef PROP_TXSTATUS_VSDB
#if defined(BCMSDIO)
- if (!apsta_params->rsdb && !disable_proptx) {
+ if (!(FW_SUPPORTED(dhd, rsdb)) && !disable_proptx) {
bool enabled;
dhd_wlfc_get_enable(dhd, &enabled);
if (!enabled) {
} else if (cur_if->ifmode == IAP_MODE) {
wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1);
} else {
- ANDROID_ERROR(("%s: wrong ifmode %d\n", __FUNCTION__, cur_if->ifmode));
+ AEXT_ERROR(cur_if->ifname, "wrong ifmode %d\n", cur_if->ifmode);
}
#ifdef WLMESH
} else if (apstamode == IMESHONLY_MODE ||
- apstamode == IMESHSTA_MODE || apstamode == IMESHAP_MODE ||
- apstamode == IMESHAPSTA_MODE || apstamode == IMESHAPAP_MODE) {
+ apstamode == ISTAMESH_MODE || apstamode == IMESHAP_MODE ||
+ apstamode == ISTAAPMESH_MODE || apstamode == IMESHAPAP_MODE) {
if (cur_if->ifmode == ISTA_MODE) {
wl_ext_connect(cur_if->dev, &conn_info);
} else if (cur_if->ifmode == IAP_MODE) {
join_params.params.chanspec_num = 1;
wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &join_params, sizeof(join_params), 1);
} else {
- ANDROID_ERROR(("%s: wrong ifmode %d\n", __FUNCTION__, cur_if->ifmode));
+ AEXT_ERROR(cur_if->ifname, "wrong ifmode %d\n", cur_if->ifmode);
}
#endif
}
-#ifdef WLMESH
- if ((cur_if->ifmode == IMESH_MODE) &&
- (apstamode == IMESHSTA_MODE || apstamode == IMESHAP_MODE ||
- apstamode == IMESHAPSTA_MODE || apstamode == IMESHAPAP_MODE)) {
+
+ if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
+ timeout = wait_event_interruptible_timeout(apsta_params->netif_change_event,
+ wl_get_isam_status(cur_if, AP_CREATED),
+ msecs_to_jiffies(MAX_AP_LINK_WAIT_TIME));
+ if (timeout <= 0 || !wl_get_isam_status(cur_if, AP_CREATED)) {
+ wl_ext_disable_iface(dev, cur_if->ifname);
+ WL_MSG(ifname, "[%c] failed to enable with SSID: \"%s\"\n",
+ cur_if->prefix, cur_if->ssid);
+ ret = -1;
+ }
+ }
+
+ if (wl_get_isam_status(cur_if, AP_CREATED) &&
+ (cur_if->ifmode == IMESH_MODE || cur_if->ifmode == IAP_MODE) &&
+ (apstamode == ISTAAP_MODE || apstamode == ISTAAPAP_MODE ||
+ apstamode == ISTAMESH_MODE || apstamode == IMESHAP_MODE ||
+ apstamode == ISTAAPMESH_MODE || apstamode == IMESHAPAP_MODE)) {
int scan_assoc_time = 80;
for (i=0; i<MAX_IF_NUM; i++) {
- if (apsta_params->if_info[i].dev &&
- apsta_params->if_info[i].ifmode == ISTA_MODE) {
- struct wl_if_info *tmp_if = &apsta_params->if_info[i];
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if->dev && tmp_if->ifmode == ISTA_MODE) {
wl_ext_ioctl(tmp_if->dev, WLC_SET_SCAN_CHANNEL_TIME,
&scan_assoc_time, sizeof(scan_assoc_time), 1);
}
}
}
-#endif
- OSL_SLEEP(500);
- ANDROID_MSG(("%s: %s[%c] enabled with SSID: \"%s\"\n", __FUNCTION__,
- ifname, cur_if->prefix, cur_if->ssid));
wl_ext_isam_status(cur_if->dev, NULL, 0);
- cur_if->ifstate = IF_STATE_ENABLE;
-
- return 0;
+exit:
+ if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
+ wl_clr_isam_status(cur_if, AP_CREATING);
+ }
+ WL_MSG(ifname, "[%c] Exit\n", cur_if->prefix);
+ mutex_unlock(&apsta_params->usr_sync);
+ return ret;
}
static int
wl_ext_iapsta_enable(struct net_device *dev, char *command, int total_len)
{
- struct dhd_pub *dhd = dhd_get_pub(dev);
int ret = 0;
char *pch, *pick_tmp, *param;
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
char ifname[IFNAMSIZ+1];
- if (!apsta_params->init) {
- ANDROID_ERROR(("%s: please init first\n", __FUNCTION__));
- return -1;
- }
-
- ANDROID_TRACE(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+ AEXT_TRACE(dev->name, "command=%s, len=%d\n", command, total_len);
pick_tmp = command;
param = bcmstrtok(&pick_tmp, " ", 0); // skip iapsta_enable
if (ret)
return ret;
} else {
- ANDROID_ERROR(("%s: ifname [wlanX]\n", __FUNCTION__));
+ AEXT_ERROR(dev->name, "ifname [wlanX]\n");
return -1;
}
}
{
struct dhd_pub *dhd = dhd_get_pub(dev);
struct wl_apsta_params *apsta_params = dhd->iapsta_params;
- struct wl_if_info *cur_if;
- int i;
if (apsta_params->init == TRUE) {
- ANDROID_ERROR(("%s: don't init twice\n", __FUNCTION__));
+ AEXT_ERROR(dev->name, "don't init twice\n");
return -1;
}
- ANDROID_TRACE(("%s: Enter\n", __FUNCTION__));
+ AEXT_TRACE(dev->name, "Enter\n");
+
+ apsta_params->init = TRUE;
+
+ return 0;
+}
+
+int
+wl_ext_iapsta_alive_postinit(struct net_device *dev)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ s32 apsta = 0;
+ struct wl_if_info *cur_if;
+ int i;
+
+ wl_ext_iovar_getint(dev, "apsta", &apsta);
+ if (apsta == 1) {
+ apsta_params->apstamode = ISTAONLY_MODE;
+ apsta_params->if_info[IF_PIF].ifmode = ISTA_MODE;
+ op_mode = DHD_FLAG_STA_MODE;
+ } else {
+ apsta_params->apstamode = IAPONLY_MODE;
+ apsta_params->if_info[IF_PIF].ifmode = IAP_MODE;
+ op_mode = DHD_FLAG_HOSTAP_MODE;
+ }
+ // fix me: how to check it's ISTAAP_MODE or IDUALAP_MODE?
+
+ wl_ext_get_ioctl_ver(dev, &apsta_params->ioctl_ver);
+ WL_MSG(dev->name, "apstamode=%d\n", apsta_params->apstamode);
for (i=0; i<MAX_IF_NUM; i++) {
cur_if = &apsta_params->if_info[i];
if (cur_if->ifmode == ISTA_MODE) {
cur_if->channel = 0;
cur_if->maxassoc = -1;
- cur_if->ifstate = IF_STATE_INIT;
+ wl_set_isam_status(cur_if, IF_READY);
cur_if->prio = PRIO_STA;
cur_if->prefix = 'S';
snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_sta");
} else if (cur_if->ifmode == IAP_MODE) {
cur_if->channel = 1;
cur_if->maxassoc = -1;
- cur_if->ifstate = IF_STATE_INIT;
+ wl_set_isam_status(cur_if, IF_READY);
cur_if->prio = PRIO_AP;
cur_if->prefix = 'A';
snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_ap");
} else if (cur_if->ifmode == IMESH_MODE) {
cur_if->channel = 1;
cur_if->maxassoc = -1;
- cur_if->ifstate = IF_STATE_INIT;
+ wl_set_isam_status(cur_if, IF_READY);
cur_if->prio = PRIO_MESH;
cur_if->prefix = 'M';
snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_mesh");
}
}
- apsta_params->init = TRUE;
-
- return 0;
-}
-
-int
-wl_ext_iapsta_alive_postinit(struct net_device *dev)
-{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- s32 apsta = 0;
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
-
- wl_ext_iovar_getint(dev, "apsta", &apsta);
- if (apsta == 1) {
- apsta_params->apstamode = ISTAONLY_MODE;
- apsta_params->if_info[IF_PIF].ifmode = ISTA_MODE;
- op_mode = DHD_FLAG_STA_MODE;
- } else {
- apsta_params->apstamode = IAPONLY_MODE;
- apsta_params->if_info[IF_PIF].ifmode = IAP_MODE;
- op_mode = DHD_FLAG_HOSTAP_MODE;
- }
- // fix me: how to check it's IAPSTA_MODE or IDUALAP_MODE?
-
- wl_ext_get_ioctl_ver(dev, &apsta_params->ioctl_ver);
- ANDROID_MSG(("%s: apstamode=%d\n", __FUNCTION__, apsta_params->apstamode));
-
return op_mode;
}
-#if defined(WL_WIRELESS_EXT)
-static uint32
-wl_ext_event_str(uint32 event_type, uint32 status, uint32 reason,
- char* stringBuf, uint buflen, void* data, uint32 datalen)
+int
+wl_ext_iapsta_event(struct net_device *dev,
+ struct wl_apsta_params *apsta_params, wl_event_msg_t *e, void* data)
{
+ struct wl_if_info *cur_if = NULL, *tmp_if = NULL;
int i;
- uint32 event_len = 0;
-
-#ifdef SENDPROB
- uint32 event_id[] = {DOT11_MNG_VS_ID};
- typedef struct probreq_event_map_t {
- uint8 event;
- uint8 mgmt[DOT11_MGMT_HDR_LEN];
- uint8 data[1]; // IE list
- } probreq_event_map_t;
-#endif
-
- typedef struct conn_fail_event_map_t {
- uint32 inEvent; /* input: event type to match */
- uint32 inStatus; /* input: event status code to match */
- uint32 inReason; /* input: event reason code to match */
- } conn_fail_event_map_t;
-
- /* Map of WLC_E events to connection failure strings */
-#define WL_IW_DONT_CARE 9999
- const conn_fail_event_map_t event_map[] = {
- /* inEvent inStatus inReason */
- {WLC_E_LINK, WL_IW_DONT_CARE, WL_IW_DONT_CARE},
- {WLC_E_DEAUTH, WL_IW_DONT_CARE, WL_IW_DONT_CARE},
- {WLC_E_DEAUTH_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE},
- {WLC_E_DISASSOC, WL_IW_DONT_CARE, WL_IW_DONT_CARE},
- {WLC_E_DISASSOC_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE},
- {WLC_E_OVERLAY_REQ, WL_IW_DONT_CARE, WL_IW_DONT_CARE},
- {WLC_E_ASSOC_IND, WL_IW_DONT_CARE, DOT11_SC_SUCCESS},
- {WLC_E_REASSOC_IND, WL_IW_DONT_CARE, DOT11_SC_SUCCESS},
- };
+ uint32 event_type = ntoh32(e->event_type);
+ uint32 status = ntoh32(e->status);
+ uint32 reason = ntoh32(e->reason);
+ uint16 flags = ntoh16(e->flags);
-#ifdef SENDPROB
- if (event_type == WLC_E_PROBREQ_MSG) {
- bcm_tlv_t *ie;
- probreq_event_map_t *pevent = (probreq_event_map_t *)stringBuf;
- char *pbuf;
- int buf_len = buflen;
- int num_ie = 0;
- int totlen;
-
- pevent->event = event_type;
- memcpy(pevent->mgmt, data, DOT11_MGMT_HDR_LEN);
- buf_len -= (DOT11_MGMT_HDR_LEN+1);
- datalen -= DOT11_MGMT_HDR_LEN;
- data += DOT11_MGMT_HDR_LEN;
-
- pbuf = pevent->data;
-#if 1 // non-sort by id
- ie = (bcm_tlv_t*)data;
- totlen = datalen;
- while (ie && totlen >= TLV_HDR_LEN) {
- int ie_id = -1;
- int ie_len = ie->len + TLV_HDR_LEN;
- for (i=0; i<sizeof(event_id)/sizeof(event_id[0]); i++) {
- if (ie->id == event_id[i]) {
- ie_id = ie->id;
- break;
- }
- }
- if ((ie->id == ie_id) && (totlen >= ie_len) && (buf_len >= ie_len)) {
- memcpy(pbuf, ie, ie_len);
- pbuf += ie_len;
- buf_len -= ie_len;
- num_ie++;
- }
- ie = (bcm_tlv_t*)((uint8*)ie + ie_len);
- totlen -= ie_len;
- }
-#else // sort by id
- for (i = 0; i < sizeof(event_id)/sizeof(event_id[0]); i++) {
- void *pdata = data;
- int data_len = datalen;
- while (buf_len > 0) {
- ie = bcm_parse_tlvs(pdata, data_len, event_id[i]);
- if (!ie)
- break;
- if (buf_len < (ie->len+TLV_HDR_LEN)) {
- ANDROID_TRACE(("%s: buffer is not enough\n", __FUNCTION__));
- break;
- }
- memcpy(pbuf, ie, min(ie->len+TLV_HDR_LEN, buf_len));
- pbuf += (ie->len+TLV_HDR_LEN);
- buf_len -= (ie->len+TLV_HDR_LEN);
- data_len -= (((void *)ie-pdata) + (ie->len+TLV_HDR_LEN));
- pdata = (char *)ie + (ie->len+TLV_HDR_LEN);
- num_ie++;
- }
- }
-#endif
- if (num_ie)
- event_len = buflen - buf_len;
- return event_len;
- }
-#endif
-
- /* Search the event map table for a matching event */
- for (i = 0; i < sizeof(event_map)/sizeof(event_map[0]); i++) {
- const conn_fail_event_map_t* row = &event_map[i];
- if (row->inEvent == event_type &&
- (row->inStatus == status || row->inStatus == WL_IW_DONT_CARE) &&
- (row->inReason == reason || row->inReason == WL_IW_DONT_CARE)) {
- memset(stringBuf, 0, buflen);
- event_len += snprintf(stringBuf, buflen, "isam_event event=%d reason=%d",
- event_type, reason);
- return event_len;
- }
- }
-
- return event_len;
-}
-#endif /* WL_WIRELESS_EXT */
-
-int
-wl_ext_iapsta_event(struct net_device *dev, wl_event_msg_t *e, void* data)
-{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
- struct wl_if_info *cur_if = NULL;
- int i;
-#if defined(WL_WIRELESS_EXT)
- char extra[IW_CUSTOM_MAX];
- union iwreq_data wrqu;
- uint32 datalen = ntoh32(e->datalen);
-#endif
- uint32 event_type = ntoh32(e->event_type);
- uint32 status = ntoh32(e->status);
- uint32 reason = ntoh32(e->reason);
- uint16 flags = ntoh16(e->flags);
-
- if (!apsta_params->init) {
- return -1;
- }
-
- for (i=0; i<MAX_IF_NUM; i++) {
- if (apsta_params->if_info[i].ifidx == e->ifidx) {
- cur_if = &apsta_params->if_info[i];
- break;
+ for (i=0; i<MAX_IF_NUM; i++) {
+ tmp_if = &apsta_params->if_info[i];
+ if (tmp_if->dev == dev) {
+ cur_if = tmp_if;
+ break;
}
}
if (!cur_if || !cur_if->dev) {
- ANDROID_ERROR(("%s: %s ifidx %d is not ready\n", __FUNCTION__,
- dev->name, e->ifidx));
+ AEXT_DBG(dev->name, "ifidx %d is not ready\n", e->ifidx);
return -1;
}
if (cur_if->ifmode == ISTA_MODE) {
if (event_type == WLC_E_LINK) {
if (!(flags & WLC_EVENT_MSG_LINK)) {
- ANDROID_MSG(("%s: %s[%c] Link Down with %pM\n", __FUNCTION__,
- cur_if->ifname, cur_if->prefix, &e->addr));
+ WL_MSG(cur_if->ifname,
+ "[%c] Link down with %pM, %s(%d), reason %d\n",
+ cur_if->prefix, &e->addr, bcmevent_get_name(event_type),
+ event_type, reason);
+ wl_clr_isam_status(cur_if, STA_CONNECTED);
} else {
- ANDROID_MSG(("%s: %s[%c] Link UP with %pM\n", __FUNCTION__,
- cur_if->ifname, cur_if->prefix, &e->addr));
+ WL_MSG(cur_if->ifname, "[%c] Link UP with %pM\n",
+ cur_if->prefix, &e->addr);
+ wl_set_isam_status(cur_if, STA_CONNECTED);
}
+ wl_clr_isam_status(cur_if, STA_CONNECTING);
+ wake_up_interruptible(&apsta_params->netif_change_event);
+ } else if (event_type == WLC_E_SET_SSID && status != WLC_E_STATUS_SUCCESS) {
+ WL_MSG(cur_if->ifname,
+ "connect failed event=%d, reason=%d, status=%d\n",
+ event_type, reason, status);
+ wl_clr_isam_status(cur_if, STA_CONNECTING);
+ wake_up_interruptible(&apsta_params->netif_change_event);
+ } else if (event_type == WLC_E_DEAUTH || event_type == WLC_E_DEAUTH_IND ||
+ event_type == WLC_E_DISASSOC || event_type == WLC_E_DISASSOC_IND) {
+ WL_MSG(cur_if->ifname, "[%c] Link down with %pM, %s(%d), reason %d\n",
+ cur_if->prefix, &e->addr, bcmevent_get_name(event_type),
+ event_type, reason);
}
}
else if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
if ((event_type == WLC_E_SET_SSID && status == WLC_E_STATUS_SUCCESS) ||
(event_type == WLC_E_LINK && status == WLC_E_STATUS_SUCCESS &&
reason == WLC_E_REASON_INITIAL_ASSOC)) {
- ANDROID_MSG(("%s: %s[%c] Link up\n", __FUNCTION__,
- cur_if->ifname, cur_if->prefix));
+ if (wl_get_isam_status(cur_if, AP_CREATING)) {
+ WL_MSG(cur_if->ifname, "[%c] Link up (etype=%d)\n",
+ cur_if->prefix, event_type);
+ wl_set_isam_status(cur_if, AP_CREATED);
+ wake_up_interruptible(&apsta_params->netif_change_event);
+ } else {
+ WL_MSG(cur_if->ifname, "[%c] Link up w/o creating? (etype=%d)\n",
+ cur_if->prefix, event_type);
+ }
} else if ((event_type == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS) ||
(event_type == WLC_E_LINK && status == WLC_E_STATUS_SUCCESS &&
reason == WLC_E_REASON_DEAUTH)) {
- ANDROID_MSG(("%s: %s[%c] Link down\n", __FUNCTION__,
- cur_if->ifname, cur_if->prefix));
+ wl_clr_isam_status(cur_if, AP_CREATED);
+ WL_MSG(cur_if->ifname, "[%c] Link down, reason=%d\n",
+ cur_if->prefix, reason);
}
else if ((event_type == WLC_E_ASSOC_IND || event_type == WLC_E_REASSOC_IND) &&
reason == DOT11_SC_SUCCESS) {
- ANDROID_MSG(("%s: %s[%c] connected device %pM\n", __FUNCTION__,
- cur_if->ifname, cur_if->prefix, &e->addr));
- } else if (event_type == WLC_E_DISASSOC_IND) {
- ANDROID_MSG(("%s: %s[%c] disassociated device %pM\n", __FUNCTION__,
- cur_if->ifname, cur_if->prefix, &e->addr));
- } else if (event_type == WLC_E_DEAUTH_IND ||
+ WL_MSG(cur_if->ifname, "[%c] connected device %pM\n",
+ cur_if->prefix, &e->addr);
+ wl_ext_isam_status(cur_if->dev, NULL, 0);
+ } else if (event_type == WLC_E_DISASSOC_IND ||
+ event_type == WLC_E_DEAUTH_IND ||
(event_type == WLC_E_DEAUTH && reason != DOT11_RC_RESERVED)) {
- ANDROID_MSG(("%s: %s[%c] deauthenticated device %pM\n", __FUNCTION__,
- cur_if->ifname, cur_if->prefix, &e->addr));
+ WL_MSG(cur_if->ifname,
+ "[%c] disconnected device %pM, %s(%d), reason=%d\n",
+ cur_if->prefix, &e->addr, bcmevent_get_name(event_type),
+ event_type, reason);
+ wl_ext_isam_status(cur_if->dev, NULL, 0);
}
}
-#if defined(WL_WIRELESS_EXT)
- memset(extra, 0, sizeof(extra));
- memset(&wrqu, 0, sizeof(wrqu));
- memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
- wrqu.addr.sa_family = ARPHRD_ETHER;
- wrqu.data.length = wl_ext_event_str(event_type, status, reason,
- extra, sizeof(extra), data, datalen);
- if (wrqu.data.length) {
- wireless_send_event(cur_if->dev, IWEVCUSTOM, &wrqu, extra);
- ANDROID_EVENT(("%s: %s[%c] event=%d, status=%d, reason=%d, flags=%d sent up\n",
- __FUNCTION__, cur_if->ifname, cur_if->prefix, event_type, status,
- reason, flags));
- } else
-#endif /* WL_WIRELESS_EXT */
- {
- ANDROID_EVENT(("%s: %s[%c] event=%d, status=%d, reason=%d, flags=%d\n",
- __FUNCTION__, cur_if->ifname, cur_if->prefix, event_type, status,
- reason, flags));
- }
-
return 0;
}
u32
-wl_ext_iapsta_update_channel(struct net_device *dev, u32 channel)
+wl_ext_iapsta_update_channel(dhd_pub_t *dhd, struct net_device *dev,
+ u32 channel)
{
- struct dhd_pub *dhd = dhd_get_pub(dev);
struct wl_apsta_params *apsta_params = dhd->iapsta_params;
struct wl_if_info *cur_if = NULL, *tmp_if = NULL;
int i;
}
if (cur_if) {
+ if (cur_if->ifmode == ISTA_MODE)
+ wl_set_isam_status(cur_if, STA_CONNECTING);
wl_ext_isam_status(cur_if->dev, NULL, 0);
cur_if->channel = channel;
channel = wl_ext_move_cur_channel(apsta_params,
return channel;
}
+#ifdef WL_CFG80211
+void
+wl_ext_iapsta_update_iftype(struct net_device *net, int ifidx, int wl_iftype)
+{
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *cur_if = NULL;
+
+ AEXT_TRACE(net->name, "ifidx=%d, wl_iftype=%d\n", ifidx, wl_iftype);
+
+ if (ifidx < MAX_IF_NUM) {
+ cur_if = &apsta_params->if_info[ifidx];
+ }
+
+ if (cur_if) {
+ if (wl_iftype == WL_IF_TYPE_STA) {
+ cur_if->ifmode = ISTA_MODE;
+ cur_if->prio = PRIO_STA;
+ cur_if->prefix = 'S';
+ } else if (wl_iftype == WL_IF_TYPE_AP) {
+ cur_if->ifmode = IAP_MODE;
+ cur_if->prio = PRIO_AP;
+ cur_if->prefix = 'A';
+ }
+ }
+}
+#endif
+
+#ifdef WL_STATIC_IF
+void
+wl_ext_iapsta_ifadding(struct net_device *net, int ifidx)
+{
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_if_info *cur_if = NULL;
+
+ AEXT_TRACE(net->name, "ifidx=%d\n", ifidx);
+ if (ifidx < MAX_IF_NUM) {
+ cur_if = &apsta_params->if_info[ifidx];
+ wl_set_isam_status(cur_if, IF_ADDING);
+ }
+}
+#endif
+
+static void
+wl_ext_iapsta_postinit(struct net_device *net, struct wl_if_info *cur_if)
+{
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ int pm;
+
+ AEXT_TRACE(cur_if->ifname, "ifidx=%d\n", cur_if->ifidx);
+ if (cur_if->ifidx == 0) {
+ apsta_params->csa |= CSA_FW_BIT;
+ if (dhd->conf->chip == BCM4359_CHIP_ID) {
+ apsta_params->rsdb = TRUE;
+ }
+ if (dhd->conf->fw_type == FW_TYPE_MESH) {
+ apsta_params->csa |= (CSA_FW_BIT | CSA_DRV_BIT);
+ }
+ } else {
+ if (cur_if->ifmode == ISTA_MODE) {
+ wl_ext_iovar_setint(cur_if->dev, "roam_off", dhd->conf->roam_off);
+ wl_ext_iovar_setint(cur_if->dev, "bcn_timeout", dhd->conf->bcn_timeout);
+ if (dhd->conf->pm >= 0)
+ pm = dhd->conf->pm;
+ else
+ pm = PM_FAST;
+ wl_ext_ioctl(cur_if->dev, WLC_SET_PM, &pm, sizeof(pm), 1);
+ wl_ext_iovar_setint(cur_if->dev, "assoc_retry_max", 30);
+ }
+#ifdef WLMESH
+ else if (cur_if->ifmode == IMESH_MODE) {
+ pm = 0;
+ wl_ext_ioctl(cur_if->dev, WLC_SET_PM, &pm, sizeof(pm), 1);
+ }
+#endif
+ }
+
+}
+
int
wl_ext_iapsta_attach_name(struct net_device *net, int ifidx)
{
struct wl_apsta_params *apsta_params = dhd->iapsta_params;
struct wl_if_info *cur_if = NULL;
- ANDROID_TRACE(("%s: ifidx=%d, %s\n", __FUNCTION__, ifidx, net->name));
+ AEXT_TRACE(net->name, "ifidx=%d\n", ifidx);
if (ifidx < MAX_IF_NUM) {
cur_if = &apsta_params->if_info[ifidx];
}
if (ifidx == 0) {
- if (dhd->conf->fw_type == FW_TYPE_MESH) {
- apsta_params->rsdb = TRUE;
- apsta_params->csa = CSA_FW_BIT | CSA_DRV_BIT;
- }
strcpy(cur_if->ifname, net->name);
- } else if (cur_if && cur_if->ifstate == IF_STATE_INIT) {
+ wl_ext_iapsta_postinit(net, cur_if);
+ wl_set_isam_status(cur_if, IF_READY);
+ } else if (cur_if && wl_get_isam_status(cur_if, IF_ADDING)) {
strcpy(cur_if->ifname, net->name);
- apsta_params->netif_change = TRUE;
+ wl_ext_iapsta_postinit(net, cur_if);
+ wl_clr_isam_status(cur_if, IF_ADDING);
+ wl_set_isam_status(cur_if, IF_READY);
+#ifndef WL_STATIC_IF
wake_up_interruptible(&apsta_params->netif_change_event);
+#endif
}
return 0;
struct wl_apsta_params *apsta_params = dhd->iapsta_params;
struct wl_if_info *cur_if = NULL, *primary_if;
- ANDROID_MSG(("%s: ifidx=%d, bssidx=%d\n", __FUNCTION__, ifidx, bssidx));
+ AEXT_TRACE(net->name, "ifidx=%d, bssidx=%d\n", ifidx, bssidx);
if (ifidx < MAX_IF_NUM) {
cur_if = &apsta_params->if_info[ifidx];
}
cur_if->dev = net;
cur_if->ifidx = ifidx;
cur_if->bssidx = bssidx;
+ cur_if->ifmode = ISTA_MODE;
+ cur_if->prio = PRIO_STA;
+ cur_if->prefix = 'S';
+ wl_ext_event_register(net, dhd, WLC_E_LAST, wl_ext_iapsta_event,
+ apsta_params, PRIO_EVENT_IAPSTA);
strcpy(cur_if->ifname, net->name);
init_waitqueue_head(&apsta_params->netif_change_event);
- } else if (cur_if && cur_if->ifstate == IF_STATE_INIT) {
+ mutex_init(&apsta_params->usr_sync);
+ } else if (cur_if && wl_get_isam_status(cur_if, IF_ADDING)) {
primary_if = &apsta_params->if_info[IF_PIF];
cur_if->dev = net;
cur_if->ifidx = ifidx;
cur_if->bssidx = bssidx;
+ wl_ext_event_register(net, dhd, WLC_E_LAST, wl_ext_iapsta_event,
+ apsta_params, PRIO_EVENT_IAPSTA);
if (strlen(cur_if->ifname)) {
memset(net->name, 0, sizeof(IFNAMSIZ));
strcpy(net->name, cur_if->ifname);
net->name[IFNAMSIZ-1] = '\0';
}
- if (apsta_params->apstamode != ISTAAPAP_MODE) {
+#ifndef WL_STATIC_IF
+ if (apsta_params->apstamode != ISTAAPAP_MODE &&
+ apsta_params->apstamode != ISTASTA_MODE) {
memcpy(net->dev_addr, primary_if->dev->dev_addr, ETHER_ADDR_LEN);
net->dev_addr[0] |= 0x02;
if (ifidx >= 2) {
net->dev_addr[5] += (ifidx-1);
}
}
- if (cur_if->ifmode == ISTA_MODE) {
- wl_ext_iovar_setint(net, "roam_off", dhd->conf->roam_off);
- wl_ext_iovar_setint(net, "bcn_timeout", dhd->conf->bcn_timeout);
-#ifdef WLMESH
- } else if (cur_if->ifmode == IMESH_MODE) {
- int pm = 0;
- wl_ext_ioctl(net, WLC_SET_PM, &pm, sizeof(pm), 1);
#endif
- }
}
return 0;
if (!apsta_params)
return 0;
- ANDROID_MSG(("%s: ifidx=%d\n", __FUNCTION__, ifidx));
+ AEXT_TRACE(net->name, "ifidx=%d\n", ifidx);
if (ifidx < MAX_IF_NUM) {
cur_if = &apsta_params->if_info[ifidx];
}
if (ifidx == 0) {
+ wl_ext_event_deregister(net, dhd, WLC_E_LAST, wl_ext_iapsta_event);
memset(apsta_params, 0, sizeof(struct wl_apsta_params));
- } else if (cur_if && cur_if->ifstate >= IF_STATE_INIT) {
+ } else if (cur_if && wl_get_isam_status(cur_if, IF_READY)) {
+ wl_ext_event_deregister(net, dhd, WLC_E_LAST, wl_ext_iapsta_event);
memset(cur_if, 0, sizeof(struct wl_if_info));
}
return 0;
}
-int wl_ext_iapsta_attach(dhd_pub_t *pub)
+int
+wl_ext_iapsta_attach(dhd_pub_t *pub)
{
struct wl_apsta_params *iapsta_params;
iapsta_params = kzalloc(sizeof(struct wl_apsta_params), GFP_KERNEL);
if (unlikely(!iapsta_params)) {
- ANDROID_ERROR(("%s: Could not allocate apsta_params\n", __FUNCTION__));
+ AEXT_ERROR("wlan", "Could not allocate apsta_params\n");
return -ENOMEM;
}
pub->iapsta_params = (void *)iapsta_params;
return 0;
}
-void wl_ext_iapsta_dettach(dhd_pub_t *pub)
+void
+wl_ext_iapsta_dettach(dhd_pub_t *pub)
{
if (pub->iapsta_params) {
kfree(pub->iapsta_params);
#endif
#ifdef IDHCP
-int
-wl_ext_ip_dump(int ip, char *buf)
+/*
+terence 20190409:
+dhd_priv wl dhcpc_dump
+dhd_priv wl dhcpc_param <client ip> <server ip> <lease time>
+*/
+static int
+wl_ext_dhcpc_dump(struct net_device *dev, char *data, char *command,
+ int total_len)
{
- unsigned char bytes[4];
- int bytes_written=-1;
+ int ret = 0;
+ int bytes_written = 0;
+ uint32 ip_addr;
+ char buf[20]="";
- bytes[0] = ip & 0xFF;
- bytes[1] = (ip >> 8) & 0xFF;
- bytes[2] = (ip >> 16) & 0xFF;
- bytes[3] = (ip >> 24) & 0xFF;
- bytes_written = sprintf(buf, "%d.%d.%d.%d", bytes[0], bytes[1], bytes[2], bytes[3]);
+ if (!data) {
+ ret = wl_ext_iovar_getint(dev, "dhcpc_ip_addr", &ip_addr);
+ if (!ret) {
+ bcm_ip_ntoa((struct ipv4_addr *)&ip_addr, buf);
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "ipaddr %s ", buf);
+ }
+
+ ret = wl_ext_iovar_getint(dev, "dhcpc_ip_mask", &ip_addr);
+ if (!ret) {
+ bcm_ip_ntoa((struct ipv4_addr *)&ip_addr, buf);
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "mask %s ", buf);
+ }
+
+ ret = wl_ext_iovar_getint(dev, "dhcpc_ip_gateway", &ip_addr);
+ if (!ret) {
+ bcm_ip_ntoa((struct ipv4_addr *)&ip_addr, buf);
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "gw %s ", buf);
+ }
+
+ ret = wl_ext_iovar_getint(dev, "dhcpc_ip_dnsserv", &ip_addr);
+ if (!ret) {
+ bcm_ip_ntoa((struct ipv4_addr *)&ip_addr, buf);
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "dnsserv %s ", buf);
+ }
+
+ if (!bytes_written)
+ bytes_written = -1;
+
+ AEXT_TRACE(dev->name, "command result is %s\n", command);
+ }
return bytes_written;
}
-/*
-terence 20170215:
-dhd_priv dhcpc_dump ifname [wlan0|wlan1]
-dhd_priv dhcpc_enable [0|1]
-*/
int
-wl_ext_dhcpc_enable(struct net_device *dev, char *command, int total_len)
+wl_ext_dhcpc_param(struct net_device *dev, char *data, char *command,
+ int total_len)
{
- int enable = -1, ret = -1;
- int bytes_written = -1;
-
- ANDROID_TRACE(("%s: cmd %s\n", __FUNCTION__, command));
+ int ret = -1, bytes_written = 0;
+ char ip_addr_str[20]="", ip_serv_str[20]="";
+ struct dhcpc_parameter dhcpc_param;
+ uint32 ip_addr, ip_serv, lease_time;
+ char iovar_buf[WLC_IOCTL_SMLEN]="\0";
- sscanf(command, "%*s %d", &enable);
+ if (data) {
+ AEXT_TRACE(dev->name, "cmd %s", command);
+		sscanf(data, "%19s %19s %d", ip_addr_str, ip_serv_str, &lease_time);
+ AEXT_TRACE(dev->name, "ip_addr = %s, ip_serv = %s, lease_time = %d",
+ ip_addr_str, ip_serv_str, lease_time);
+
+ memset(&dhcpc_param, 0, sizeof(struct dhcpc_parameter));
+ if (!bcm_atoipv4(ip_addr_str, (struct ipv4_addr *)&ip_addr)) {
+ AEXT_ERROR(dev->name, "wrong ip_addr_str %s\n", ip_addr_str);
+ ret = -1;
+ goto exit;
+ }
+ dhcpc_param.ip_addr = ip_addr;
- if (enable >= 0)
- ret = wl_ext_iovar_setint(dev, "dhcpc_enable", enable);
- else {
- ret = wl_ext_iovar_getint(dev, "dhcpc_enable", &enable);
+		if (!bcm_atoipv4(ip_serv_str, (struct ipv4_addr *)&ip_serv)) {
+			AEXT_ERROR(dev->name, "wrong ip_serv_str %s\n", ip_serv_str);
+ ret = -1;
+ goto exit;
+ }
+ dhcpc_param.ip_serv = ip_serv;
+ dhcpc_param.lease_time = lease_time;
+ ret = wl_ext_iovar_setbuf(dev, "dhcpc_param", &dhcpc_param,
+ sizeof(struct dhcpc_parameter), iovar_buf, sizeof(iovar_buf), NULL);
+ } else {
+ ret = wl_ext_iovar_getbuf(dev, "dhcpc_param", &dhcpc_param,
+ sizeof(struct dhcpc_parameter), iovar_buf, WLC_IOCTL_SMLEN, NULL);
if (!ret) {
- bytes_written = snprintf(command, total_len, "%d", enable);
- ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
+ bcm_ip_ntoa((struct ipv4_addr *)&dhcpc_param.ip_addr, ip_addr_str);
+ bytes_written += snprintf(command + bytes_written, total_len,
+ "ip_addr %s\n", ip_addr_str);
+ bcm_ip_ntoa((struct ipv4_addr *)&dhcpc_param.ip_serv, ip_serv_str);
+ bytes_written += snprintf(command + bytes_written, total_len,
+ "ip_serv %s\n", ip_serv_str);
+ bytes_written += snprintf(command + bytes_written, total_len,
+ "lease_time %d\n", dhcpc_param.lease_time);
+ AEXT_TRACE(dev->name, "command result is %s\n", command);
ret = bytes_written;
}
}
- return ret;
+ exit:
+ return ret;
}
+#endif
int
-wl_ext_dhcpc_dump(struct net_device *dev, char *command, int total_len)
+wl_ext_mkeep_alive(struct net_device *dev, char *data, char *command,
+ int total_len)
{
- int ret = 0;
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
+ int ret = -1, i, ifidx, id, period=-1;
+ char *packet = NULL, *buf = NULL;
int bytes_written = 0;
- uint32 ip_addr;
- char buf[20]="";
- ret = wl_ext_iovar_getint(dev, "dhcpc_ip_addr", &ip_addr);
- if (!ret) {
- wl_ext_ip_dump(ip_addr, buf);
- bytes_written += snprintf(command+bytes_written, total_len, "ipaddr %s ", buf);
+ if (data) {
+ buf = kmalloc(total_len, GFP_KERNEL);
+ if (buf == NULL) {
+			AEXT_ERROR(dev->name, "Failed to allocate buffer of %d bytes\n", total_len);
+ goto exit;
+ }
+ packet = kmalloc(total_len, GFP_KERNEL);
+ if (packet == NULL) {
+			AEXT_ERROR(dev->name, "Failed to allocate buffer of %d bytes\n", total_len);
+ goto exit;
+ }
+ AEXT_TRACE(dev->name, "cmd %s", command);
+ sscanf(data, "%d %d %s", &id, &period, packet);
+ AEXT_TRACE(dev->name, "id=%d, period=%d, packet=%s", id, period, packet);
+ if (period >= 0) {
+ ifidx = dhd_net2idx(dhd->info, dev);
+ ret = dhd_conf_mkeep_alive(dhd, ifidx, id, period, packet, FALSE);
+ } else {
+ if (id < 0)
+ id = 0;
+ ret = wl_ext_iovar_getbuf(dev, "mkeep_alive", &id, sizeof(id), buf,
+				total_len, NULL);
+ if (!ret) {
+ mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) buf;
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "Id :%d\n"
+ "Period (msec) :%d\n"
+ "Length :%d\n"
+ "Packet :0x",
+ mkeep_alive_pktp->keep_alive_id,
+ dtoh32(mkeep_alive_pktp->period_msec),
+ dtoh16(mkeep_alive_pktp->len_bytes));
+ for (i=0; i<mkeep_alive_pktp->len_bytes; i++) {
+ bytes_written += snprintf(command+bytes_written, total_len,
+ "%02x", mkeep_alive_pktp->data[i]);
+ }
+ AEXT_TRACE(dev->name, "command result is %s\n", command);
+ ret = bytes_written;
+ }
+ }
}
- ret = wl_ext_iovar_getint(dev, "dhcpc_ip_mask", &ip_addr);
- if (!ret) {
- wl_ext_ip_dump(ip_addr, buf);
- bytes_written += snprintf(command+bytes_written, total_len, "mask %s ", buf);
- }
+exit:
+ if (buf)
+ kfree(buf);
+ if (packet)
+ kfree(packet);
+ return ret;
+}
- ret = wl_ext_iovar_getint(dev, "dhcpc_ip_gateway", &ip_addr);
- if (!ret) {
- wl_ext_ip_dump(ip_addr, buf);
- bytes_written += snprintf(command+bytes_written, total_len, "gw %s ", buf);
+#ifdef WL_EXT_TCPKA
+static int
+wl_ext_tcpka_conn_add(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ int ret = 0;
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ tcpka_conn_t *tcpka = NULL;
+ uint32 sess_id = 0, ipid = 0, srcport = 0, dstport = 0, seq = 0, ack = 0,
+ tcpwin = 0, tsval = 0, tsecr = 0, len = 0, ka_payload_len = 0;
+ char dst_mac[ETHER_ADDR_STR_LEN], src_ip[IPV4_ADDR_STR_LEN],
+ dst_ip[IPV4_ADDR_STR_LEN], ka_payload[32];
+
+ if (data) {
+ memset(dst_mac, 0, sizeof(dst_mac));
+ memset(src_ip, 0, sizeof(src_ip));
+ memset(dst_ip, 0, sizeof(dst_ip));
+ memset(ka_payload, 0, sizeof(ka_payload));
+		sscanf(data, "%d %17s %15s %15s %d %d %d %u %u %d %u %u %u %u %31s",
+ &sess_id, dst_mac, src_ip, dst_ip, &ipid, &srcport, &dstport, &seq,
+ &ack, &tcpwin, &tsval, &tsecr, &len, &ka_payload_len, ka_payload);
+
+ tcpka = kmalloc(sizeof(struct tcpka_conn) + ka_payload_len, GFP_KERNEL);
+ if (tcpka == NULL) {
+ AEXT_ERROR(dev->name, "Failed to allocate buffer of %d bytes\n",
+ sizeof(struct tcpka_conn) + ka_payload_len);
+ goto exit;
+ }
+ memset(tcpka, 0, sizeof(struct tcpka_conn) + ka_payload_len);
+
+ tcpka->sess_id = sess_id;
+ if (!(ret = bcm_ether_atoe(dst_mac, &tcpka->dst_mac))) {
+ AEXT_ERROR(dev->name, "mac parsing err addr=%s\n", dst_mac);
+ goto exit;
+ }
+ if (!bcm_atoipv4(src_ip, &tcpka->src_ip)) {
+ AEXT_ERROR(dev->name, "src_ip parsing err ip=%s\n", src_ip);
+ goto exit;
+ }
+ if (!bcm_atoipv4(dst_ip, &tcpka->dst_ip)) {
+ AEXT_ERROR(dev->name, "dst_ip parsing err ip=%s\n", dst_ip);
+ goto exit;
+ }
+ tcpka->ipid = ipid;
+ tcpka->srcport = srcport;
+ tcpka->dstport = dstport;
+ tcpka->seq = seq;
+ tcpka->ack = ack;
+ tcpka->tcpwin = tcpwin;
+ tcpka->tsval = tsval;
+ tcpka->tsecr = tsecr;
+ tcpka->len = len;
+ tcpka->ka_payload_len = ka_payload_len;
+ strncpy(tcpka->ka_payload, ka_payload, ka_payload_len);
+
+ AEXT_INFO(dev->name,
+			"tcpka_conn_add %d %pM %pI4 %pI4 %d %d %d %u %u %d %u %u %u %u \"%s\"\n",
+ tcpka->sess_id, &tcpka->dst_mac, &tcpka->src_ip, &tcpka->dst_ip,
+ tcpka->ipid, tcpka->srcport, tcpka->dstport, tcpka->seq,
+ tcpka->ack, tcpka->tcpwin, tcpka->tsval, tcpka->tsecr,
+ tcpka->len, tcpka->ka_payload_len, tcpka->ka_payload);
+
+ ret = wl_ext_iovar_setbuf(dev, "tcpka_conn_add", (char *)tcpka,
+ (sizeof(tcpka_conn_t) + tcpka->ka_payload_len - 1),
+ iovar_buf, sizeof(iovar_buf), NULL);
}
- ret = wl_ext_iovar_getint(dev, "dhcpc_ip_dnsserv", &ip_addr);
- if (!ret) {
- wl_ext_ip_dump(ip_addr, buf);
- bytes_written += snprintf(command+bytes_written, total_len, "dnsserv %s ", buf);
+exit:
+ if (tcpka)
+ kfree(tcpka);
+ return ret;
+}
+
+static int
+wl_ext_tcpka_conn_enable(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ tcpka_conn_sess_t tcpka_conn;
+	int ret = 0;
+ uint32 sess_id = 0, flag, interval = 0, retry_interval = 0, retry_count = 0;
+
+ if (data) {
+ sscanf(data, "%d %d %d %d %d",
+ &sess_id, &flag, &interval, &retry_interval, &retry_count);
+ tcpka_conn.sess_id = sess_id;
+ tcpka_conn.flag = flag;
+ if (tcpka_conn.flag) {
+ tcpka_conn.tcpka_timers.interval = interval;
+ tcpka_conn.tcpka_timers.retry_interval = retry_interval;
+ tcpka_conn.tcpka_timers.retry_count = retry_count;
+ } else {
+ tcpka_conn.tcpka_timers.interval = 0;
+ tcpka_conn.tcpka_timers.retry_interval = 0;
+ tcpka_conn.tcpka_timers.retry_count = 0;
+ }
+
+ AEXT_INFO(dev->name, "tcpka_conn_enable %d %d %d %d %d\n",
+ tcpka_conn.sess_id, tcpka_conn.flag,
+ tcpka_conn.tcpka_timers.interval,
+ tcpka_conn.tcpka_timers.retry_interval,
+ tcpka_conn.tcpka_timers.retry_count);
+
+ ret = wl_ext_iovar_setbuf(dev, "tcpka_conn_enable", (char *)&tcpka_conn,
+ sizeof(tcpka_conn_sess_t), iovar_buf, sizeof(iovar_buf), NULL);
}
- if (!bytes_written)
- bytes_written = -1;
+ return ret;
+}
+
+static int
+wl_ext_tcpka_conn_info(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ tcpka_conn_sess_info_t *info = NULL;
+ uint32 sess_id = 0;
+ int ret = 0;
- ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
+ if (data) {
+ sscanf(data, "%d", &sess_id);
+ AEXT_INFO(dev->name, "tcpka_conn_sess_info %d\n", sess_id);
+ ret = wl_ext_iovar_getbuf(dev, "tcpka_conn_sess_info", (char *)&sess_id,
+ sizeof(uint32), iovar_buf, sizeof(iovar_buf), NULL);
+ if (!ret) {
+ info = (tcpka_conn_sess_info_t *) iovar_buf;
+ ret = snprintf(command, total_len, "id=%d, ipid=%d, seq=%u, ack=%u",
+ sess_id, info->ipid, info->seq, info->ack);
+ AEXT_INFO(dev->name, "%s\n", command);
+ }
+ }
- return bytes_written;
+ return ret;
}
#endif
wl_config_t rsdb_mode_cfg = {1, 0}, *rsdb_p;
int ret = 0;
- ANDROID_TRACE(("%s: Enter\n", __FUNCTION__));
-
if (data) {
rsdb_mode_cfg.config = (int)simple_strtol(data, NULL, 0);
ret = wl_ext_iovar_setbuf(dev, "rsdb_mode", (char *)&rsdb_mode_cfg,
sizeof(rsdb_mode_cfg), iovar_buf, WLC_IOCTL_SMLEN, NULL);
- ANDROID_MSG(("%s: rsdb_mode %d\n", __FUNCTION__, rsdb_mode_cfg.config));
+ AEXT_INFO(dev->name, "rsdb_mode %d\n", rsdb_mode_cfg.config);
} else {
ret = wl_ext_iovar_getbuf(dev, "rsdb_mode", NULL, 0,
iovar_buf, WLC_IOCTL_SMLEN, NULL);
if (!ret) {
rsdb_p = (wl_config_t *) iovar_buf;
ret = snprintf(command, total_len, "%d", rsdb_p->config);
- ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__,
- command));
+ AEXT_TRACE(dev->name, "command result is %s\n", command);
+ }
+ }
+
+ return ret;
+}
+
+static int
+wl_ext_recal(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ int ret = 0, i, nchan, nssid = 0;
+ int params_size = WL_SCAN_PARAMS_FIXED_SIZE + WL_NUMCHANNELS * sizeof(uint16);
+ wl_scan_params_t *params = NULL;
+ int ioctl_ver;
+ char *p;
+
+ AEXT_TRACE(dev->name, "Enter\n");
+
+ if (data) {
+ params_size += WL_SCAN_PARAMS_SSID_MAX * sizeof(wlc_ssid_t);
+ params = (wl_scan_params_t *) kzalloc(params_size, GFP_KERNEL);
+ if (params == NULL) {
+ ret = -ENOMEM;
+ goto exit;
}
+ memset(params, 0, params_size);
+
+ wl_ext_get_ioctl_ver(dev, &ioctl_ver);
+
+ memcpy(¶ms->bssid, ðer_bcast, ETHER_ADDR_LEN);
+ params->bss_type = DOT11_BSSTYPE_ANY;
+ params->scan_type = 0;
+ params->nprobes = -1;
+ params->active_time = -1;
+ params->passive_time = -1;
+ params->home_time = -1;
+ params->channel_num = 0;
+
+ params->scan_type |= WL_SCANFLAGS_PASSIVE;
+ nchan = 2;
+ params->channel_list[0] = wf_channel2chspec(1, WL_CHANSPEC_BW_20);
+ params->channel_list[1] = wf_channel2chspec(2, WL_CHANSPEC_BW_20);
+
+ params->nprobes = htod32(params->nprobes);
+ params->active_time = htod32(params->active_time);
+ params->passive_time = htod32(params->passive_time);
+ params->home_time = htod32(params->home_time);
+
+ for (i = 0; i < nchan; i++) {
+ wl_ext_chspec_host_to_driver(ioctl_ver, params->channel_list[i]);
+ }
+
+ p = (char*)params->channel_list + nchan * sizeof(uint16);
+
+ params->channel_num = htod32((nssid << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (nchan & WL_SCAN_PARAMS_COUNT_MASK));
+ params_size = p - (char*)params + nssid * sizeof(wlc_ssid_t);
+
+ AEXT_INFO(dev->name, "recal\n");
+ ret = wl_ext_ioctl(dev, WLC_SCAN, params, params_size, 1);
}
+exit:
+ if (params)
+ kfree(params);
return ret;
}
/* Setup event_msgs */
err = wldev_iovar_getbuf(ndev, "event_msgs", NULL, 0, iovbuf, sizeof(iovbuf), NULL);
if (unlikely(err)) {
- ANDROID_ERROR(("Get event_msgs error (%d)\n", err));
+ AEXT_ERROR(ndev->name, "Get event_msgs error (%d)\n", err);
goto eventmsg_out;
}
memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
err = wldev_iovar_setbuf(ndev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
sizeof(iovbuf), NULL);
if (unlikely(err)) {
- ANDROID_ERROR(("Set event_msgs error (%d)\n", err));
+ AEXT_ERROR(ndev->name, "Set event_msgs error (%d)\n", err);
goto eventmsg_out;
}
/* dhd_priv wl event_msg [offset] [1/0, 1 for add, 0 for remove] */
/* dhd_priv wl event_msg 40 1 */
if (data) {
- ANDROID_TRACE(("%s: command = %s\n", __FUNCTION__, data));
+ AEXT_TRACE(dev->name, "data = %s\n", data);
sscanf(data, "%d %d", &event, &add);
/* Setup event_msgs */
bytes_written = wldev_iovar_getbuf(dev, "event_msgs", NULL, 0, iovbuf,
sizeof(iovbuf), NULL);
if (unlikely(bytes_written)) {
- ANDROID_ERROR(("Get event_msgs error (%d)\n", bytes_written));
+ AEXT_ERROR(dev->name, "Get event_msgs error (%d)\n", bytes_written);
goto eventmsg_out;
}
memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
bytes_written += snprintf(command+bytes_written, total_len, "1");
else
bytes_written += snprintf(command+bytes_written, total_len, "0");
- ANDROID_INFO(("%s\n", command));
+ AEXT_INFO(dev->name, "%s\n", command);
goto eventmsg_out;
}
bytes_written = wl_ext_add_remove_eventmsg(dev, event, add);
bytes_written = wldev_iovar_getbuf(dev, "event_msgs", NULL, 0, iovbuf,
sizeof(iovbuf), NULL);
if (bytes_written) {
- ANDROID_ERROR(("Get event_msgs error (%d)\n", bytes_written));
+ AEXT_ERROR(dev->name, "Get event_msgs error (%d)\n", bytes_written);
goto eventmsg_out;
}
vbuf = (char *)iovbuf;
bytes_written += snprintf(command+bytes_written, total_len,
"%02x", vbuf[i] & 0xff);
}
- ANDROID_INFO(("%s\n", command));
+ AEXT_INFO(dev->name, "%s\n", command);
}
eventmsg_out:
return bytes_written;
}
+#ifdef PKT_FILTER_SUPPORT
+extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
+extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+static int
+wl_ext_pkt_filter_add(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ int i, filter_id, new_id = 0, cnt;
+ conf_pkt_filter_add_t *filter_add = &dhd->conf->pkt_filter_add;
+ char **pktfilter = dhd->pktfilter;
+ int err = 0;
+
+ if (data) {
+ AEXT_TRACE(dev->name, "data = %s\n", data);
+
+ new_id = simple_strtol(data, NULL, 10);
+ if (new_id <= 0) {
+ AEXT_ERROR(dev->name, "wrong id %d\n", new_id);
+ return -1;
+ }
+
+ cnt = dhd->pktfilter_count;
+ for (i=0; i<cnt; i++) {
+ if (!pktfilter[i])
+ continue;
+ filter_id = simple_strtol(pktfilter[i], NULL, 10);
+ if (new_id == filter_id) {
+ AEXT_ERROR(dev->name, "filter id %d already in list\n", filter_id);
+ return -1;
+ }
+ }
+
+ cnt = filter_add->count;
+ if (cnt >= DHD_CONF_FILTER_MAX) {
+ AEXT_ERROR(dev->name, "not enough filter\n");
+ return -1;
+ }
+ for (i=0; i<cnt; i++) {
+ filter_id = simple_strtol(filter_add->filter[i], NULL, 10);
+ if (new_id == filter_id) {
+ AEXT_ERROR(dev->name, "filter id %d already in list\n", filter_id);
+ return -1;
+ }
+ }
+
+ strcpy(&filter_add->filter[cnt][0], data);
+ dhd->pktfilter[dhd->pktfilter_count] = filter_add->filter[cnt];
+ filter_add->count++;
+ dhd->pktfilter_count++;
+
+ dhd_pktfilter_offload_set(dhd, data);
+ AEXT_INFO(dev->name, "filter id %d added\n", new_id);
+ }
+
+ return err;
+}
+
+static int
+wl_ext_pkt_filter_delete(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ int i, j, filter_id, cnt;
+ char **pktfilter = dhd->pktfilter;
+ conf_pkt_filter_add_t *filter_add = &dhd->conf->pkt_filter_add;
+ bool in_filter = FALSE;
+ int id, err = 0;
+
+ if (data) {
+ AEXT_TRACE(dev->name, "data = %s\n", data);
+ id = (int)simple_strtol(data, NULL, 0);
+
+ cnt = filter_add->count;
+ for (i=0; i<cnt; i++) {
+ filter_id = simple_strtol(filter_add->filter[i], NULL, 10);
+ if (id == filter_id) {
+ in_filter = TRUE;
+ memset(filter_add->filter[i], 0, PKT_FILTER_LEN);
+ for (j=i; j<(cnt-1); j++) {
+ strcpy(filter_add->filter[j], filter_add->filter[j+1]);
+ memset(filter_add->filter[j+1], 0, PKT_FILTER_LEN);
+ }
+ cnt--;
+ filter_add->count--;
+ dhd->pktfilter_count--;
+ }
+ }
+
+ cnt = dhd->pktfilter_count;
+ for (i=0; i<cnt; i++) {
+ if (!pktfilter[i])
+ continue;
+ filter_id = simple_strtol(pktfilter[i], NULL, 10);
+ if (id == filter_id) {
+ in_filter = TRUE;
+ memset(pktfilter[i], 0, strlen(pktfilter[i]));
+ }
+ }
+
+ if (in_filter) {
+ dhd_pktfilter_offload_delete(dhd, id);
+ AEXT_INFO(dev->name, "filter id %d deleted\n", id);
+ } else {
+ AEXT_ERROR(dev->name, "filter id %d not in list\n", id);
+ err = -1;
+ }
+ }
+
+ return err;
+}
+
+static int
+wl_ext_pkt_filter_enable(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ int err = 0, id, enable;
+ int i, filter_id, cnt;
+ char **pktfilter = dhd->pktfilter;
+ bool in_filter = FALSE;
+
+ /* dhd_priv wl pkt_filter_enable [id] [1/0] */
+ /* dhd_priv wl pkt_filter_enable 141 1 */
+ if (data) {
+ sscanf(data, "%d %d", &id, &enable);
+
+ cnt = dhd->pktfilter_count;
+ for (i=0; i<cnt; i++) {
+ if (!pktfilter[i])
+ continue;
+ filter_id = simple_strtol(pktfilter[i], NULL, 10);
+ if (id == filter_id) {
+ in_filter = TRUE;
+ break;
+ }
+ }
+
+ if (in_filter) {
+ dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
+ enable, dhd_master_mode);
+ AEXT_INFO(dev->name, "filter id %d %s\n", id, enable?"enabled":"disabled");
+ } else {
+ AEXT_ERROR(dev->name, "filter id %d not in list\n", id);
+ err = -1;
+ }
+ }
+
+ return err;
+}
+#endif
+
#ifdef SENDPROB
static int
-wl_ext_send_probresp(struct net_device *dev, char *data, char *command, int total_len)
+wl_ext_add_del_ie(struct net_device *dev, uint pktflag, char *ie_data, const char* add_del_cmd)
{
- int err = 0, buf_len, ie_data_len = 0;
+ vndr_ie_setbuf_t *vndr_ie = NULL;
+ char iovar_buf[WLC_IOCTL_SMLEN]="\0";
+ int ie_data_len = 0, tot_len = 0, iecount;
+ int err = -1;
+
+ if (!strlen(ie_data)) {
+ AEXT_ERROR(dev->name, "wrong ie %s\n", ie_data);
+ goto exit;
+ }
+
+ tot_len = (int)(sizeof(vndr_ie_setbuf_t) + ((strlen(ie_data)-2)/2));
+ vndr_ie = (vndr_ie_setbuf_t *) kzalloc(tot_len, GFP_KERNEL);
+ if (!vndr_ie) {
+ AEXT_ERROR(dev->name, "IE memory alloc failed\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* Copy the vndr_ie SET command ("add"/"del") to the buffer */
+ strncpy(vndr_ie->cmd, add_del_cmd, VNDR_IE_CMD_LEN - 1);
+ vndr_ie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+
+ /* Set the IE count - the buffer contains only 1 IE */
+ iecount = htod32(1);
+ memcpy((void *)&vndr_ie->vndr_ie_buffer.iecount, &iecount, sizeof(s32));
+
+ /* Set packet flag to indicate that BEACON's will contain this IE */
+ pktflag = htod32(pktflag);
+ memcpy((void *)&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag,
+ sizeof(u32));
+
+ /* Set the IE ID */
+ vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = (uchar)DOT11_MNG_VS_ID;
+
+ /* Set the IE LEN */
+ vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len = (strlen(ie_data)-2)/2;
+
+ /* Set the IE OUI and DATA */
+ ie_data_len = wl_pattern_atoh(ie_data,
+ (char *)vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui);
+ if (ie_data_len <= 0) {
+ AEXT_ERROR(dev->name, "wrong ie_data_len %d\n", strlen(ie_data)-2);
+ goto exit;
+ }
+
+ err = wl_ext_iovar_setbuf(dev, "vndr_ie", vndr_ie, tot_len, iovar_buf,
+ sizeof(iovar_buf), NULL);
+
+exit:
+ if (vndr_ie) {
+ kfree(vndr_ie);
+ }
+ return err;
+}
+
+/* "dhd_priv wl send_probreq [dest. addr] [OUI+VAL]": temporarily attach the
+ * caller-supplied vendor IE to probe requests, unicast one via "sendprb",
+ * then detach the IE again after 100 ms.
+ */
+static int
+wl_ext_send_probreq(struct net_device *dev, char *data, char *command,
+	int total_len)
+{
+	int err = 0;
 	char addr_str[16], addr[6];
-	char buf[WLC_IOCTL_SMLEN]="\0", iovar_buf[WLC_IOCTL_SMLEN]="\0";
+	char iovar_buf[WLC_IOCTL_SMLEN]="\0";
 	char ie_data[WLC_IOCTL_SMLEN] = "\0";
-	uint8 add_ie_header[] = {
-		0x61, 0x64, 0x64, 0x00, /* "add" */
-		0x01, 0x00, 0x00, 0x00, /* 0x0000001: 1 element */
-		0x02, 0x00, 0x00, 0x00, /* 0x0000002: apply to probe response only */
-		0xdd /* 221 */
-	};
-	uint8 del[] = {0x64, 0x65, 0x6c};
+	wl_probe_params_t params;
-	/* dhd_priv wl send_probresp [dest. addr] [OUI+VAL] */
-	/* dhd_priv wl send_probresp 0x00904c010203 0x00904c01020304050607 */
+	/* dhd_priv wl send_probreq [dest. addr] [OUI+VAL] */
+	/* dhd_priv wl send_probreq 0x00904c010203 0x00904c01020304050607 */
 	if (data) {
-		ANDROID_TRACE(("%s: command = %s\n", __FUNCTION__, data));
+		AEXT_TRACE(dev->name, "data = %s\n", data);
 		sscanf(data, "%s %s", addr_str, ie_data);
-		ANDROID_TRACE(("%s: addr=%s, ie=%s\n", __FUNCTION__, addr_str, ie_data));
+		AEXT_TRACE(dev->name, "addr=%s, ie=%s\n", addr_str, ie_data);
-		if (strlen(addr_str) != 14 || !strlen(ie_data)) {
-			ANDROID_ERROR(("%s: wrong addr %s or ie %s\n", __FUNCTION__,
-				addr_str, ie_data));
+		if (strlen(addr_str) != 14) {
+			AEXT_ERROR(dev->name, "wrong addr %s\n", addr_str);
 			goto exit;
 		}
-		wl_ext_pattern_atoh(addr_str, (char *) addr);
+		wl_pattern_atoh(addr_str, (char *) addr);
+		/* fixed mis-encoded "&para;" entity back to "&params" below */
+		memset(&params, 0, sizeof(params));
+		memcpy(&params.bssid, addr, ETHER_ADDR_LEN);
+		memcpy(&params.mac, addr, ETHER_ADDR_LEN);
+
+		err = wl_ext_add_del_ie(dev, VNDR_IE_PRBREQ_FLAG, ie_data, "add");
+		if (err)
+			goto exit;
+		err = wl_ext_iovar_setbuf(dev, "sendprb", (char *)&params, sizeof(params),
+			iovar_buf, sizeof(iovar_buf), NULL);
+		OSL_SLEEP(100);
+		wl_ext_add_del_ie(dev, VNDR_IE_PRBREQ_FLAG, ie_data, "del");
+	}
+
+exit:
+	return err;
+}
- buf_len = sizeof(add_ie_header);
- memcpy(buf, add_ie_header, buf_len);
+/* "dhd_priv wl send_probresp [dest. addr] [OUI+VAL]": temporarily attach the
+ * caller-supplied vendor IE to probe responses, unicast one via
+ * "send_probresp", then detach the IE again after 100 ms.
+ */
+static int
+wl_ext_send_probresp(struct net_device *dev, char *data, char *command,
+	int total_len)
+{
+	int err = 0;
+	char addr_str[16], addr[6];
+	char iovar_buf[WLC_IOCTL_SMLEN]="\0";
+	char ie_data[WLC_IOCTL_SMLEN] = "\0";
-	buf[buf_len] = (strlen(ie_data)-2)/2;
-	buf_len++;
+	/* dhd_priv wl send_probresp [dest. addr] [OUI+VAL] */
+	/* dhd_priv wl send_probresp 0x00904c010203 0x00904c01020304050607 */
+	if (data) {
+		AEXT_TRACE(dev->name, "data = %s\n", data);
+		/* NOTE(review): sscanf() return value is unchecked; an input
+		 * without two tokens leaves addr_str uninitialized — TODO
+		 * confirm callers always pass "<addr> <ie>".
+		 */
+		sscanf(data, "%s %s", addr_str, ie_data);
+		AEXT_TRACE(dev->name, "addr=%s, ie=%s\n", addr_str, ie_data);
-		ie_data_len = wl_ext_pattern_atoh(ie_data, (char *) &buf[buf_len]);
-		if (ie_data_len <= 0) {
-			ANDROID_ERROR(("%s: wrong ie_data_len %d\n", __FUNCTION__, strlen(ie_data)-2));
+		if (strlen(addr_str) != 14) {
+			AEXT_ERROR(dev->name, "wrong addr %s\n", addr_str);
 			goto exit;
 		}
-		buf_len += ie_data_len;
-		err = wl_ext_iovar_setbuf(dev, "ie", buf, buf_len,
-			iovar_buf, sizeof(iovar_buf), NULL);
-		if (err) {
-			ANDROID_ERROR(("%s: failed to add ie %s\n", __FUNCTION__, ie_data));
+		wl_pattern_atoh(addr_str, (char *) addr);
+
+		/* add IE -> send frame -> wait 100 ms for firmware -> del IE */
+		err = wl_ext_add_del_ie(dev, VNDR_IE_PRBRSP_FLAG, ie_data, "add");
+		if (err)
 			goto exit;
-		}
 		err = wl_ext_iovar_setbuf(dev, "send_probresp", addr, sizeof(addr),
 			iovar_buf, sizeof(iovar_buf), NULL);
+		OSL_SLEEP(100);
+		wl_ext_add_del_ie(dev, VNDR_IE_PRBRSP_FLAG, ie_data, "del");
+	}
-	memcpy(buf, del, sizeof(del));
-	err = wl_ext_iovar_setbuf(dev, "ie", buf, buf_len,
-		iovar_buf, sizeof(iovar_buf), NULL);
-	if (err) {
-		ANDROID_ERROR(("%s: failed to del ie %s\n", __FUNCTION__, ie_data));
-		goto exit;
+exit:
+	return err;
+}
+
+/* "dhd_priv wl recv_probreq [1/0]": enable/disable delivery of received
+ * probe requests by setting PM (ioctl 86) and toggling event 44
+ * (WLC_E_PROBREQ_MSG); mirrors the state in dhd->recv_probereq.
+ */
+static int
+wl_ext_recv_probreq(struct net_device *dev, char *data, char *command,
+	int total_len)
+{
+	int err = 0, enable = 0;
+	char cmd[32];
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+
+	/* enable:
+	    1. dhd_priv wl 86 0
+	    2. dhd_priv wl event_msg 44 1
+	   disable:
+	    1. dhd_priv wl 86 2;
+	    2. dhd_priv wl event_msg 44 0
+	*/
+	if (data) {
+		AEXT_TRACE(dev->name, "data = %s\n", data);
+		sscanf(data, "%d", &enable);
+		if (enable) {
+			strcpy(cmd, "wl 86 0");
+			err = wl_ext_wl_iovar(dev, cmd, total_len);
+			if (err)
+				goto exit;
+			strcpy(cmd, "wl event_msg 44 1");
+			err = wl_ext_wl_iovar(dev, cmd, total_len);
+			if (err)
+				goto exit;
+			dhd->recv_probereq = TRUE;
+		} else {
+			/* fixed stray ";" that detached this block from the
+			 * if: cmd was issued uninitialized when conf->pm == 0
+			 */
+			if (dhd->conf->pm) {
+				strcpy(cmd, "wl 86 2");
+				wl_ext_wl_iovar(dev, cmd, total_len);
+			}
+			strcpy(cmd, "wl event_msg 44 0");
+			wl_ext_wl_iovar(dev, cmd, total_len);
+			dhd->recv_probereq = FALSE;
+		}
+	}
+
+exit:
+	return err;
+}
+
+static int
+wl_ext_recv_probresp(struct net_device *dev, char *data, char *command,
+ int total_len)
+{
+ int err = 0, enable = 0;
+ char cmd[32];
+
+ /* enable:
+ 1. dhd_priv wl pkt_filter_add 150 0 0 0 0xFF 0x50
+ 2. dhd_priv wl pkt_filter_enable 150 1
+ 3. dhd_priv wl mpc 0
+ 4. dhd_priv wl 108 1
+ disable:
+ 1. dhd_priv wl 108 0
+ 2. dhd_priv wl mpc 1
+ 3. dhd_priv wl pkt_filter_disable 150 0
+ 4. dhd_priv pkt_filter_delete 150
+ */
+ if (data) {
+ AEXT_TRACE(dev->name, "data = %s\n", data);
+ sscanf(data, "%d", &enable);
+ if (enable) {
+ strcpy(cmd, "wl pkt_filter_add 150 0 0 0 0xFF 0x50");
+ err = wl_ext_wl_iovar(dev, cmd, total_len);
+ if (err)
+ goto exit;
+ strcpy(cmd, "wl pkt_filter_enable 150 1");
+ err = wl_ext_wl_iovar(dev, cmd, total_len);
+ if (err)
+ goto exit;
+ strcpy(cmd, "wl mpc 0");
+ err = wl_ext_wl_iovar(dev, cmd, total_len);
+ if (err)
+ goto exit;
+ strcpy(cmd, "wl 108 1");
+ err= wl_ext_wl_iovar(dev, cmd, total_len);
+ } else {
+ strcpy(cmd, "wl 108 0");
+ wl_ext_wl_iovar(dev, cmd, total_len);
+ strcpy(cmd, "wl mpc 1");
+ wl_ext_wl_iovar(dev, cmd, total_len);
+ strcpy(cmd, "wl pkt_filter_enable 150 0");
+ wl_ext_wl_iovar(dev, cmd, total_len);
+ strcpy(cmd, "wl pkt_filter_delete 150");
+ wl_ext_wl_iovar(dev, cmd, total_len);
}
}
err = wl_ext_iovar_setbuf(dev, "gtk_key_info", &keyinfo, sizeof(keyinfo),
iovar_buf, sizeof(iovar_buf), NULL);
if (err) {
- ANDROID_ERROR(("%s: failed to set gtk_key_info\n", __FUNCTION__));
+ AEXT_ERROR(dev->name, "failed to set gtk_key_info\n");
goto exit;
}
}
return err;
}
+#ifdef WL_EXT_WOWL
+/* "dhd_priv wl wowl_pattern [add|clr] [offset] [mask] [pattern]":
+ * set/clear a wake-on-wireless net pattern, or (with no args) dump the
+ * currently installed pattern list into "command".
+ * The textual mask is padded, bit-reduced (00 -> 0, else 1), bit-reversed
+ * per byte and re-packed into hex before being handed to the firmware.
+ */
+static int
+wl_ext_wowl_pattern(struct net_device *dev, char *data, char *command,
+	int total_len)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+	uint buf_len = 0;
+	int offset;
+	char mask[128]="\0", pattern[128]="\0", add[4]="\0",
+		mask_tmp[128], *pmask_tmp;
+	uint32 masksize, patternsize, pad_len = 0;
+	wl_wowl_pattern2_t *wowl_pattern2 = NULL;
+	wl_wowl_pattern_t *wowl_pattern = NULL;
+	char *mask_and_pattern;
+	wl_wowl_pattern_list_t *list;
+	uint8 *ptr;
+	int ret = 0, i, j, v;
+
+	if (data) {
+		sscanf(data, "%s %d %s %s", add, &offset, mask_tmp, pattern);
+		if (strcmp(add, "add") != 0 && strcmp(add, "clr") != 0) {
+			AEXT_ERROR(dev->name, "first arg should be add or clr\n");
+			goto exit;
+		}
+		if (!strcmp(add, "clr")) {
+			AEXT_INFO(dev->name, "wowl_pattern clr\n");
+			ret = wl_ext_iovar_setbuf(dev, "wowl_pattern", add,
+				sizeof(add), iovar_buf, sizeof(iovar_buf), NULL);
+			goto exit;
+		}
+		masksize = strlen(mask_tmp) -2;
+		AEXT_TRACE(dev->name, "0 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);
+
+		// add pading
+		if (masksize % 16)
+			pad_len = (16 - masksize % 16);
+		for (i=0; i<pad_len; i++)
+			strcat(mask_tmp, "0");
+		masksize += pad_len;
+		AEXT_TRACE(dev->name, "1 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);
+
+		// translate 0x00 to 0, others to 1
+		j = 0;
+		pmask_tmp = &mask_tmp[2];
+		for (i=0; i<masksize/2; i++) {
+			if(strncmp(&pmask_tmp[i*2], "00", 2))
+				pmask_tmp[j] = '1';
+			else
+				pmask_tmp[j] = '0';
+			j++;
+		}
+		pmask_tmp[j] = '\0';
+		masksize = masksize / 2;
+		AEXT_TRACE(dev->name, "2 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);
+
+		// reorder per 8bits
+		pmask_tmp = &mask_tmp[2];
+		for (i=0; i<masksize/8; i++) {
+			char c;
+			for (j=0; j<4; j++) {
+				c = pmask_tmp[i*8+j];
+				pmask_tmp[i*8+j] = pmask_tmp[(i+1)*8-j-1];
+				pmask_tmp[(i+1)*8-j-1] = c;
+			}
+		}
+		AEXT_TRACE(dev->name, "3 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);
+
+		// translate 8bits to 1byte
+		j = 0; v = 0;
+		pmask_tmp = &mask_tmp[2];
+		strcpy(mask, "0x");
+		for (i=0; i<masksize; i++) {
+			v = (v<<1) | (pmask_tmp[i]=='1');
+			if (((i+1)%4) == 0) {
+				if (v < 10)
+					mask[j+2] = v + '0';
+				else
+					mask[j+2] = (v-10) + 'a';
+				j++;
+				v = 0;
+			}
+		}
+		mask[j+2] = '\0';
+		masksize = j/2;
+		AEXT_TRACE(dev->name, "4 mask=%s, masksize=%d\n", mask, masksize);
+
+		patternsize = (strlen(pattern)-2)/2;
+		buf_len = sizeof(wl_wowl_pattern2_t) + patternsize + masksize;
+		wowl_pattern2 = kmalloc(buf_len, GFP_KERNEL);
+		if (wowl_pattern2 == NULL) {
+			AEXT_ERROR(dev->name, "Failed to allocate buffer of %d bytes\n", buf_len);
+			goto exit;
+		}
+		memset(wowl_pattern2, 0, sizeof(wl_wowl_pattern2_t));
+
+		strncpy(wowl_pattern2->cmd, add, sizeof(add));
+		wowl_pattern2->wowl_pattern.type = 0;
+		wowl_pattern2->wowl_pattern.offset = offset;
+		mask_and_pattern = (char*)wowl_pattern2 + sizeof(wl_wowl_pattern2_t);
+
+		wowl_pattern2->wowl_pattern.masksize = masksize;
+		ret = wl_pattern_atoh(mask, mask_and_pattern);
+		if (ret == -1) {
+			AEXT_ERROR(dev->name, "rejecting mask=%s\n", mask);
+			goto exit;
+		}
+
+		mask_and_pattern += wowl_pattern2->wowl_pattern.masksize;
+		wowl_pattern2->wowl_pattern.patternoffset = sizeof(wl_wowl_pattern_t) +
+			wowl_pattern2->wowl_pattern.masksize;
+
+		wowl_pattern2->wowl_pattern.patternsize = patternsize;
+		ret = wl_pattern_atoh(pattern, mask_and_pattern);
+		if (ret == -1) {
+			AEXT_ERROR(dev->name, "rejecting pattern=%s\n", pattern);
+			goto exit;
+		}
+
+		AEXT_INFO(dev->name, "%s %d %s %s\n", add, offset, mask, pattern);
+
+		ret = wl_ext_iovar_setbuf(dev, "wowl_pattern", (char *)wowl_pattern2,
+			buf_len, iovar_buf, sizeof(iovar_buf), NULL);
+	}
+	else {
+		ret = wl_ext_iovar_getbuf(dev, "wowl_pattern", NULL, 0,
+			iovar_buf, sizeof(iovar_buf), NULL);
+		if (!ret) {
+			list = (wl_wowl_pattern_list_t *)iovar_buf;
+			ret = snprintf(command, total_len, "#of patterns :%d\n", list->count);
+			ptr = (uint8 *)list->pattern;
+			for (i=0; i<list->count; i++) {
+				uint8 *pattern;
+				wowl_pattern = (wl_wowl_pattern_t *)ptr;
+				/* size arg is the REMAINING room, not the whole
+				 * buffer, to avoid writing past command[]
+				 */
+				ret += snprintf(command+ret, total_len-ret,
+					"Pattern %d:\n"
+					"ID :0x%x\n"
+					"Offset :%d\n"
+					"Masksize :%d\n"
+					"Mask :0x",
+					i+1, (uint32)wowl_pattern->id, wowl_pattern->offset,
+					wowl_pattern->masksize);
+				pattern = ((uint8 *)wowl_pattern + sizeof(wl_wowl_pattern_t));
+				for (j = 0; j < wowl_pattern->masksize; j++) {
+					ret += snprintf(command+ret, total_len-ret, "%02x", pattern[j]);
+				}
+				ret += snprintf(command+ret, total_len-ret, "\n");
+				ret += snprintf(command+ret, total_len-ret,
+					"PatternSize:%d\n"
+					"Pattern :0x",
+					wowl_pattern->patternsize);
+
+				pattern = ((uint8*)wowl_pattern + wowl_pattern->patternoffset);
+				for (j=0; j<wowl_pattern->patternsize; j++)
+					ret += snprintf(command+ret, total_len-ret, "%02x", pattern[j]);
+				ret += snprintf(command+ret, total_len-ret, "\n");
+				ptr += (wowl_pattern->masksize + wowl_pattern->patternsize +
+					sizeof(wl_wowl_pattern_t));
+			}
+
+			AEXT_INFO(dev->name, "%s\n", command);
+		}
+	}
+
+exit:
+	if (wowl_pattern2)
+		kfree(wowl_pattern2);
+	return ret;
+}
+
+/* "dhd_priv wl wowl_wakeind [clear]": with "clear", reset the firmware's
+ * wake-indication bits; with no args, read "wowl_wakeind" and format the
+ * wake reason bits into "command".
+ */
+static int
+wl_ext_wowl_wakeind(struct net_device *dev, char *data, char *command,
+	int total_len)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+	wl_wowl_wakeind_t *wake = NULL;
+	int ret = -1;
+	char clr[6]="\0";
+
+	if (data) {
+		sscanf(data, "%s", clr);
+		if (!strcmp(clr, "clear")) {
+			AEXT_INFO(dev->name, "wowl_wakeind clear\n");
+			ret = wl_ext_iovar_setbuf(dev, "wowl_wakeind", clr, sizeof(clr),
+				iovar_buf, sizeof(iovar_buf), NULL);
+		} else {
+			AEXT_ERROR(dev->name, "first arg should be clear\n");
+		}
+	} else {
+		ret = wl_ext_iovar_getbuf(dev, "wowl_wakeind", NULL, 0,
+			iovar_buf, sizeof(iovar_buf), NULL);
+		if (!ret) {
+			wake = (wl_wowl_wakeind_t *) iovar_buf;
+			ret = snprintf(command, total_len, "wakeind=0x%x", wake->ucode_wakeind);
+			/* size arg is the REMAINING room, not the whole buffer,
+			 * to avoid writing past command[]
+			 */
+			if (wake->ucode_wakeind & WL_WOWL_MAGIC)
+				ret += snprintf(command+ret, total_len-ret, " (MAGIC packet)");
+			if (wake->ucode_wakeind & WL_WOWL_NET)
+				ret += snprintf(command+ret, total_len-ret, " (Netpattern)");
+			if (wake->ucode_wakeind & WL_WOWL_DIS)
+				ret += snprintf(command+ret, total_len-ret, " (Disassoc/Deauth)");
+			if (wake->ucode_wakeind & WL_WOWL_BCN)
+				ret += snprintf(command+ret, total_len-ret, " (Loss of beacon)");
+			if (wake->ucode_wakeind & WL_WOWL_TCPKEEP_TIME)
+				ret += snprintf(command+ret, total_len-ret, " (TCPKA timeout)");
+			if (wake->ucode_wakeind & WL_WOWL_TCPKEEP_DATA)
+				ret += snprintf(command+ret, total_len-ret, " (TCPKA data)");
+			if (wake->ucode_wakeind & WL_WOWL_TCPFIN)
+				ret += snprintf(command+ret, total_len-ret, " (TCP FIN)");
+			AEXT_INFO(dev->name, "%s\n", command);
+		}
+	}
+
+	return ret;
+}
+#endif
+
typedef int (wl_ext_tpl_parse_t)(struct net_device *dev, char *data, char *command,
int total_len);
} wl_ext_iovar_tpl_t;
const wl_ext_iovar_tpl_t wl_ext_iovar_tpl_list[] = {
+ {WLC_GET_VAR, WLC_SET_VAR, "event_msg", wl_ext_event_msg},
+ {WLC_GET_VAR, WLC_SET_VAR, "gtk_key_info", wl_ext_gtk_key_info},
+ {WLC_GET_VAR, WLC_SET_VAR, "recal", wl_ext_recal},
{WLC_GET_VAR, WLC_SET_VAR, "rsdb_mode", wl_ext_rsdb_mode},
-#ifdef WLMESH
+ {WLC_GET_VAR, WLC_SET_VAR, "mkeep_alive", wl_ext_mkeep_alive},
+#ifdef PKT_FILTER_SUPPORT
+ {WLC_GET_VAR, WLC_SET_VAR, "pkt_filter_add", wl_ext_pkt_filter_add},
+ {WLC_GET_VAR, WLC_SET_VAR, "pkt_filter_delete", wl_ext_pkt_filter_delete},
+ {WLC_GET_VAR, WLC_SET_VAR, "pkt_filter_enable", wl_ext_pkt_filter_enable},
+#endif
+#if defined(WL_EXT_IAPSTA) && defined(WLMESH)
{WLC_GET_VAR, WLC_SET_VAR, "mesh_peer_status", wl_ext_mesh_peer_status},
#endif
- {WLC_GET_VAR, WLC_SET_VAR, "event_msg", wl_ext_event_msg},
#ifdef SENDPROB
+ {WLC_GET_VAR, WLC_SET_VAR, "send_probreq", wl_ext_send_probreq},
{WLC_GET_VAR, WLC_SET_VAR, "send_probresp", wl_ext_send_probresp},
+ {WLC_GET_VAR, WLC_SET_VAR, "recv_probreq", wl_ext_recv_probreq},
+ {WLC_GET_VAR, WLC_SET_VAR, "recv_probresp", wl_ext_recv_probresp},
+#endif
+#ifdef WL_EXT_TCPKA
+ {WLC_GET_VAR, WLC_SET_VAR, "tcpka_conn_add", wl_ext_tcpka_conn_add},
+ {WLC_GET_VAR, WLC_SET_VAR, "tcpka_conn_enable", wl_ext_tcpka_conn_enable},
+ {WLC_GET_VAR, WLC_SET_VAR, "tcpka_conn_sess_info", wl_ext_tcpka_conn_info},
+#endif
+#ifdef WL_EXT_WOWL
+ {WLC_GET_VAR, WLC_SET_VAR, "wowl_pattern", wl_ext_wowl_pattern},
+ {WLC_GET_VAR, WLC_SET_VAR, "wowl_wakeind", wl_ext_wowl_wakeind},
+#endif
+#ifdef IDHCP
+ {WLC_GET_VAR, WLC_SET_VAR, "dhcpc_dump", wl_ext_dhcpc_dump},
+ {WLC_GET_VAR, WLC_SET_VAR, "dhcpc_param", wl_ext_dhcpc_param},
#endif
- {WLC_GET_VAR, WLC_SET_VAR, "gtk_key_info", wl_ext_gtk_key_info},
};
/*
dhd_priv wl mpc
dhd_priv wl mpc 1
*/
-int
+static int
wl_ext_wl_iovar(struct net_device *dev, char *command, int total_len)
{
int cmd, val, ret = -1, i;
const wl_ext_iovar_tpl_t *tpl = wl_ext_iovar_tpl_list;
int tpl_count = ARRAY_SIZE(wl_ext_iovar_tpl_list);
- ANDROID_TRACE(("%s: cmd %s\n", __FUNCTION__, command));
+ AEXT_TRACE(dev->name, "cmd %s\n", command);
pick_tmp = command;
pch = bcmstrtok(&pick_tmp, " ", 0); // pick wl
} else {
if (cmd == WLC_SET_VAR) {
val = (int)simple_strtol(data, NULL, 0);
- ANDROID_TRACE(("%s: set %s %d\n", __FUNCTION__, name, val));
+ AEXT_INFO(dev->name, "set %s %d\n", name, val);
ret = wl_ext_iovar_setint(dev, name, val);
} else if (cmd == WLC_GET_VAR) {
- ANDROID_TRACE(("%s: get %s\n", __FUNCTION__, name));
+ AEXT_INFO(dev->name, "get %s\n", name);
ret = wl_ext_iovar_getint(dev, name, &val);
if (!ret) {
bytes_written = snprintf(command, total_len, "%d", val);
- ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__,
- command));
+ AEXT_INFO(dev->name, "command result is %s\n", command);
ret = bytes_written;
}
} else if (data) {
val = (int)simple_strtol(data, NULL, 0);
- ANDROID_TRACE(("%s: set %d %d\n", __FUNCTION__, cmd, val));
+ AEXT_INFO(dev->name, "set %d %d\n", cmd, val);
ret = wl_ext_ioctl(dev, cmd, &val, sizeof(val), TRUE);
} else {
- ANDROID_TRACE(("%s: get %d\n", __FUNCTION__, cmd));
+ AEXT_INFO(dev->name, "get %d\n", cmd);
ret = wl_ext_ioctl(dev, cmd, &val, sizeof(val), FALSE);
if (!ret) {
bytes_written = snprintf(command, total_len, "%d", val);
- ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__,
- command));
+ AEXT_INFO(dev->name, "command result is %s\n", command);
ret = bytes_written;
}
}
return ret;
}
-int wl_android_ext_priv_cmd(struct net_device *net, char *command,
+int
+wl_android_ext_priv_cmd(struct net_device *net, char *command,
int total_len, int *bytes_written)
{
int ret = 0;
else if (strnicmp(command, CMD_ROAM_TRIGGER, strlen(CMD_ROAM_TRIGGER)) == 0) {
*bytes_written = wl_ext_roam_trigger(net, command, total_len);
}
- else if (strnicmp(command, CMD_KEEP_ALIVE, strlen(CMD_KEEP_ALIVE)) == 0) {
- *bytes_written = wl_ext_keep_alive(net, command, total_len);
- }
else if (strnicmp(command, CMD_PM, strlen(CMD_PM)) == 0) {
*bytes_written = wl_ext_pm(net, command, total_len);
}
*bytes_written = net_os_set_suspend_bcn_li_dtim(net, bcn_li_dtim);
}
#ifdef WL_EXT_IAPSTA
- else if (strnicmp(command, CMD_IAPSTA_INIT, strlen(CMD_IAPSTA_INIT)) == 0) {
- *bytes_written = wl_ext_isam_init(net, command, total_len);
- }
- else if (strnicmp(command, CMD_ISAM_INIT, strlen(CMD_ISAM_INIT)) == 0) {
+ else if (strnicmp(command, CMD_IAPSTA_INIT, strlen(CMD_IAPSTA_INIT)) == 0 ||
+ strnicmp(command, CMD_ISAM_INIT, strlen(CMD_ISAM_INIT)) == 0) {
*bytes_written = wl_ext_isam_init(net, command, total_len);
}
- else if (strnicmp(command, CMD_IAPSTA_CONFIG, strlen(CMD_IAPSTA_CONFIG)) == 0) {
- *bytes_written = wl_ext_iapsta_config(net, command, total_len);
- }
- else if (strnicmp(command, CMD_ISAM_CONFIG, strlen(CMD_ISAM_CONFIG)) == 0) {
+ else if (strnicmp(command, CMD_IAPSTA_CONFIG, strlen(CMD_IAPSTA_CONFIG)) == 0 ||
+ strnicmp(command, CMD_ISAM_CONFIG, strlen(CMD_ISAM_CONFIG)) == 0) {
*bytes_written = wl_ext_iapsta_config(net, command, total_len);
}
- else if (strnicmp(command, CMD_IAPSTA_ENABLE, strlen(CMD_IAPSTA_ENABLE)) == 0) {
- *bytes_written = wl_ext_iapsta_enable(net, command, total_len);
- }
- else if (strnicmp(command, CMD_ISAM_ENABLE, strlen(CMD_ISAM_ENABLE)) == 0) {
+ else if (strnicmp(command, CMD_IAPSTA_ENABLE, strlen(CMD_IAPSTA_ENABLE)) == 0 ||
+ strnicmp(command, CMD_ISAM_ENABLE, strlen(CMD_ISAM_ENABLE)) == 0) {
*bytes_written = wl_ext_iapsta_enable(net, command, total_len);
}
- else if (strnicmp(command, CMD_IAPSTA_DISABLE, strlen(CMD_IAPSTA_DISABLE)) == 0) {
- *bytes_written = wl_ext_iapsta_disable(net, command, total_len);
- }
- else if (strnicmp(command, CMD_ISAM_DISABLE, strlen(CMD_ISAM_DISABLE)) == 0) {
+ else if (strnicmp(command, CMD_IAPSTA_DISABLE, strlen(CMD_IAPSTA_DISABLE)) == 0 ||
+ strnicmp(command, CMD_ISAM_DISABLE, strlen(CMD_ISAM_DISABLE)) == 0) {
*bytes_written = wl_ext_iapsta_disable(net, command, total_len);
}
else if (strnicmp(command, CMD_ISAM_STATUS, strlen(CMD_ISAM_STATUS)) == 0) {
*bytes_written = wl_ext_isam_status(net, command, total_len);
}
#endif
-#ifdef IDHCP
- else if (strnicmp(command, CMD_DHCPC_ENABLE, strlen(CMD_DHCPC_ENABLE)) == 0) {
- *bytes_written = wl_ext_dhcpc_enable(net, command, total_len);
- }
- else if (strnicmp(command, CMD_DHCPC_DUMP, strlen(CMD_DHCPC_DUMP)) == 0) {
- *bytes_written = wl_ext_dhcpc_dump(net, command, total_len);
- }
-#endif
#ifdef WL_CFG80211
else if (strnicmp(command, CMD_AUTOCHANNEL, strlen(CMD_AUTOCHANNEL)) == 0) {
*bytes_written = wl_cfg80211_autochannel(net, command, total_len);
}
#endif
-#ifdef WL_ESCAN
+#if defined(WL_WIRELESS_EXT) && defined(WL_ESCAN)
else if (strnicmp(command, CMD_AUTOCHANNEL, strlen(CMD_AUTOCHANNEL)) == 0) {
- *bytes_written = wl_escan_autochannel(net, command, total_len);
+ *bytes_written = wl_iw_autochannel(net, command, total_len);
}
#endif
else if (strnicmp(command, CMD_WL, strlen(CMD_WL)) == 0) {
sizeof(buf), NULL);
if (err) {
if (err != BCME_UNSUPPORTED) {
- ANDROID_ERROR(("bw_cap failed, %d\n", err));
+ AEXT_ERROR(net->name, "bw_cap failed, %d\n", err);
return err;
} else {
err = wl_ext_iovar_getint(net, "mimo_bw_cap", &bw_cap);
distance = 8;
else
distance = 16;
- ANDROID_INFO(("%s: bw=0x%x, distance=%d\n", __FUNCTION__, bw, distance));
+ AEXT_INFO(net->name, "bw=0x%x, distance=%d\n", bw, distance);
return distance;
}
ret = wl_ext_ioctl(net, WLC_GET_VALID_CHANNELS, &valid_chan_list,
sizeof(valid_chan_list), 0);
if (ret<0) {
- ANDROID_ERROR(("%s: get channels failed with %d\n", __FUNCTION__, ret));
+ AEXT_ERROR(net->name, "get channels failed with %d\n", ret);
return 0;
} else {
for (i = 0; i < dtoh32(list->count); i++) {
}
if (android_msg_level & ANDROID_INFO_LEVEL) {
- printf("%s: b_band: ", __FUNCTION__);
+ struct bcmstrbuf strbuf;
+ char *tmp_buf = NULL;
+ tmp_buf = kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
+ if (tmp_buf == NULL) {
+ AEXT_ERROR(net->name, "Failed to allocate buffer of %d bytes\n", WLC_IOCTL_SMLEN);
+ goto exit;
+ }
+ bcm_binit(&strbuf, tmp_buf, WLC_IOCTL_SMLEN);
for (j=0; j<ARRAYSIZE(b_band); j++)
- printf("%d, ", b_band[j]);
- printf("\n");
- printf("%s: a_band1: ", __FUNCTION__);
+ bcm_bprintf(&strbuf, "%d/%d, ", b_band[j], 1+j);
+ bcm_bprintf(&strbuf, "\n");
for (j=0; j<ARRAYSIZE(a_band1); j++)
- printf("%d, ", a_band1[j]);
- printf("\n");
- printf("%s: a_band4: ", __FUNCTION__);
+ bcm_bprintf(&strbuf, "%d/%d, ", a_band1[j], 36+j*4);
+ bcm_bprintf(&strbuf, "\n");
for (j=0; j<ARRAYSIZE(a_band4); j++)
- printf("%d, ", a_band4[j]);
- printf("\n");
- printf("%s: best_2g_ch=%d, best_5g_ch=%d\n", __FUNCTION__,
+ bcm_bprintf(&strbuf, "%d/%d, ", a_band4[j], 149+j*4);
+ bcm_bprintf(&strbuf, "\n");
+ bcm_bprintf(&strbuf, "best_2g_ch=%d, best_5g_ch=%d\n",
*best_2g_ch, *best_5g_ch);
+ AEXT_INFO(net->name, "\n%s", strbuf.origbuf);
+ if (tmp_buf) {
+ kfree(tmp_buf);
+ }
}
+exit:
return 0;
}
#endif
node = *rssi_head;
for (;node;) {
- ANDROID_INFO(("%s: Free %d with BSSID %pM\n",
- __FUNCTION__, i, &node->BSSID));
+ AEXT_INFO("wlan", "Free %d with BSSID %pM\n", i, &node->BSSID);
cur = node;
node = cur->next;
kfree(cur);
{
wl_rssi_cache_t *node, *prev, **rssi_head;
int i = -1, tmp = 0;
- struct timeval now;
+ struct osl_timespec now;
- do_gettimeofday(&now);
+ osl_do_gettimeofday(&now);
rssi_head = &rssi_cache_ctrl->m_cache_head;
node = *rssi_head;
tmp = 0;
prev->next = node->next;
}
- ANDROID_INFO(("%s: Del %d with BSSID %pM\n",
- __FUNCTION__, i, &node->BSSID));
+ AEXT_INFO("wlan", "Del %d with BSSID %pM\n", i, &node->BSSID);
kfree(node);
if (tmp == 1) {
node = *rssi_head;
tmp = 0;
prev->next = node->next;
}
- ANDROID_INFO(("%s: Del %d with BSSID %pM\n",
- __FUNCTION__, i, &node->BSSID));
+ AEXT_INFO("wlan", "Del %d with BSSID %pM\n", i, &node->BSSID);
kfree(node);
if (tmp == 1) {
node = *rssi_head;
int j, k=0;
int rssi, error=0;
struct ether_addr bssid;
- struct timeval now, timeout;
+ struct osl_timespec now, timeout;
scb_val_t scbval;
if (!g_wifi_on)
error = wldev_ioctl(net, WLC_GET_BSSID, &bssid, sizeof(bssid), 0);
if (error == BCME_NOTASSOCIATED) {
- ANDROID_INFO(("%s: Not Associated! res:%d\n", __FUNCTION__, error));
+ AEXT_INFO("wlan", "Not Associated! res:%d\n", error);
return 0;
}
if (error) {
- ANDROID_ERROR(("Could not get bssid (%d)\n", error));
+ AEXT_ERROR(net->name, "Could not get bssid (%d)\n", error);
}
error = wldev_get_rssi(net, &scbval);
if (error) {
- ANDROID_ERROR(("Could not get rssi (%d)\n", error));
+ AEXT_ERROR(net->name, "Could not get rssi (%d)\n", error);
return error;
}
rssi = scbval.val;
- do_gettimeofday(&now);
+ osl_do_gettimeofday(&now);
timeout.tv_sec = now.tv_sec + RSSICACHE_TIMEOUT;
if (timeout.tv_sec < now.tv_sec) {
/*
* Integer overflow - assume long enough timeout to be assumed
* to be infinite, i.e., the timeout would never happen.
*/
- ANDROID_TRACE(("%s: Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu",
- __FUNCTION__, RSSICACHE_TIMEOUT, now.tv_sec, timeout.tv_sec));
+ AEXT_TRACE(net->name,
+ "Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu\n",
+ RSSICACHE_TIMEOUT, now.tv_sec, timeout.tv_sec);
}
/* update RSSI */
prev = NULL;
for (;node;) {
if (!memcmp(&node->BSSID, &bssid, ETHER_ADDR_LEN)) {
- ANDROID_INFO(("%s: Update %d with BSSID %pM, RSSI=%d\n",
- __FUNCTION__, k, &bssid, rssi));
+ AEXT_INFO("wlan", "Update %d with BSSID %pM, RSSI=%d\n", k, &bssid, rssi);
for (j=0; j<RSSIAVG_LEN-1; j++)
node->RSSI[j] = node->RSSI[j+1];
node->RSSI[j] = rssi;
leaf = kmalloc(sizeof(wl_rssi_cache_t), GFP_KERNEL);
if (!leaf) {
- ANDROID_ERROR(("%s: Memory alloc failure %d\n",
- __FUNCTION__, (int)sizeof(wl_rssi_cache_t)));
+ AEXT_ERROR(net->name, "Memory alloc failure %d\n", (int)sizeof(wl_rssi_cache_t));
return 0;
}
- ANDROID_INFO(("%s: Add %d with cached BSSID %pM, RSSI=%3d in the leaf\n",
- __FUNCTION__, k, &bssid, rssi));
+ AEXT_INFO(net->name, "Add %d with cached BSSID %pM, RSSI=%3d in the leaf\n",
+ k, &bssid, rssi);
leaf->next = NULL;
leaf->dirty = 0;
wl_rssi_cache_t *node, *prev, *leaf, **rssi_head;
wl_bss_info_t *bi = NULL;
int i, j, k;
- struct timeval now, timeout;
+ struct osl_timespec now, timeout;
if (!ss_list->count)
return;
- do_gettimeofday(&now);
+ osl_do_gettimeofday(&now);
timeout.tv_sec = now.tv_sec + RSSICACHE_TIMEOUT;
if (timeout.tv_sec < now.tv_sec) {
/*
* Integer overflow - assume long enough timeout to be assumed
* to be infinite, i.e., the timeout would never happen.
*/
- ANDROID_TRACE(("%s: Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu",
- __FUNCTION__, RSSICACHE_TIMEOUT, now.tv_sec, timeout.tv_sec));
+ AEXT_TRACE("wlan",
+ "Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu\n",
+ RSSICACHE_TIMEOUT, now.tv_sec, timeout.tv_sec);
}
rssi_head = &rssi_cache_ctrl->m_cache_head;
bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : ss_list->bss_info;
for (;node;) {
if (!memcmp(&node->BSSID, &bi->BSSID, ETHER_ADDR_LEN)) {
- ANDROID_INFO(("%s: Update %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
- __FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID));
+ AEXT_INFO("wlan", "Update %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+ k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID);
for (j=0; j<RSSIAVG_LEN-1; j++)
node->RSSI[j] = node->RSSI[j+1];
node->RSSI[j] = dtoh16(bi->RSSI);
leaf = kmalloc(sizeof(wl_rssi_cache_t), GFP_KERNEL);
if (!leaf) {
- ANDROID_ERROR(("%s: Memory alloc failure %d\n",
- __FUNCTION__, (int)sizeof(wl_rssi_cache_t)));
+ AEXT_ERROR("wlan", "Memory alloc failure %d\n",
+ (int)sizeof(wl_rssi_cache_t));
return;
}
- ANDROID_INFO(("%s: Add %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\" in the leaf\n",
- __FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID));
+ AEXT_INFO("wlan", "Add %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\" in the leaf\n",
+ k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID);
leaf->next = NULL;
leaf->dirty = 0;
}
rssi = MIN(rssi, RSSI_MAXVAL);
if (rssi == RSSI_MINVAL) {
- ANDROID_ERROR(("%s: BSSID %pM does not in RSSI cache\n",
- __FUNCTION__, addr));
+ AEXT_ERROR("wlan", "BSSID %pM does not in RSSI cache\n", addr);
}
return (int16)rssi;
}
wl_bss_cache_t *node, *cur, **bss_head;
int i=0;
- ANDROID_TRACE(("%s called\n", __FUNCTION__));
+ AEXT_TRACE("wlan", "called\n");
bss_head = &bss_cache_ctrl->m_cache_head;
node = *bss_head;
for (;node;) {
- ANDROID_TRACE(("%s: Free %d with BSSID %pM\n",
- __FUNCTION__, i, &node->results.bss_info->BSSID));
+ AEXT_TRACE("wlan", "Free %d with BSSID %pM\n",
+ i, &node->results.bss_info->BSSID);
cur = node;
node = cur->next;
kfree(cur);
{
wl_bss_cache_t *node, *prev, **bss_head;
int i = -1, tmp = 0;
- struct timeval now;
+ struct osl_timespec now;
- do_gettimeofday(&now);
+ osl_do_gettimeofday(&now);
bss_head = &bss_cache_ctrl->m_cache_head;
node = *bss_head;
tmp = 0;
prev->next = node->next;
}
- ANDROID_TRACE(("%s: Del %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
- __FUNCTION__, i, &node->results.bss_info->BSSID,
- dtoh16(node->results.bss_info->RSSI), node->results.bss_info->SSID));
+ AEXT_TRACE("wlan", "Del %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+ i, &node->results.bss_info->BSSID,
+ dtoh16(node->results.bss_info->RSSI), node->results.bss_info->SSID);
kfree(node);
if (tmp == 1) {
node = *bss_head;
tmp = 0;
prev->next = node->next;
}
- ANDROID_TRACE(("%s: Del %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
- __FUNCTION__, i, &node->results.bss_info->BSSID,
- dtoh16(node->results.bss_info->RSSI), node->results.bss_info->SSID));
+ AEXT_TRACE("wlan", "Del %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+ i, &node->results.bss_info->BSSID,
+ dtoh16(node->results.bss_info->RSSI), node->results.bss_info->SSID);
kfree(node);
if (tmp == 1) {
node = *bss_head;
#else
rssi = dtoh16(node->results.bss_info->RSSI);
#endif
- ANDROID_TRACE(("%s: dump %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
- __FUNCTION__, k, &node->results.bss_info->BSSID, rssi,
- node->results.bss_info->SSID));
+ AEXT_TRACE("wlan", "dump %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+ k, &node->results.bss_info->BSSID, rssi, node->results.bss_info->SSID);
k++;
node = node->next;
}
#if defined(SORT_BSS_BY_RSSI)
int16 rssi, rssi_node;
#endif
- struct timeval now, timeout;
+ struct osl_timespec now, timeout;
if (!ss_list->count)
return;
- do_gettimeofday(&now);
+ osl_do_gettimeofday(&now);
timeout.tv_sec = now.tv_sec + BSSCACHE_TIMEOUT;
if (timeout.tv_sec < now.tv_sec) {
/*
* Integer overflow - assume long enough timeout to be assumed
* to be infinite, i.e., the timeout would never happen.
*/
- ANDROID_TRACE(("%s: Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu",
- __FUNCTION__, BSSCACHE_TIMEOUT, now.tv_sec, timeout.tv_sec));
+ AEXT_TRACE("wlan",
+ "Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu\n",
+ BSSCACHE_TIMEOUT, now.tv_sec, timeout.tv_sec);
}
bss_head = &bss_cache_ctrl->m_cache_head;
leaf = kmalloc(dtoh32(bi->length) + sizeof(wl_bss_cache_t), GFP_KERNEL);
if (!leaf) {
- ANDROID_ERROR(("%s: Memory alloc failure %d\n", __FUNCTION__,
- dtoh32(bi->length) + (int)sizeof(wl_bss_cache_t)));
+ AEXT_ERROR("wlan", "Memory alloc failure %d\n",
+ dtoh32(bi->length) + (int)sizeof(wl_bss_cache_t));
return;
}
if (node) {
kfree(node);
node = NULL;
- ANDROID_TRACE(("%s: Update %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
- __FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID));
+ AEXT_TRACE("wlan",
+ "Update %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+ k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID);
} else
- ANDROID_TRACE(("%s: Add %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
- __FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID));
+ AEXT_TRACE("wlan",
+ "Add %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+ k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID);
memcpy(leaf->results.bss_info, bi, dtoh32(bi->length));
leaf->next = NULL;
void
wl_release_bss_cache_ctrl(wl_bss_cache_ctrl_t *bss_cache_ctrl)
{
- ANDROID_TRACE(("%s:\n", __FUNCTION__));
+ AEXT_TRACE("wlan", "Enter\n");
wl_free_bss_cache(bss_cache_ctrl);
}
#endif
/*
* Linux cfg80211 driver
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfg80211.c 771488 2018-07-10 09:10:11Z $
+ * $Id: wl_cfg80211.c 826086 2019-06-18 19:23:59Z $
*/
/* */
#include <typedefs.h>
#include <linuxver.h>
-#include <osl.h>
#include <linux/kernel.h>
#include <bcmutils.h>
+#include <bcmstdlib_s.h>
#include <bcmwifi_channels.h>
#include <bcmendian.h>
#include <ethernet.h>
#include <eapol.h>
#endif /* WL_WPS_SYNC */
#include <802.11.h>
-#include <fils.h>
-#include <frag.h>
#include <bcmiov.h>
#include <linux/if_arp.h>
#include <asm/uaccess.h>
#include <wldev_common.h>
#include <wl_cfg80211.h>
#include <wl_cfgp2p.h>
+#include <wl_cfgscan.h>
#include <bcmdevs.h>
+#ifdef WL_FILS
+#include <fils.h>
+#include <frag.h>
+#endif /* WL_FILS */
#include <wl_android.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_linux.h>
+#include <dhd_linux_pktdump.h>
#include <dhd_debug.h>
#include <dhdioctl.h>
#include <wlioctl.h>
#define BRCM_SAE_VENDOR_EVENT_BUF_LEN 500
+#ifdef DNGL_AXI_ERROR_LOGGING
+#include <bcmtlv.h>
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+#if defined(CONFIG_WLAN_BEYONDX) || defined(CONFIG_SEC_5GMODEL)
+#include <linux/dev_ril_bridge.h>
+#include <linux/notifier.h>
+#endif /* CONFIG_WLAN_BEYONDX || defined(CONFIG_SEC_5GMODEL) */
+
#ifdef BCMWAPI_WPI
/* these items should eventually go into wireless.h of the linux system header dir */
#ifndef IW_ENCODE_ALG_SM4
#define WLAN_EID_SSID 0
#define CH_MIN_5G_CHANNEL 34
-#define CH_MIN_2G_CHANNEL 1
-#define ACTIVE_SCAN 1
-#define PASSIVE_SCAN 0
-
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
-_Pragma("GCC diagnostic push") \
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
-(entry) = list_first_entry((ptr), type, member); \
-_Pragma("GCC diagnostic pop") \
-
-#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
-_Pragma("GCC diagnostic push") \
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
-entry = container_of((ptr), type, member); \
-_Pragma("GCC diagnostic pop") \
-
-#else
-#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
-(entry) = list_first_entry((ptr), type, member); \
-
-#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
-entry = container_of((ptr), type, member); \
-
-#endif /* STRICT_GCC_WARNINGS */
#ifdef WL_RELMCAST
enum rmc_event_type {
};
#endif /* WL_RELMCAST */
-#ifdef WL_LASTEVT
-typedef struct wl_last_event {
- uint32 current_time; /* current tyime */
- uint32 timestamp; /* event timestamp */
- wl_event_msg_t event; /* Encapsulated event */
-} wl_last_event_t;
-#endif /* WL_LASTEVT */
-
/* This is to override regulatory domains defined in cfg80211 module (reg.c)
* By default world regulatory domain defined in reg.c puts the flags NL80211_RRF_PASSIVE_SCAN
* and NL80211_RRF_NO_IBSS for 5GHz channels (for 36..48 and 149..165).
{
.num_different_channels = NUM_DIFF_CHANNELS,
/*
- * max_interfaces = 4
- * The max no of interfaces will be used in dual p2p case.
- * {STA, P2P Device, P2P Group 1, P2P Group 2}. Though we
- * will not be using the STA functionality in this case, it
- * will remain registered as it is the primary interface.
+ * At Max 5 network interfaces can be registered concurrently
*/
- .max_interfaces = 4,
+ .max_interfaces = IFACE_MAX_CNT,
.limits = common_if_limits,
.n_limits = ARRAY_SIZE(common_if_limits),
},
#define PM_BLOCK 1
#define PM_ENABLE 0
-#ifdef BCMCCX
-#ifndef WLAN_AKM_SUITE_CCKM
-#define WLAN_AKM_SUITE_CCKM 0x00409600
-#endif // endif
-#define DOT11_LEAP_AUTH 0x80 /* LEAP auth frame paylod constants */
-#endif /* BCMCCX */
-
-#define WL_AKM_SUITE_SHA256_1X 0x000FAC05
-#define WL_AKM_SUITE_SHA256_PSK 0x000FAC06
+/* GCMP crypto supported above kernel v4.0 */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0))
+#define WL_GCMP
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0) */
#ifndef IBSS_COALESCE_ALLOWED
#define IBSS_COALESCE_ALLOWED IBSS_COALESCE_DEFAULT
#define CUSTOM_RETRY_MASK 0xff000000 /* Mask for retry counter of custom dwell time */
#define LONG_LISTEN_TIME 2000
+#ifdef RTT_SUPPORT
+static s32 wl_cfg80211_rtt_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* RTT_SUPPORT */
+#ifdef WL_CHAN_UTIL
+static s32 wl_cfg80211_bssload_report_event_handler(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+static s32 wl_cfg80211_start_bssload_report(struct net_device *ndev);
+#endif /* WL_CHAN_UTIL */
+
#ifdef SUPPORT_AP_RADIO_PWRSAVE
#define RADIO_PWRSAVE_PPS 10
#define RADIO_PWRSAVE_QUIET_TIME 10
#define RADIO_PWRSAVE_STAS_ASSOC_CHECK 0
#define RADIO_PWRSAVE_LEVEL_MIN 1
-#define RADIO_PWRSAVE_LEVEL_MAX 5
+#define RADIO_PWRSAVE_LEVEL_MAX 9
#define RADIO_PWRSAVE_PPS_MIN 1
#define RADIO_PWRSAVE_QUIETTIME_MIN 1
#define RADIO_PWRSAVE_ASSOCCHECK_MIN 0
((RADIO_PWRSAVE_MAJOR_VER << RADIO_PWRSAVE_MAJOR_VER_SHIFT)| RADIO_PWRSAVE_MINOR_VER)
#endif /* SUPPORT_AP_RADIO_PWRSAVE */
-#define MIN_P2P_IE_LEN 8 /* p2p_ie->OUI(3) + p2p_ie->oui_type(1) +
- * Attribute ID(1) + Length(2) + 1(Mininum length:1)
- */
-#define MAX_P2P_IE_LEN 251 /* Up To 251 */
+/* SoftAP related parameters */
+#define DEFAULT_2G_SOFTAP_CHANNEL 1
+#define DEFAULT_5G_SOFTAP_CHANNEL 149
+#define WL_MAX_NUM_CSA_COUNTERS 255
-#define MAX_VNDR_OUI_STR_LEN 256
-#define VNDR_OUI_STR_LEN 10
+#define MAX_VNDR_OUI_STR_LEN 256u
+#define VNDR_OUI_STR_LEN 10u
+#define DOT11_DISCONNECT_RC 2u
static const uchar *exclude_vndr_oui_list[] = {
"\x00\x50\xf2", /* Microsoft */
"\x00\x00\xf0", /* Samsung Elec */
struct list_head list;
} wl_vndr_oui_entry_t;
+#if defined(WL_DISABLE_HE_SOFTAP) || defined(WL_DISABLE_HE_P2P) || \
+ defined(SUPPORT_AP_BWCTRL)
+#define WL_HE_FEATURES_HE_AP 0x8
+#define WL_HE_FEATURES_HE_P2P 0x20
+#endif // endif
+
static int wl_vndr_ies_get_vendor_oui(struct bcm_cfg80211 *cfg,
struct net_device *ndev, char *vndr_oui, u32 vndr_oui_len);
static void wl_vndr_ies_clear_vendor_oui_list(struct bcm_cfg80211 *cfg);
+static s32 wl_cfg80211_parse_vndr_ies(const u8 *parse, u32 len,
+ struct parsed_vndr_ies *vndr_ies);
#if defined(WL_FW_OCE_AP_SELECT)
static bool
static s32 wl_frame_get_mgmt(struct bcm_cfg80211 *cfg, u16 fc,
const struct ether_addr *da, const struct ether_addr *sa,
const struct ether_addr *bssid, u8 **pheader, u32 *body_len, u8 *pbody);
-static s32 __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request,
- struct cfg80211_ssid *this_ssid);
-#if defined(WL_CFG80211_P2P_DEV_IF)
-static s32
-wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request);
-#else
-static s32
-wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request);
-#endif /* WL_CFG80211_P2P_DEV_IF */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
-static void wl_cfg80211_abort_scan(struct wiphy *wiphy, struct wireless_dev *wdev);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed);
#ifdef WLAIBSS_MCHAN
static bcm_struct_cfgdev* bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name);
struct cfg80211_pmksa *pmksa);
static s32 wl_cfg80211_flush_pmksa(struct wiphy *wiphy,
struct net_device *dev);
-void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
-static void wl_cfg80211_cancel_scan(struct bcm_cfg80211 *cfg);
-static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
- struct net_device *ndev, bool aborted, bool fw_abort);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
#if (defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)) || (LINUX_VERSION_CODE < \
KERNEL_VERSION(3, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
u8 *peer, enum nl80211_tdls_operation oper);
#endif // endif
#endif /* LINUX_VERSION > KERNEL_VERSION(3,2,0) || WL_COMPAT_WIRELESS */
-#ifdef WL_SCHED_SCAN
-static int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
- , u64 reqid
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */
-#endif /* WL_SCHED_SCAN */
static s32 wl_cfg80211_set_ap_role(struct bcm_cfg80211 *cfg, struct net_device *dev);
struct wireless_dev *
chanspec_t wl_chspec_driver_to_host(chanspec_t chanspec);
chanspec_t wl_chspec_host_to_driver(chanspec_t chanspec);
static void wl_cfg80211_wait_for_disconnection(struct bcm_cfg80211 *cfg, struct net_device *dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+int wl_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_csa_settings *params);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0))
+static int wl_cfg80211_set_pmk(struct wiphy *wiphy, struct net_device *dev,
+ const struct cfg80211_pmk_conf *conf);
+static int wl_cfg80211_del_pmk(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *aa);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) */
/*
* event & event Q handlers for cfg80211 interfaces
bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
static s32 wl_notify_roaming_status(struct bcm_cfg80211 *cfg,
bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
-static s32 wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data);
static s32 wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e, void *data, bool completed);
static s32 wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
static s32 wl_notify_bt_wifi_handover_req(struct bcm_cfg80211 *cfg,
bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
#endif /* BT_WIFI_HANDOVER */
-#ifdef WL_SCHED_SCAN
-static s32
-wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- const wl_event_msg_t *e, void *data);
-#endif /* WL_SCHED_SCAN */
-#ifdef PNO_SUPPORT
-static s32 wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data);
-#endif /* PNO_SUPPORT */
#ifdef GSCAN_SUPPORT
-static s32 wl_notify_gscan_event(struct bcm_cfg80211 *wl, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data);
static s32 wl_handle_roam_exp_event(struct bcm_cfg80211 *wl, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data);
#endif /* GSCAN_SUPPORT */
const wl_event_msg_t *e, void *data);
#endif /* WL_MBO */
-#ifdef WLTDLS
-static s32 wl_cfg80211_tdls_config(struct bcm_cfg80211 *cfg,
- enum wl_tdls_config state, bool tdls_mode);
-static s32 wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data);
-#endif /* WLTDLS */
/*
* register/deregister parent device
*/
*/
static s32 wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e, const void *data, s32 item);
-static void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item);
static void wl_init_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev);
/*
static s32 wl_set_set_wapi_ie(struct net_device *dev,
struct cfg80211_connect_params *sme);
#endif // endif
+#ifdef WL_GCMP
+static s32 wl_set_wsec_info_algos(struct net_device *dev, uint32 algos, uint32 mask);
+#endif /* WL_GCMP */
static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev);
static s32 wl_ch_to_chanspec(struct net_device *dev, int ch,
struct wl_join_params *join_params, size_t *join_params_size);
static void wl_rst_ie(struct bcm_cfg80211 *cfg);
static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v);
static void wl_update_hidden_ap_ie(wl_bss_info_t *bi, const u8 *ie_stream, u32 *ie_size,
- bool roam);
+ bool update_ssid);
static s32 wl_mrg_ie(struct bcm_cfg80211 *cfg, u8 *ie_stream, u16 ie_size);
static s32 wl_cp_ie(struct bcm_cfg80211 *cfg, u8 *dst, u16 dst_size);
static u32 wl_get_ielen(struct bcm_cfg80211 *cfg);
static int wl_cfg80211_get_rsn_capa(const bcm_tlv_t *wpa2ie, const u8** rsn_cap);
#endif // endif
-#ifdef WL11U
-static bcm_tlv_t *
-wl_cfg80211_find_interworking_ie(const u8 *parse, u32 len);
-static s32
-wl_cfg80211_clear_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx);
-static s32
-wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
- uint8 ie_id, uint8 *data, uint8 data_len);
-#endif /* WL11U */
-
static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *dev, dhd_pub_t *data);
static void wl_free_wdev(struct bcm_cfg80211 *cfg);
-static s32 wl_inform_bss(struct bcm_cfg80211 *cfg);
-static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, wl_bss_info_t *bi, bool roam);
-static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam);
+static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, wl_bss_info_t *bi, bool update_ssid);
+static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool update_ssid);
static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy);
s32 wl_cfg80211_channel_to_freq(u32 channel);
-
-#ifdef WL_IRQSET
-static void wl_irq_set_work_handler(struct work_struct *work);
-#define IRQ_SET_DURATION 23000
-#endif /* WL_IRQSET */
-
static void wl_cfg80211_work_handler(struct work_struct *work);
static s32 wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
u8 key_idx, const u8 *mac_addr,
*/
static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg);
static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg);
-
-#ifdef WL_LASTEVT
-static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e, void *data);
-#define WL_IS_LINKDOWN(cfg, e, data) wl_is_linkdown(cfg, e, data)
-#else
static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e);
-#define WL_IS_LINKDOWN(cfg, e, data) wl_is_linkdown(cfg, e)
-#endif /* WL_LASTEVT */
static bool wl_is_linkup(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e,
struct net_device *ndev);
static s32 wl_setup_debugfs(struct bcm_cfg80211 *cfg);
static s32 wl_free_debugfs(struct bcm_cfg80211 *cfg);
#endif // endif
-static wl_scan_params_t *wl_cfg80211_scan_alloc_params(struct bcm_cfg80211 *cfg,
- int channel, int nprobes, int *out_params_size);
static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role);
#ifdef WL_CFG80211_ACL
const struct ether_addr *bssid);
static s32 __wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify);
-static s32 cfg80211_to_wl_iftype(uint16 type, uint16 *role, uint16 *mode);
-static s32 wl_check_vif_support(struct bcm_cfg80211 *cfg, wl_iftype_t wl_iftype);
-bool wl_is_wps_enrollee_active(struct net_device *ndev, const u8 *ie_ptr, u16 len);
-
#ifdef WL_WPS_SYNC
static void wl_init_wps_reauth_sm(struct bcm_cfg80211 *cfg);
static void wl_deinit_wps_reauth_sm(struct bcm_cfg80211 *cfg);
static s32 wl_wps_session_update(struct net_device *ndev, u16 state, const u8 *peer_mac);
static void wl_wps_handle_ifdel(struct net_device *ndev);
#endif /* WL_WPS_SYNC */
-const u8 *wl_find_attribute(const u8 *buf, u16 len, u16 element_id);
#if defined(WL_FW_OCE_AP_SELECT)
bool static wl_cfg80211_is_oce_ap(struct wiphy *wiphy, const u8 *bssid_hint);
#endif /* WL_FW_OCE_AP_SELECT */
+#ifdef WL_BCNRECV
+static s32 wl_bcnrecv_aborted_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* WL_BCNRECV */
+
+#ifdef WL_CAC_TS
+static s32 wl_cfg80211_cac_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* WL_CAC_TS */
+
+#if defined(WL_MBO) || defined(WL_OCE)
+static s32 wl_bssid_prune_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* WL_MBO || WL_OCE */
+
static int bw2cap[] = { 0, 0, WLC_BW_CAP_20MHZ, WLC_BW_CAP_40MHZ, WLC_BW_CAP_80MHZ,
WLC_BW_CAP_160MHZ, WLC_BW_CAP_160MHZ };
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)) || (defined(CONFIG_ARCH_MSM) && \
- defined(CFG80211_DISCONNECTED_V2))
-#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \
- cfg80211_disconnected(dev, reason, ie, len, loc_gen, gfp);
-#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0))
-#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \
- BCM_REFERENCE(loc_gen); \
- cfg80211_disconnected(dev, reason, ie, len, gfp);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)) */
-
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) || (defined(CONFIG_ARCH_MSM) && \
defined(CFG80211_DISCONNECTED_V2))
#define CFG80211_GET_BSS(wiphy, channel, bssid, ssid, ssid_len) \
cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, \
- IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
#else
#define CFG80211_GET_BSS(wiphy, channel, bssid, ssid, ssid_len) \
cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, \
#ifdef PROP_TXSTATUS_VSDB
extern int disable_proptx;
#endif /* PROP_TXSTATUS_VSDB */
-static int wl_cfg80211_check_in4way(struct bcm_cfg80211 *cfg,
- struct net_device *dev, uint action, enum wl_ext_status status, void *context);
-
-extern int passive_channel_skip;
static s32
wl_ap_start_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
static s32
wl_csa_complete_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data);
+#ifdef SUPPORT_AP_BWCTRL
+static void
+wl_update_apchan_bwcap(struct bcm_cfg80211 *cfg, struct net_device *ndev, chanspec_t chanspec);
+static void
+wl_restore_ap_bw(struct bcm_cfg80211 *cfg);
+#endif /* SUPPORT_AP_BWCTRL */
+
#if ((LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0)) && (LINUX_VERSION_CODE <= (3, 7, \
0)))
struct chan_info {
* are supporting MFP. So advertise only when MFP support is enabled.
*/
WLAN_CIPHER_SUITE_AES_CMAC,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
+ WLAN_CIPHER_SUITE_BIP_GMAC_256,
+ WLAN_CIPHER_SUITE_BIP_GMAC_128,
+ WLAN_CIPHER_SUITE_BIP_CMAC_256,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) */
#endif /* MFP */
+
#ifdef BCMWAPI_WPI
WLAN_CIPHER_SUITE_SMS4,
#endif // endif
#if defined(WLAN_CIPHER_SUITE_PMK)
WLAN_CIPHER_SUITE_PMK,
#endif /* WLAN_CIPHER_SUITE_PMK */
+#ifdef WL_GCMP
+ WLAN_CIPHER_SUITE_GCMP,
+ WLAN_CIPHER_SUITE_GCMP_256,
+ WLAN_CIPHER_SUITE_BIP_GMAC_128,
+ WLAN_CIPHER_SUITE_BIP_GMAC_256,
+#endif /* WL_GCMP */
};
#ifdef WL_SUPPORT_ACS
int ioctl_version;
#ifdef DEBUGFS_CFG80211
#define SUBLOGLEVEL 20
-#define SUBLOGLEVELZ SUBLOGLEVEL + 1
+#define SUBLOGLEVELZ ((SUBLOGLEVEL) + (1))
static const struct {
u32 log_level;
char *sublogname;
};
#endif // endif
-#define BUFSZ 5
+/* Maps one cfg80211 cipher suite ID to the driver's wsec enable flag
+ * (wsec_algo) and its firmware crypto algorithm code (wsec_key_algo).
+ */
+typedef struct rsn_cipher_algo_entry {
+	u32 cipher_suite;
+	u32 wsec_algo;
+	u32 wsec_key_algo;
+} rsn_cipher_algo_entry_t;
+
+/* Lookup table from WLAN_CIPHER_SUITE_* values to {wsec, crypto algo}.
+ * Entries are included conditionally (BCMWAPI_WPI, WL_GCMP, kernel >= 4.0),
+ * so the table contents depend on the build configuration.
+ */
+static const rsn_cipher_algo_entry_t rsn_cipher_algo_lookup_tbl[] = {
+	{WLAN_CIPHER_SUITE_WEP40, WEP_ENABLED, CRYPTO_ALGO_WEP1},
+	{WLAN_CIPHER_SUITE_WEP104, WEP_ENABLED, CRYPTO_ALGO_WEP128},
+	{WLAN_CIPHER_SUITE_TKIP, TKIP_ENABLED, CRYPTO_ALGO_TKIP},
+	{WLAN_CIPHER_SUITE_CCMP, AES_ENABLED, CRYPTO_ALGO_AES_CCM},
+	{WLAN_CIPHER_SUITE_AES_CMAC, AES_ENABLED, CRYPTO_ALGO_BIP},
+#ifdef BCMWAPI_WPI
+	{WLAN_CIPHER_SUITE_SMS4, SMS4_ENABLED, CRYPTO_ALGO_SMS4},
+#endif /* BCMWAPI_WPI */
+#ifdef WL_GCMP
+	{WLAN_CIPHER_SUITE_GCMP, AES_ENABLED, CRYPTO_ALGO_AES_GCM},
+	{WLAN_CIPHER_SUITE_GCMP_256, AES_ENABLED, CRYPTO_ALGO_AES_GCM256},
+	{WLAN_CIPHER_SUITE_BIP_GMAC_128, AES_ENABLED, CRYPTO_ALGO_BIP_GMAC},
+	{WLAN_CIPHER_SUITE_BIP_GMAC_256, AES_ENABLED, CRYPTO_ALGO_BIP_GMAC256},
+#endif /* WL_GCMP */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
+	{WLAN_CIPHER_SUITE_BIP_CMAC_256, AES_ENABLED, CRYPTO_ALGO_BIP_CMAC256},
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) */
+};
+
+/* Maps one cfg80211 AKM suite ID to the driver's wpa_auth bitmask value. */
+typedef struct rsn_akm_wpa_auth_entry {
+	u32 akm_suite;
+	u32 wpa_auth;
+} rsn_akm_wpa_auth_entry_t;
+
+/* Lookup table from WLAN_AKM_SUITE_* / WL_AKM_SUITE_* values to wpa_auth.
+ * FT variants OR in WPA2_AUTH_FT; OWE/WAPI/SAE entries are build-conditional.
+ */
+static const rsn_akm_wpa_auth_entry_t rsn_akm_wpa_auth_lookup_tbl[] = {
+#ifdef WL_OWE
+	{WLAN_AKM_SUITE_OWE, WPA3_AUTH_OWE},
+#endif /* WL_OWE */
+	{WLAN_AKM_SUITE_8021X, WPA2_AUTH_UNSPECIFIED},
+	{WL_AKM_SUITE_SHA256_1X, WPA2_AUTH_1X_SHA256},
+	{WL_AKM_SUITE_SHA256_PSK, WPA2_AUTH_PSK_SHA256},
+	{WLAN_AKM_SUITE_PSK, WPA2_AUTH_PSK},
+	{WLAN_AKM_SUITE_FT_8021X, WPA2_AUTH_UNSPECIFIED | WPA2_AUTH_FT},
+	{WLAN_AKM_SUITE_FT_PSK, WPA2_AUTH_PSK | WPA2_AUTH_FT},
+	{WLAN_AKM_SUITE_FILS_SHA256, WPA2_AUTH_FILS_SHA256},
+	{WLAN_AKM_SUITE_FILS_SHA384, WPA2_AUTH_FILS_SHA384},
+	{WLAN_AKM_SUITE_8021X_SUITE_B, WPA3_AUTH_1X_SUITE_B_SHA256},
+	{WLAN_AKM_SUITE_8021X_SUITE_B_192, WPA3_AUTH_1X_SUITE_B_SHA384},
+#ifdef BCMWAPI_WPI
+	{WLAN_AKM_SUITE_WAPI_CERT, WAPI_AUTH_UNSPECIFIED},
+	{WLAN_AKM_SUITE_WAPI_PSK, WAPI_AUTH_PSK},
+#endif /* BCMWAPI_WPI */
+#ifdef WL_SAE
+	{WLAN_AKM_SUITE_SAE, WPA3_AUTH_SAE_PSK},
+#endif /* WL_SAE */
+	{WLAN_AKM_SUITE_FT_8021X_SHA384, WPA3_AUTH_1X_SUITE_B_SHA384 | WPA2_AUTH_FT}
+};
+
+#define BUFSZ 8
#define BUFSZN BUFSZ + 1
#define _S(x) #x
return lchspec;
}
+/* Return the cfg->hal_started flag (presumably set when the WiFi HAL has
+ * been started -- confirm against the code that writes hal_started).
+ */
+bool wl_cfg80211_is_hal_started(struct bcm_cfg80211 *cfg)
+{
+	return cfg->hal_started;
+}
+
/* given a chanspec value, do the endian and chanspec version conversion to
* a chanspec_t value
* Returns INVCHANSPEC on error
wl_ch_host_to_driver(u16 channel)
{
chanspec_t chanspec;
+ chanspec_band_t band;
- chanspec = channel & WL_CHANSPEC_CHAN_MASK;
-
- if (channel <= CH_MAX_2G_CHANNEL)
- chanspec |= WL_CHANSPEC_BAND_2G;
- else
- chanspec |= WL_CHANSPEC_BAND_5G;
+ band = WL_CHANNEL_BAND(channel);
- chanspec |= WL_CHANSPEC_BW_20;
-
- chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+ chanspec = wf_create_20MHz_chspec(channel, band);
+ if (chanspec == INVCHANSPEC) {
+ return chanspec;
+ }
return wl_chspec_host_to_driver(chanspec);
}
char *c = NULL;
int count = 0;
- memset(n, 0, ETHER_ADDR_LEN);
+ bzero(n, ETHER_ADDR_LEN);
for (;;) {
n->octet[count++] = (uint8)simple_strtoul(a, &c, 16);
if (!*c++ || count == ETHER_ADDR_LEN)
u16 channel = WL_P2P_TEMP_CHAN;
char *buf;
- memset(&bssid, 0, sizeof(bssid));
+ bzero(&bssid, sizeof(bssid));
if ((err = wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, sizeof(bssid)))) {
/* STA interface is not associated. So start the new interface on a temp
		 * channel. Later the proper channel will be applied by the above framework
{
u16 chspec;
s16 cfg_type;
- u32 timeout;
+ long timeout;
s32 err;
u16 p2p_iftype;
int dhd_mode;
WL_ERR(("p2p not initialized\n"));
return NULL;
}
+
#if defined(WL_CFG80211_P2P_DEV_IF)
if (wl_iftype == WL_IF_TYPE_P2P_DISC) {
/* Handle Dedicated P2P discovery Interface */
- cfg->down_disc_if = FALSE;
return wl_cfgp2p_add_p2p_disc_if(cfg);
}
#endif /* WL_CFG80211_P2P_DEV_IF */
wl_cfgp2p_init_discovery(cfg);
}
- strncpy(cfg->p2p->vir_ifname, name, IFNAMSIZ - 1);
- cfg->p2p->vir_ifname[IFNAMSIZ - 1] = '\0';
+ strlcpy(cfg->p2p->vir_ifname, name, sizeof(cfg->p2p->vir_ifname));
/* In concurrency case, STA may be already associated in a particular channel.
* so retrieve the current channel of primary interface and then start the virtual
* interface on that.
* bss: "cfg p2p_ifadd"
*/
wl_set_p2p_status(cfg, IF_ADDING);
- memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+ bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
cfg_type = wl_cfgp2p_get_conn_idx(cfg);
if (cfg_type == BCME_ERROR) {
wl_clr_p2p_status(cfg, IF_ADDING);
return NULL;
}
-static s32
-wl_check_vif_support(struct bcm_cfg80211 *cfg, wl_iftype_t wl_iftype)
+bool
+wl_cfg80211_check_vif_in_use(struct net_device *ndev)
{
- s32 ret = BCME_OK;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ bool nan_enabled = FALSE;
#ifdef WL_NAN
- if ((cfg->nan_enable) && (wl_iftype != WL_IF_TYPE_NAN)) {
- ret = wl_cfgnan_disable(cfg, NAN_CONCURRENCY_CONFLICT);
- if (ret != BCME_OK) {
- WL_ERR(("failed to disable nan, error[%d]\n", ret));
- goto exit;
- }
- }
+ nan_enabled = cfg->nan_enable;
#endif /* WL_NAN */
- /* If P2PGroup/Softap is enabled, another VIF
- * iface create request can't be supported
- */
- if ((wl_cfgp2p_vif_created(cfg)) ||
+
+ if (nan_enabled || (wl_cfgp2p_vif_created(cfg)) ||
(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) {
- WL_ERR(("Additional vif can't be supported [%d]\n",
- dhd->op_mode));
- ret = -ENOTSUPP;
- goto exit;
+ WL_MEM(("%s: Virtual interfaces in use. NAN %d P2P %d softAP %d\n",
+ __FUNCTION__, nan_enabled, wl_cfgp2p_vif_created(cfg),
+ (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)));
+ return TRUE;
}
-exit:
- return ret;
+
+ return FALSE;
}
void
#endif /* WL_BCNRECV */
wl_cfg80211_scan_abort(cfg);
wl_wlfc_enable(cfg, true);
-
#ifdef WLTDLS
- if (wl_iftype == WL_IF_TYPE_NAN) {
- /* disable TDLS on NAN IF create */
- wl_cfg80211_tdls_config(cfg, TDLS_STATE_NDI_CREATE, false);
- }
- else {
- /* disable TDLS if number of connected interfaces is >= 1 */
- wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_CREATE, false);
- }
+ /* disable TDLS if number of connected interfaces is >= 1 */
+ wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_CREATE, false);
#endif /* WLTDLS */
break;
case WL_IF_DELETE_REQ:
#endif /* WPS_SYNC */
if (wl_get_drv_status(cfg, SCANNING, ndev)) {
/* Send completion for any pending scans */
- wl_notify_escan_complete(cfg, ndev, true, true);
+ wl_cfg80211_cancel_scan(cfg);
}
#ifdef CUSTOM_SET_CPUCORE
}
if (wl_mode == WL_MODE_AP) {
/* Common code for AP/GO */
- wl_set_drv_status(cfg, CONNECTED, ndev);
}
break;
case WL_IF_DELETE_DONE:
s16 err;
s32 cfg_type;
struct net_device *ndev;
- s32 timeout;
+ long timeout;
if (unlikely(!wl_get_drv_status(cfg, READY, bcmcfg_to_prmry_ndev(cfg)))) {
WL_INFORM_MEM(("device is not ready\n"));
return BCME_NOTFOUND;
}
-
#ifdef WL_CFG80211_P2P_DEV_IF
if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
/* Handle dedicated P2P discovery interface. */
}
}
- memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+ bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
wl_set_p2p_status(cfg, IF_DELETING);
DNGL_FUNC(dhd_cfg80211_clean_p2p_info, (cfg));
* Firmware would be cleaned up via WiFi reset done by the
* user space from hang event context (for android only).
*/
- memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ);
+ bzero(cfg->p2p->vir_ifname, IFNAMSIZ);
wl_to_p2p_bss_bssidx(cfg, cfg_type) = -1;
wl_to_p2p_bss_ndev(cfg, cfg_type) = NULL;
wl_clr_drv_status(cfg, CONNECTED, wl_to_p2p_bss_ndev(cfg, cfg_type));
return err;
}
-static struct wireless_dev *
-wl_cfg80211_add_monitor_if(struct wiphy *wiphy, const char *name)
+#ifdef WL_IFACE_MGMT_CONF
+#ifdef WL_IFACE_MGMT
+static s32
+wl_cfg80211_is_policy_config_allowed(struct bcm_cfg80211 *cfg)
{
-#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
- WL_ERR(("wl_cfg80211_add_monitor_if: No more support monitor interface\n"));
- return ERR_PTR(-EOPNOTSUPP);
-#else
- struct wireless_dev *wdev;
- struct net_device* ndev = NULL;
+ s32 ret = BCME_OK;
+ wl_iftype_t active_sec_iface = WL_IFACE_NOT_PRESENT;
+ bool p2p_disc_on = false;
+ bool sta_assoc_state = false;
- dhd_add_monitor(name, &ndev);
+ mutex_lock(&cfg->if_sync);
- wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
- if (!wdev) {
- WL_ERR(("wireless_dev alloc failed! \n"));
- goto fail;
+ sta_assoc_state = (wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg)) ||
+ wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg)));
+ active_sec_iface = wl_cfg80211_get_sec_iface(cfg);
+ p2p_disc_on = wl_get_p2p_status(cfg, SCANNING);
+
+ if ((sta_assoc_state == TRUE) || (p2p_disc_on == TRUE) ||
+ (cfg->nan_init_state == TRUE) ||
+ (active_sec_iface != WL_IFACE_NOT_PRESENT)) {
+ WL_INFORM_MEM(("Active iface matrix: sta_assoc_state = %d,"
+ " p2p_disc = %d, nan_disc = %d, active iface = %s\n",
+ sta_assoc_state, p2p_disc_on, cfg->nan_init_state,
+ wl_iftype_to_str(active_sec_iface)));
+ ret = BCME_BUSY;
+ }
+ mutex_unlock(&cfg->if_sync);
+ return ret;
+}
+#endif /* WL_IFACE_MGMT */
+#ifdef WL_NANP2P
+int
+wl_cfg80211_set_iface_conc_disc(struct net_device *ndev,
+ uint8 arg_val)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ WL_ERR(("%s: Cannot find cfg\n", __FUNCTION__));
+ return BCME_ERROR;
}
- wdev->wiphy = wiphy;
- wdev->iftype = NL80211_IFTYPE_MONITOR;
- ndev->ieee80211_ptr = wdev;
- SET_NETDEV_DEV(ndev, wiphy_dev(wiphy));
+ if (wl_cfg80211_is_policy_config_allowed(cfg) != BCME_OK) {
+ WL_ERR(("Cant allow iface management modifications\n"));
+ return BCME_BUSY;
+ }
- WL_DBG(("wl_cfg80211_add_monitor_if net device returned: 0x%p\n", ndev));
- return ndev->ieee80211_ptr;
-fail:
- return ERR_PTR(-EOPNOTSUPP);
-#endif // endif
+ if (arg_val) {
+ cfg->conc_disc |= arg_val;
+ } else {
+ cfg->conc_disc &= ~arg_val;
+ }
+ return BCME_OK;
}
-static struct wireless_dev *
-wl_cfg80211_add_ibss(struct wiphy *wiphy, u16 wl_iftype, char const *name)
+uint8
+wl_cfg80211_get_iface_conc_disc(struct net_device *ndev)
{
-#ifdef WLAIBSS_MCHAN
- /* AIBSS */
- return bcm_cfg80211_add_ibss_if(wiphy, (char *)name);
-#else
- /* Normal IBSS */
- WL_ERR(("IBSS not supported on Virtual iface\n"));
- return NULL;
-#endif // endif
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ WL_ERR(("%s: Cannot find cfg\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ return cfg->conc_disc;
}
-
-s32
-wl_release_vif_macaddr(struct bcm_cfg80211 *cfg, u8 *mac_addr, u16 wl_iftype)
+#endif /* WL_NANP2P */
+#ifdef WL_IFACE_MGMT
+int
+wl_cfg80211_set_iface_policy(struct net_device *ndev,
+ char *arg, int len)
{
- struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
- u16 org_toggle_bytes;
- u16 cur_toggle_bytes;
- u16 toggled_bit;
+ int ret = BCME_OK;
+ uint8 i = 0;
+ iface_mgmt_data_t *iface_data = NULL;
- if (!ndev || !mac_addr) {
- return -EINVAL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ WL_ERR(("%s: Cannot find cfg\n", __FUNCTION__));
+ return BCME_ERROR;
}
- if ((wl_iftype == WL_IF_TYPE_P2P_DISC) || (wl_iftype == WL_IF_TYPE_AP) ||
- (wl_iftype == WL_IF_TYPE_P2P_GO) || (wl_iftype == WL_IF_TYPE_P2P_GC)) {
- /* Avoid invoking release mac addr code for interfaces using
- * fixed mac addr.
- */
- return BCME_OK;
+ if (wl_cfg80211_is_policy_config_allowed(cfg) != BCME_OK) {
+ WL_ERR(("Cant allow iface management modifications\n"));
+ return BCME_BUSY;
}
- /* Fetch last two bytes of mac address */
- org_toggle_bytes = ntoh16(*((u16 *)&ndev->dev_addr[4]));
- cur_toggle_bytes = ntoh16(*((u16 *)&mac_addr[4]));
+ if (!arg || len <= 0 || len > sizeof(iface_mgmt_data_t)) {
+ return BCME_BADARG;
+ }
- toggled_bit = (org_toggle_bytes ^ cur_toggle_bytes);
- WL_DBG(("org_toggle_bytes:%04X cur_toggle_bytes:%04X\n",
- org_toggle_bytes, cur_toggle_bytes));
- if (toggled_bit & cfg->vif_macaddr_mask) {
- /* This toggled_bit is marked in the used mac addr
- * mask. Clear it.
- */
- cfg->vif_macaddr_mask &= ~toggled_bit;
- WL_INFORM(("MAC address - " MACDBG " released. toggled_bit:%04X vif_mask:%04X\n",
- MAC2STRDBG(mac_addr), toggled_bit, cfg->vif_macaddr_mask));
- } else {
- WL_ERR(("MAC address - " MACDBG " not found in the used list."
- " toggled_bit:%04x vif_mask:%04x\n", MAC2STRDBG(mac_addr),
- toggled_bit, cfg->vif_macaddr_mask));
- return -EINVAL;
+ iface_data = (iface_mgmt_data_t *)arg;
+ if (iface_data->policy >= WL_IF_POLICY_INVALID) {
+ WL_ERR(("Unexpected value of policy = %d\n",
+ iface_data->policy));
+ return BCME_BADARG;
}
- return BCME_OK;
+ bzero(&cfg->iface_data, sizeof(iface_mgmt_data_t));
+ ret = memcpy_s(&cfg->iface_data, sizeof(iface_mgmt_data_t), arg, len);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy iface data, src len = %d\n", len));
+ return ret;
+ }
+
+ if (cfg->iface_data.policy == WL_IF_POLICY_ROLE_PRIORITY) {
+ for (i = 0; i < WL_IF_TYPE_MAX; i++) {
+ WL_DBG(("iface = %s, priority[i] = %d\n",
+ wl_iftype_to_str(i), cfg->iface_data.priority[i]));
+ }
+ }
+
+ return ret;
}
-s32
-wl_get_vif_macaddr(struct bcm_cfg80211 *cfg, u16 wl_iftype, u8 *mac_addr)
+uint8
+wl_cfg80211_get_iface_policy(struct net_device *ndev)
+
{
- struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
- u16 toggle_mask;
- u16 toggle_bit;
- u16 toggle_bytes;
- u16 used;
- u32 offset = 0;
- /* Toggle mask starts from MSB of second last byte */
- u16 mask = 0x8000;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ WL_ERR(("%s: Cannot find cfg\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
- if (!mac_addr) {
- return -EINVAL;
+ return cfg->iface_data.policy;
+}
+#endif /* WL_IFACE_MGMT */
+#endif /* WL_IFACE_MGMT_CONF */
+
+#ifdef WL_IFACE_MGMT
+/* Get active secondary data iface type */
+wl_iftype_t
+wl_cfg80211_get_sec_iface(struct bcm_cfg80211 *cfg)
+{
+#ifndef WL_STATIC_IF
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* !WL_STATIC_IF */
+ struct net_device *p2p_ndev = NULL;
+
+ p2p_ndev = wl_to_p2p_bss_ndev(cfg,
+ P2PAPI_BSSCFG_CONNECTION1);
+
+#ifdef WL_STATIC_IF
+ if (IS_CFG80211_STATIC_IF_ACTIVE(cfg)) {
+ if (IS_AP_IFACE(cfg->static_ndev->ieee80211_ptr)) {
+ return WL_IF_TYPE_AP;
+ }
}
+#else
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ return WL_IF_TYPE_AP;
+ }
+#endif /* WL_STATIC_IF */
- memcpy(mac_addr, ndev->dev_addr, ETH_ALEN);
-/*
- * VIF MAC address managment
- * P2P Device addres: Primary MAC with locally admin. bit set
- * P2P Group address/NAN NMI/Softap/NAN DPI: Primary MAC addr
- * with local admin bit set and one additional bit toggled.
- * cfg->vif_macaddr_mask will hold the info regarding the mac address
- * released. Ensure to call wl_release_vif_macaddress to free up
- * the mac address.
- */
- if (wl_iftype == WL_IF_TYPE_P2P_DISC || wl_iftype == WL_IF_TYPE_AP) {
- mac_addr[0] |= 0x02;
- } else if ((wl_iftype == WL_IF_TYPE_P2P_GO) || (wl_iftype == WL_IF_TYPE_P2P_GC)) {
- mac_addr[0] |= 0x02;
- mac_addr[4] ^= 0x80;
- } else {
- /* For locally administered mac addresses, we keep the
- * OUI part constant and just work on the last two bytes.
- */
- mac_addr[0] |= 0x02;
- toggle_mask = cfg->vif_macaddr_mask;
- toggle_bytes = ntoh16(*((u16 *)&mac_addr[4]));
- do {
- used = toggle_mask & mask;
- if (!used) {
- /* Use this bit position */
- toggle_bit = mask >> offset;
- toggle_bytes ^= toggle_bit;
- cfg->vif_macaddr_mask |= toggle_bit;
- WL_DBG(("toggle_bit:%04X toggle_bytes:%04X toggle_mask:%04X\n",
- toggle_bit, toggle_bytes, cfg->vif_macaddr_mask));
- /* Macaddress are stored in network order */
- mac_addr[5] = *((u8 *)&toggle_bytes);
- mac_addr[4] = *(((u8 *)&toggle_bytes + 1));
- break;
- }
+ if (p2p_ndev && p2p_ndev->ieee80211_ptr) {
+ if (p2p_ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
+ return WL_IF_TYPE_P2P_GO;
+ }
- /* Shift by one */
- toggle_mask = toggle_mask << 0x1;
- offset++;
- if (offset > MAX_VIF_OFFSET) {
- /* We have used up all macaddresses. Something wrong! */
- WL_ERR(("Entire range of macaddress used up.\n"));
- ASSERT(0);
- break;
- }
- } while (true);
+ if (p2p_ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_CLIENT) {
+ return WL_IF_TYPE_P2P_GC;
+ }
}
- WL_INFORM_MEM(("Get virtual I/F mac addr: "MACDBG"\n", MAC2STRDBG(mac_addr)));
- return 0;
+
+#ifdef WL_NAN
+ if (wl_cfgnan_is_dp_active(bcmcfg_to_prmry_ndev(cfg))) {
+ return WL_IF_TYPE_NAN;
+ }
+#endif /* WL_NAN */
+ return WL_IFACE_NOT_PRESENT;
}
-/* All Android/Linux private/Vendor Interface calls should make
- * use of below API for interface creation.
- */
-struct wireless_dev *
-wl_cfg80211_add_if(struct bcm_cfg80211 *cfg,
- struct net_device *primary_ndev,
- wl_iftype_t wl_iftype, const char *name, u8 *mac)
+/*
+* Handle incoming data interface request based on policy.
+* If there is any conflicting interface, that will be
+* deleted.
+*/
+s32
+wl_cfg80211_data_if_mgmt(struct bcm_cfg80211 *cfg,
+ wl_iftype_t new_wl_iftype)
{
- u8 mac_addr[ETH_ALEN];
- s32 err = -ENODEV;
- struct wireless_dev *wdev = NULL;
- struct wiphy *wiphy;
- s32 wl_mode;
- dhd_pub_t *dhd;
- wl_iftype_t macaddr_iftype = wl_iftype;
+ s32 ret = BCME_OK;
+ bool del_iface = false;
+ wl_iftype_t sec_wl_if_type = wl_cfg80211_get_sec_iface(cfg);
- WL_INFORM_MEM(("if name: %s, wl_iftype:%d \n",
- name ? name : "NULL", wl_iftype));
- if (!cfg || !primary_ndev || !name) {
- WL_ERR(("cfg/ndev/name ptr null\n"));
- return NULL;
- }
- if (wl_cfg80211_get_wdev_from_ifname(cfg, name)) {
- WL_ERR(("Interface name %s exists!\n", name));
- return NULL;
- }
- wiphy = bcmcfg_to_wiphy(cfg);
- dhd = (dhd_pub_t *)(cfg->pub);
- if (!dhd) {
- return NULL;
+ if (sec_wl_if_type == WL_IF_TYPE_NAN &&
+ new_wl_iftype == WL_IF_TYPE_NAN) {
+ /* Multi NDP is allowed irrespective of Policy */
+ return BCME_OK;
}
- if ((wl_mode = wl_iftype_to_mode(wl_iftype)) < 0) {
- return NULL;
+ if (sec_wl_if_type == WL_IFACE_NOT_PRESENT) {
+ /*
+ * If there is no active secondary I/F, there
+ * is no interface conflict. Do nothing.
+ */
+ return BCME_OK;
}
- if ((err = wl_check_vif_support(cfg, wl_iftype)) < 0) {
- return NULL;
+ /* Handle secondary data link case */
+ switch (cfg->iface_data.policy) {
+ case WL_IF_POLICY_CUSTOM:
+ case WL_IF_POLICY_DEFAULT: {
+ if (sec_wl_if_type == WL_IF_TYPE_NAN) {
+ /* NAN has the lowest priority */
+ del_iface = true;
+ } else {
+ /* Active iface is present, returning error */
+ ret = BCME_ERROR;
+ }
+ break;
+ }
+ case WL_IF_POLICY_FCFS: {
+ WL_INFORM_MEM(("Found active iface = %s, can't support new iface = %s\n",
+ wl_iftype_to_str(sec_wl_if_type), wl_iftype_to_str(new_wl_iftype)));
+ ret = BCME_ERROR;
+ break;
+ }
+ case WL_IF_POLICY_LP: {
+ WL_INFORM_MEM(("Remove active sec data interface, allow incoming iface\n"));
+ /* Delete existing data iface and allow incoming sec iface */
+ del_iface = true;
+ break;
+ }
+ case WL_IF_POLICY_ROLE_PRIORITY: {
+ WL_INFORM_MEM(("Existing iface = %s (%d) and new iface = %s (%d)\n",
+ wl_iftype_to_str(sec_wl_if_type),
+ cfg->iface_data.priority[sec_wl_if_type],
+ wl_iftype_to_str(new_wl_iftype),
+ cfg->iface_data.priority[new_wl_iftype]));
+ if (cfg->iface_data.priority[new_wl_iftype] >
+ cfg->iface_data.priority[sec_wl_if_type]) {
+ del_iface = true;
+ } else {
+ WL_ERR(("Can't support new iface = %s\n",
+ wl_iftype_to_str(new_wl_iftype)));
+ ret = BCME_ERROR;
+ }
+ break;
+ }
+ default: {
+ WL_ERR(("Unsupported interface policy = %d\n",
+ cfg->iface_data.policy));
+ return BCME_ERROR;
+ }
}
-
- /* Protect the interace op context */
- mutex_lock(&cfg->if_sync);
- /* Do pre-create ops */
- wl_cfg80211_iface_state_ops(primary_ndev->ieee80211_ptr, WL_IF_CREATE_REQ,
- wl_iftype, wl_mode);
-
- if (strnicmp(name, SOFT_AP_IF_NAME, strlen(SOFT_AP_IF_NAME)) == 0) {
- macaddr_iftype = WL_IF_TYPE_AP;
+ if (del_iface) {
+ ret = wl_cfg80211_delete_iface(cfg, sec_wl_if_type);
}
+ return ret;
+}
- if (mac) {
- /* If mac address is provided, use that */
- memcpy(mac_addr, mac, ETH_ALEN);
- } else if ((wl_get_vif_macaddr(cfg, macaddr_iftype, mac_addr) != BCME_OK)) {
- /* Fetch the mac address to be used for virtual interface */
- err = -EINVAL;
- goto fail;
+/* Handle discovery ifaces based on policy */
+s32
+wl_cfg80211_disc_if_mgmt(struct bcm_cfg80211 *cfg,
+ wl_iftype_t new_wl_iftype, bool *disable_nan, bool *disable_p2p)
+{
+ s32 ret = BCME_OK;
+ wl_iftype_t sec_wl_if_type =
+ wl_cfg80211_get_sec_iface(cfg);
+ *disable_p2p = false;
+ *disable_nan = false;
+
+ if (sec_wl_if_type == WL_IF_TYPE_NAN &&
+ new_wl_iftype == WL_IF_TYPE_NAN) {
+ /* Multi NDP is allowed irrespective of Policy */
+ return BCME_OK;
}
- switch (wl_iftype) {
- case WL_IF_TYPE_IBSS:
- wdev = wl_cfg80211_add_ibss(wiphy, wl_iftype, name);
- break;
- case WL_IF_TYPE_MONITOR:
- wdev = wl_cfg80211_add_monitor_if(wiphy, name);
- break;
- case WL_IF_TYPE_STA:
- case WL_IF_TYPE_AP:
- case WL_IF_TYPE_NAN:
- if (cfg->iface_cnt >= (IFACE_MAX_CNT - 1)) {
- WL_ERR(("iface_cnt exceeds max cnt. created iface_cnt: %d\n",
- cfg->iface_cnt));
- err = -ENOTSUPP;
- goto fail;
+ /*
+ * Check for any policy conflicts with active secondary
+ * interface for incoming discovery iface
+ */
+ if ((sec_wl_if_type != WL_IFACE_NOT_PRESENT) &&
+ (is_discovery_iface(new_wl_iftype))) {
+ switch (cfg->iface_data.policy) {
+ case WL_IF_POLICY_CUSTOM: {
+ if (sec_wl_if_type == WL_IF_TYPE_NAN &&
+ new_wl_iftype == WL_IF_TYPE_P2P_DISC) {
+ WL_INFORM_MEM(("Allow P2P Discovery with active NDP\n"));
+ /* No further checks are required. */
+ return BCME_OK;
+ }
+ /*
+ * Intentional fall through to default policy
+ * as for AP and associated ifaces, both are same
+ */
}
- wdev = wl_cfg80211_create_iface(cfg->wdev->wiphy,
- wl_iftype, mac_addr, name);
- break;
- case WL_IF_TYPE_P2P_DISC:
- case WL_IF_TYPE_P2P_GO:
- /* Intentional fall through */
- case WL_IF_TYPE_P2P_GC:
- if (cfg->p2p_supported) {
- wdev = wl_cfg80211_p2p_if_add(cfg, wl_iftype,
- name, mac_addr, &err);
+ case WL_IF_POLICY_DEFAULT: {
+ if (sec_wl_if_type == WL_IF_TYPE_AP) {
+ WL_INFORM_MEM(("AP is active, cant support new iface\n"));
+ ret = BCME_ERROR;
+ } else if (sec_wl_if_type == WL_IF_TYPE_P2P_GC ||
+ sec_wl_if_type == WL_IF_TYPE_P2P_GO) {
+ if (new_wl_iftype == WL_IF_TYPE_P2P_DISC) {
+ /*
+ * Associated discovery case,
+ * Fall through
+ */
+ } else {
+ /* Active iface is present, returning error */
+ WL_INFORM_MEM(("P2P group is active,"
+ " cant support new iface\n"));
+ ret = BCME_ERROR;
+ }
+ } else if (sec_wl_if_type == WL_IF_TYPE_NAN) {
+ ret = wl_cfg80211_delete_iface(cfg, sec_wl_if_type);
+ }
break;
}
- /* Intentionally fall through for unsupported interface
- * handling when firmware doesn't support p2p
- */
- default:
- WL_ERR(("Unsupported interface type\n"));
- err = -ENOTSUPP;
- goto fail;
+ case WL_IF_POLICY_FCFS: {
+ WL_INFORM_MEM(("Can't support new iface = %s\n",
+ wl_iftype_to_str(new_wl_iftype)));
+ ret = BCME_ERROR;
+ break;
+ }
+ case WL_IF_POLICY_LP: {
+ /* Delete existing data iface n allow incoming sec iface */
+ WL_INFORM_MEM(("Remove active sec data interface = %s\n",
+ wl_iftype_to_str(sec_wl_if_type)));
+ ret = wl_cfg80211_delete_iface(cfg,
+ sec_wl_if_type);
+ break;
+ }
+ case WL_IF_POLICY_ROLE_PRIORITY: {
+ WL_INFORM_MEM(("Existing iface = %s (%d) and new iface = %s (%d)\n",
+ wl_iftype_to_str(sec_wl_if_type),
+ cfg->iface_data.priority[sec_wl_if_type],
+ wl_iftype_to_str(new_wl_iftype),
+ cfg->iface_data.priority[new_wl_iftype]));
+ if (cfg->iface_data.priority[new_wl_iftype] >
+ cfg->iface_data.priority[sec_wl_if_type]) {
+ WL_INFORM_MEM(("Remove active sec data iface\n"));
+ ret = wl_cfg80211_delete_iface(cfg,
+ sec_wl_if_type);
+ } else {
+ WL_ERR(("Can't support new iface = %s"
+ " due to low priority\n",
+ wl_iftype_to_str(new_wl_iftype)));
+ ret = BCME_ERROR;
+ }
+ break;
+ }
+ default: {
+ WL_ERR(("Unsupported policy\n"));
+ return BCME_ERROR;
+ }
+ }
+ } else {
+ /*
+ * Handle incoming new secondary iface request,
+ * irrespective of existing discovery ifaces
+ */
+ if ((cfg->iface_data.policy == WL_IF_POLICY_CUSTOM) &&
+ (new_wl_iftype == WL_IF_TYPE_NAN)) {
+ WL_INFORM_MEM(("Allow NAN Data Path\n"));
+ /* No further checks are required. */
+ return BCME_OK;
+ }
}
- if (!wdev) {
- if (err != -ENOTSUPP) {
- err = -ENODEV;
+ /* Check for any conflicting discovery iface */
+ switch (new_wl_iftype) {
+ case WL_IF_TYPE_P2P_DISC:
+ case WL_IF_TYPE_P2P_GO:
+ case WL_IF_TYPE_P2P_GC: {
+ *disable_nan = true;
+ break;
+ }
+ case WL_IF_TYPE_NAN_NMI:
+ case WL_IF_TYPE_NAN: {
+ *disable_p2p = true;
+ break;
+ }
+ case WL_IF_TYPE_STA:
+ case WL_IF_TYPE_AP: {
+ *disable_nan = true;
+ *disable_p2p = true;
+ break;
+ }
+ default: {
+ WL_ERR(("Unsupported\n"));
+ return BCME_ERROR;
}
- WL_ERR(("vif create failed. err:%d\n", err));
- goto fail;
}
+ return ret;
+}
- /* Ensure decrementing in case of failure */
- cfg->vif_count++;
+bool
+wl_cfg80211_is_associated_discovery(struct bcm_cfg80211 *cfg,
+ wl_iftype_t new_wl_iftype)
+{
+ struct net_device *p2p_ndev = NULL;
+ p2p_ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1);
- wl_cfg80211_iface_state_ops(wdev,
- WL_IF_CREATE_DONE, wl_iftype, wl_mode);
+ if (new_wl_iftype == WL_IF_TYPE_P2P_DISC && p2p_ndev &&
+ p2p_ndev->ieee80211_ptr &&
+ is_p2p_group_iface(p2p_ndev->ieee80211_ptr)) {
+ return true;
+ }
+#ifdef WL_NAN
+ else if ((new_wl_iftype == WL_IF_TYPE_NAN_NMI) &&
+ (wl_cfgnan_is_dp_active(bcmcfg_to_prmry_ndev(cfg)))) {
+ return true;
+ }
+#endif /* WL_NAN */
+ return false;
+}
- WL_INFORM_MEM(("Vif created."
- " dev->ifindex:%d cfg_iftype:%d, vif_count:%d\n",
- (wdev->netdev ? wdev->netdev->ifindex : 0xff),
- wdev->iftype, cfg->vif_count));
- mutex_unlock(&cfg->if_sync);
- return wdev;
+/* Handle incoming discovery iface request */
+s32
+wl_cfg80211_handle_discovery_config(struct bcm_cfg80211 *cfg,
+ wl_iftype_t new_wl_iftype)
+{
+ s32 ret = BCME_OK;
+ bool disable_p2p = false;
+ bool disable_nan = false;
+
+ wl_iftype_t active_sec_iface =
+ wl_cfg80211_get_sec_iface(cfg);
+
+ if (is_discovery_iface(new_wl_iftype) &&
+ (active_sec_iface != WL_IFACE_NOT_PRESENT)) {
+ if (wl_cfg80211_is_associated_discovery(cfg,
+ new_wl_iftype) == TRUE) {
+ WL_DBG(("Associate iface request is allowed= %s\n",
+ wl_iftype_to_str(new_wl_iftype)));
+ return ret;
+ }
+ }
-fail:
- wl_cfg80211_iface_state_ops(primary_ndev->ieee80211_ptr,
- WL_IF_DELETE_REQ, wl_iftype, wl_mode);
+ ret = wl_cfg80211_disc_if_mgmt(cfg, new_wl_iftype,
+ &disable_nan, &disable_p2p);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed at disc iface mgmt, ret = %d\n", ret));
+ return ret;
+ }
+#ifdef WL_NANP2P
+ if (((new_wl_iftype == WL_IF_TYPE_P2P_DISC) && disable_nan) ||
+ ((new_wl_iftype == WL_IF_TYPE_NAN_NMI) && disable_p2p)) {
+ if ((cfg->nan_p2p_supported == TRUE) &&
+ (cfg->conc_disc == WL_NANP2P_CONC_SUPPORT)) {
+ WL_INFORM_MEM(("P2P + NAN conc is supported\n"));
+ disable_p2p = false;
+ disable_nan = false;
+ }
+ }
+#endif /* WL_NANP2P */
- if (err != -ENOTSUPP) {
- /* For non-supported interfaces, just return error and
- * skip below recovery steps.
- */
- SUPP_LOG(("IF_ADD fail. err:%d\n", err));
- wl_flush_fw_log_buffer(primary_ndev, FW_LOGSET_MASK_ALL);
-#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
- if (dhd->memdump_enabled) {
- dhd->memdump_type = DUMP_TYPE_IFACE_OP_FAILURE;
- dhd_bus_mem_dump(dhd);
+ if (disable_nan) {
+#ifdef WL_NAN
+ /* Disable nan */
+ cfg->nancfg.disable_reason = NAN_CONCURRENCY_CONFLICT;
+ ret = wl_cfgnan_disable(cfg);
+ if (ret != BCME_OK) {
+ WL_ERR(("failed to disable nan, error[%d]\n", ret));
+ return ret;
}
-#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */
- dhd->hang_reason = HANG_REASON_IFACE_ADD_FAILURE;
- net_os_send_hang_message(bcmcfg_to_prmry_ndev(cfg));
+#endif /* WL_NAN */
}
- mutex_unlock(&cfg->if_sync);
- return NULL;
+
+ if (disable_p2p) {
+ /* Disable p2p discovery */
+ ret = wl_cfg80211_deinit_p2p_discovery(cfg);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to disable p2p_disc for allowing nan\n"));
+ return ret;
+ }
+ }
+ return ret;
}
-static bcm_struct_cfgdev *
-wl_cfg80211_add_virtual_iface(struct wiphy *wiphy,
-#if defined(WL_CFG80211_P2P_DEV_IF)
- const char *name,
-#else
- char *name,
-#endif /* WL_CFG80211_P2P_DEV_IF */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
- unsigned char name_assign_type,
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) */
- enum nl80211_iftype type,
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
- u32 *flags,
-#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */
- struct vif_params *params)
+/*
+* Check for any conflicting iface before adding iface.
+* Based on policy, either conflicting iface is removed
+* or new iface add request is blocked.
+*/
+s32
+wl_cfg80211_handle_if_role_conflict(struct bcm_cfg80211 *cfg,
+ wl_iftype_t new_wl_iftype)
{
- u16 wl_iftype;
- u16 wl_mode;
- struct net_device *primary_ndev;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct wireless_dev *wdev;
+ s32 ret = BCME_OK;
- WL_DBG(("Enter iftype: %d\n", type));
- if (!cfg) {
- return ERR_PTR(-EINVAL);
- }
+ WL_INFORM_MEM(("Incoming iface = %s\n", wl_iftype_to_str(new_wl_iftype)));
- /* Use primary I/F for sending cmds down to firmware */
- primary_ndev = bcmcfg_to_prmry_ndev(cfg);
- if (unlikely(!wl_get_drv_status(cfg, READY, primary_ndev))) {
- WL_ERR(("device is not ready\n"));
- return ERR_PTR(-ENODEV);
+ if (!is_discovery_iface(new_wl_iftype)) {
+ /* Incoming data interface request */
+ if (wl_cfg80211_get_sec_iface(cfg) != WL_IFACE_NOT_PRESENT) {
+ /* active interface present - Apply interface data policy */
+ ret = wl_cfg80211_data_if_mgmt(cfg, new_wl_iftype);
+ if (ret != BCME_OK) {
+ WL_ERR(("if_mgmt fail:%d\n", ret));
+ return ret;
+ }
+ }
}
+ /* Apply discovery config */
+ ret = wl_cfg80211_handle_discovery_config(cfg, new_wl_iftype);
+ return ret;
+}
+#endif /* WL_IFACE_MGMT */
- if (!name) {
- WL_ERR(("Interface name not provided \n"));
- return ERR_PTR(-EINVAL);
- }
+static struct wireless_dev *
+wl_cfg80211_add_monitor_if(struct wiphy *wiphy, const char *name)
+{
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
+ WL_ERR(("wl_cfg80211_add_monitor_if: No more support monitor interface\n"));
+ return ERR_PTR(-EOPNOTSUPP);
+#else
+ struct wireless_dev *wdev;
+ struct net_device* ndev = NULL;
- if (cfg80211_to_wl_iftype(type, &wl_iftype, &wl_mode) < 0) {
- return ERR_PTR(-EINVAL);
- }
+ dhd_add_monitor(name, &ndev);
- wdev = wl_cfg80211_add_if(cfg, primary_ndev, wl_iftype, name, NULL);
- if (unlikely(!wdev)) {
- return ERR_PTR(-ENODEV);
+ wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+ if (!wdev) {
+ WL_ERR(("wireless_dev alloc failed! \n"));
+ goto fail;
}
- return wdev_to_cfgdev(wdev);
+ wdev->wiphy = wiphy;
+ wdev->iftype = NL80211_IFTYPE_MONITOR;
+ ndev->ieee80211_ptr = wdev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(wiphy));
+
+ WL_DBG(("wl_cfg80211_add_monitor_if net device returned: 0x%p\n", ndev));
+ return ndev->ieee80211_ptr;
+fail:
+ return ERR_PTR(-EOPNOTSUPP);
+#endif // endif
}
-static s32
-wl_cfg80211_del_ibss(struct wiphy *wiphy, struct wireless_dev *wdev)
+static struct wireless_dev *
+wl_cfg80211_add_ibss(struct wiphy *wiphy, u16 wl_iftype, char const *name)
{
- WL_INFORM_MEM(("del ibss wdev_ptr:%p\n", wdev));
#ifdef WLAIBSS_MCHAN
/* AIBSS */
- return bcm_cfg80211_del_ibss_if(wiphy, wdev);
+ return bcm_cfg80211_add_ibss_if(wiphy, (char *)name);
#else
/* Normal IBSS */
- return wl_cfg80211_del_iface(wiphy, wdev);
+ WL_ERR(("IBSS not supported on Virtual iface\n"));
+ return NULL;
#endif // endif
}
s32
-wl_cfg80211_del_if(struct bcm_cfg80211 *cfg, struct net_device *primary_ndev,
- struct wireless_dev *wdev, char *ifname)
+wl_release_vif_macaddr(struct bcm_cfg80211 *cfg, u8 *mac_addr, u16 wl_iftype)
{
- int ret = BCME_OK;
- s32 bssidx;
- struct wiphy *wiphy;
- u16 wl_mode;
- u16 wl_iftype;
- struct net_info *netinfo;
- dhd_pub_t *dhd;
- BCM_REFERENCE(dhd);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ u16 org_toggle_bytes;
+ u16 cur_toggle_bytes;
+ u16 toggled_bit;
- if (!cfg) {
+ if (!ndev || !mac_addr || ETHER_ISNULLADDR(mac_addr)) {
return -EINVAL;
}
+ WL_DBG(("%s:Mac addr" MACDBG "\n",
+ __FUNCTION__, MAC2STRDBG(mac_addr)));
- mutex_lock(&cfg->if_sync);
- dhd = (dhd_pub_t *)(cfg->pub);
-
- if (!wdev && ifname) {
- /* If only ifname is provided, fetch corresponding wdev ptr from our
- * internal data structure
+ if ((wl_iftype == WL_IF_TYPE_P2P_DISC) || (wl_iftype == WL_IF_TYPE_AP) ||
+ (wl_iftype == WL_IF_TYPE_P2P_GO) || (wl_iftype == WL_IF_TYPE_P2P_GC)) {
+ /* Avoid invoking release mac addr code for interfaces using
+ * fixed mac addr.
*/
- wdev = wl_cfg80211_get_wdev_from_ifname(cfg, ifname);
+ return BCME_OK;
}
- /* Check whether we have a valid wdev ptr */
- if (unlikely(!wdev)) {
- WL_ERR(("wdev not found. '%s' does not exists\n", ifname));
- mutex_unlock(&cfg->if_sync);
- return -ENODEV;
+ /* Fetch last two bytes of mac address */
+ org_toggle_bytes = ntoh16(*((u16 *)&ndev->dev_addr[4]));
+ cur_toggle_bytes = ntoh16(*((u16 *)&mac_addr[4]));
+
+ toggled_bit = (org_toggle_bytes ^ cur_toggle_bytes);
+ WL_DBG(("org_toggle_bytes:%04X cur_toggle_bytes:%04X\n",
+ org_toggle_bytes, cur_toggle_bytes));
+ if (toggled_bit & cfg->vif_macaddr_mask) {
+ /* This toggled_bit is marked in the used mac addr
+ * mask. Clear it.
+ */
+ cfg->vif_macaddr_mask &= ~toggled_bit;
+ WL_INFORM(("MAC address - " MACDBG " released. toggled_bit:%04X vif_mask:%04X\n",
+ MAC2STRDBG(mac_addr), toggled_bit, cfg->vif_macaddr_mask));
+ } else {
+ WL_ERR(("MAC address - " MACDBG " not found in the used list."
+ " toggled_bit:%04x vif_mask:%04x\n", MAC2STRDBG(mac_addr),
+ toggled_bit, cfg->vif_macaddr_mask));
+ return -EINVAL;
}
- WL_INFORM_MEM(("del vif. wdev_ptr:%p cfg_iftype:%d\n", wdev, wdev->iftype));
+ return BCME_OK;
+}
- wiphy = wdev->wiphy;
-#ifdef WL_CFG80211_P2P_DEV_IF
- if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
- /* p2p discovery would be de-initialized in stop p2p
- * device context/from other virtual i/f creation context
- * so netinfo list may not have any node corresponding to
- * discovery I/F. Handle it before bssidx check.
+s32
+wl_get_vif_macaddr(struct bcm_cfg80211 *cfg, u16 wl_iftype, u8 *mac_addr)
+{
+#ifdef WL_P2P_USE_RANDMAC
+ struct ether_addr *p2p_dev_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE);
+#endif // endif
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ u16 toggle_mask;
+ u16 toggle_bit;
+ u16 toggle_bytes;
+ u16 used;
+ u32 offset = 0;
+ /* Toggle mask starts from MSB of second last byte */
+ u16 mask = 0x8000;
+ if (!mac_addr) {
+ return -EINVAL;
+ }
+#ifdef WL_P2P_USE_RANDMAC
+ if (wl_iftype == WL_IF_TYPE_P2P_DISC) {
+ memcpy_s(mac_addr, ETH_ALEN, p2p_dev_addr->octet, ETH_ALEN);
+ return 0;
+ }
+#endif // endif
+ memcpy(mac_addr, ndev->dev_addr, ETH_ALEN);
+/*
+ * VIF MAC address management
+ * P2P Device address: Primary MAC with locally admin. bit set
+ * P2P Group address/NAN NMI/Softap/NAN DPI: Primary MAC addr
+ * with local admin bit set and one additional bit toggled.
+ * cfg->vif_macaddr_mask will hold the info regarding the mac address
+ * released. Ensure to call wl_release_vif_macaddr to free up
+ * the mac address.
+ */
+#if defined(SPECIFIC_MAC_GEN_SCHEME)
+ if (wl_iftype == WL_IF_TYPE_P2P_DISC || wl_iftype == WL_IF_TYPE_AP) {
+ mac_addr[0] |= 0x02;
+ } else if ((wl_iftype == WL_IF_TYPE_P2P_GO) || (wl_iftype == WL_IF_TYPE_P2P_GC)) {
+ mac_addr[0] |= 0x02;
+ mac_addr[4] ^= 0x80;
+ }
+#else
+ if (wl_iftype == WL_IF_TYPE_P2P_DISC) {
+ mac_addr[0] |= 0x02;
+ }
+#endif /* SPECIFIC_MAC_GEN_SCHEME */
+ else {
+ /* For locally administered mac addresses, we keep the
+ * OUI part constant and just work on the last two bytes.
*/
- ret = wl_cfg80211_p2p_if_del(wiphy, wdev);
- if (unlikely(ret)) {
- goto exit;
- } else {
- /* success case. return from here */
- if (cfg->vif_count) {
- cfg->vif_count--;
+ mac_addr[0] |= 0x02;
+ toggle_mask = cfg->vif_macaddr_mask;
+ toggle_bytes = ntoh16(*((u16 *)&mac_addr[4]));
+ do {
+ used = toggle_mask & mask;
+ if (!used) {
+ /* Use this bit position */
+ toggle_bit = mask >> offset;
+ toggle_bytes ^= toggle_bit;
+ cfg->vif_macaddr_mask |= toggle_bit;
+ WL_DBG(("toggle_bit:%04X toggle_bytes:%04X toggle_mask:%04X\n",
+ toggle_bit, toggle_bytes, cfg->vif_macaddr_mask));
+ /* Macaddress are stored in network order */
+ mac_addr[5] = *((u8 *)&toggle_bytes);
+ mac_addr[4] = *(((u8 *)&toggle_bytes + 1));
+ break;
}
- mutex_unlock(&cfg->if_sync);
- return BCME_OK;
- }
+
+ /* Shift by one */
+ toggle_mask = toggle_mask << 0x1;
+ offset++;
+ if (offset > MAX_VIF_OFFSET) {
+ /* We have used up all macaddresses. Something wrong! */
+ WL_ERR(("Entire range of macaddress used up.\n"));
+ ASSERT(0);
+ break;
+ }
+ } while (true);
+ }
+ WL_INFORM_MEM(("Get virtual I/F mac addr: "MACDBG"\n", MAC2STRDBG(mac_addr)));
+ return 0;
+}
+#ifdef DNGL_AXI_ERROR_LOGGING
+static s32
+_wl_cfg80211_check_axi_error(struct bcm_cfg80211 *cfg)
+{
+ s32 ret = BCME_OK;
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ hnd_ext_trap_hdr_t *hdr;
+ int axi_host_error_size;
+ uint8 *new_dst;
+ uint32 *ext_data = dhd->extended_trap_data;
+ struct file *fp = NULL;
+ char *filename = DHD_COMMON_DUMP_PATH
+ DHD_DUMP_AXI_ERROR_FILENAME
+ DHD_DUMP_HAL_FILENAME_SUFFIX;
+
+ WL_ERR(("%s: starts to read %s. Axi error \n", __FUNCTION__, filename));
+
+ fp = filp_open(filename, O_RDONLY, 0);
+
+ if (IS_ERR(fp) || (fp == NULL)) {
+ WL_ERR(("%s: Couldn't read the file, err %ld,File [%s] No previous axi error \n",
+ __FUNCTION__, PTR_ERR(fp), filename));
+ return ret;
}
-#endif /* WL_CFG80211_P2P_DEV_IF */
- if ((netinfo = wl_get_netinfo_by_wdev(cfg, wdev)) == NULL) {
- WL_ERR(("Find netinfo from wdev %p failed\n", wdev));
- ret = -ENODEV;
- goto exit;
+ kernel_read_compat(fp, fp->f_pos, (char *)dhd->axi_err_dump, sizeof(dhd_axi_error_dump_t));
+ filp_close(fp, NULL);
+
+ /* Delete axi error info file */
+ if (dhd_file_delete(filename) < 0) {
+ WL_ERR(("%s(): Failed to delete file: %s\n", __FUNCTION__, filename));
+ return ret;
}
+ WL_ERR(("%s(): Success to delete file: %s\n", __FUNCTION__, filename));
- if (!wdev->netdev) {
- WL_ERR(("ndev null! \n"));
- } else {
- /* Disable tx before del */
- netif_tx_disable(wdev->netdev);
+ if (dhd->axi_err_dump->etd_axi_error_v1.signature != HND_EXT_TRAP_AXIERROR_SIGNATURE) {
+ WL_ERR(("%s: Invalid AXI signature: 0x%x\n",
+ __FUNCTION__, dhd->axi_err_dump->etd_axi_error_v1.signature));
}
- wl_iftype = netinfo->iftype;
- wl_mode = wl_iftype_to_mode(wl_iftype);
- bssidx = netinfo->bssidx;
- WL_INFORM_MEM(("[IFDEL] cfg_iftype:%d wl_iftype:%d mode:%d bssidx:%d\n",
- wdev->iftype, wl_iftype, wl_mode, bssidx));
+ /* First word is original trap_data */
+ ext_data++;
- /* Do pre-interface del ops */
- wl_cfg80211_iface_state_ops(wdev, WL_IF_DELETE_REQ, wl_iftype, wl_mode);
+ /* Followed by the extended trap data header */
+ hdr = (hnd_ext_trap_hdr_t *)ext_data;
+ new_dst = hdr->data;
- switch (wl_iftype) {
- case WL_IF_TYPE_P2P_GO:
- case WL_IF_TYPE_P2P_GC:
- case WL_IF_TYPE_AP:
- case WL_IF_TYPE_STA:
- case WL_IF_TYPE_NAN:
- ret = wl_cfg80211_del_iface(wiphy, wdev);
- break;
- case WL_IF_TYPE_IBSS:
- ret = wl_cfg80211_del_ibss(wiphy, wdev);
- break;
+ axi_host_error_size = sizeof(dhd->axi_err_dump->axid)
+ + sizeof(dhd->axi_err_dump->fault_address);
- default:
- WL_ERR(("Unsupported interface type\n"));
- ret = BCME_ERROR;
+ /* TAG_TRAP_AXI_HOST_INFO tlv : host's axid, fault address */
+ new_dst = bcm_write_tlv(TAG_TRAP_AXI_HOST_INFO,
+ (const void *)dhd->axi_err_dump,
+ axi_host_error_size, new_dst);
+
+ /* TAG_TRAP_AXI_ERROR tlv */
+ new_dst = bcm_write_tlv(TAG_TRAP_AXI_ERROR,
+ (const void *)&dhd->axi_err_dump->etd_axi_error_v1,
+ sizeof(dhd->axi_err_dump->etd_axi_error_v1), new_dst);
+ hdr->len = new_dst - hdr->data;
+
+ dhd->dongle_trap_occured = TRUE;
+ memset(dhd->axi_err_dump, 0, sizeof(dhd_axi_error_dump_t));
+
+ dhd->hang_reason = HANG_REASON_DONGLE_TRAP;
+ net_os_send_hang_message(bcmcfg_to_prmry_ndev(cfg));
+ ret = BCME_ERROR;
+ return ret;
+}
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+/* All Android/Linux private/Vendor Interface calls should make
+ * use of below API for interface creation.
+ */
+struct wireless_dev *
+wl_cfg80211_add_if(struct bcm_cfg80211 *cfg,
+ struct net_device *primary_ndev,
+ wl_iftype_t wl_iftype, const char *name, u8 *mac)
+{
+ u8 mac_addr[ETH_ALEN];
+ s32 err = -ENODEV;
+ struct wireless_dev *wdev = NULL;
+ struct wiphy *wiphy;
+ s32 wl_mode;
+ dhd_pub_t *dhd;
+ wl_iftype_t macaddr_iftype = wl_iftype;
+
+ WL_INFORM_MEM(("if name: %s, wl_iftype:%d \n",
+ name ? name : "NULL", wl_iftype));
+ if (!cfg || !primary_ndev || !name) {
+ WL_ERR(("cfg/ndev/name ptr null\n"));
+ return NULL;
+ }
+ if (wl_cfg80211_get_wdev_from_ifname(cfg, name)) {
+ WL_ERR(("Interface name %s exists!\n", name));
+ return NULL;
}
-exit:
- if (ret == BCME_OK) {
- /* Successful case */
- if (cfg->vif_count) {
- cfg->vif_count--;
- }
- wl_cfg80211_iface_state_ops(primary_ndev->ieee80211_ptr,
- WL_IF_DELETE_DONE, wl_iftype, wl_mode);
+ wiphy = bcmcfg_to_wiphy(cfg);
+ dhd = (dhd_pub_t *)(cfg->pub);
+ if (!dhd) {
+ return NULL;
+ }
+
+ if ((wl_mode = wl_iftype_to_mode(wl_iftype)) < 0) {
+ return NULL;
+ }
+ mutex_lock(&cfg->if_sync);
#ifdef WL_NAN
- if (!((cfg->nancfg.mac_rand) && (wl_iftype == WL_IF_TYPE_NAN)))
+ if (wl_iftype == WL_IF_TYPE_NAN) {
+ /*
+ * Bypass the role conflict check for NDI and handle it
+ * from dp req and dp resp context
+ * because in aware comms, ndi gets created soon after nan enable.
+ */
+ } else
#endif /* WL_NAN */
- {
- wl_release_vif_macaddr(cfg, wdev->netdev->dev_addr, wl_iftype);
- }
- WL_INFORM_MEM(("vif deleted. vif_count:%d\n", cfg->vif_count));
- } else {
- if (!wdev->netdev) {
- WL_ERR(("ndev null! \n"));
- } else {
- /* IF del failed. revert back tx queue status */
- netif_tx_start_all_queues(wdev->netdev);
- }
+#ifdef WL_IFACE_MGMT
+ if ((err = wl_cfg80211_handle_if_role_conflict(cfg, wl_iftype)) < 0) {
+ mutex_unlock(&cfg->if_sync);
+ return NULL;
+ }
+#endif /* WL_IFACE_MGMT */
+#ifdef DNGL_AXI_ERROR_LOGGING
+ /* Check the previous smmu fault error */
+ if ((err = _wl_cfg80211_check_axi_error(cfg)) < 0) {
+ mutex_unlock(&cfg->if_sync);
+ return NULL;
+ }
+#endif /* DNGL_AXI_ERROR_LOGGING */
+	/* Protect the interface op context */
+ /* Do pre-create ops */
+ wl_cfg80211_iface_state_ops(primary_ndev->ieee80211_ptr, WL_IF_CREATE_REQ,
+ wl_iftype, wl_mode);
- /* Skip generating log files and sending HANG event
- * if driver state is not READY
- */
- if (wl_get_drv_status(cfg, READY, bcmcfg_to_prmry_ndev(cfg))) {
- SUPP_LOG(("IF_DEL fail. err:%d\n", ret));
- wl_flush_fw_log_buffer(primary_ndev, FW_LOGSET_MASK_ALL);
-#if defined(DHD_FW_COREDUMP)
- if (dhd->memdump_enabled && (ret != -EBADTYPE)) {
- dhd->memdump_type = DUMP_TYPE_IFACE_OP_FAILURE;
- dhd_bus_mem_dump(dhd);
+ if (strnicmp(name, SOFT_AP_IF_NAME, strlen(SOFT_AP_IF_NAME)) == 0) {
+ macaddr_iftype = WL_IF_TYPE_AP;
+ }
+
+ if (mac) {
+ /* If mac address is provided, use that */
+ memcpy(mac_addr, mac, ETH_ALEN);
+ } else if ((wl_get_vif_macaddr(cfg, macaddr_iftype, mac_addr) != BCME_OK)) {
+ /* Fetch the mac address to be used for virtual interface */
+ err = -EINVAL;
+ goto fail;
+ }
+
+ switch (wl_iftype) {
+ case WL_IF_TYPE_IBSS:
+ wdev = wl_cfg80211_add_ibss(wiphy, wl_iftype, name);
+ break;
+ case WL_IF_TYPE_MONITOR:
+ wdev = wl_cfg80211_add_monitor_if(wiphy, name);
+ break;
+ case WL_IF_TYPE_STA:
+ case WL_IF_TYPE_AP:
+ case WL_IF_TYPE_NAN:
+ if (cfg->iface_cnt >= (IFACE_MAX_CNT - 1)) {
+ WL_ERR(("iface_cnt exceeds max cnt. created iface_cnt: %d\n",
+ cfg->iface_cnt));
+ err = -ENOTSUPP;
+ goto fail;
}
-#endif /* BCMDONGLEHOST && DHD_FW_COREDUMP */
- WL_ERR(("Notify hang event to upper layer \n"));
- dhd->hang_reason = HANG_REASON_IFACE_DEL_FAILURE;
- net_os_send_hang_message(bcmcfg_to_prmry_ndev(cfg));
+ wdev = wl_cfg80211_create_iface(cfg->wdev->wiphy,
+ wl_iftype, mac_addr, name);
+ break;
+ case WL_IF_TYPE_P2P_DISC:
+ case WL_IF_TYPE_P2P_GO:
+ /* Intentional fall through */
+ case WL_IF_TYPE_P2P_GC:
+ if (cfg->p2p_supported) {
+ wdev = wl_cfg80211_p2p_if_add(cfg, wl_iftype,
+ name, mac_addr, &err);
+ break;
+ }
+ /* Intentionally fall through for unsupported interface
+ * handling when firmware doesn't support p2p
+ */
+ default:
+ WL_ERR(("Unsupported interface type\n"));
+ err = -ENOTSUPP;
+ goto fail;
+ }
+
+ if (!wdev) {
+ WL_ERR(("vif create failed. err:%d\n", err));
+ if (err != -ENOTSUPP) {
+ err = -ENODEV;
}
+ goto fail;
}
+	/* Increment vif count here; it must be decremented on failure or interface delete */
+ cfg->vif_count++;
+
+ wl_cfg80211_iface_state_ops(wdev,
+ WL_IF_CREATE_DONE, wl_iftype, wl_mode);
+
+ WL_INFORM_MEM(("Vif created. dev->ifindex:%d"
+ " cfg_iftype:%d, vif_count:%d\n",
+ (wdev->netdev ? wdev->netdev->ifindex : 0xff),
+ wdev->iftype, cfg->vif_count));
mutex_unlock(&cfg->if_sync);
- return ret;
+ return wdev;
+
+fail:
+ wl_cfg80211_iface_state_ops(primary_ndev->ieee80211_ptr,
+ WL_IF_DELETE_REQ, wl_iftype, wl_mode);
+
+ if (err != -ENOTSUPP) {
+ /* For non-supported interfaces, just return error and
+ * skip below recovery steps.
+ */
+ SUPP_LOG(("IF_ADD fail. err:%d\n", err));
+ wl_flush_fw_log_buffer(primary_ndev, FW_LOGSET_MASK_ALL);
+ if (dhd_query_bus_erros(dhd)) {
+ goto exit;
+ }
+ dhd->iface_op_failed = TRUE;
+#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+ if (dhd->memdump_enabled) {
+ dhd->memdump_type = DUMP_TYPE_IFACE_OP_FAILURE;
+ dhd_bus_mem_dump(dhd);
+ }
+#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */
+ dhd->hang_reason = HANG_REASON_IFACE_ADD_FAILURE;
+ net_os_send_hang_message(bcmcfg_to_prmry_ndev(cfg));
+ }
+exit:
+ mutex_unlock(&cfg->if_sync);
+ return NULL;
}
-static s32
-wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
+static bcm_struct_cfgdev *
+wl_cfg80211_add_virtual_iface(struct wiphy *wiphy,
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ const char *name,
+#else
+ char *name,
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ unsigned char name_assign_type,
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) */
+ enum nl80211_iftype type,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+ u32 *flags,
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) */
+ struct vif_params *params)
{
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct wireless_dev *wdev = cfgdev_to_wdev(cfgdev);
- int ret = BCME_OK;
u16 wl_iftype;
u16 wl_mode;
struct net_device *primary_ndev;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct wireless_dev *wdev;
+ WL_DBG(("Enter iftype: %d\n", type));
if (!cfg) {
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
+ /* Use primary I/F for sending cmds down to firmware */
primary_ndev = bcmcfg_to_prmry_ndev(cfg);
- wdev = cfgdev_to_wdev(cfgdev);
- if (!wdev) {
- WL_ERR(("wdev null"));
- return -ENODEV;
+ if (unlikely(!wl_get_drv_status(cfg, READY, primary_ndev))) {
+ WL_ERR(("device is not ready\n"));
+ return ERR_PTR(-ENODEV);
}
- WL_DBG(("Enter wdev:%p iftype: %d\n", wdev, wdev->iftype));
- if (cfg80211_to_wl_iftype(wdev->iftype, &wl_iftype, &wl_mode) < 0) {
- WL_ERR(("Wrong iftype: %d\n", wdev->iftype));
- return -ENODEV;
+ if (!name) {
+ WL_ERR(("Interface name not provided \n"));
+ return ERR_PTR(-EINVAL);
}
- if ((ret = wl_cfg80211_del_if(cfg, primary_ndev,
- wdev, NULL)) < 0) {
- WL_ERR(("IF del failed\n"));
+ if (cfg80211_to_wl_iftype(type, &wl_iftype, &wl_mode) < 0) {
+ return ERR_PTR(-EINVAL);
}
- return ret;
+ wdev = wl_cfg80211_add_if(cfg, primary_ndev, wl_iftype, name, NULL);
+ if (unlikely(!wdev)) {
+ return ERR_PTR(-ENODEV);
+ }
+ return wdev_to_cfgdev(wdev);
}
static s32
-wl_cfg80211_change_p2prole(struct wiphy *wiphy, struct net_device *ndev, enum nl80211_iftype type)
+wl_cfg80211_del_ibss(struct wiphy *wiphy, struct wireless_dev *wdev)
{
- s32 wlif_type;
- s32 mode = 0;
- s32 index;
- s32 err;
- s32 conn_idx = -1;
- chanspec_t chspec;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
-
- WL_INFORM_MEM(("Enter. current_role:%d new_role:%d \n", ndev->ieee80211_ptr->iftype, type));
-
- if (!cfg->p2p || !wl_cfgp2p_vif_created(cfg)) {
- WL_ERR(("P2P not initialized \n"));
- return -EINVAL;
- }
+ WL_INFORM_MEM(("del ibss wdev_ptr:%p\n", wdev));
+#ifdef WLAIBSS_MCHAN
+ /* AIBSS */
+ return bcm_cfg80211_del_ibss_if(wiphy, wdev);
+#else
+ /* Normal IBSS */
+ return wl_cfg80211_del_iface(wiphy, wdev);
+#endif // endif
+}
- if (!is_p2p_group_iface(ndev->ieee80211_ptr)) {
- WL_ERR(("Wrong if type \n"));
+s32
+wl_cfg80211_del_if(struct bcm_cfg80211 *cfg, struct net_device *primary_ndev,
+ struct wireless_dev *wdev, char *ifname)
+{
+ int ret = BCME_OK;
+ mutex_lock(&cfg->if_sync);
+ ret = _wl_cfg80211_del_if(cfg, primary_ndev, wdev, ifname);
+ mutex_unlock(&cfg->if_sync);
+ return ret;
+}
+
+s32
+_wl_cfg80211_del_if(struct bcm_cfg80211 *cfg, struct net_device *primary_ndev,
+ struct wireless_dev *wdev, char *ifname)
+{
+ int ret = BCME_OK;
+ s32 bssidx;
+ struct wiphy *wiphy;
+ u16 wl_mode;
+ u16 wl_iftype;
+ struct net_info *netinfo;
+ dhd_pub_t *dhd;
+ BCM_REFERENCE(dhd);
+
+ if (!cfg) {
+ return -EINVAL;
+ }
+
+ dhd = (dhd_pub_t *)(cfg->pub);
+
+ if (!wdev && ifname) {
+ /* If only ifname is provided, fetch corresponding wdev ptr from our
+ * internal data structure
+ */
+ wdev = wl_cfg80211_get_wdev_from_ifname(cfg, ifname);
+ }
+
+ /* Check whether we have a valid wdev ptr */
+ if (unlikely(!wdev)) {
+ WL_ERR(("wdev not found. '%s' does not exists\n", ifname));
+ return -ENODEV;
+ }
+
+ WL_INFORM_MEM(("del vif. wdev cfg_iftype:%d\n", wdev->iftype));
+
+ wiphy = wdev->wiphy;
+#ifdef WL_CFG80211_P2P_DEV_IF
+ if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
+ /* p2p discovery would be de-initialized in stop p2p
+ * device context/from other virtual i/f creation context
+ * so netinfo list may not have any node corresponding to
+ * discovery I/F. Handle it before bssidx check.
+ */
+ ret = wl_cfg80211_p2p_if_del(wiphy, wdev);
+ if (unlikely(ret)) {
+ goto exit;
+ } else {
+ /* success case. return from here */
+ if (cfg->vif_count) {
+ cfg->vif_count--;
+ }
+ return BCME_OK;
+ }
+ }
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+ if ((netinfo = wl_get_netinfo_by_wdev(cfg, wdev)) == NULL) {
+ WL_ERR(("Find netinfo from wdev %p failed\n", wdev));
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ if (!wdev->netdev) {
+ WL_ERR(("ndev null! \n"));
+ } else {
+ /* Disable tx before del */
+ netif_tx_disable(wdev->netdev);
+ }
+
+ wl_iftype = netinfo->iftype;
+ wl_mode = wl_iftype_to_mode(wl_iftype);
+ bssidx = netinfo->bssidx;
+ WL_INFORM_MEM(("[IFDEL] cfg_iftype:%d wl_iftype:%d mode:%d bssidx:%d\n",
+ wdev->iftype, wl_iftype, wl_mode, bssidx));
+
+ /* Do pre-interface del ops */
+ wl_cfg80211_iface_state_ops(wdev, WL_IF_DELETE_REQ, wl_iftype, wl_mode);
+
+ switch (wl_iftype) {
+ case WL_IF_TYPE_P2P_GO:
+ case WL_IF_TYPE_P2P_GC:
+ case WL_IF_TYPE_AP:
+ case WL_IF_TYPE_STA:
+ case WL_IF_TYPE_NAN:
+ ret = wl_cfg80211_del_iface(wiphy, wdev);
+ break;
+ case WL_IF_TYPE_IBSS:
+ ret = wl_cfg80211_del_ibss(wiphy, wdev);
+ break;
+
+ default:
+ WL_ERR(("Unsupported interface type\n"));
+ ret = BCME_ERROR;
+ }
+
+exit:
+ if (ret == BCME_OK) {
+ /* Successful case */
+ if (cfg->vif_count) {
+ cfg->vif_count--;
+ }
+ wl_cfg80211_iface_state_ops(primary_ndev->ieee80211_ptr,
+ WL_IF_DELETE_DONE, wl_iftype, wl_mode);
+#ifdef WL_NAN
+ if (!((cfg->nancfg.mac_rand) && (wl_iftype == WL_IF_TYPE_NAN)))
+#endif /* WL_NAN */
+ {
+ wl_release_vif_macaddr(cfg, wdev->netdev->dev_addr, wl_iftype);
+ }
+ WL_INFORM_MEM(("vif deleted. vif_count:%d\n", cfg->vif_count));
+ } else {
+ if (!wdev->netdev) {
+ WL_ERR(("ndev null! \n"));
+ } else {
+ /* IF del failed. revert back tx queue status */
+ netif_tx_start_all_queues(wdev->netdev);
+ }
+
+ /* Skip generating log files and sending HANG event
+ * if driver state is not READY
+ */
+ if (wl_get_drv_status(cfg, READY, bcmcfg_to_prmry_ndev(cfg))) {
+ SUPP_LOG(("IF_DEL fail. err:%d\n", ret));
+ wl_flush_fw_log_buffer(primary_ndev, FW_LOGSET_MASK_ALL);
+		/* If dongle is down due to previous hang or other conditions, sending
+ * one more hang notification is not needed.
+ */
+ if (dhd_query_bus_erros(dhd) || (ret == BCME_DONGLE_DOWN)) {
+ goto end;
+ }
+ dhd->iface_op_failed = TRUE;
+#if defined(DHD_FW_COREDUMP)
+ if (dhd->memdump_enabled && (ret != -EBADTYPE)) {
+ dhd->memdump_type = DUMP_TYPE_IFACE_OP_FAILURE;
+ dhd_bus_mem_dump(dhd);
+ }
+#endif /* DHD_FW_COREDUMP */
+ WL_ERR(("Notify hang event to upper layer \n"));
+ dhd->hang_reason = HANG_REASON_IFACE_DEL_FAILURE;
+ net_os_send_hang_message(bcmcfg_to_prmry_ndev(cfg));
+ }
+ }
+end:
+ return ret;
+}
+
+static s32
+wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct wireless_dev *wdev = cfgdev_to_wdev(cfgdev);
+ int ret = BCME_OK;
+ u16 wl_iftype;
+ u16 wl_mode;
+ struct net_device *primary_ndev;
+
+ if (!cfg) {
+ return -EINVAL;
+ }
+
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ wdev = cfgdev_to_wdev(cfgdev);
+ if (!wdev) {
+ WL_ERR(("wdev null"));
+ return -ENODEV;
+ }
+
+ WL_DBG(("Enter wdev:%p iftype: %d\n", wdev, wdev->iftype));
+ if (cfg80211_to_wl_iftype(wdev->iftype, &wl_iftype, &wl_mode) < 0) {
+ WL_ERR(("Wrong iftype: %d\n", wdev->iftype));
+ return -ENODEV;
+ }
+
+ if ((ret = wl_cfg80211_del_if(cfg, primary_ndev,
+ wdev, NULL)) < 0) {
+ WL_ERR(("IF del failed\n"));
+ }
+
+ return ret;
+}
+
+static s32
+wl_cfg80211_change_p2prole(struct wiphy *wiphy, struct net_device *ndev, enum nl80211_iftype type)
+{
+ s32 wlif_type;
+ s32 mode = 0;
+ s32 index;
+ s32 err;
+ s32 conn_idx = -1;
+ chanspec_t chspec;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+
+ WL_INFORM_MEM(("Enter. current_role:%d new_role:%d \n", ndev->ieee80211_ptr->iftype, type));
+
+ if (!cfg->p2p || !wl_cfgp2p_vif_created(cfg)) {
+ WL_ERR(("P2P not initialized \n"));
+ return -EINVAL;
+ }
+
+ if (!is_p2p_group_iface(ndev->ieee80211_ptr)) {
+ WL_ERR(("Wrong if type \n"));
return -EINVAL;
}
/* Abort any on-going scans to avoid race condition issues */
- wl_notify_escan_complete(cfg, ndev, true, true);
+ wl_cfg80211_cancel_scan(cfg);
index = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
if (index < 0) {
enum nl80211_iftype type,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
u32 *flags,
-#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */
-
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */
struct vif_params *params)
{
s32 infra = 1;
mutex_lock(&cfg->if_sync);
netinfo = wl_get_netinfo_by_wdev(cfg, ndev->ieee80211_ptr);
if (unlikely(!netinfo)) {
+#ifdef WL_STATIC_IF
if (IS_CFG80211_STATIC_IF(cfg, ndev)) {
- WL_MEM(("skip change vif for static if\n"));
+ /* Incase of static interfaces, the netinfo will be
+ * allocated only when FW interface is initialized. So
+ * store the value and use it during initialization.
+ */
+ WL_INFORM_MEM(("skip change vif for static if\n"));
+ ndev->ieee80211_ptr->iftype = type;
err = BCME_OK;
- } else {
+ } else
+#endif /* WL_STATIC_IF */
+ {
WL_ERR(("netinfo not found \n"));
err = -ENODEV;
}
/* intentional fall through */
case NL80211_IFTYPE_AP_VLAN:
{
- if (!wl_get_drv_status(cfg, AP_CREATED, ndev)) {
+ if (!wl_get_drv_status(cfg, AP_CREATED, ndev) &&
+ wl_get_drv_status(cfg, READY, ndev)) {
err = wl_cfg80211_set_ap_role(cfg, ndev);
if (unlikely(err)) {
WL_ERR(("set ap role failed!\n"));
goto fail;
}
- err = wldev_ioctl_set(ndev, WLC_SET_INFRA, &infra, sizeof(s32));
- if (err < 0) {
- WL_ERR(("SET INFRA/IBSS error %d\n", err));
- goto fail;
+ if (wl_get_drv_status(cfg, READY, ndev)) {
+ err = wldev_ioctl_set(ndev, WLC_SET_INFRA, &infra, sizeof(s32));
+ if (err < 0) {
+ WL_ERR(("SET INFRA/IBSS error %d\n", err));
+ goto fail;
+ }
}
wl_cfg80211_iface_state_ops(primary_ndev->ieee80211_ptr,
ndev->ieee80211_ptr->iftype = type;
netinfo->iftype = wl_iftype;
WL_INFORM_MEM(("[%s] cfg_iftype changed to %d\n", ndev->name, type));
+#ifdef WL_EXT_IAPSTA
+ wl_ext_iapsta_update_iftype(ndev, netinfo->ifidx, wl_iftype);
+#endif
fail:
if (err) {
{
bool ifadd_expected = FALSE;
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ bool bss_pending_op = TRUE;
/* P2P may send WLC_E_IF_ADD and/or WLC_E_IF_CHANGE during IF updating ("p2p_ifupd")
* redirect the IF_ADD event to ifchange as it is not a real "new" interface
wl_clr_p2p_status(cfg, IF_ADDING);
} else if (cfg->bss_pending_op) {
ifadd_expected = TRUE;
- cfg->bss_pending_op = FALSE;
+ bss_pending_op = FALSE;
}
if (ifadd_expected) {
if_event_info->ifidx = ifidx;
if_event_info->bssidx = bssidx;
if_event_info->role = role;
- strncpy(if_event_info->name, name, IFNAMSIZ);
- if_event_info->name[IFNAMSIZ] = '\0';
+ strlcpy(if_event_info->name, name, sizeof(if_event_info->name));
+ if_event_info->name[IFNAMSIZ - 1] = '\0';
if (mac)
memcpy(if_event_info->mac, mac, ETHER_ADDR_LEN);
+
+		/* Update bss pending operation status */
+ if (!bss_pending_op) {
+ cfg->bss_pending_op = FALSE;
+ }
WL_INFORM_MEM(("IF_ADD ifidx:%d bssidx:%d role:%d\n",
ifidx, bssidx, role));
+ OSL_SMP_WMB();
wake_up_interruptible(&cfg->netif_change_event);
return BCME_OK;
}
bool ifdel_expected = FALSE;
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
wl_if_event_info *if_event_info = &cfg->if_event_info;
+ bool bss_pending_op = TRUE;
if (wl_get_p2p_status(cfg, IF_DELETING)) {
ifdel_expected = TRUE;
wl_clr_p2p_status(cfg, IF_DELETING);
} else if (cfg->bss_pending_op) {
ifdel_expected = TRUE;
- cfg->bss_pending_op = FALSE;
+ bss_pending_op = FALSE;
}
if (ifdel_expected) {
if_event_info->valid = TRUE;
if_event_info->ifidx = ifidx;
if_event_info->bssidx = bssidx;
+		/* Update bss pending operation status */
+ if (!bss_pending_op) {
+ cfg->bss_pending_op = FALSE;
+ }
WL_INFORM_MEM(("IF_DEL ifidx:%d bssidx:%d\n", ifidx, bssidx));
+ OSL_SMP_WMB();
wake_up_interruptible(&cfg->netif_change_event);
return BCME_OK;
}
if (wl_get_p2p_status(cfg, IF_CHANGING)) {
wl_set_p2p_status(cfg, IF_CHANGED);
+ OSL_SMP_WMB();
wake_up_interruptible(&cfg->netif_change_event);
return BCME_OK;
}
return BCME_ERROR;
}
-/* Find listen channel */
-static s32 wl_find_listen_channel(struct bcm_cfg80211 *cfg,
- const u8 *ie, u32 ie_len)
+static s32 wl_set_rts(struct net_device *dev, u32 rts_threshold)
{
- const wifi_p2p_ie_t *p2p_ie;
- const u8 *end, *pos;
- s32 listen_channel;
-
- pos = (const u8 *)ie;
-
- p2p_ie = wl_cfgp2p_find_p2pie(pos, ie_len);
+ s32 err = 0;
- if (p2p_ie == NULL) {
- return 0;
+ err = wldev_iovar_setint(dev, "rtsthresh", rts_threshold);
+ if (unlikely(err)) {
+ WL_ERR(("Error (%d)\n", err));
+ return err;
}
+ return err;
+}
- if (p2p_ie->len < MIN_P2P_IE_LEN || p2p_ie->len > MAX_P2P_IE_LEN) {
- CFGP2P_ERR(("p2p_ie->len out of range - %d\n", p2p_ie->len));
- return 0;
+static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold)
+{
+ s32 err = 0;
+
+ err = wldev_iovar_setint_bsscfg(dev, "fragthresh", frag_threshold, 0);
+ if (unlikely(err)) {
+ WL_ERR(("Error (%d)\n", err));
+ return err;
}
+ return err;
+}
- pos = p2p_ie->subelts;
- end = p2p_ie->subelts + (p2p_ie->len - 4);
+static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l)
+{
+ s32 err = 0;
+ u32 cmd = (l ? WLC_SET_LRL : WLC_SET_SRL);
- CFGP2P_DBG((" found p2p ie ! lenth %d \n",
- p2p_ie->len));
+#ifdef CUSTOM_LONG_RETRY_LIMIT
+ if ((cmd == WLC_SET_LRL) &&
+ (retry != CUSTOM_LONG_RETRY_LIMIT)) {
+ WL_DBG(("CUSTOM_LONG_RETRY_LIMIT is used.Ignore configuration"));
+ return err;
+ }
+#endif /* CUSTOM_LONG_RETRY_LIMIT */
- while (pos < end) {
- uint16 attr_len;
- if (pos + 2 >= end) {
- CFGP2P_DBG((" -- Invalid P2P attribute"));
- return 0;
- }
- attr_len = ((uint16) (((pos + 1)[1] << 8) | (pos + 1)[0]));
-
- if (pos + 3 + attr_len > end) {
- CFGP2P_DBG(("P2P: Attribute underflow "
- "(len=%u left=%d)",
- attr_len, (int) (end - pos - 3)));
- return 0;
- }
-
- /* if Listen Channel att id is 6 and the vailue is valid,
- * return the listen channel
- */
- if (pos[0] == 6) {
- /* listen channel subel length format
- * 1(id) + 2(len) + 3(country) + 1(op. class) + 1(chan num)
- */
- listen_channel = pos[1 + 2 + 3 + 1];
-
- if (listen_channel == SOCIAL_CHAN_1 ||
- listen_channel == SOCIAL_CHAN_2 ||
- listen_channel == SOCIAL_CHAN_3) {
- CFGP2P_DBG((" Found my Listen Channel %d \n", listen_channel));
- return listen_channel;
- }
- }
- pos += 3 + attr_len;
+ retry = htod32(retry);
+ err = wldev_ioctl_set(dev, cmd, &retry, sizeof(retry));
+ if (unlikely(err)) {
+ WL_ERR(("cmd (%d) , error (%d)\n", cmd, err));
+ return err;
}
- return 0;
+ return err;
}
-static void wl_scan_prep(struct bcm_cfg80211 *cfg, struct wl_scan_params *params,
- struct cfg80211_scan_request *request)
+static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
- u32 n_ssids;
- u32 n_channels;
- u16 channel;
- chanspec_t chanspec;
- s32 i = 0, j = 0, offset;
- char *ptr;
- wlc_ssid_t ssid;
-
- memcpy(¶ms->bssid, ðer_bcast, ETHER_ADDR_LEN);
- params->bss_type = DOT11_BSSTYPE_ANY;
- params->scan_type = 0;
- params->nprobes = -1;
- params->active_time = -1;
- params->passive_time = -1;
- params->home_time = -1;
- params->channel_num = 0;
- memset(¶ms->ssid, 0, sizeof(wlc_ssid_t));
-
- WL_SCAN(("Preparing Scan request\n"));
- WL_SCAN(("nprobes=%d\n", params->nprobes));
- WL_SCAN(("active_time=%d\n", params->active_time));
- WL_SCAN(("passive_time=%d\n", params->passive_time));
- WL_SCAN(("home_time=%d\n", params->home_time));
- WL_SCAN(("scan_type=%d\n", params->scan_type));
-
- params->nprobes = htod32(params->nprobes);
- params->active_time = htod32(params->active_time);
- params->passive_time = htod32(params->passive_time);
- params->home_time = htod32(params->home_time);
-
- /* if request is null just exit so it will be all channel broadcast scan */
- if (!request)
- return;
-
- n_ssids = request->n_ssids;
- n_channels = request->n_channels;
-
- /* Copy channel array if applicable */
- WL_SCAN(("### List of channelspecs to scan ###\n"));
- if (n_channels > 0) {
- for (i = 0; i < n_channels; i++) {
- channel = ieee80211_frequency_to_channel(request->channels[i]->center_freq);
- /* SKIP DFS channels for Secondary interface */
- if ((cfg->escan_info.ndev != bcmcfg_to_prmry_ndev(cfg)) &&
- (request->channels[i]->flags &
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
- (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_PASSIVE_SCAN)))
-#else
- (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)))
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) */
- continue;
- if (!dhd_conf_match_channel(cfg->pub, channel))
- continue;
-
- chanspec = WL_CHANSPEC_BW_20;
- if (chanspec == INVCHANSPEC) {
- WL_ERR(("Invalid chanspec! Skipping channel\n"));
- continue;
- }
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wiphy);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ s32 err = 0;
- if (request->channels[i]->band == IEEE80211_BAND_2GHZ) {
-#ifdef WL_HOST_BAND_MGMT
- if (cfg->curr_band == WLC_BAND_5G) {
- WL_DBG(("In 5G only mode, omit 2G channel:%d\n", channel));
- continue;
- }
-#endif /* WL_HOST_BAND_MGMT */
- chanspec |= WL_CHANSPEC_BAND_2G;
- } else {
-#ifdef WL_HOST_BAND_MGMT
- if (cfg->curr_band == WLC_BAND_2G) {
- WL_DBG(("In 2G only mode, omit 5G channel:%d\n", channel));
- continue;
- }
-#endif /* WL_HOST_BAND_MGMT */
- chanspec |= WL_CHANSPEC_BAND_5G;
- }
- params->channel_list[j] = channel;
- params->channel_list[j] &= WL_CHANSPEC_CHAN_MASK;
- params->channel_list[j] |= chanspec;
- WL_SCAN(("Chan : %d, Channel spec: %x \n",
- channel, params->channel_list[j]));
- params->channel_list[j] = wl_chspec_host_to_driver(params->channel_list[j]);
- j++;
- }
- } else {
- WL_SCAN(("Scanning all channels\n"));
- }
- n_channels = j;
- /* Copy ssid array if applicable */
- WL_SCAN(("### List of SSIDs to scan ###\n"));
- if (n_ssids > 0) {
- offset = offsetof(wl_scan_params_t, channel_list) + n_channels * sizeof(u16);
- offset = roundup(offset, sizeof(u32));
- ptr = (char*)params + offset;
- for (i = 0; i < n_ssids; i++) {
- memset(&ssid, 0, sizeof(wlc_ssid_t));
- ssid.SSID_len = MIN(request->ssids[i].ssid_len, DOT11_MAX_SSID_LEN);
- memcpy(ssid.SSID, request->ssids[i].ssid, ssid.SSID_len);
- if (!ssid.SSID_len)
- WL_SCAN(("%d: Broadcast scan\n", i));
- else
- WL_SCAN(("%d: scan for %s size =%d\n", i,
- ssid.SSID, ssid.SSID_len));
- memcpy(ptr, &ssid, sizeof(wlc_ssid_t));
- ptr += sizeof(wlc_ssid_t);
- }
- } else {
- WL_SCAN(("Broadcast scan\n"));
+ RETURN_EIO_IF_NOT_UP(cfg);
+ WL_DBG(("Enter\n"));
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
+ (cfg->conf->rts_threshold != wiphy->rts_threshold)) {
+ cfg->conf->rts_threshold = wiphy->rts_threshold;
+ err = wl_set_rts(ndev, cfg->conf->rts_threshold);
+ if (err != BCME_OK)
+ return err;
}
- /* Adding mask to channel numbers */
- params->channel_num =
- htod32((n_ssids << WL_SCAN_PARAMS_NSSID_SHIFT) |
- (n_channels & WL_SCAN_PARAMS_COUNT_MASK));
-
- if (n_channels == 1) {
- params->active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
- params->nprobes = htod32(params->active_time / WL_SCAN_JOIN_PROBE_INTERVAL_MS);
+ if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
+ (cfg->conf->frag_threshold != wiphy->frag_threshold)) {
+ cfg->conf->frag_threshold = wiphy->frag_threshold;
+ err = wl_set_frag(ndev, cfg->conf->frag_threshold);
+ if (err != BCME_OK)
+ return err;
}
-}
-
-static s32
-wl_get_valid_channels(struct net_device *ndev, u8 *valid_chan_list, s32 size)
-{
- wl_uint32_list_t *list;
- s32 err = BCME_OK;
- if (valid_chan_list == NULL || size <= 0)
- return -ENOMEM;
-
- memset(valid_chan_list, 0, size);
- list = (wl_uint32_list_t *)(void *) valid_chan_list;
- list->count = htod32(WL_NUMCHANNELS);
- err = wldev_ioctl_get(ndev, WLC_GET_VALID_CHANNELS, valid_chan_list, size);
- if (err != 0) {
- WL_ERR(("get channels failed with %d\n", err));
+ if (changed & WIPHY_PARAM_RETRY_LONG &&
+ (cfg->conf->retry_long != wiphy->retry_long)) {
+ cfg->conf->retry_long = wiphy->retry_long;
+ err = wl_set_retry(ndev, cfg->conf->retry_long, true);
+ if (err != BCME_OK)
+ return err;
+ }
+ if (changed & WIPHY_PARAM_RETRY_SHORT &&
+ (cfg->conf->retry_short != wiphy->retry_short)) {
+ cfg->conf->retry_short = wiphy->retry_short;
+ err = wl_set_retry(ndev, cfg->conf->retry_short, false);
+ if (err != BCME_OK) {
+ return err;
+ }
}
return err;
}
-
-#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
-#define FIRST_SCAN_ACTIVE_DWELL_TIME_MS 40
-bool g_first_broadcast_scan = TRUE;
-#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
-
-static s32
-wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- struct cfg80211_scan_request *request, uint16 action)
+static chanspec_t
+channel_to_chanspec(struct wiphy *wiphy, struct net_device *dev, u32 channel, u32 bw_cap)
{
- s32 err = BCME_OK;
- u32 n_channels;
- u32 n_ssids;
- s32 params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params));
- wl_escan_params_t *params = NULL;
- u8 chan_buf[sizeof(u32)*(WL_NUMCHANNELS + 1)];
- u32 num_chans = 0;
- s32 channel;
- u32 n_valid_chan;
- s32 search_state = WL_P2P_DISC_ST_SCAN;
- u32 i, j, n_nodfs = 0;
- u16 *default_chan_list = NULL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ u8 *buf = NULL;
wl_uint32_list_t *list;
- s32 bssidx = -1;
- struct net_device *dev = NULL;
-#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
- bool is_first_init_2g_scan = false;
-#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
- p2p_scan_purpose_t p2p_scan_purpose = P2P_SCAN_PURPOSE_MIN;
- u32 chan_mem = 0;
- scb_val_t scbval;
- static int cnt = 0;
-
- WL_DBG(("Enter \n"));
+ int err = BCME_OK;
+ chanspec_t c = 0, ret_c = 0;
+ int bw = 0, tmp_bw = 0;
+ int i;
+ u32 tmp_c;
- /* scan request can come with empty request : perform all default scan */
- if (!cfg) {
- err = -EINVAL;
+#define LOCAL_BUF_SIZE 1024
+ buf = (u8 *)MALLOC(cfg->osh, LOCAL_BUF_SIZE);
+ if (!buf) {
+ WL_ERR(("buf memory alloc failed\n"));
goto exit;
}
- if (!cfg->p2p_supported || !p2p_scan(cfg)) {
- /* LEGACY SCAN TRIGGER */
- WL_SCAN((" LEGACY E-SCAN START\n"));
-
-#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
- if (!request) {
- err = -EINVAL;
- goto exit;
- }
- if (ndev == bcmcfg_to_prmry_ndev(cfg) && g_first_broadcast_scan == true) {
-#ifdef USE_INITIAL_2G_SCAN
- struct ieee80211_channel tmp_channel_list[CH_MAX_2G_CHANNEL];
- /* allow one 5G channel to add previous connected channel in 5G */
- bool allow_one_5g_channel = TRUE;
- j = 0;
- for (i = 0; i < request->n_channels; i++) {
- int tmp_chan = ieee80211_frequency_to_channel
- (request->channels[i]->center_freq);
- if (tmp_chan > CH_MAX_2G_CHANNEL) {
- if (allow_one_5g_channel)
- allow_one_5g_channel = FALSE;
- else
- continue;
- }
- if (j > CH_MAX_2G_CHANNEL) {
- WL_ERR(("Index %d exceeds max 2.4GHz channels %d"
- " and previous 5G connected channel\n",
- j, CH_MAX_2G_CHANNEL));
- break;
- }
- bcopy(request->channels[i], &tmp_channel_list[j],
- sizeof(struct ieee80211_channel));
- WL_SCAN(("channel of request->channels[%d]=%d\n", i, tmp_chan));
- j++;
- }
- if ((j > 0) && (j <= CH_MAX_2G_CHANNEL)) {
- for (i = 0; i < j; i++)
- bcopy(&tmp_channel_list[i], request->channels[i],
- sizeof(struct ieee80211_channel));
-
- request->n_channels = j;
- is_first_init_2g_scan = true;
- }
- else
- WL_ERR(("Invalid number of 2.4GHz channels %d\n", j));
-
- WL_SCAN(("request->n_channels=%d\n", request->n_channels));
-#else /* USE_INITIAL_SHORT_DWELL_TIME */
- is_first_init_2g_scan = true;
-#endif /* USE_INITIAL_2G_SCAN */
- g_first_broadcast_scan = false;
- }
-#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
-
- /* if scan request is not empty parse scan request paramters */
- if (request != NULL) {
- n_channels = request->n_channels;
- n_ssids = request->n_ssids;
- if (n_channels % 2)
- /* If n_channels is odd, add a padd of u16 */
- params_size += sizeof(u16) * (n_channels + 1);
- else
- params_size += sizeof(u16) * n_channels;
-
- /* Allocate space for populating ssids in wl_escan_params_t struct */
- params_size += sizeof(struct wlc_ssid) * n_ssids;
- }
- params = (wl_escan_params_t *)MALLOCZ(cfg->osh, params_size);
- if (params == NULL) {
- err = -ENOMEM;
- goto exit;
- }
- wl_scan_prep(cfg, ¶ms->params, request);
-
-#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
- /* Override active_time to reduce scan time if it's first bradcast scan. */
- if (is_first_init_2g_scan)
- params->params.active_time = FIRST_SCAN_ACTIVE_DWELL_TIME_MS;
-#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
-
- params->version = htod32(ESCAN_REQ_VERSION);
- params->action = htod16(action);
- wl_escan_set_sync_id(params->sync_id, cfg);
- wl_escan_set_type(cfg, WL_SCANTYPE_LEGACY);
- if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) {
- WL_ERR(("ioctl buffer length not sufficient\n"));
- MFREE(cfg->osh, params, params_size);
- err = -ENOMEM;
- goto exit;
- }
- if (cfg->active_scan == PASSIVE_SCAN) {
- params->params.scan_type = DOT11_SCANTYPE_PASSIVE;
- WL_DBG(("Passive scan_type %d \n", params->params.scan_type));
- }
-
- bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
-
- err = wldev_iovar_setbuf(ndev, "escan", params, params_size,
- cfg->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
- WL_MSG(ndev->name, "LEGACY_SCAN sync ID: %d, bssidx: %d\n", params->sync_id, bssidx);
- if (unlikely(err)) {
- if (err == BCME_EPERM)
- /* Scan Not permitted at this point of time */
- WL_DBG((" Escan not permitted at this time (%d)\n", err));
- else
- WL_ERR((" Escan set error (%d)\n", err));
- } else {
- DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_REQUESTED);
- }
- MFREE(cfg->osh, params, params_size);
+ err = wldev_iovar_getbuf_bsscfg(dev, "chanspecs", NULL,
+ 0, buf, LOCAL_BUF_SIZE, 0, &cfg->ioctl_buf_sync);
+ if (err != BCME_OK) {
+ WL_ERR(("get chanspecs failed with %d\n", err));
+ goto exit;
}
- else if (p2p_is_on(cfg) && p2p_scan(cfg)) {
- /* P2P SCAN TRIGGER */
- s32 _freq = 0;
- n_nodfs = 0;
-#ifdef WL_NAN
- if (wl_cfgnan_check_state(cfg)) {
- WL_ERR(("nan is enabled, nan + p2p concurrency not supported\n"));
- return BCME_UNSUPPORTED;
- }
-#endif /* WL_NAN */
- if (request && request->n_channels) {
- num_chans = request->n_channels;
- WL_SCAN((" chann number : %d\n", num_chans));
- chan_mem = num_chans * sizeof(*default_chan_list);
- default_chan_list = MALLOCZ(cfg->osh, chan_mem);
- if (default_chan_list == NULL) {
- WL_ERR(("channel list allocation failed \n"));
- err = -ENOMEM;
+ list = (wl_uint32_list_t *)(void *)buf;
+ for (i = 0; i < dtoh32(list->count); i++) {
+ c = dtoh32(list->element[i]);
+ if (channel <= CH_MAX_2G_CHANNEL) {
+ if (!CHSPEC_IS20(c))
+ continue;
+ if (channel == CHSPEC_CHANNEL(c)) {
+ ret_c = c;
+ bw = 20;
goto exit;
}
- if (!wl_get_valid_channels(ndev, chan_buf, sizeof(chan_buf))) {
-#ifdef P2P_SKIP_DFS
- int is_printed = false;
-#endif /* P2P_SKIP_DFS */
- list = (wl_uint32_list_t *) chan_buf;
- n_valid_chan = dtoh32(list->count);
- if (n_valid_chan > WL_NUMCHANNELS) {
- WL_ERR(("wrong n_valid_chan:%d\n", n_valid_chan));
- MFREE(cfg->osh, default_chan_list, chan_mem);
- err = -EINVAL;
- goto exit;
- }
-
- for (i = 0; i < num_chans; i++)
- {
-#ifdef WL_HOST_BAND_MGMT
- int channel_band = 0;
-#endif /* WL_HOST_BAND_MGMT */
- _freq = request->channels[i]->center_freq;
- channel = ieee80211_frequency_to_channel(_freq);
-#ifdef WL_HOST_BAND_MGMT
- channel_band = (channel > CH_MAX_2G_CHANNEL) ?
- WLC_BAND_5G : WLC_BAND_2G;
- if ((cfg->curr_band != WLC_BAND_AUTO) &&
- (cfg->curr_band != channel_band) &&
- !IS_P2P_SOCIAL_CHANNEL(channel))
- continue;
-#endif /* WL_HOST_BAND_MGMT */
-
- /* ignore DFS channels */
- if (request->channels[i]->flags &
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
- (IEEE80211_CHAN_NO_IR
- | IEEE80211_CHAN_RADAR))
-#else
- (IEEE80211_CHAN_RADAR
- | IEEE80211_CHAN_PASSIVE_SCAN))
-#endif // endif
- continue;
-#ifdef P2P_SKIP_DFS
- if (channel >= 52 && channel <= 144) {
- if (is_printed == false) {
- WL_ERR(("SKIP DFS CHANs(52~144)\n"));
- is_printed = true;
- }
- continue;
- }
-#endif /* P2P_SKIP_DFS */
-
- for (j = 0; j < n_valid_chan; j++) {
- /* allows only supported channel on
- * current reguatory
- */
- if (n_nodfs >= num_chans) {
- break;
- }
- if (channel == (dtoh32(list->element[j]))) {
- default_chan_list[n_nodfs++] =
- channel;
- }
- }
-
- }
- }
- if (num_chans == SOCIAL_CHAN_CNT && (
- (default_chan_list[0] == SOCIAL_CHAN_1) &&
- (default_chan_list[1] == SOCIAL_CHAN_2) &&
- (default_chan_list[2] == SOCIAL_CHAN_3))) {
- /* SOCIAL CHANNELS 1, 6, 11 */
- search_state = WL_P2P_DISC_ST_SEARCH;
- p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
- WL_DBG(("P2P SEARCH PHASE START \n"));
- } else if (((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1)) &&
- (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) ||
- ((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION2)) &&
- (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP))) {
- /* If you are already a GO, then do SEARCH only */
- WL_DBG(("Already a GO. Do SEARCH Only"));
- search_state = WL_P2P_DISC_ST_SEARCH;
- num_chans = n_nodfs;
- p2p_scan_purpose = P2P_SCAN_NORMAL;
-
- } else if (num_chans == 1) {
- p2p_scan_purpose = P2P_SCAN_CONNECT_TRY;
- WL_INFORM_MEM(("Trigger p2p join scan\n"));
- } else if (num_chans == SOCIAL_CHAN_CNT + 1) {
- /* SOCIAL_CHAN_CNT + 1 takes care of the Progressive scan supported by
- * the supplicant
- */
- p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
- } else {
- WL_DBG(("P2P SCAN STATE START \n"));
- num_chans = n_nodfs;
- p2p_scan_purpose = P2P_SCAN_NORMAL;
- }
- } else {
- err = -EINVAL;
- goto exit;
}
- err = wl_cfgp2p_escan(cfg, ndev, ACTIVE_SCAN, num_chans, default_chan_list,
- search_state, action,
- wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE), NULL,
- p2p_scan_purpose);
-
- if (!err)
- cfg->p2p->search_state = search_state;
+ tmp_c = wf_chspec_ctlchan(c);
+ tmp_bw = bw2cap[CHSPEC_BW(c) >> WL_CHANSPEC_BW_SHIFT];
+ if (tmp_c != channel)
+ continue;
- MFREE(cfg->osh, default_chan_list, chan_mem);
+ if ((tmp_bw > bw) && (tmp_bw <= bw_cap)) {
+ bw = tmp_bw;
+ ret_c = c;
+ if (bw == bw_cap)
+ goto exit;
+ }
}
exit:
- if (unlikely(err)) {
- int suppressed = 0;
- wldev_ioctl(dev, WLC_GET_SCANSUPPRESS, &suppressed, sizeof(int), false);
- /* Don't print Error incase of Scan suppress */
- if ((err == BCME_EPERM) && (cfg->scan_suppressed || suppressed)) {
- cnt = 0;
- WL_DBG(("Escan failed: Scan Suppressed \n"));
- } else {
- cnt++;
- WL_ERR(("error (%d), cnt=%d\n", err, cnt));
- // terence 20140111: send disassoc to firmware
- if (cnt >= 4) {
- dev = bcmcfg_to_prmry_ndev(cfg);
- memset(&scbval, 0, sizeof(scb_val_t));
- wldev_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t), true);
- WL_ERR(("Send disassoc to break the busy dev=%p\n", dev));
- cnt = 0;
- }
- }
- } else {
- cnt = 0;
+ if (buf) {
+ MFREE(cfg->osh, buf, LOCAL_BUF_SIZE);
}
- return err;
+#undef LOCAL_BUF_SIZE
+ WL_DBG(("return chanspec %x %d\n", ret_c, bw));
+ return ret_c;
}
-static s32
-wl_do_escan(struct bcm_cfg80211 *cfg, struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request)
+void
+wl_cfg80211_ibss_vsie_set_buffer(struct net_device *dev, vndr_ie_setbuf_t *ibss_vsie,
+ int ibss_vsie_len)
{
- s32 err = BCME_OK;
- s32 passive_scan;
- s32 passive_scan_time;
- s32 passive_scan_time_org;
- wl_scan_results_t *results;
- WL_SCAN(("Enter \n"));
-
- results = wl_escan_get_buf(cfg, FALSE);
- results->version = 0;
- results->count = 0;
- results->buflen = WL_SCAN_RESULTS_FIXED_SIZE;
-
- cfg->escan_info.ndev = ndev;
- cfg->escan_info.wiphy = wiphy;
- cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANING;
- passive_scan = cfg->active_scan ? 0 : 1;
- err = wldev_ioctl_set(ndev, WLC_SET_PASSIVE_SCAN,
- &passive_scan, sizeof(passive_scan));
- if (unlikely(err)) {
- WL_ERR(("error (%d)\n", err));
- goto exit;
- }
-
- if (passive_channel_skip) {
-
- err = wldev_ioctl_get(ndev, WLC_GET_SCAN_PASSIVE_TIME,
- &passive_scan_time_org, sizeof(passive_scan_time_org));
- if (unlikely(err)) {
- WL_ERR(("== error (%d)\n", err));
- goto exit;
- }
-
- WL_SCAN(("PASSIVE SCAN time : %d \n", passive_scan_time_org));
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- passive_scan_time = 0;
- err = wldev_ioctl_set(ndev, WLC_SET_SCAN_PASSIVE_TIME,
- &passive_scan_time, sizeof(passive_scan_time));
- if (unlikely(err)) {
- WL_ERR(("== error (%d)\n", err));
- goto exit;
+ if (cfg != NULL && ibss_vsie != NULL) {
+ if (cfg->ibss_vsie != NULL) {
+ MFREE(cfg->osh, cfg->ibss_vsie, cfg->ibss_vsie_len);
}
-
- WL_SCAN(("PASSIVE SCAN SKIPED!! (passive_channel_skip:%d) \n",
- passive_channel_skip));
+ cfg->ibss_vsie = ibss_vsie;
+ cfg->ibss_vsie_len = ibss_vsie_len;
}
+}
- err = wl_run_escan(cfg, ndev, request, WL_SCAN_ACTION_START);
-
- if (passive_channel_skip) {
- err = wldev_ioctl_set(ndev, WLC_SET_SCAN_PASSIVE_TIME,
- &passive_scan_time_org, sizeof(passive_scan_time_org));
- if (unlikely(err)) {
- WL_ERR(("== error (%d)\n", err));
- goto exit;
- }
-
- WL_SCAN(("PASSIVE SCAN RECOVERED!! (passive_scan_time_org:%d) \n",
- passive_scan_time_org));
+static void
+wl_cfg80211_ibss_vsie_free(struct bcm_cfg80211 *cfg)
+{
+	/* free & initialize VSIE (Vendor Specific IE) */
+ if (cfg->ibss_vsie != NULL) {
+ MFREE(cfg->osh, cfg->ibss_vsie, cfg->ibss_vsie_len);
+ cfg->ibss_vsie_len = 0;
}
-
-exit:
- return err;
}
-static s32
-__wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request,
- struct cfg80211_ssid *this_ssid)
+s32
+wl_cfg80211_ibss_vsie_delete(struct net_device *dev)
{
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct cfg80211_ssid *ssids;
- struct ether_addr primary_mac;
- bool p2p_ssid;
-#ifdef WL11U
- bcm_tlv_t *interworking_ie;
-#endif // endif
- s32 err = 0;
- s32 bssidx = -1;
- s32 i;
-
- unsigned long flags;
- static s32 busy_count = 0;
-#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
- struct net_device *remain_on_channel_ndev = NULL;
-#endif // endif
- /*
- * Hostapd triggers scan before starting automatic channel selection
- * to collect channel characteristics. However firmware scan engine
- * doesn't support any channel characteristics collection along with
- * scan. Hence return scan success.
- */
- if (request && (scan_req_iftype(request) == NL80211_IFTYPE_AP)) {
- WL_DBG(("Scan Command on SoftAP Interface. Ignoring...\n"));
-// terence 20161023: let it scan in SoftAP mode
-// return 0;
- }
-
- ndev = ndev_to_wlc_ndev(ndev, cfg);
-
- if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg)) {
- WL_ERR(("Sending Action Frames. Try it again.\n"));
- return -EAGAIN;
- }
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ char *ioctl_buf = NULL;
+ s32 ret = BCME_OK, bssidx;
- WL_DBG(("Enter wiphy (%p)\n", wiphy));
- if (wl_get_drv_status_all(cfg, SCANNING)) {
- if (cfg->scan_request == NULL) {
- wl_clr_drv_status_all(cfg, SCANNING);
- WL_DBG(("<<<<<<<<<<<Force Clear Scanning Status>>>>>>>>>>>\n"));
- } else {
- WL_ERR(("Scanning already\n"));
- return -EAGAIN;
+ if (cfg != NULL && cfg->ibss_vsie != NULL) {
+ ioctl_buf = (char *)MALLOC(cfg->osh, WLC_IOCTL_MEDLEN);
+ if (!ioctl_buf) {
+ WL_ERR(("ioctl memory alloc failed\n"));
+ return -ENOMEM;
}
- }
- if (wl_get_drv_status(cfg, SCAN_ABORTING, ndev)) {
- WL_ERR(("Scanning being aborted\n"));
- return -EAGAIN;
- }
- if (request && request->n_ssids > WL_SCAN_PARAMS_SSID_MAX) {
- WL_ERR(("request null or n_ssids > WL_SCAN_PARAMS_SSID_MAX\n"));
- return -EOPNOTSUPP;
- }
-#ifdef WL_BCNRECV
- /* check fakeapscan in progress then abort */
- wl_android_bcnrecv_stop(ndev, WL_BCNRECV_SCANBUSY);
-#endif /* WL_BCNRECV */
-
-#ifdef P2P_LISTEN_OFFLOADING
- if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
- WL_ERR(("P2P_FIND: Discovery offload is in progress\n"));
- return -EAGAIN;
- }
-#endif /* P2P_LISTEN_OFFLOADING */
-
-#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
- remain_on_channel_ndev = wl_cfg80211_get_remain_on_channel_ndev(cfg);
- if (remain_on_channel_ndev) {
- WL_DBG(("Remain_on_channel bit is set, somehow it didn't get cleared\n"));
- wl_notify_escan_complete(cfg, remain_on_channel_ndev, true, true);
- }
-#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
-
- if (request) { /* scan bss */
- ssids = request->ssids;
- p2p_ssid = false;
- for (i = 0; i < request->n_ssids; i++) {
- if (ssids[i].ssid_len &&
- IS_P2P_SSID(ssids[i].ssid, ssids[i].ssid_len)) {
- /* P2P Scan */
-#ifdef WL_BLOCK_P2P_SCAN_ON_STA
- if (!(IS_P2P_IFACE(request->wdev))) {
- /* P2P scan on non-p2p iface. Fail scan */
- WL_ERR(("p2p_search on non p2p iface\n"));
- goto scan_out;
- }
-#endif /* WL_BLOCK_P2P_SCAN_ON_STA */
-
- p2p_ssid = true;
- break;
- }
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find index failed\n"));
+ ret = BCME_ERROR;
+ goto end;
}
- if (p2p_ssid) {
- if (cfg->p2p_supported) {
-#ifdef WL_NAN
- if (cfg->nan_enable) {
- err = wl_cfgnan_disable(cfg, NAN_CONCURRENCY_CONFLICT);
- if (err != BCME_OK) {
- WL_ERR(("failed to disable nan, error[%d]\n", err));
- goto scan_out;
- }
- }
-#endif /* WL_NAN */
- /* p2p scan trigger */
- if (p2p_on(cfg) == false) {
- /* p2p on at the first time */
- p2p_on(cfg) = true;
- wl_cfgp2p_set_firm_p2p(cfg);
- get_primary_mac(cfg, &primary_mac);
- wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
-#if defined(P2P_IE_MISSING_FIX)
- cfg->p2p_prb_noti = false;
-#endif // endif
- }
- wl_clr_p2p_status(cfg, GO_NEG_PHASE);
- WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
- p2p_scan(cfg) = true;
- }
- } else {
- /* legacy scan trigger
- * So, we have to disable p2p discovery if p2p discovery is on
- */
- if (cfg->p2p_supported) {
- p2p_scan(cfg) = false;
- /* If Netdevice is not equals to primary and p2p is on
- * , we will do p2p scan using P2PAPI_BSSCFG_DEVICE.
- */
-
- if (p2p_scan(cfg) == false) {
- if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
- err = wl_cfgp2p_discover_enable_search(cfg,
- false);
- if (unlikely(err)) {
- goto scan_out;
- }
-
- }
- }
- }
- if (!cfg->p2p_supported || !p2p_scan(cfg)) {
- if ((bssidx = wl_get_bssidx_by_wdev(cfg,
- ndev->ieee80211_ptr)) < 0) {
- WL_ERR(("Find p2p index from ndev(%p) failed\n",
- ndev));
- err = BCME_ERROR;
- goto scan_out;
- }
-#ifdef WL11U
- if (request && (interworking_ie = wl_cfg80211_find_interworking_ie(
- request->ie, request->ie_len)) != NULL) {
- if ((err = wl_cfg80211_add_iw_ie(cfg, ndev, bssidx,
- VNDR_IE_CUSTOM_FLAG, interworking_ie->id,
- interworking_ie->data,
- interworking_ie->len)) != BCME_OK) {
- WL_ERR(("Failed to add interworking IE"));
- }
- } else if (cfg->wl11u) {
- /* we have to clear IW IE and disable gratuitous APR */
- wl_cfg80211_clear_iw_ie(cfg, ndev, bssidx);
- err = wldev_iovar_setint_bsscfg(ndev, "grat_arp",
- 0, bssidx);
- /* we don't care about error here
- * because the only failure case is unsupported,
- * which is fine
- */
- if (unlikely(err)) {
- WL_ERR(("Set grat_arp failed:(%d) Ignore!\n", err));
- }
- cfg->wl11u = FALSE;
- }
-#endif /* WL11U */
- if (request) {
- err = wl_cfg80211_set_mgmt_vndr_ies(cfg,
- ndev_to_cfgdev(ndev), bssidx, VNDR_IE_PRBREQ_FLAG,
- request->ie, request->ie_len);
- }
+ /* change the command from "add" to "del" */
+ strlcpy(cfg->ibss_vsie->cmd, "del", sizeof(cfg->ibss_vsie->cmd));
- if (unlikely(err)) {
-// terence 20161023: let it scan in SoftAP mode
-// goto scan_out;
- }
+ ret = wldev_iovar_setbuf_bsscfg(dev, "vndr_ie",
+ cfg->ibss_vsie, cfg->ibss_vsie_len,
+ ioctl_buf, WLC_IOCTL_MEDLEN, bssidx, NULL);
+ WL_ERR(("ret=%d\n", ret));
- }
+ if (ret == BCME_OK) {
+ /* Free & initialize VSIE */
+ MFREE(cfg->osh, cfg->ibss_vsie, cfg->ibss_vsie_len);
+ cfg->ibss_vsie_len = 0;
}
- } else { /* scan in ibss */
- ssids = this_ssid;
- }
-
- if (request && cfg->p2p_supported) {
- WL_TRACE_HW4(("START SCAN\n"));
- DHD_OS_SCAN_WAKE_LOCK_TIMEOUT((dhd_pub_t *)(cfg->pub),
- SCAN_WAKE_LOCK_TIMEOUT);
- DHD_DISABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub));
- }
-
- if (cfg->p2p_supported) {
- if (request && p2p_on(cfg) && p2p_scan(cfg)) {
-
- /* find my listen channel */
- cfg->afx_hdl->my_listen_chan =
- wl_find_listen_channel(cfg, request->ie,
- request->ie_len);
- err = wl_cfgp2p_enable_discovery(cfg, ndev,
- request->ie, request->ie_len);
-
- if (unlikely(err)) {
- goto scan_out;
- }
+end:
+ if (ioctl_buf) {
+ MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
}
}
- err = wl_do_escan(cfg, wiphy, ndev, request);
- if (likely(!err))
- goto scan_success;
- else
- goto scan_out;
-scan_success:
- busy_count = 0;
- cfg->scan_request = request;
- wl_set_drv_status(cfg, SCANNING, ndev);
+ return ret;
+}
- return 0;
+#ifdef WLAIBSS_MCHAN
+static bcm_struct_cfgdev*
+bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct wireless_dev* wdev = NULL;
+ struct net_device *new_ndev = NULL;
+ struct net_device *primary_ndev = NULL;
+ long timeout;
+ wl_aibss_if_t aibss_if;
+ wl_if_event_info *event = NULL;
-scan_out:
- if (err == BCME_BUSY || err == BCME_NOTREADY) {
- WL_ERR(("Scan err = (%d), busy?%d", err, -EBUSY));
- err = -EBUSY;
- } else if ((err == BCME_EPERM) && cfg->scan_suppressed) {
- WL_ERR(("Scan not permitted due to scan suppress\n"));
- err = -EPERM;
- } else {
- /* For all other fw errors, use a generic error code as return
- * value to cfg80211 stack
- */
- err = -EAGAIN;
+ if (cfg->ibss_cfgdev != NULL) {
+ WL_ERR(("IBSS interface %s already exists\n", name));
+ return NULL;
}
-#define SCAN_EBUSY_RETRY_LIMIT 20
- if (err == -EBUSY) {
- /* Flush FW preserve buffer logs for checking failure */
- if (busy_count++ > (SCAN_EBUSY_RETRY_LIMIT/5)) {
- wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
- }
- if (busy_count > SCAN_EBUSY_RETRY_LIMIT) {
- struct ether_addr bssid;
- s32 ret = 0;
-#if defined(DHD_DEBUG) && defined(DHD_FW_COREDUMP)
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
-#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
- busy_count = 0;
- WL_ERR(("Unusual continuous EBUSY error, %d %d %d %d %d %d %d %d %d\n",
- wl_get_drv_status(cfg, SCANNING, ndev),
- wl_get_drv_status(cfg, SCAN_ABORTING, ndev),
- wl_get_drv_status(cfg, CONNECTING, ndev),
- wl_get_drv_status(cfg, CONNECTED, ndev),
- wl_get_drv_status(cfg, DISCONNECTING, ndev),
- wl_get_drv_status(cfg, AP_CREATING, ndev),
- wl_get_drv_status(cfg, AP_CREATED, ndev),
- wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev),
- wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev)));
-
-#if defined(DHD_DEBUG) && defined(DHD_FW_COREDUMP)
- if (dhdp->memdump_enabled) {
- dhdp->memdump_type = DUMP_TYPE_SCAN_BUSY;
- dhd_bus_mem_dump(dhdp);
- }
-#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
-
- bzero(&bssid, sizeof(bssid));
- if ((ret = wldev_ioctl_get(ndev, WLC_GET_BSSID,
- &bssid, ETHER_ADDR_LEN)) == 0) {
- WL_ERR(("FW is connected with " MACDBG "/n",
- MAC2STRDBG(bssid.octet)));
- } else {
- WL_ERR(("GET BSSID failed with %d\n", ret));
- }
-
- wl_cfg80211_scan_abort(cfg);
+ WL_ERR(("Try to create IBSS interface %s\n", name));
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ /* generate a new MAC address for the IBSS interface */
+ get_primary_mac(cfg, &cfg->ibss_if_addr);
+ cfg->ibss_if_addr.octet[4] ^= 0x40;
+ bzero(&aibss_if, sizeof(aibss_if));
+ memcpy(&aibss_if.addr, &cfg->ibss_if_addr, sizeof(aibss_if.addr));
+ aibss_if.chspec = 0;
+ aibss_if.len = sizeof(aibss_if);
- } else {
- /* Hold the context for 400msec, so that 10 subsequent scans
- * can give a buffer of 4sec which is enough to
- * cover any on-going scan in the firmware
- */
- WL_DBG(("Enforcing delay for EBUSY case \n"));
- msleep(400);
- }
- } else {
- busy_count = 0;
+ cfg->bss_pending_op = TRUE;
+ bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
+ err = wldev_iovar_setbuf(primary_ndev, "aibss_ifadd", &aibss_if,
+ sizeof(aibss_if), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (err) {
+ WL_ERR(("IOVAR aibss_ifadd failed with error %d\n", err));
+ goto fail;
}
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout <= 0 || cfg->bss_pending_op)
+ goto fail;
- wl_clr_drv_status(cfg, SCANNING, ndev);
- DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
- cfg->scan_request = NULL;
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+ event = &cfg->if_event_info;
+ /* By calling wl_cfg80211_allocate_if (dhd_allocate_if eventually) we give the control
+	 * over this net_device interface to dhd_linux, hence the interface is managed by dhd_linux
+ * and will be freed by dhd_detach unless it gets unregistered before that. The
+ * wireless_dev instance new_ndev->ieee80211_ptr associated with this net_device will
+ * be freed by wl_dealloc_netinfo
+ */
+ new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, event->name,
+ event->mac, event->bssidx, event->name);
+ if (new_ndev == NULL)
+ goto fail;
+ wdev = (struct wireless_dev *)MALLOCZ(cfg->osh, sizeof(*wdev));
+ if (wdev == NULL)
+ goto fail;
+ wdev->wiphy = wiphy;
+ wdev->iftype = NL80211_IFTYPE_ADHOC;
+ wdev->netdev = new_ndev;
+ new_ndev->ieee80211_ptr = wdev;
+ SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy));
- return err;
-}
+ /* rtnl lock must have been acquired, if this is not the case, wl_cfg80211_register_if
+ * needs to be modified to take one parameter (bool need_rtnl_lock)
+ */
+ ASSERT_RTNL();
+ if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev, FALSE) != BCME_OK)
+ goto fail;
-static s32
-wl_get_scan_timeout_val(struct bcm_cfg80211 *cfg)
-{
- u32 scan_timer_interval_ms = WL_SCAN_TIMER_INTERVAL_MS;
+ wl_alloc_netinfo(cfg, new_ndev, wdev, WL_IF_TYPE_IBSS,
+ PM_ENABLE, event->bssidx, event->ifidx);
+ cfg->ibss_cfgdev = ndev_to_cfgdev(new_ndev);
+ WL_ERR(("IBSS interface %s created\n", new_ndev->name));
+ return cfg->ibss_cfgdev;
- /* If NAN is enabled adding +10 sec to the existing timeout value */
-#ifdef WL_NAN
- if (cfg->nan_enable) {
- scan_timer_interval_ms += WL_SCAN_TIMER_INTERVAL_MS_NAN;
+fail:
+ WL_ERR(("failed to create IBSS interface %s \n", name));
+ cfg->bss_pending_op = FALSE;
+ if (new_ndev)
+ wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, FALSE);
+ if (wdev) {
+ MFREE(cfg->osh, wdev, sizeof(*wdev));
}
-#endif /* WL_NAN */
- WL_INFORM(("scan_timer_interval_ms %d\n", scan_timer_interval_ms));
- return scan_timer_interval_ms;
+ return NULL;
}
static s32
-#if defined(WL_CFG80211_P2P_DEV_IF)
-wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
-#else
-wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request)
-#endif /* WL_CFG80211_P2P_DEV_IF */
+bcm_cfg80211_del_ibss_if(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
{
- s32 err = 0;
+ int err = 0;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
-#if defined(WL_CFG80211_P2P_DEV_IF)
- struct net_device *ndev = wdev_to_wlc_ndev(request->wdev, cfg);
-#endif /* WL_CFG80211_P2P_DEV_IF */
-
- WL_DBG(("Enter\n"));
- RETURN_EIO_IF_NOT_UP(cfg);
-
-#ifdef DHD_IFDEBUG
-#ifdef WL_CFG80211_P2P_DEV_IF
- PRINT_WDEV_INFO(request->wdev);
-#else
- PRINT_WDEV_INFO(ndev);
-#endif /* WL_CFG80211_P2P_DEV_IF */
-#endif /* DHD_IFDEBUG */
+ struct net_device *ndev = NULL;
+ struct net_device *primary_ndev = NULL;
+ long timeout;
- if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
- if (wl_cfg_multip2p_operational(cfg)) {
- WL_ERR(("wlan0 scan failed, p2p devices are operational"));
- return -ENODEV;
- }
- }
- err = wl_cfg80211_check_in4way(cfg, ndev, NO_SCAN_IN4WAY,
- WL_EXT_STATUS_SCAN, NULL);
- if (err)
- return err;
+ if (!cfgdev || cfg->ibss_cfgdev != cfgdev || ETHER_ISNULLADDR(&cfg->ibss_if_addr.octet))
+ return -EINVAL;
+ ndev = (struct net_device *)cfgdev_to_ndev(cfg->ibss_cfgdev);
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
- mutex_lock(&cfg->usr_sync);
- err = __wl_cfg80211_scan(wiphy, ndev, request, NULL);
- if (unlikely(err)) {
- WL_ERR(("scan error (%d)\n", err));
- } else {
- /* Arm the timer */
- mod_timer(&cfg->scan_timeout,
- jiffies + msecs_to_jiffies(wl_get_scan_timeout_val(cfg)));
+ cfg->bss_pending_op = TRUE;
+ bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
+ err = wldev_iovar_setbuf(primary_ndev, "aibss_ifdel", &cfg->ibss_if_addr,
+ sizeof(cfg->ibss_if_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (err) {
+ WL_ERR(("IOVAR aibss_ifdel failed with error %d\n", err));
+ goto fail;
}
- mutex_unlock(&cfg->usr_sync);
-#ifdef WL_DRV_AVOID_SCANCACHE
- /* Reset roam cache after successful scan request */
-#ifdef ROAM_CHANNEL_CACHE
- if (!err) {
- reset_roam_cache(cfg);
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout <= 0 || cfg->bss_pending_op) {
+ WL_ERR(("timeout in waiting IF_DEL event\n"));
+ goto fail;
}
-#endif /* ROAM_CHANNEL_CACHE */
-#endif /* WL_DRV_AVOID_SCANCACHE */
- return err;
+
+ wl_cfg80211_remove_if(cfg, cfg->if_event_info.ifidx, ndev, FALSE);
+ cfg->ibss_cfgdev = NULL;
+ return 0;
+
+fail:
+ cfg->bss_pending_op = FALSE;
+ return -1;
}
+#endif /* WLAIBSS_MCHAN */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
-static void
-wl_cfg80211_abort_scan(struct wiphy *wiphy, struct wireless_dev *wdev)
+s32
+wl_cfg80211_to_fw_iftype(wl_iftype_t iftype)
{
- struct bcm_cfg80211 *cfg;
+ s32 ret = BCME_ERROR;
- WL_DBG(("Enter %s\n", __FUNCTION__));
- cfg = wiphy_priv(wdev->wiphy);
+ switch (iftype) {
+ case WL_IF_TYPE_AP:
+ ret = WL_INTERFACE_TYPE_AP;
+ break;
+ case WL_IF_TYPE_STA:
+ ret = WL_INTERFACE_TYPE_STA;
+ break;
+ case WL_IF_TYPE_NAN_NMI:
+ case WL_IF_TYPE_NAN:
+ ret = WL_INTERFACE_TYPE_NAN;
+ break;
+ case WL_IF_TYPE_P2P_DISC:
+ ret = WL_INTERFACE_TYPE_P2P_DISC;
+ break;
+ case WL_IF_TYPE_P2P_GO:
+ ret = WL_INTERFACE_TYPE_P2P_GO;
+ break;
+ case WL_IF_TYPE_P2P_GC:
+ ret = WL_INTERFACE_TYPE_P2P_GC;
+ break;
- /* Check if any scan in progress only then abort */
- if (wl_get_drv_status_all(cfg, SCANNING)) {
- wl_cfg80211_scan_abort(cfg);
- /* Only scan abort is issued here. As per the expectation of abort_scan
- * the status of abort is needed to be communicated using cfg80211_scan_done call.
- * Here we just issue abort request and let the scan complete path to indicate
- * abort to cfg80211 layer.
- */
- WL_DBG(("%s: Scan abort issued to FW\n", __FUNCTION__));
+ default:
+ WL_ERR(("Unsupported type:%d \n", iftype));
+ ret = -EINVAL;
+ break;
}
+ return ret;
}
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
-static s32 wl_set_rts(struct net_device *dev, u32 rts_threshold)
+bool
+wl_legacy_chip_check(struct bcm_cfg80211 *cfg)
{
- s32 err = 0;
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ uint chip;
- err = wldev_iovar_setint(dev, "rtsthresh", rts_threshold);
- if (unlikely(err)) {
- WL_ERR(("Error (%d)\n", err));
- return err;
+ chip = dhd_conf_get_chip(dhd);
+
+ if (chip == BCM43362_CHIP_ID || chip == BCM4330_CHIP_ID ||
+ chip == BCM43430_CHIP_ID || chip == BCM43012_CHIP_ID ||
+ chip == BCM4334_CHIP_ID || chip == BCM43340_CHIP_ID ||
+ chip == BCM43341_CHIP_ID || chip == BCM4324_CHIP_ID ||
+ chip == BCM4335_CHIP_ID || chip == BCM4339_CHIP_ID ||
+ chip == BCM4345_CHIP_ID || chip == BCM43454_CHIP_ID ||
+ chip == BCM4354_CHIP_ID || chip == BCM4356_CHIP_ID ||
+ chip == BCM4371_CHIP_ID || chip == BCM4359_CHIP_ID ||
+ chip == BCM43143_CHIP_ID || chip == BCM43242_CHIP_ID ||
+ chip == BCM43569_CHIP_ID) {
+ return true;
}
- return err;
+
+ return false;
}
-static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold)
+s32
+wl_cfg80211_interface_ops(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, s32 bsscfg_idx,
+ wl_iftype_t cfg_iftype, s32 del, u8 *addr)
{
- s32 err = 0;
+ s32 ret;
+ struct wl_interface_create_v2 iface;
+ wl_interface_create_v3_t iface_v3;
+ wl_interface_create_t iface_v0;
+ struct wl_interface_info_v1 *info;
+ wl_interface_info_v2_t *info_v2;
+ wl_interface_info_t *info_v0;
+ uint32 ifflags = 0;
+ bool use_iface_info_v2 = false;
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
+ s32 iftype;
- err = wldev_iovar_setint_bsscfg(dev, "fragthresh", frag_threshold, 0);
- if (unlikely(err)) {
- WL_ERR(("Error (%d)\n", err));
- return err;
+ if (del) {
+ ret = wldev_iovar_setbuf(ndev, "interface_remove",
+ NULL, 0, ioctl_buf, sizeof(ioctl_buf), NULL);
+ if (unlikely(ret))
+ WL_ERR(("Interface remove failed!! ret %d\n", ret));
+ return ret;
}
- return err;
-}
+ printf("%s: mac=%pM\n", __FUNCTION__, addr);
-static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l)
-{
- s32 err = 0;
- u32 cmd = (l ? WLC_SET_LRL : WLC_SET_SRL);
+ /* Interface create */
+ bzero(&iface, sizeof(iface));
+ /*
+ * flags field is still used along with iftype inorder to support the old version of the
+ * FW work with the latest app changes.
+ */
-#ifdef CUSTOM_LONG_RETRY_LIMIT
- if ((cmd == WLC_SET_LRL) &&
- (retry != CUSTOM_LONG_RETRY_LIMIT)) {
- WL_DBG(("CUSTOM_LONG_RETRY_LIMIT is used.Ignore configuration"));
- return err;
+ iftype = wl_cfg80211_to_fw_iftype(cfg_iftype);
+ if (iftype < 0) {
+ return -ENOTSUPP;
}
-#endif /* CUSTOM_LONG_RETRY_LIMIT */
- retry = htod32(retry);
- err = wldev_ioctl_set(dev, cmd, &retry, sizeof(retry));
- if (unlikely(err)) {
- WL_ERR(("cmd (%d) , error (%d)\n", cmd, err));
- return err;
+ if (addr) {
+ ifflags |= WL_INTERFACE_MAC_USE;
}
- return err;
-}
-static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
-{
- struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wiphy);
- struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
- s32 err = 0;
+ /* Pass ver = 0 for fetching the interface_create iovar version */
+ if (wl_legacy_chip_check(cfg)) {
+ bzero(&iface_v0, sizeof(iface_v0));
+ iface_v0.ver = WL_INTERFACE_CREATE_VER;
+ iface_v0.flags = iftype | ifflags;
+ if (addr) {
+ memcpy(&iface_v0.mac_addr.octet, addr, ETH_ALEN);
+ }
+ ret = wldev_iovar_getbuf(ndev, "interface_create",
+ &iface_v0, sizeof(struct wl_interface_create),
+ ioctl_buf, sizeof(ioctl_buf), NULL);
+ if (ret == 0) {
+ info_v0 = (wl_interface_info_t *)ioctl_buf;
+ ret = info_v0->bsscfgidx;
+ goto exit;
+ }
+ } else {
+ ret = wldev_iovar_getbuf(ndev, "interface_create",
+ &iface, sizeof(struct wl_interface_create_v2),
+ ioctl_buf, sizeof(ioctl_buf), NULL);
+ }
+ if (ret == BCME_UNSUPPORTED) {
+ WL_ERR(("interface_create iovar not supported\n"));
+ return ret;
+ } else if ((ret == 0) && *((uint32 *)ioctl_buf) == WL_INTERFACE_CREATE_VER_3) {
+ WL_DBG(("interface_create version 3. flags:0x%x \n", ifflags));
+ use_iface_info_v2 = true;
+ bzero(&iface_v3, sizeof(wl_interface_create_v3_t));
+ iface_v3.ver = WL_INTERFACE_CREATE_VER_3;
+ iface_v3.iftype = iftype;
+ iface_v3.flags = ifflags;
+ if (addr) {
+ memcpy(&iface_v3.mac_addr.octet, addr, ETH_ALEN);
+ }
+ ret = wldev_iovar_getbuf(ndev, "interface_create",
+ &iface_v3, sizeof(wl_interface_create_v3_t),
+ ioctl_buf, sizeof(ioctl_buf), NULL);
+ } else {
+ /* On any other error, attempt with iovar version 2 */
+ WL_DBG(("interface_create version 2. get_ver:%d ifflags:0x%x\n", ret, ifflags));
+ iface.ver = WL_INTERFACE_CREATE_VER_2;
+ iface.iftype = iftype;
+ iface.flags = ifflags;
+ if (addr) {
+ memcpy(&iface.mac_addr.octet, addr, ETH_ALEN);
+ }
+ ret = wldev_iovar_getbuf(ndev, "interface_create",
+ &iface, sizeof(struct wl_interface_create_v2),
+ ioctl_buf, sizeof(ioctl_buf), NULL);
+ }
- RETURN_EIO_IF_NOT_UP(cfg);
- WL_DBG(("Enter\n"));
- if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
- (cfg->conf->rts_threshold != wiphy->rts_threshold)) {
- cfg->conf->rts_threshold = wiphy->rts_threshold;
- err = wl_set_rts(ndev, cfg->conf->rts_threshold);
- if (err != BCME_OK)
- return err;
+ if (unlikely(ret)) {
+ WL_ERR(("Interface create failed!! ret %d\n", ret));
+ return ret;
}
- if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
- (cfg->conf->frag_threshold != wiphy->frag_threshold)) {
- cfg->conf->frag_threshold = wiphy->frag_threshold;
- err = wl_set_frag(ndev, cfg->conf->frag_threshold);
- if (err != BCME_OK)
- return err;
+
+ /* success case */
+ if (use_iface_info_v2 == true) {
+ info_v2 = (wl_interface_info_v2_t *)ioctl_buf;
+ ret = info_v2->bsscfgidx;
+ } else {
+ /* Use v1 struct */
+ info = (struct wl_interface_info_v1 *)ioctl_buf;
+ ret = info->bsscfgidx;
}
- if (changed & WIPHY_PARAM_RETRY_LONG &&
- (cfg->conf->retry_long != wiphy->retry_long)) {
- cfg->conf->retry_long = wiphy->retry_long;
- err = wl_set_retry(ndev, cfg->conf->retry_long, true);
- if (err != BCME_OK)
- return err;
+
+exit:
+ WL_DBG(("wl interface create success!! bssidx:%d \n", ret));
+ return ret;
+}
+
+bool
+wl_customer6_legacy_chip_check(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev)
+{
+ u32 chipnum;
+ wlc_rev_info_t revinfo;
+ int ret;
+
+ /* Get the device rev info */
+ memset(&revinfo, 0, sizeof(revinfo));
+ ret = wldev_ioctl_get(ndev, WLC_GET_REVINFO, &revinfo, sizeof(revinfo));
+ if (ret < 0) {
+ WL_ERR(("%s: GET revinfo FAILED. ret:%d\n", __FUNCTION__, ret));
+ ASSERT(0);
+ return false;
}
- if (changed & WIPHY_PARAM_RETRY_SHORT &&
- (cfg->conf->retry_short != wiphy->retry_short)) {
- cfg->conf->retry_short = wiphy->retry_short;
- err = wl_set_retry(ndev, cfg->conf->retry_short, false);
- if (err != BCME_OK) {
- return err;
- }
+
+ WL_DBG(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
+ dtoh32(revinfo.deviceid), dtoh32(revinfo.vendorid), dtoh32(revinfo.chipnum)));
+ chipnum = revinfo.chipnum;
+ if ((chipnum == BCM4350_CHIP_ID) || (chipnum == BCM4355_CHIP_ID) ||
+ (chipnum == BCM4345_CHIP_ID) || (chipnum == BCM43430_CHIP_ID) ||
+ (chipnum == BCM43362_CHIP_ID) || (chipnum == BCM43012_CHIP_ID) ||
+ (chipnum == BCM4356_CHIP_ID)) {
+ /* WAR required */
+ return true;
}
- return err;
+ return false;
}
-static chanspec_t
-channel_to_chanspec(struct wiphy *wiphy, struct net_device *dev, u32 channel, u32 bw_cap)
+
+void
+wl_bss_iovar_war(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, s32 *val)
{
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- u8 *buf = NULL;
- wl_uint32_list_t *list;
- int err = BCME_OK;
- chanspec_t c = 0, ret_c = 0;
- int bw = 0, tmp_bw = 0;
- int i;
- u32 tmp_c;
+ u32 chipnum;
+ wlc_rev_info_t revinfo;
+ int ret;
+ bool need_war = false;
-#define LOCAL_BUF_SIZE 1024
- buf = (u8 *)MALLOC(cfg->osh, LOCAL_BUF_SIZE);
- if (!buf) {
- WL_ERR(("buf memory alloc failed\n"));
- goto exit;
+ /* Get the device rev info */
+ memset(&revinfo, 0, sizeof(revinfo));
+ ret = wldev_ioctl_get(ndev, WLC_GET_REVINFO, &revinfo, sizeof(revinfo));
+ if (ret < 0) {
+ WL_ERR(("%s: GET revinfo FAILED. ret:%d\n", __FUNCTION__, ret));
+ } else {
+ WL_DBG(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
+ dtoh32(revinfo.deviceid), dtoh32(revinfo.vendorid), dtoh32(revinfo.chipnum)));
+ chipnum = revinfo.chipnum;
+ if ((chipnum == BCM4359_CHIP_ID) || (chipnum == BCM43596_CHIP_ID)) {
+ /* WAR required */
+ need_war = true;
+ }
}
- err = wldev_iovar_getbuf_bsscfg(dev, "chanspecs", NULL,
- 0, buf, LOCAL_BUF_SIZE, 0, &cfg->ioctl_buf_sync);
- if (err != BCME_OK) {
- WL_ERR(("get chanspecs failed with %d\n", err));
- goto exit;
+ if (wl_customer6_legacy_chip_check(cfg, ndev) || need_war) {
+ /* Few firmware branches have issues in bss iovar handling and
+ * that can't be changed since they are in production.
+ */
+ if (*val == WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE) {
+ *val = WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE;
+ } else if (*val == WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE) {
+ *val = WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE;
+ } else {
+ /* Ignore for other bss enums */
+ return;
+ }
+ WL_DBG(("wl bss %d\n", *val));
}
+}
- list = (wl_uint32_list_t *)(void *)buf;
- for (i = 0; i < dtoh32(list->count); i++) {
- c = dtoh32(list->element[i]);
- if (channel <= CH_MAX_2G_CHANNEL) {
- if (!CHSPEC_IS20(c))
- continue;
- if (channel == CHSPEC_CHANNEL(c)) {
- ret_c = c;
- bw = 20;
- goto exit;
- }
- }
- tmp_c = wf_chspec_ctlchan(c);
- tmp_bw = bw2cap[CHSPEC_BW(c) >> WL_CHANSPEC_BW_SHIFT];
- if (tmp_c != channel)
- continue;
+s32
+wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, s32 bsscfg_idx,
+ wl_iftype_t brcm_iftype, s32 del, u8 *addr)
+{
+ s32 ret = BCME_OK;
+ s32 val = 0;
- if ((tmp_bw > bw) && (tmp_bw <= bw_cap)) {
- bw = tmp_bw;
- ret_c = c;
- if (bw == bw_cap)
- goto exit;
- }
+ struct {
+ s32 cfg;
+ s32 val;
+ struct ether_addr ea;
+ } bss_setbuf;
+
+ WL_DBG(("wl_iftype:%d del:%d \n", brcm_iftype, del));
+
+ bzero(&bss_setbuf, sizeof(bss_setbuf));
+
+ /* AP=2, STA=3, up=1, down=0, val=-1 */
+ if (del) {
+ val = WLC_AP_IOV_OP_DELETE;
+ } else if (brcm_iftype == WL_IF_TYPE_AP) {
+ /* Add/role change to AP Interface */
+ WL_DBG(("Adding AP Interface \n"));
+ val = WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE;
+ } else if (brcm_iftype == WL_IF_TYPE_STA) {
+ /* Add/role change to STA Interface */
+ WL_DBG(("Adding STA Interface \n"));
+ val = WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE;
+ } else {
+ WL_ERR((" add_del_bss NOT supported for IFACE type:0x%x", brcm_iftype));
+ return -EINVAL;
}
-exit:
- if (buf) {
- MFREE(cfg->osh, buf, LOCAL_BUF_SIZE);
+
+ if (!del) {
+ wl_bss_iovar_war(cfg, ndev, &val);
}
-#undef LOCAL_BUF_SIZE
- WL_DBG(("return chanspec %x %d\n", ret_c, bw));
- return ret_c;
-}
-void
-wl_cfg80211_ibss_vsie_set_buffer(struct net_device *dev, vndr_ie_setbuf_t *ibss_vsie,
- int ibss_vsie_len)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ bss_setbuf.cfg = htod32(bsscfg_idx);
+ bss_setbuf.val = htod32(val);
- if (cfg != NULL && ibss_vsie != NULL) {
- if (cfg->ibss_vsie != NULL) {
- MFREE(cfg->osh, cfg->ibss_vsie, cfg->ibss_vsie_len);
- }
- cfg->ibss_vsie = ibss_vsie;
- cfg->ibss_vsie_len = ibss_vsie_len;
+ if (addr) {
+ memcpy(&bss_setbuf.ea.octet, addr, ETH_ALEN);
}
-}
-static void
-wl_cfg80211_ibss_vsie_free(struct bcm_cfg80211 *cfg)
-{
- /* free & initiralize VSIE (Vendor Specific IE) */
- if (cfg->ibss_vsie != NULL) {
- MFREE(cfg->osh, cfg->ibss_vsie, cfg->ibss_vsie_len);
- cfg->ibss_vsie = NULL;
- cfg->ibss_vsie_len = 0;
- }
+ WL_INFORM_MEM(("wl bss %d bssidx:%d iface:%s \n", val, bsscfg_idx, ndev->name));
+ ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (ret != 0)
+ WL_ERR(("'bss %d' failed with %d\n", val, ret));
+
+ return ret;
}
s32
-wl_cfg80211_ibss_vsie_delete(struct net_device *dev)
+wl_cfg80211_bss_up(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, s32 bss_up)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- char *ioctl_buf = NULL;
- s32 ret = BCME_OK, bssidx;
+ s32 ret = BCME_OK;
+ s32 val = bss_up ? 1 : 0;
- if (cfg != NULL && cfg->ibss_vsie != NULL) {
- ioctl_buf = (char *)MALLOC(cfg->osh, WLC_IOCTL_MEDLEN);
- if (!ioctl_buf) {
- WL_ERR(("ioctl memory alloc failed\n"));
- return -ENOMEM;
- }
- if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
- WL_ERR(("Find index failed\n"));
- ret = BCME_ERROR;
- goto end;
- }
- /* change the command from "add" to "del" */
- strncpy(cfg->ibss_vsie->cmd, "del", VNDR_IE_CMD_LEN - 1);
- cfg->ibss_vsie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+ struct {
+ s32 cfg;
+ s32 val;
+ } bss_setbuf;
- ret = wldev_iovar_setbuf_bsscfg(dev, "vndr_ie",
- cfg->ibss_vsie, cfg->ibss_vsie_len,
- ioctl_buf, WLC_IOCTL_MEDLEN, bssidx, NULL);
- WL_ERR(("ret=%d\n", ret));
+ bss_setbuf.cfg = htod32(bsscfg_idx);
+ bss_setbuf.val = htod32(val);
- if (ret == BCME_OK) {
- /* free & initialize VSIE */
- MFREE(cfg->osh, cfg->ibss_vsie, cfg->ibss_vsie_len);
- cfg->ibss_vsie = NULL;
- cfg->ibss_vsie_len = 0;
- }
-end:
- if (ioctl_buf) {
- MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
- }
+ WL_INFORM_MEM(("wl bss -C %d %s\n", bsscfg_idx, bss_up ? "up" : "down"));
+ ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+ if (ret != 0) {
+ WL_ERR(("'bss %d' failed with %d\n", bss_up, ret));
}
return ret;
}
-#ifdef WLAIBSS_MCHAN
-static bcm_struct_cfgdev*
-bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name)
+bool
+wl_cfg80211_bss_isup(struct net_device *ndev, int bsscfg_idx)
{
- int err = 0;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct wireless_dev* wdev = NULL;
- struct net_device *new_ndev = NULL;
- struct net_device *primary_ndev = NULL;
- s32 timeout;
- wl_aibss_if_t aibss_if;
- wl_if_event_info *event = NULL;
-
- if (cfg->ibss_cfgdev != NULL) {
- WL_ERR(("IBSS interface %s already exists\n", name));
- return NULL;
- }
-
- WL_ERR(("Try to create IBSS interface %s\n", name));
- primary_ndev = bcmcfg_to_prmry_ndev(cfg);
- /* generate a new MAC address for the IBSS interface */
- get_primary_mac(cfg, &cfg->ibss_if_addr);
- cfg->ibss_if_addr.octet[4] ^= 0x40;
- memset(&aibss_if, sizeof(aibss_if), 0);
- memcpy(&aibss_if.addr, &cfg->ibss_if_addr, sizeof(aibss_if.addr));
- aibss_if.chspec = 0;
- aibss_if.len = sizeof(aibss_if);
+ s32 result, val;
+ bool isup = false;
+ s8 getbuf[64];
- cfg->bss_pending_op = TRUE;
- memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
- err = wldev_iovar_setbuf(primary_ndev, "aibss_ifadd", &aibss_if,
- sizeof(aibss_if), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
- if (err) {
- WL_ERR(("IOVAR aibss_ifadd failed with error %d\n", err));
- goto fail;
+ /* Check if the BSS is up */
+ *(int*)getbuf = -1;
+ result = wldev_iovar_getbuf_bsscfg(ndev, "bss", &bsscfg_idx,
+ sizeof(bsscfg_idx), getbuf, sizeof(getbuf), 0, NULL);
+ if (result != 0) {
+ WL_ERR(("'cfg bss -C %d' failed: %d\n", bsscfg_idx, result));
+ WL_ERR(("NOTE: this ioctl error is normal "
+ "when the BSS has not been created yet.\n"));
+ } else {
+ val = *(int*)getbuf;
+ val = dtoh32(val);
+ WL_DBG(("wl bss -C %d = %d\n", bsscfg_idx, val));
+ isup = (val ? TRUE : FALSE);
}
- timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
- !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
- if (timeout <= 0 || cfg->bss_pending_op)
- goto fail;
-
- event = &cfg->if_event_info;
- /* By calling wl_cfg80211_allocate_if (dhd_allocate_if eventually) we give the control
- * over this net_device interface to dhd_linux, hence the interface is managed by dhd_liux
- * and will be freed by dhd_detach unless it gets unregistered before that. The
- * wireless_dev instance new_ndev->ieee80211_ptr associated with this net_device will
- * be freed by wl_dealloc_netinfo
- */
- new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, event->name,
- event->mac, event->bssidx, event->name);
- if (new_ndev == NULL)
- goto fail;
- wdev = (struct wireless_dev *)MALLOCZ(cfg->osh, sizeof(*wdev));
- if (wdev == NULL)
- goto fail;
- wdev->wiphy = wiphy;
- wdev->iftype = NL80211_IFTYPE_ADHOC;
- wdev->netdev = new_ndev;
- new_ndev->ieee80211_ptr = wdev;
- SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy));
-
- /* rtnl lock must have been acquired, if this is not the case, wl_cfg80211_register_if
- * needs to be modified to take one parameter (bool need_rtnl_lock)
- */
- ASSERT_RTNL();
- if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev, FALSE) != BCME_OK)
- goto fail;
+ return isup;
+}
- wl_alloc_netinfo(cfg, new_ndev, wdev, WL_IF_TYPE_IBSS,
- PM_ENABLE, event->bssidx, event->ifidx);
- cfg->ibss_cfgdev = ndev_to_cfgdev(new_ndev);
- WL_ERR(("IBSS interface %s created\n", new_ndev->name));
- return cfg->ibss_cfgdev;
+s32
+wl_iftype_to_mode(wl_iftype_t iftype)
+{
+ s32 mode = BCME_ERROR;
-fail:
- WL_ERR(("failed to create IBSS interface %s \n", name));
- cfg->bss_pending_op = FALSE;
- if (new_ndev)
- wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, FALSE);
- if (wdev) {
- MFREE(cfg->osh, wdev, sizeof(*wdev));
+ switch (iftype) {
+ case WL_IF_TYPE_STA:
+ case WL_IF_TYPE_P2P_GC:
+ case WL_IF_TYPE_P2P_DISC:
+ mode = WL_MODE_BSS;
+ break;
+ case WL_IF_TYPE_AP:
+ case WL_IF_TYPE_P2P_GO:
+ mode = WL_MODE_AP;
+ break;
+ case WL_IF_TYPE_NAN:
+ mode = WL_MODE_NAN;
+ break;
+ case WL_IF_TYPE_AIBSS:
+ /* Intentional fall through */
+ case WL_IF_TYPE_IBSS:
+ mode = WL_MODE_IBSS;
+ break;
+ default:
+ WL_ERR(("Unsupported type:%d\n", iftype));
+ break;
}
- return NULL;
+ return mode;
}
-static s32
-bcm_cfg80211_del_ibss_if(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
-{
- int err = 0;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct net_device *ndev = NULL;
- struct net_device *primary_ndev = NULL;
- s32 timeout;
-
- if (!cfgdev || cfg->ibss_cfgdev != cfgdev || ETHER_ISNULLADDR(&cfg->ibss_if_addr.octet))
- return -EINVAL;
- ndev = (struct net_device *)cfgdev_to_ndev(cfg->ibss_cfgdev);
- primary_ndev = bcmcfg_to_prmry_ndev(cfg);
-
- cfg->bss_pending_op = TRUE;
- memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
- err = wldev_iovar_setbuf(primary_ndev, "aibss_ifdel", &cfg->ibss_if_addr,
- sizeof(cfg->ibss_if_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
- if (err) {
- WL_ERR(("IOVAR aibss_ifdel failed with error %d\n", err));
- goto fail;
- }
- timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
- !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
- if (timeout <= 0 || cfg->bss_pending_op) {
- WL_ERR(("timeout in waiting IF_DEL event\n"));
- goto fail;
- }
-
- wl_cfg80211_remove_if(cfg, cfg->if_event_info.ifidx, ndev, FALSE);
- cfg->ibss_cfgdev = NULL;
- return 0;
-
-fail:
- cfg->bss_pending_op = FALSE;
- return -1;
-}
-#endif /* WLAIBSS_MCHAN */
-
s32
-wl_cfg80211_to_fw_iftype(wl_iftype_t iftype)
+cfg80211_to_wl_iftype(uint16 type, uint16 *role, uint16 *mode)
{
- s32 ret = BCME_ERROR;
-
- switch (iftype) {
- case WL_IF_TYPE_AP:
- ret = WL_INTERFACE_TYPE_AP;
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ *role = WL_IF_TYPE_STA;
+ *mode = WL_MODE_BSS;
break;
- case WL_IF_TYPE_STA:
- ret = WL_INTERFACE_TYPE_STA;
+ case NL80211_IFTYPE_AP:
+ *role = WL_IF_TYPE_AP;
+ *mode = WL_MODE_AP;
break;
- case WL_IF_TYPE_NAN_NMI:
- case WL_IF_TYPE_NAN:
- ret = WL_INTERFACE_TYPE_NAN;
+#ifdef WL_CFG80211_P2P_DEV_IF
+ case NL80211_IFTYPE_P2P_DEVICE:
+ *role = WL_IF_TYPE_P2P_DISC;
+ *mode = WL_MODE_BSS;
break;
- case WL_IF_TYPE_P2P_DISC:
- ret = WL_INTERFACE_TYPE_P2P_DISC;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+ case NL80211_IFTYPE_P2P_GO:
+ *role = WL_IF_TYPE_P2P_GO;
+ *mode = WL_MODE_AP;
break;
- case WL_IF_TYPE_P2P_GO:
- ret = WL_INTERFACE_TYPE_P2P_GO;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ *role = WL_IF_TYPE_P2P_GC;
+ *mode = WL_MODE_BSS;
break;
- case WL_IF_TYPE_P2P_GC:
- ret = WL_INTERFACE_TYPE_P2P_GC;
+ case NL80211_IFTYPE_MONITOR:
+ WL_ERR(("Unsupported mode \n"));
+ return BCME_UNSUPPORTED;
+ case NL80211_IFTYPE_ADHOC:
+ *role = WL_IF_TYPE_IBSS;
+ *mode = WL_MODE_IBSS;
break;
- case WL_IF_TYPE_AWDL:
- ret = WL_INTERFACE_TYPE_AWDL;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+ case NL80211_IFTYPE_NAN:
+ *role = WL_IF_TYPE_NAN;
+ *mode = WL_MODE_NAN;
break;
-
+#endif // endif
default:
- WL_ERR(("Unsupported type:%d \n", iftype));
- ret = -EINVAL;
- break;
+ WL_ERR(("Unknown interface type:0x%x\n", type));
+ return BCME_ERROR;
}
- return ret;
+ return BCME_OK;
}
-s32
-wl_cfg80211_interface_ops(struct bcm_cfg80211 *cfg,
- struct net_device *ndev, s32 bsscfg_idx,
- wl_iftype_t cfg_iftype, s32 del, u8 *addr)
+static s32
+wl_role_to_cfg80211_type(uint16 role, uint16 *wl_iftype, uint16 *mode)
{
- s32 ret;
- struct wl_interface_create_v2 iface;
- wl_interface_create_v3_t iface_v3;
- struct wl_interface_info_v1 *info;
- wl_interface_info_v2_t *info_v2;
- uint32 ifflags = 0;
- bool use_iface_info_v2 = false;
- u8 ioctl_buf[WLC_IOCTL_SMLEN];
- s32 iftype;
+ switch (role) {
+ case WLC_E_IF_ROLE_STA:
+ *wl_iftype = WL_IF_TYPE_STA;
+ *mode = WL_MODE_BSS;
+ return NL80211_IFTYPE_STATION;
+ case WLC_E_IF_ROLE_AP:
+ *wl_iftype = WL_IF_TYPE_AP;
+ *mode = WL_MODE_AP;
+ return NL80211_IFTYPE_AP;
+ case WLC_E_IF_ROLE_P2P_GO:
+ *wl_iftype = WL_IF_TYPE_P2P_GO;
+ *mode = WL_MODE_AP;
+ return NL80211_IFTYPE_P2P_GO;
+ case WLC_E_IF_ROLE_P2P_CLIENT:
+ *wl_iftype = WL_IF_TYPE_P2P_GC;
+ *mode = WL_MODE_BSS;
+ return NL80211_IFTYPE_P2P_CLIENT;
+ case WLC_E_IF_ROLE_IBSS:
+ *wl_iftype = WL_IF_TYPE_IBSS;
+ *mode = WL_MODE_IBSS;
+ return NL80211_IFTYPE_ADHOC;
+ case WLC_E_IF_ROLE_NAN:
+ *wl_iftype = WL_IF_TYPE_NAN;
+ *mode = WL_MODE_NAN;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) && defined(WL_CFG80211_NAN)
+ /* NL80211_IFTYPE_NAN should only be used with CFG80211 NAN MGMT
+ * For Vendor HAL based NAN implementation, continue advertising
+ * as a STA interface
+ */
+ return NL80211_IFTYPE_NAN;
+#else
+ return NL80211_IFTYPE_STATION;
+#endif /* ((LINUX_VER >= KERNEL_VERSION(4, 9, 0))) && WL_CFG80211_NAN */
+#ifdef WLDWDS
+ case WLC_E_IF_ROLE_WDS:
+ *wl_iftype = WL_IF_TYPE_AP;
+ *mode = WL_MODE_AP;
+ return NL80211_IFTYPE_AP;
+#endif
- if (del) {
- ret = wldev_iovar_setbuf(ndev, "interface_remove",
- NULL, 0, ioctl_buf, sizeof(ioctl_buf), NULL);
- if (unlikely(ret))
- WL_ERR(("Interface remove failed!! ret %d\n", ret));
- return ret;
+ default:
+ WL_ERR(("Unknown interface role:0x%x. Forcing type station\n", role));
+ return BCME_ERROR;
}
+}
- /* Interface create */
- bzero(&iface, sizeof(iface));
- /*
- * flags field is still used along with iftype inorder to support the old version of the
- * FW work with the latest app changes.
- */
+struct net_device *
+wl_cfg80211_post_ifcreate(struct net_device *ndev,
+ wl_if_event_info *event, u8 *addr,
+ const char *name, bool rtnl_lock_reqd)
+{
+ struct bcm_cfg80211 *cfg;
+ struct net_device *primary_ndev;
+ struct net_device *new_ndev = NULL;
+ struct wireless_dev *wdev = NULL;
+ s32 iface_type;
+ s32 ret = BCME_OK;
+ u16 mode;
+ u8 mac_addr[ETH_ALEN];
+ u16 wl_iftype;
- iftype = wl_cfg80211_to_fw_iftype(cfg_iftype);
- if (iftype < 0) {
- return -ENOTSUPP;
+ if (!ndev || !event) {
+ WL_ERR(("Wrong arg\n"));
+ return NULL;
}
- if (addr) {
- ifflags |= WL_INTERFACE_MAC_USE;
+ cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ WL_ERR(("cfg null\n"));
+ return NULL;
}
- /* Pass ver = 0 for fetching the interface_create iovar version */
- ret = wldev_iovar_getbuf(ndev, "interface_create",
- &iface, sizeof(struct wl_interface_create_v2),
- ioctl_buf, sizeof(ioctl_buf), NULL);
- if (ret == BCME_UNSUPPORTED) {
- WL_ERR(("interface_create iovar not supported\n"));
- return ret;
- } else if ((ret == 0) && *((uint32 *)ioctl_buf) == WL_INTERFACE_CREATE_VER_3) {
- WL_DBG(("interface_create version 3. flags:0x%x \n", ifflags));
- use_iface_info_v2 = true;
- bzero(&iface_v3, sizeof(wl_interface_create_v3_t));
- iface_v3.ver = WL_INTERFACE_CREATE_VER_3;
- iface_v3.iftype = iftype;
- iface_v3.flags = ifflags;
- if (addr) {
- memcpy(&iface_v3.mac_addr.octet, addr, ETH_ALEN);
- }
- ret = wldev_iovar_getbuf(ndev, "interface_create",
- &iface_v3, sizeof(wl_interface_create_v3_t),
- ioctl_buf, sizeof(ioctl_buf), NULL);
- } else {
- /* On any other error, attempt with iovar version 2 */
- WL_DBG(("interface_create version 2. get_ver:%d ifflags:0x%x\n", ret, ifflags));
- iface.ver = WL_INTERFACE_CREATE_VER_2;
- iface.iftype = iftype;
- iface.flags = ifflags;
- if (addr) {
- memcpy(&iface.mac_addr.octet, addr, ETH_ALEN);
- }
- ret = wldev_iovar_getbuf(ndev, "interface_create",
- &iface, sizeof(struct wl_interface_create_v2),
- ioctl_buf, sizeof(ioctl_buf), NULL);
+ WL_DBG(("Enter. role:%d ifidx:%d bssidx:%d\n",
+ event->role, event->ifidx, event->bssidx));
+ if (!event->ifidx || !event->bssidx) {
+ /* Fw returned primary idx (0) for virtual interface */
+ WL_ERR(("Wrong index. ifidx:%d bssidx:%d \n",
+ event->ifidx, event->bssidx));
+ return NULL;
}
- if (unlikely(ret)) {
- WL_ERR(("Interface create failed!! ret %d\n", ret));
- return ret;
+ iface_type = wl_role_to_cfg80211_type(event->role, &wl_iftype, &mode);
+ if (iface_type < 0) {
+ /* Unknown iface type */
+ WL_ERR(("Wrong iface type \n"));
+ return NULL;
}
- /* success case */
- if (use_iface_info_v2 == true) {
- info_v2 = (wl_interface_info_v2_t *)ioctl_buf;
- ret = info_v2->bsscfgidx;
- } else {
- /* Use v1 struct */
- info = (struct wl_interface_info_v1 *)ioctl_buf;
- ret = info->bsscfgidx;
+#ifdef WL_EXT_IAPSTA
+ if (wl_ext_check_mesh_creating(ndev)) {
+ WL_MSG(ndev->name, "change iface_type to NL80211_IFTYPE_MESH_POINT\n");
+ iface_type = NL80211_IFTYPE_MESH_POINT;
}
+#endif
- WL_DBG(("wl interface create success!! bssidx:%d \n", ret));
- return ret;
-}
+ WL_DBG(("mac_ptr:%p name:%s role:%d nl80211_iftype:%d " MACDBG "\n",
+ addr, name, event->role, iface_type, MAC2STRDBG(event->mac)));
+ if (!name) {
+ /* If iface name is not provided, use dongle ifname */
+ name = event->name;
+ }
-s32
-wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg,
- struct net_device *ndev, s32 bsscfg_idx,
- wl_iftype_t brcm_iftype, s32 del, u8 *addr)
-{
- s32 ret = BCME_OK;
- s32 val = 0;
+ if (!addr) {
+ /* If mac address is not set, use primary mac with locally administered
+ * bit set.
+ */
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ memcpy(mac_addr, primary_ndev->dev_addr, ETH_ALEN);
+ /* For customer6 builds, use primary mac address for virtual interface */
+ mac_addr[0] |= 0x02;
+ addr = mac_addr;
+ }
- struct {
- s32 cfg;
- s32 val;
- struct ether_addr ea;
- } bss_setbuf;
+#ifdef WL_STATIC_IF
+ if (IS_CFG80211_STATIC_IF_NAME(cfg, name)) {
+ new_ndev = wl_cfg80211_post_static_ifcreate(cfg, event, addr, iface_type);
+ if (!new_ndev) {
+ WL_ERR(("failed to get I/F pointer\n"));
+ return NULL;
+ }
+ wdev = new_ndev->ieee80211_ptr;
+#ifdef WL_EXT_IAPSTA
+ wl_ext_iapsta_update_iftype(new_ndev, event->ifidx, wl_iftype);
+#endif
+ } else
+#endif /* WL_STATIC_IF */
+ {
+ new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx,
+ name, addr, event->bssidx, event->name);
+ if (!new_ndev) {
+ WL_ERR(("I/F allocation failed! \n"));
+ return NULL;
+ } else {
+ WL_DBG(("I/F allocation succeeded! ifidx:0x%x bssidx:0x%x \n",
+ event->ifidx, event->bssidx));
+ }
- WL_DBG(("wl_iftype:%d del:%d \n", brcm_iftype, del));
+ wdev = (struct wireless_dev *)MALLOCZ(cfg->osh, sizeof(*wdev));
+ if (!wdev) {
+ WL_ERR(("wireless_dev alloc failed! \n"));
+ wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, rtnl_lock_reqd);
+ return NULL;
+ }
- bzero(&bss_setbuf, sizeof(bss_setbuf));
+ wdev->wiphy = bcmcfg_to_wiphy(cfg);
+ wdev->iftype = iface_type;
- /* AP=2, STA=3, up=1, down=0, val=-1 */
- if (del) {
- val = WLC_AP_IOV_OP_DELETE;
- } else if (brcm_iftype == WL_IF_TYPE_AP) {
- /* Add/role change to AP Interface */
- WL_DBG(("Adding AP Interface \n"));
- val = WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE;
- } else if (brcm_iftype == WL_IF_TYPE_STA) {
- /* Add/role change to STA Interface */
- WL_DBG(("Adding STA Interface \n"));
- val = WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE;
- } else {
- WL_ERR((" add_del_bss NOT supported for IFACE type:0x%x", brcm_iftype));
- return -EINVAL;
+ new_ndev->ieee80211_ptr = wdev;
+#ifdef WLDWDS
+ /* set wds0.x to 4addr interface here */
+ if (event->role == WLC_E_IF_ROLE_WDS) {
+ printf("\n\n\n event->role == WLC_E_IF_ROLE_WDS, set vwdev 4addr to %s\n", event->name);
+ wdev->use_4addr = true;
+ }
+#endif /* WLDWDS */
+ SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy));
+
+ memcpy(new_ndev->dev_addr, addr, ETH_ALEN);
+ if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev, rtnl_lock_reqd)
+ != BCME_OK) {
+ WL_ERR(("IFACE register failed \n"));
+ /* Post interface registration, wdev would be freed from the netdev
+ * destructor path. For other cases, handle it here.
+ */
+ MFREE(cfg->osh, wdev, sizeof(*wdev));
+ wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, rtnl_lock_reqd);
+ return NULL;
+ }
}
- bss_setbuf.cfg = htod32(bsscfg_idx);
- bss_setbuf.val = htod32(val);
+ /* Initialize with the station mode params */
+ ret = wl_alloc_netinfo(cfg, new_ndev, wdev, wl_iftype,
+ PM_ENABLE, event->bssidx, event->ifidx);
+ if (unlikely(ret)) {
+ WL_ERR(("wl_alloc_netinfo Error (%d)\n", ret));
+ goto fail;
+ }
- if (addr) {
- memcpy(&bss_setbuf.ea.octet, addr, ETH_ALEN);
+ /* Apply the mode & infra setting based on iftype */
+ if ((ret = wl_config_infra(cfg, new_ndev, wl_iftype)) < 0) {
+ WL_ERR(("config ifmode failure (%d)\n", ret));
+ goto fail;
}
- WL_INFORM_MEM(("wl bss %d bssidx:%d iface:%s \n", val, bsscfg_idx, ndev->name));
- ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
- if (ret != 0)
- WL_ERR(("'bss %d' failed with %d\n", val, ret));
+ if (mode == WL_MODE_AP) {
+ wl_set_drv_status(cfg, AP_CREATING, new_ndev);
+ }
- return ret;
+ WL_INFORM_MEM(("Network Interface (%s) registered with host."
+ " cfg_iftype:%d wl_role:%d " MACDBG "\n",
+ new_ndev->name, iface_type, event->role, MAC2STRDBG(new_ndev->dev_addr)));
+
+#ifdef SUPPORT_SET_CAC
+ wl_cfg80211_set_cac(cfg, 0);
+#endif /* SUPPORT_SET_CAC */
+
+ return new_ndev;
+
+fail:
+#ifdef WL_STATIC_IF
+ /* remove static if from iflist */
+ if (IS_CFG80211_STATIC_IF_NAME(cfg, name)) {
+ cfg->static_ndev_state = NDEV_STATE_FW_IF_FAILED;
+ wl_cfg80211_update_iflist_info(cfg, new_ndev, WL_STATIC_IFIDX, addr,
+ event->bssidx, event->name, NDEV_STATE_FW_IF_FAILED);
+ }
+#endif /* WL_STATIC_IF */
+ if (new_ndev) {
+ /* wdev would be freed from netdev destructor call back */
+ wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, rtnl_lock_reqd);
+ }
+
+ return NULL;
}
s32
-wl_cfg80211_bss_up(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, s32 bss_up)
+wl_cfg80211_delete_iface(struct bcm_cfg80211 *cfg,
+ wl_iftype_t sec_data_if_type)
{
+ struct net_info *iter, *next;
+ struct net_device *primary_ndev;
s32 ret = BCME_OK;
- s32 val = bss_up ? 1 : 0;
-
- struct {
- s32 cfg;
- s32 val;
- } bss_setbuf;
-
- bss_setbuf.cfg = htod32(bsscfg_idx);
- bss_setbuf.val = htod32(val);
+ uint8 i = 0;
- WL_INFORM_MEM(("wl bss -C %d %s\n", bsscfg_idx, bss_up ? "up" : "down"));
- ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ BCM_REFERENCE(i);
+ BCM_REFERENCE(ret);
- if (ret != 0) {
- WL_ERR(("'bss %d' failed with %d\n", bss_up, ret));
+ /* Note: This function will clean up only the network interface and host
+	 * data structures. The firmware interface clean up will happen
+ * during chip reset (ifconfig wlan0 down for built-in drivers/rmmod
+ * context for the module case).
+ */
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ WL_DBG(("Enter, deleting iftype %s\n",
+ wl_iftype_to_str(sec_data_if_type)));
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ if (iter->ndev && (iter->ndev != primary_ndev)) {
+ if (iter->iftype != sec_data_if_type) {
+ continue;
+ }
+ switch (sec_data_if_type) {
+ case WL_IF_TYPE_P2P_GO:
+ case WL_IF_TYPE_P2P_GC: {
+ ret = _wl_cfg80211_del_if(cfg,
+ iter->ndev, NULL, iter->ndev->name);
+ break;
+ }
+#ifdef WL_NAN
+ case WL_IF_TYPE_NAN: {
+ if (cfg->nan_enable == false) {
+ WL_INFORM_MEM(("Nan is not active,"
+ " ignore NDI delete\n"));
+ } else {
+ ret = wl_cfgnan_delete_ndp(cfg, iter->ndev);
+ }
+ break;
+ }
+#endif /* WL_NAN */
+ case WL_IF_TYPE_AP: {
+ /* Cleanup AP */
+#ifdef WL_STATIC_IF
+ /* handle static ap */
+ if (IS_CFG80211_STATIC_IF(cfg, iter->ndev)) {
+ dev_close(iter->ndev);
+ } else
+#endif /* WL_STATIC_IF */
+ {
+ /* handle virtual created AP */
+ ret = _wl_cfg80211_del_if(cfg, iter->ndev,
+ NULL, iter->ndev->name);
+ }
+ break;
+ }
+ default: {
+ WL_ERR(("Unsupported interface type\n"));
+ ret = -ENOTSUPP;
+ goto fail;
+ }
+ }
+ }
}
-
+fail:
return ret;
}
-bool
-wl_cfg80211_bss_isup(struct net_device *ndev, int bsscfg_idx)
+void
+wl_cfg80211_cleanup_virtual_ifaces(struct bcm_cfg80211 *cfg, bool rtnl_lock_reqd)
{
- s32 result, val;
- bool isup = false;
- s8 getbuf[64];
+ struct net_info *iter, *next;
+ struct net_device *primary_ndev;
- /* Check if the BSS is up */
- *(int*)getbuf = -1;
- result = wldev_iovar_getbuf_bsscfg(ndev, "bss", &bsscfg_idx,
- sizeof(bsscfg_idx), getbuf, sizeof(getbuf), 0, NULL);
- if (result != 0) {
- WL_ERR(("'cfg bss -C %d' failed: %d\n", bsscfg_idx, result));
- WL_ERR(("NOTE: this ioctl error is normal "
- "when the BSS has not been created yet.\n"));
- } else {
- val = *(int*)getbuf;
- val = dtoh32(val);
- WL_DBG(("wl bss -C %d = %d\n", bsscfg_idx, val));
- isup = (val ? TRUE : FALSE);
+ /* Note: This function will clean up only the network interface and host
+	 * data structures. The firmware interface clean up will happen
+ * during chip reset (ifconfig wlan0 down for built-in drivers/rmmod
+ * context for the module case).
+ */
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ WL_DBG(("Enter\n"));
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ if (iter->ndev && (iter->ndev != primary_ndev)) {
+ /* Ensure interfaces are down before deleting */
+#ifdef WL_STATIC_IF
+ /* Avoiding cleaning static ifaces */
+ if (!IS_CFG80211_STATIC_IF(cfg, iter->ndev))
+#endif /* WL_STATIC_IF */
+ {
+ dev_close(iter->ndev);
+ WL_DBG(("Cleaning up iface:%s \n", iter->ndev->name));
+ wl_cfg80211_post_ifdel(iter->ndev, rtnl_lock_reqd, 0);
+ }
+ }
}
- return isup;
}
s32
-wl_iftype_to_mode(wl_iftype_t iftype)
+wl_cfg80211_post_ifdel(struct net_device *ndev, bool rtnl_lock_reqd, s32 ifidx)
{
- s32 mode = BCME_ERROR;
+ s32 ret = BCME_OK;
+ struct bcm_cfg80211 *cfg;
+ struct net_info *netinfo = NULL;
- switch (iftype) {
- case WL_IF_TYPE_STA:
- case WL_IF_TYPE_P2P_GC:
- case WL_IF_TYPE_P2P_DISC:
- mode = WL_MODE_BSS;
- break;
- case WL_IF_TYPE_AP:
- case WL_IF_TYPE_P2P_GO:
- mode = WL_MODE_AP;
- break;
- case WL_IF_TYPE_NAN:
- mode = WL_MODE_NAN;
- break;
- case WL_IF_TYPE_AWDL:
- mode = WL_MODE_AWDL;
- break;
- case WL_IF_TYPE_AIBSS:
- /* Intentional fall through */
- case WL_IF_TYPE_IBSS:
- mode = WL_MODE_IBSS;
- break;
- default:
- WL_ERR(("Unsupported type:%d\n", iftype));
- break;
+ if (!ndev || !ndev->ieee80211_ptr) {
+ /* No wireless dev done for this interface */
+ ret = -EINVAL;
+ goto exit;
}
- return mode;
-}
-static s32
-cfg80211_to_wl_iftype(uint16 type, uint16 *role, uint16 *mode)
-{
- switch (type) {
- case NL80211_IFTYPE_STATION:
- *role = WL_IF_TYPE_STA;
- *mode = WL_MODE_BSS;
- break;
- case NL80211_IFTYPE_AP:
- *role = WL_IF_TYPE_AP;
- *mode = WL_MODE_AP;
- break;
-#ifdef WL_CFG80211_P2P_DEV_IF
- case NL80211_IFTYPE_P2P_DEVICE:
- *role = WL_IF_TYPE_P2P_DISC;
- *mode = WL_MODE_BSS;
- break;
-#endif /* WL_CFG80211_P2P_DEV_IF */
- case NL80211_IFTYPE_P2P_GO:
- *role = WL_IF_TYPE_P2P_GO;
- *mode = WL_MODE_AP;
- break;
- case NL80211_IFTYPE_P2P_CLIENT:
- *role = WL_IF_TYPE_P2P_GC;
- *mode = WL_MODE_BSS;
- break;
- case NL80211_IFTYPE_MONITOR:
- WL_ERR(("Unsupported mode \n"));
- return BCME_UNSUPPORTED;
- case NL80211_IFTYPE_ADHOC:
- *role = WL_IF_TYPE_IBSS;
- *mode = WL_MODE_IBSS;
- break;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
- case NL80211_IFTYPE_NAN:
- *role = WL_IF_TYPE_NAN;
- *mode = WL_MODE_NAN;
- break;
-#endif // endif
- default:
- WL_ERR(("Unknown interface type:0x%x\n", type));
- return BCME_ERROR;
+ cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ WL_ERR(("cfg null\n"));
+ ret = BCME_ERROR;
+ goto exit;
}
- return BCME_OK;
+
+ if (ifidx <= 0) {
+ WL_ERR(("Invalid IF idx for iface:%s\n", ndev->name));
+ ifidx = dhd_net2idx(((struct dhd_pub *)(cfg->pub))->info, ndev);
+ BCM_REFERENCE(ifidx);
+ if (ifidx <= 0) {
+ ASSERT(0);
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ }
+
+ if ((netinfo = wl_get_netinfo_by_wdev(cfg, ndev_to_wdev(ndev))) == NULL) {
+ WL_ERR(("Find netinfo from wdev %p failed\n", ndev_to_wdev(ndev)));
+ ret = -ENODEV;
+ goto exit;
+ }
+
+#ifdef WL_STATIC_IF
+ if (IS_CFG80211_STATIC_IF(cfg, ndev)) {
+ ret = wl_cfg80211_post_static_ifdel(cfg, ndev);
+ } else
+#endif /* WL_STATIC_IF */
+ {
+ WL_INFORM_MEM(("[%s] cfg80211_remove_if ifidx:%d, vif_count:%d\n",
+ ndev->name, ifidx, cfg->vif_count));
+ wl_cfg80211_remove_if(cfg, ifidx, ndev, rtnl_lock_reqd);
+ cfg->bss_pending_op = FALSE;
+ }
+
+#ifdef SUPPORT_SET_CAC
+ wl_cfg80211_set_cac(cfg, 1);
+#endif /* SUPPORT_SET_CAC */
+exit:
+ return ret;
}
-static s32
-wl_role_to_cfg80211_type(uint16 role, uint16 *wl_iftype, uint16 *mode)
+int
+wl_cfg80211_deinit_p2p_discovery(struct bcm_cfg80211 *cfg)
{
- switch (role) {
- *wl_iftype = WL_IF_TYPE_AWDL;
- *mode = WL_MODE_AWDL;
- return NL80211_IFTYPE_STATION;
- case WLC_E_IF_ROLE_STA:
- *wl_iftype = WL_IF_TYPE_STA;
- *mode = WL_MODE_BSS;
- return NL80211_IFTYPE_STATION;
- case WLC_E_IF_ROLE_AP:
- *wl_iftype = WL_IF_TYPE_AP;
- *mode = WL_MODE_AP;
- return NL80211_IFTYPE_AP;
- case WLC_E_IF_ROLE_P2P_GO:
- *wl_iftype = WL_IF_TYPE_P2P_GO;
- *mode = WL_MODE_AP;
- return NL80211_IFTYPE_P2P_GO;
- case WLC_E_IF_ROLE_P2P_CLIENT:
- *wl_iftype = WL_IF_TYPE_P2P_GC;
- *mode = WL_MODE_BSS;
- return NL80211_IFTYPE_P2P_CLIENT;
- case WLC_E_IF_ROLE_IBSS:
- *wl_iftype = WL_IF_TYPE_IBSS;
- *mode = WL_MODE_IBSS;
- return NL80211_IFTYPE_ADHOC;
- case WLC_E_IF_ROLE_NAN:
- *wl_iftype = WL_IF_TYPE_NAN;
- *mode = WL_MODE_NAN;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) && defined(WL_CFG80211_NAN)
- /* NL80211_IFTYPE_NAN should only be used with CFG80211 NAN MGMT
- * For Vendor HAL based NAN implementation, continue advertising
- * as a STA interface
- */
- return NL80211_IFTYPE_NAN;
+ s32 ret = BCME_OK;
+ bcm_struct_cfgdev *cfgdev;
+
+ if (cfg->p2p) {
+ /* De-initialize the p2p discovery interface, if operational */
+ WL_ERR(("Disabling P2P Discovery Interface \n"));
+#ifdef WL_CFG80211_P2P_DEV_IF
+ cfgdev = bcmcfg_to_p2p_wdev(cfg);
#else
- return NL80211_IFTYPE_STATION;
-#endif /* ((LINUX_VER >= KERNEL_VERSION(4, 9, 0))) && WL_CFG80211_NAN */
+ cfgdev = cfg->p2p_net;
+#endif // endif
+ if (cfgdev) {
+ ret = wl_cfg80211_scan_stop(cfg, cfgdev);
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret));
+ }
+ }
- default:
- WL_ERR(("Unknown interface role:0x%x. Forcing type station\n", role));
- return BCME_ERROR;
+ wl_cfgp2p_disable_discovery(cfg);
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
+ p2p_on(cfg) = false;
}
+ return ret;
}
-#define MAX_ACTIVE_IF_LINKS 2
-struct net_device *
-wl_cfg80211_post_ifcreate(struct net_device *ndev,
- wl_if_event_info *event, u8 *addr,
- const char *name, bool rtnl_lock_reqd)
+/* Create a Generic Network Interface and initialize it depending up on
+ * the interface type
+ */
+struct wireless_dev *
+wl_cfg80211_create_iface(struct wiphy *wiphy,
+ wl_iftype_t wl_iftype,
+ u8 *mac_addr, const char *name)
{
- struct bcm_cfg80211 *cfg;
- struct net_device *primary_ndev;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct net_device *new_ndev = NULL;
- struct wireless_dev *wdev = NULL;
- s32 iface_type;
+ struct net_device *primary_ndev = NULL;
s32 ret = BCME_OK;
- u16 mode;
- u8 mac_addr[ETH_ALEN];
- u16 wl_iftype;
-#ifdef WL_STATIC_IF
- bool static_if = false;
-#endif /* WL_STATIC_IF */
+ s32 bsscfg_idx = 0;
+ long timeout;
+ wl_if_event_info *event = NULL;
+ u8 addr[ETH_ALEN];
+ struct net_info *iter, *next;
- if (!ndev || !event) {
- WL_ERR(("Wrong arg\n"));
+ WL_DBG(("Enter\n"));
+ if (!name) {
+ WL_ERR(("Interface name not provided\n"));
return NULL;
}
-
- cfg = wl_get_cfg(ndev);
- if (!cfg) {
- WL_ERR(("cfg null\n"));
- return NULL;
+ else {
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ if (iter->ndev) {
+ if (strncmp(iter->ndev->name, name, strlen(name)) == 0) {
+ WL_ERR(("Interface name,%s exists!\n", iter->ndev->name));
+ return NULL;
+ }
+ }
+ }
}
-
- WL_DBG(("Enter. role:%d ifidx:%d bssidx:%d\n",
- event->role, event->ifidx, event->bssidx));
- if (!event->ifidx || !event->bssidx) {
- /* Fw returned primary idx (0) for virtual interface */
- WL_ERR(("Wrong index. ifidx:%d bssidx:%d \n",
- event->ifidx, event->bssidx));
- return NULL;
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ if (likely(!mac_addr)) {
+ /* Use primary MAC with the locally administered bit for the
+ * Secondary STA I/F
+ */
+ memcpy(addr, primary_ndev->dev_addr, ETH_ALEN);
+ addr[0] |= 0x02;
+ } else {
+ /* Use the application provided mac address (if any) */
+ memcpy(addr, mac_addr, ETH_ALEN);
}
- if (wl_get_drv_status_all(cfg, CONNECTED) > MAX_ACTIVE_IF_LINKS) {
- WL_ERR(("Can't support more than %d active links\n", MAX_ACTIVE_IF_LINKS));
- return NULL;
- }
+ cfg->bss_pending_op = TRUE;
+ bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
- iface_type = wl_role_to_cfg80211_type(event->role, &wl_iftype, &mode);
- if (iface_type < 0) {
- /* Unknown iface type */
- WL_ERR(("Wrong iface type \n"));
- return NULL;
+ /*
+	 * Initialize the firmware I/F.
+ */
+ {
+ ret = wl_cfg80211_interface_ops(cfg, primary_ndev, bsscfg_idx,
+ wl_iftype, 0, addr);
}
-
-#ifdef WL_EXT_IAPSTA
- if (wl_ext_check_mesh_creating(ndev)) {
- printf("%s: change iface_type to NL80211_IFTYPE_MESH_POINT\n", __FUNCTION__);
- iface_type = NL80211_IFTYPE_MESH_POINT;
+ if (ret == BCME_UNSUPPORTED) {
+ /* Use bssidx 1 by default */
+ bsscfg_idx = 1;
+ if ((ret = wl_cfg80211_add_del_bss(cfg, primary_ndev,
+ bsscfg_idx, wl_iftype, 0, addr)) < 0) {
+ goto exit;
+ }
+ } else if (ret < 0) {
+ WL_ERR(("Interface create failed!! ret:%d \n", ret));
+ goto exit;
+ } else {
+ /* Success */
+ bsscfg_idx = ret;
}
-#endif
- WL_DBG(("mac_ptr:%p name:%s role:%d nl80211_iftype:%d " MACDBG "\n",
- addr, name, event->role, iface_type, MAC2STRDBG(event->mac)));
- if (!name) {
- /* If iface name is not provided, use dongle ifname */
- name = event->name;
+ WL_DBG(("Interface created!! bssidx:%d \n", bsscfg_idx));
+ /*
+	 * Wait till the firmware sends a confirmation event back.
+ */
+ WL_DBG(("Wait for the FW I/F Event\n"));
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout <= 0 || cfg->bss_pending_op) {
+ WL_ERR(("ADD_IF event, didn't come. Return. timeout:%lu bss_pending_op:%d\n",
+ timeout, cfg->bss_pending_op));
+ if (timeout == -ERESTARTSYS) {
+ WL_ERR(("waitqueue was interrupted by a signal, returns -ERESTARTSYS\n"));
+ }
+ goto exit;
}
- if (!addr) {
- /* If mac address is not set, use primary mac with locally administered
- * bit set.
- */
- primary_ndev = bcmcfg_to_prmry_ndev(cfg);
- memcpy(mac_addr, primary_ndev->dev_addr, ETH_ALEN);
- /* For customer6 builds, use primary mac address for virtual interface */
- mac_addr[0] |= 0x02;
- addr = mac_addr;
- }
+ event = &cfg->if_event_info;
+ /*
+	 * Since the FW operation is successful, we can go ahead with
+	 * the host interface creation.
+ */
+ new_ndev = wl_cfg80211_post_ifcreate(primary_ndev,
+ event, addr, name, false);
-#ifdef WL_STATIC_IF
- static_if = IS_CFG80211_STATIC_IF_NAME(cfg, name);
- if (static_if) {
- new_ndev = wl_cfg80211_post_static_ifcreate(cfg, event, addr, iface_type);
- wdev = new_ndev->ieee80211_ptr;
- } else
-#endif /* WL_STATIC_IF */
- {
- new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx,
- name, addr, event->bssidx, event->name);
- if (!new_ndev) {
- WL_ERR(("I/F allocation failed! \n"));
- return NULL;
- } else {
- WL_DBG(("I/F allocation succeeded! ifidx:0x%x bssidx:0x%x \n",
- event->ifidx, event->bssidx));
- }
-
- wdev = (struct wireless_dev *)MALLOCZ(cfg->osh, sizeof(*wdev));
- if (!wdev) {
- WL_ERR(("wireless_dev alloc failed! \n"));
- wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, rtnl_lock_reqd);
- return NULL;
- }
-
- wdev->wiphy = bcmcfg_to_wiphy(cfg);
- wdev->iftype = iface_type;
+ if (new_ndev) {
+ /* Iface post ops successful. Return ndev/wdev ptr */
+ return new_ndev->ieee80211_ptr;
+ }
- new_ndev->ieee80211_ptr = wdev;
- SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy));
+exit:
+ cfg->bss_pending_op = FALSE;
+ return NULL;
+}
- memcpy(new_ndev->dev_addr, addr, ETH_ALEN);
- if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev, rtnl_lock_reqd)
- != BCME_OK) {
- WL_ERR(("IFACE register failed \n"));
- /* Post interface registration, wdev would be freed from the netdev
- * destructor path. For other cases, handle it here.
- */
- MFREE(cfg->osh, wdev, sizeof(*wdev));
- wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, rtnl_lock_reqd);
- return NULL;
- }
- }
+s32
+wl_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *ndev = NULL;
+ s32 ret = BCME_OK;
+ s32 bsscfg_idx = 1;
+ long timeout;
+ u16 wl_iftype;
+ u16 wl_mode;
- /* Initialize with the station mode params */
- ret = wl_alloc_netinfo(cfg, new_ndev, wdev, wl_iftype,
- PM_ENABLE, event->bssidx, event->ifidx);
- if (unlikely(ret)) {
- WL_ERR(("wl_alloc_netinfo Error (%d)\n", ret));
- goto fail;
- }
+ WL_DBG(("Enter\n"));
- /* Apply the mode & infra setting based on iftype */
- if ((ret = wl_config_infra(cfg, new_ndev, wl_iftype)) < 0) {
- WL_ERR(("config ifmode failure (%d)\n", ret));
- goto fail;
+ /* If any scan is going on, abort it */
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ WL_DBG(("Scan in progress. Aborting the scan!\n"));
+ wl_cfg80211_cancel_scan(cfg);
}
- if (mode == WL_MODE_AP) {
- wl_set_drv_status(cfg, AP_CREATING, new_ndev);
+ bsscfg_idx = wl_get_bssidx_by_wdev(cfg, wdev);
+ if (bsscfg_idx <= 0) {
+ /* validate bsscfgidx */
+ WL_ERR(("Wrong bssidx! \n"));
+ return -EINVAL;
}
- WL_INFORM_MEM(("Network Interface (%s) registered with host."
- " cfg_iftype:%d wl_role:%d " MACDBG "\n",
- new_ndev->name, iface_type, event->role, MAC2STRDBG(new_ndev->dev_addr)));
-
+ /* Handle p2p iface */
+ if ((ret = wl_cfg80211_p2p_if_del(wiphy, wdev)) != BCME_NOTFOUND) {
+ WL_DBG(("P2P iface del handled \n"));
#ifdef SUPPORT_SET_CAC
- wl_cfg80211_set_cac(cfg, 0);
+ wl_cfg80211_set_cac(cfg, 1);
#endif /* SUPPORT_SET_CAC */
-
- return new_ndev;
-
-fail:
-#ifdef WL_STATIC_IF
- /* remove static if from iflist */
- if (static_if) {
- cfg->static_ndev_state = NDEV_STATE_FW_IF_FAILED;
- wl_cfg80211_update_iflist_info(cfg, new_ndev, WL_STATIC_IFIDX, addr,
- event->bssidx, event->name, NDEV_STATE_FW_IF_FAILED);
- }
-#endif /* WL_STATIC_IF */
- if (new_ndev) {
- /* wdev would be freed from netdev destructor call back */
- wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, rtnl_lock_reqd);
+ return ret;
}
- return NULL;
-}
-
-void
-wl_cfg80211_cleanup_virtual_ifaces(struct bcm_cfg80211 *cfg, bool rtnl_lock_reqd)
-{
- struct net_info *iter, *next;
- struct net_device *primary_ndev;
-
- /* Note: This function will clean up only the network interface and host
- * data structures. The firmware interface clean up will happen in the
- * during chip reset (ifconfig wlan0 down for built-in drivers/rmmod
- * context for the module case).
- */
- primary_ndev = bcmcfg_to_prmry_ndev(cfg);
- WL_DBG(("Enter\n"));
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- for_each_ndev(cfg, iter, next) {
- if (iter->ndev && (iter->ndev != primary_ndev)) {
- WL_DBG(("Cleaning up iface:%s \n", iter->ndev->name));
- wl_cfg80211_post_ifdel(iter->ndev, rtnl_lock_reqd, 0);
- }
+ ndev = wdev->netdev;
+ if (unlikely(!ndev)) {
+ WL_ERR(("ndev null! \n"));
+ return -EINVAL;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
-}
-s32
-wl_cfg80211_post_ifdel(struct net_device *ndev, bool rtnl_lock_reqd, s32 ifidx)
-{
- s32 ret = BCME_OK;
- struct bcm_cfg80211 *cfg;
- u16 wl_iftype;
- struct net_info *netinfo = NULL;
-#ifdef WL_STATIC_IF
- bool static_if = false;
-#endif /* WL_STATIC_IF */
+ memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
- if (!ndev || !ndev->ieee80211_ptr) {
- /* No wireless dev done for this interface */
- ret = -EINVAL;
- goto exit;
+ if (cfg80211_to_wl_iftype(ndev->ieee80211_ptr->iftype,
+ &wl_iftype, &wl_mode) < 0) {
+ return -EINVAL;
}
- cfg = wl_get_cfg(ndev);
- if (!cfg) {
- WL_ERR(("cfg null\n"));
- ret = BCME_ERROR;
+ WL_DBG(("del interface. bssidx:%d cfg_iftype:%d wl_iftype:%d",
+ bsscfg_idx, ndev->ieee80211_ptr->iftype, wl_iftype));
+ /* Delete the firmware interface. "interface_remove" command
+ * should go on the interface to be deleted
+ */
+ if (wl_cfg80211_get_bus_state(cfg)) {
+ WL_ERR(("Bus state is down: %d\n", __LINE__));
+ ret = BCME_DONGLE_DOWN;
goto exit;
}
- if (ifidx <= 0) {
- WL_ERR(("Invalid IF idx for iface:%s\n", ndev->name));
- ifidx = dhd_net2idx(((struct dhd_pub *)(cfg->pub))->info, ndev);
- BCM_REFERENCE(ifidx);
- if (ifidx <= 0) {
- ASSERT(0);
- ret = BCME_ERROR;
+ cfg->bss_pending_op = true;
+ ret = wl_cfg80211_interface_ops(cfg, ndev, bsscfg_idx,
+ wl_iftype, 1, NULL);
+ if (ret == BCME_UNSUPPORTED) {
+ if ((ret = wl_cfg80211_add_del_bss(cfg, ndev,
+ bsscfg_idx, wl_iftype, true, NULL)) < 0) {
+ WL_ERR(("DEL bss failed ret:%d \n", ret));
goto exit;
}
+ } else if ((ret == BCME_NOTAP) || (ret == BCME_NOTSTA)) {
+ /* De-init sequence involving role downgrade not happened.
+ * Do nothing and return error. The del command should be
+ * retried.
+ */
+ WL_ERR(("ifdel role mismatch:%d\n", ret));
+ ret = -EBADTYPE;
+ goto exit;
+ } else if (ret < 0) {
+ WL_ERR(("Interface DEL failed ret:%d \n", ret));
+ goto exit;
}
- if ((netinfo = wl_get_netinfo_by_wdev(cfg, ndev_to_wdev(ndev))) == NULL) {
- WL_ERR(("Find netinfo from wdev %p failed\n", ndev_to_wdev(ndev)));
- ret = -ENODEV;
- goto exit;
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout <= 0 || cfg->bss_pending_op) {
+ WL_ERR(("timeout in waiting IF_DEL event\n"));
+ /* The interface unregister will happen from wifi reset context */
+ ret = -ETIMEDOUT;
+ /* fall through */
}
- wl_iftype = netinfo->iftype;
+exit:
+ if (ret < 0) {
+ WL_ERR(("iface del failed:%d\n", ret));
#ifdef WL_STATIC_IF
- static_if = IS_CFG80211_STATIC_IF(cfg, ndev);
- if (static_if) {
- ret = wl_cfg80211_post_static_ifdel(cfg, ndev);
- } else
+ if (IS_CFG80211_STATIC_IF(cfg, ndev)) {
+ /*
+ * For static interface, clean up the host data,
+ * irrespective of fw status. For dynamic
+ * interfaces it gets cleaned from dhd_stop context
+ */
+ wl_cfg80211_post_static_ifdel(cfg, ndev);
+ }
#endif /* WL_STATIC_IF */
- {
-#ifdef WL_NAN
- if (!((cfg->nancfg.mac_rand) && (wl_iftype == WL_IF_TYPE_NAN)))
-#endif /* WL_NAN */
- {
- wl_release_vif_macaddr(cfg, ndev->dev_addr, wl_iftype);
+ } else {
+ ret = wl_cfg80211_post_ifdel(ndev, false, cfg->if_event_info.ifidx);
+ if (unlikely(ret)) {
+ WL_ERR(("post_ifdel failed\n"));
}
- WL_INFORM_MEM(("[%s] cfg80211_remove_if ifidx:%d, vif_count:%d\n",
- ndev->name, ifidx, cfg->vif_count));
- wl_cfg80211_remove_if(cfg, ifidx, ndev, rtnl_lock_reqd);
- cfg->bss_pending_op = FALSE;
}
-#ifdef SUPPORT_SET_CAC
- wl_cfg80211_set_cac(cfg, 1);
-#endif /* SUPPORT_SET_CAC */
-exit:
+ cfg->bss_pending_op = false;
return ret;
}
-int
-wl_cfg80211_deinit_p2p_discovery(struct bcm_cfg80211 *cfg)
+static s32
+wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ibss_params *params)
{
- s32 ret = BCME_OK;
- bcm_struct_cfgdev *cfgdev;
-
- if (cfg->p2p) {
- /* De-initialize the p2p discovery interface, if operational */
- WL_ERR(("Disabling P2P Discovery Interface \n"));
-#ifdef WL_CFG80211_P2P_DEV_IF
- cfgdev = bcmcfg_to_p2p_wdev(cfg);
-#else
- cfgdev = cfg->p2p_net;
-#endif // endif
- if (cfgdev) {
- ret = wl_cfg80211_scan_stop(cfg, cfgdev);
- if (unlikely(ret < 0)) {
- CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret));
- }
- }
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct cfg80211_bss *bss;
+ struct ieee80211_channel *chan;
+ struct wl_join_params join_params;
+ int scan_suppress;
+ struct cfg80211_ssid ssid;
+ s32 scan_retry = 0;
+ s32 err = 0;
+ size_t join_params_size;
+ chanspec_t chanspec = 0;
+ u32 param[2] = {0, 0};
+ u32 bw_cap = 0;
- wl_cfgp2p_disable_discovery(cfg);
- wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
- p2p_on(cfg) = false;
- }
- return ret;
-}
-
-/* Create a Generic Network Interface and initialize it depending up on
- * the interface type
- */
-struct wireless_dev *
-wl_cfg80211_create_iface(struct wiphy *wiphy,
- wl_iftype_t wl_iftype,
- u8 *mac_addr, const char *name)
-{
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct net_device *new_ndev = NULL;
- struct net_device *primary_ndev = NULL;
- s32 ret = BCME_OK;
- s32 bsscfg_idx = 0;
- u32 timeout;
- wl_if_event_info *event = NULL;
- u8 addr[ETH_ALEN];
- struct net_info *iter, *next;
-
- WL_DBG(("Enter\n"));
- if (!name) {
- WL_ERR(("Interface name not provided\n"));
- return NULL;
- }
- else {
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- for_each_ndev(cfg, iter, next) {
- if (iter->ndev) {
- if (strcmp(iter->ndev->name, name) == 0) {
- WL_ERR(("Interface name, %s exists !\n", iter->ndev->name));
- return NULL;
- }
- }
- }
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- }
- primary_ndev = bcmcfg_to_prmry_ndev(cfg);
- if (likely(!mac_addr)) {
- /* Use primary MAC with the locally administered bit for the
- * Secondary STA I/F
- */
- memcpy(addr, primary_ndev->dev_addr, ETH_ALEN);
- addr[0] |= 0x02;
- } else {
- /* Use the application provided mac address (if any) */
- memcpy(addr, mac_addr, ETH_ALEN);
- }
-
- cfg->bss_pending_op = TRUE;
- memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
-
- /* De-initialize the p2p discovery interface, if operational */
- wl_cfg80211_deinit_p2p_discovery(cfg);
-
- /*
- * Intialize the firmware I/F.
- */
- {
- ret = wl_cfg80211_interface_ops(cfg, primary_ndev, bsscfg_idx,
- wl_iftype, 0, addr);
- }
- if (ret == BCME_UNSUPPORTED) {
- /* Use bssidx 1 by default */
- bsscfg_idx = 1;
- if ((ret = wl_cfg80211_add_del_bss(cfg, primary_ndev,
- bsscfg_idx, wl_iftype, 0, addr)) < 0) {
- goto exit;
- }
- } else if (ret < 0) {
- WL_ERR(("Interface create failed!! ret:%d \n", ret));
- goto exit;
- } else {
- /* Success */
- bsscfg_idx = ret;
- }
-
- WL_DBG(("Interface created!! bssidx:%d \n", bsscfg_idx));
- /*
- * Wait till the firmware send a confirmation event back.
- */
- WL_DBG(("Wait for the FW I/F Event\n"));
- timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
- !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
- if (timeout <= 0 || cfg->bss_pending_op) {
- WL_ERR(("ADD_IF event, didn't come. Return \n"));
- goto exit;
- }
-
- event = &cfg->if_event_info;
- /*
- * Since FW operation is successful,we can go ahead with the
- * the host interface creation.
- */
- new_ndev = wl_cfg80211_post_ifcreate(primary_ndev,
- event, addr, name, false);
-
- if (new_ndev) {
- /* Iface post ops successful. Return ndev/wdev ptr */
- return new_ndev->ieee80211_ptr;
- }
-
-exit:
- cfg->bss_pending_op = FALSE;
- return NULL;
-}
-
-s32
-wl_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
-{
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct net_device *ndev = NULL;
- s32 ret = BCME_OK;
- s32 bsscfg_idx = 1;
- u32 timeout;
- u16 wl_iftype;
- u16 wl_mode;
-
- WL_DBG(("Enter\n"));
-
- /* If any scan is going on, abort it */
- if (wl_get_drv_status_all(cfg, SCANNING)) {
- WL_DBG(("Scan in progress. Aborting the scan!\n"));
- wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
- }
-
- bsscfg_idx = wl_get_bssidx_by_wdev(cfg, wdev);
- if (bsscfg_idx <= 0) {
- /* validate bsscfgidx */
- WL_ERR(("Wrong bssidx! \n"));
- return -EINVAL;
- }
-
- /* Handle p2p iface */
- if ((ret = wl_cfg80211_p2p_if_del(wiphy, wdev)) != BCME_NOTFOUND) {
- WL_DBG(("P2P iface del handled \n"));
-#ifdef SUPPORT_SET_CAC
- wl_cfg80211_set_cac(cfg, 1);
-#endif /* SUPPORT_SET_CAC */
- return ret;
- }
-
- ndev = wdev->netdev;
- if (unlikely(!ndev)) {
- WL_ERR(("ndev null! \n"));
- return -EINVAL;
- }
-
- memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
-
- if (cfg80211_to_wl_iftype(ndev->ieee80211_ptr->iftype,
- &wl_iftype, &wl_mode) < 0) {
- return -EINVAL;
- }
-
- WL_DBG(("del interface. bssidx:%d cfg_iftype:%d wl_iftype:%d",
- bsscfg_idx, ndev->ieee80211_ptr->iftype, wl_iftype));
- /* Delete the firmware interface. "interface_remove" command
- * should go on the interface to be deleted
- */
- cfg->bss_pending_op = true;
- ret = wl_cfg80211_interface_ops(cfg, ndev, bsscfg_idx,
- wl_iftype, 1, NULL);
- if (ret == BCME_UNSUPPORTED) {
- if ((ret = wl_cfg80211_add_del_bss(cfg, ndev,
- bsscfg_idx, wl_iftype, true, NULL)) < 0) {
- WL_ERR(("DEL bss failed ret:%d \n", ret));
- goto exit;
- }
- } else if ((ret == BCME_NOTAP) || (ret == BCME_NOTSTA)) {
- /* De-init sequence involving role downgrade not happened.
- * Do nothing and return error. The del command should be
- * retried.
- */
- WL_ERR(("ifdel role mismatch:%d\n", ret));
- ret = -EBADTYPE;
- goto exit;
- } else if (ret < 0) {
- WL_ERR(("Interface DEL failed ret:%d \n", ret));
- goto exit;
- }
-
- timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
- !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
- if (timeout <= 0 || cfg->bss_pending_op) {
- WL_ERR(("timeout in waiting IF_DEL event\n"));
- /* The interface unregister will happen from wifi reset context */
- ret = -ETIMEDOUT;
- goto exit;
- }
-
- ret = wl_cfg80211_post_ifdel(ndev, false, cfg->if_event_info.ifidx);
- if (unlikely(ret)) {
- WL_ERR(("post_ifdel failed\n"));
- }
-
-exit:
- cfg->bss_pending_op = false;
- return ret;
-}
-
-static s32
-wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_ibss_params *params)
-{
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct cfg80211_bss *bss;
- struct ieee80211_channel *chan;
- struct wl_join_params join_params;
- int scan_suppress;
- struct cfg80211_ssid ssid;
- s32 scan_retry = 0;
- s32 err = 0;
- size_t join_params_size;
- chanspec_t chanspec = 0;
- u32 param[2] = {0, 0};
- u32 bw_cap = 0;
-
- WL_TRACE(("In\n"));
- RETURN_EIO_IF_NOT_UP(cfg);
- WL_INFORM_MEM(("IBSS JOIN BSSID:" MACDBG "\n", MAC2STRDBG(params->bssid)));
- if (!params->ssid || params->ssid_len <= 0 ||
- params->ssid_len > DOT11_MAX_SSID_LEN) {
- WL_ERR(("Invalid parameter\n"));
- return -EINVAL;
+ WL_TRACE(("In\n"));
+ RETURN_EIO_IF_NOT_UP(cfg);
+ WL_INFORM_MEM(("IBSS JOIN BSSID:" MACDBG "\n", MAC2STRDBG(params->bssid)));
+ if (!params->ssid || params->ssid_len <= 0 ||
+ params->ssid_len > DOT11_MAX_SSID_LEN) {
+ WL_ERR(("Invalid parameter\n"));
+ return -EINVAL;
}
#if defined(WL_CFG80211_P2P_DEV_IF)
chan = params->chandef.chan;
* Join with specific BSSID and cached SSID
* If SSID is zero join based on BSSID only
*/
- memset(&join_params, 0, sizeof(join_params));
+ bzero(&join_params, sizeof(join_params));
memcpy((void *)join_params.ssid.SSID, (const void *)params->ssid,
params->ssid_len);
join_params.ssid.SSID_len = htod32(params->ssid_len);
return err;
}
} else
- memset(&join_params.params.bssid, 0, ETHER_ADDR_LEN);
+ bzero(&join_params.params.bssid, ETHER_ADDR_LEN);
if (IBSS_INITIAL_SCAN_ALLOWED == FALSE) {
scan_suppress = TRUE;
if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
val = WPA_AUTH_PSK |
-#ifdef BCMCCX
- WPA_AUTH_CCKM |
-#endif // endif
WPA_AUTH_UNSPECIFIED;
else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
val = WPA2_AUTH_PSK|
-#ifdef BCMCCX
- WPA2_AUTH_CCKM |
-#endif // endif
WPA2_AUTH_UNSPECIFIED;
else
val = WPA_AUTH_DISABLED;
s32 err = 0;
s32 bssidx;
- WL_DBG((" %s \n", __FUNCTION__));
+ WL_DBG((" wl_set_set_wapi_ie\n"));
if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
return BCME_ERROR;
val = WL_AUTH_OPEN_SHARED;
WL_DBG(("automatic\n"));
break;
-#ifdef BCMCCX
- case NL80211_AUTHTYPE_NETWORK_EAP:
- WL_DBG(("network eap\n"));
- val = DOT11_LEAP_AUTH;
- break;
-#endif // endif
#ifdef WL_FILS
case NL80211_AUTHTYPE_FILS_SK:
WL_DBG(("fils shared key\n"));
return err;
}
+static u32
+wl_rsn_cipher_wsec_algo_lookup(uint32 cipher)
+{
+ uint i;
+
+ for (i = 0; i < ARRAYSIZE(rsn_cipher_algo_lookup_tbl); i++) {
+ if (cipher == rsn_cipher_algo_lookup_tbl[i].cipher_suite) {
+ return rsn_cipher_algo_lookup_tbl[i].wsec_algo;
+ }
+ }
+ return WSEC_NONE;
+}
+
+static u32
+wl_rsn_cipher_wsec_key_algo_lookup(uint32 cipher)
+{
+ uint i;
+
+ for (i = 0; i < ARRAYSIZE(rsn_cipher_algo_lookup_tbl); i++) {
+ if (cipher == rsn_cipher_algo_lookup_tbl[i].cipher_suite) {
+ return rsn_cipher_algo_lookup_tbl[i].wsec_key_algo;
+ }
+ }
+ return CRYPTO_ALGO_OFF;
+}
+
+static u32
+wl_rsn_akm_wpa_auth_lookup(uint32 akm)
+{
+ uint i;
+
+ for (i = 0; i < ARRAYSIZE(rsn_akm_wpa_auth_lookup_tbl); i++) {
+ if (akm == rsn_akm_wpa_auth_lookup_tbl[i].akm_suite) {
+ return rsn_akm_wpa_auth_lookup_tbl[i].wpa_auth;
+ }
+ }
+ return WPA_AUTH_DISABLED;
+}
+
static s32
wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme)
{
s32 val = 0;
#endif // endif
s32 bssidx;
+#ifdef WL_GCMP
+ uint32 algos = 0, mask = 0;
+#endif /* WL_GCMP */
if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
}
if (sme->crypto.n_ciphers_pairwise) {
+ pval = wl_rsn_cipher_wsec_algo_lookup(sme->crypto.ciphers_pairwise[0]);
+ if (pval == WSEC_NONE) {
+ WL_ERR(("invalid cipher pairwise (%d)\n", sme->crypto.ciphers_pairwise[0]));
+ return BCME_BADARG;
+ }
switch (sme->crypto.ciphers_pairwise[0]) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- pval = WEP_ENABLED;
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- pval = TKIP_ENABLED;
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- case WLAN_CIPHER_SUITE_AES_CMAC:
- pval = AES_ENABLED;
- break;
#ifdef BCMWAPI_WPI
case WLAN_CIPHER_SUITE_SMS4:
- val = SMS4_ENABLED;
- pval = SMS4_ENABLED;
+ val = pval;
err = wl_set_set_wapi_ie(dev, sme);
if (unlikely(err)) {
WL_DBG(("Set wapi ie failed \n"));
}
break;
#endif /* BCMWAPI_WPI */
- default:
- WL_ERR(("invalid cipher pairwise (%d)\n",
- sme->crypto.ciphers_pairwise[0]));
- return -EINVAL;
+#ifdef WL_GCMP
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ algos = KEY_ALGO_MASK(wl_rsn_cipher_wsec_key_algo_lookup(
+ sme->crypto.ciphers_pairwise[0]));
+ mask = algos | KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM);
+ break;
+#endif /* WL_GCMP */
+ default: /* No post processing required */
+ break;
}
}
#if defined(BCMSUP_4WAY_HANDSHAKE)
}
#endif /* BCMSUP_4WAY_HANDSHAKE */
if (sme->crypto.cipher_group) {
+ gval = wl_rsn_cipher_wsec_algo_lookup(sme->crypto.cipher_group);
+ if (gval == WSEC_NONE) {
+ WL_ERR(("invalid cipher group (%d)\n", sme->crypto.cipher_group));
+ return BCME_BADARG;
+ }
switch (sme->crypto.cipher_group) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- gval = WEP_ENABLED;
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- gval = TKIP_ENABLED;
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- gval = AES_ENABLED;
- break;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- gval = AES_ENABLED;
- break;
#ifdef BCMWAPI_WPI
case WLAN_CIPHER_SUITE_SMS4:
- val = SMS4_ENABLED;
- gval = SMS4_ENABLED;
+ val = gval;
break;
#endif // endif
- default:
- WL_ERR(("invalid cipher group (%d)\n",
- sme->crypto.cipher_group));
- return -EINVAL;
+#ifdef WL_GCMP
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ algos = KEY_ALGO_MASK(
+ wl_rsn_cipher_wsec_key_algo_lookup(sme->crypto.cipher_group));
+ mask = algos | KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM);
+ break;
+#endif /* WL_GCMP */
+ default: /* No post processing required */
+ break;
}
}
WL_DBG(("pval (%d) gval (%d)\n", pval, gval));
+#ifdef WL_GCMP
+ WL_DBG(("algos:%x, mask:%x", algos, mask));
+#endif /* WL_GCMP */
if (is_wps_conn(sme)) {
if (sme->privacy) {
WL_ERR(("error (%d)\n", err));
return err;
}
-
+#ifdef WL_GCMP
+ if (wl_set_wsec_info_algos(dev, algos, mask)) {
+ WL_ERR(("set wsec_info error (%d)\n", err));
+ }
+#endif /* WL_GCMP */
sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
sec->cipher_group = sme->crypto.cipher_group;
return err;
}
+#ifdef WL_GCMP
+static s32
+wl_set_wsec_info_algos(struct net_device *dev, uint32 algos, uint32 mask)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ s32 bssidx;
+ s32 err = 0;
+ wl_wsec_info_t *wsec_info;
+ bcm_xtlv_t *wsec_info_tlv;
+ uint16 tlv_data_len;
+ uint8 tlv_data[8];
+ uint32 param_len;
+ uint8 * buf;
+
+ WL_DBG(("enter.\n"));
+ if (!cfg) {
+ return BCME_ERROR;
+ }
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find index from wdev(%p) failed\n", dev->ieee80211_ptr));
+ return BCME_ERROR;
+ }
+
+ buf = MALLOCZ(cfg->osh, sizeof(wl_wsec_info_t) + sizeof(tlv_data));
+ if (!buf) {
+ WL_ERR(("No memory"));
+ return BCME_NOMEM;
+ }
+ wsec_info = (wl_wsec_info_t *)buf;
+ wsec_info->version = WL_WSEC_INFO_VERSION;
+ wsec_info_tlv = (bcm_xtlv_t *)(buf + OFFSETOF(wl_wsec_info_t, tlvs));
+
+ wsec_info->num_tlvs++;
+ tlv_data_len = sizeof(tlv_data);
+ err = memcpy_s(tlv_data, sizeof(tlv_data), &algos, sizeof(algos));
+ if (err) {
+ goto exit;
+ }
+ err = memcpy_s(tlv_data + sizeof(algos), sizeof(mask), &mask, sizeof(mask));
+ if (err) {
+ goto exit;
+ }
+ bcm_xtlv_pack_xtlv(wsec_info_tlv, WL_WSEC_INFO_BSS_ALGOS, tlv_data_len, tlv_data, 0);
+ param_len = OFFSETOF(wl_wsec_info_t, tlvs) + WL_WSEC_INFO_TLV_HDR_LEN + tlv_data_len;
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_info", wsec_info, param_len,
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (unlikely(err))
+ WL_ERR(("wsec_info error (%d)\n", err));
+exit:
+ MFREE(cfg->osh, buf, sizeof(wl_wsec_info_t) + sizeof(tlv_data));
+ return err;
+}
+#endif /* WL_GCMP */
#ifdef MFP
static s32
wl_cfg80211_set_mfp(struct bcm_cfg80211 *cfg,
WL_DBG(("rsn_cap 0x%x%x\n", rsn_cap[0], rsn_cap[1]));
/* Check for MFP cap in the RSN capability field */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
- if (sme->mfp) {
-#endif
+ if (sme->mfp)
+#endif // endif
+ {
if (rsn_cap[0] & RSN_CAP_MFPR) {
mfp = WL_MFP_REQUIRED;
} else if (rsn_cap[0] & RSN_CAP_MFPC) {
mfp = WL_MFP_CAPABLE;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
}
-#endif
/*
* eptr --> end/last byte addr of wpa2_ie
* ptr --> to keep track of current/required byte addr
group_mgmt_cs, (WPA_SUITE_LEN - 1)) == 0) {
WL_DBG(("BIP is found\n"));
err = wldev_iovar_setbuf(dev, "bip",
- group_mgmt_cs, WPA_SUITE_LEN, cfg->ioctl_buf,
- WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ group_mgmt_cs, WPA_SUITE_LEN, cfg->ioctl_buf,
+ WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
/*
* Dont return failure for unsupported cases
* of bip iovar for backward compatibility
#endif /* MFP */
#ifdef WL_FILS
-static bool
+bool
wl_is_fils_supported(struct net_device *ndev)
{
s32 err;
return err;
}
if (val & (WPA_AUTH_PSK |
-#ifdef BCMCCX
- WPA_AUTH_CCKM |
-#endif // endif
WPA_AUTH_UNSPECIFIED)) {
switch (sme->crypto.akm_suites[0]) {
case WLAN_AKM_SUITE_8021X:
case WLAN_AKM_SUITE_PSK:
val = WPA_AUTH_PSK;
break;
-#ifdef BCMCCX
- case WLAN_AKM_SUITE_CCKM:
- val = WPA_AUTH_CCKM;
- break;
-#endif // endif
default:
WL_ERR(("invalid akm suite (0x%x)\n",
sme->crypto.akm_suites[0]));
return -EINVAL;
}
} else if (val & (WPA2_AUTH_PSK |
-#ifdef BCMCCX
- WPA2_AUTH_CCKM |
-#endif // endif
WPA2_AUTH_UNSPECIFIED)) {
switch (sme->crypto.akm_suites[0]) {
- case WLAN_AKM_SUITE_8021X:
- val = WPA2_AUTH_UNSPECIFIED;
- break;
#ifdef MFP
case WL_AKM_SUITE_SHA256_1X:
val = WPA2_AUTH_1X_SHA256;
val = WPA2_AUTH_PSK_SHA256;
break;
#endif /* MFP */
+ case WLAN_AKM_SUITE_8021X:
case WLAN_AKM_SUITE_PSK:
- val = WPA2_AUTH_PSK;
- break;
#if defined(WLFBT) && defined(WLAN_AKM_SUITE_FT_8021X)
case WLAN_AKM_SUITE_FT_8021X:
- val = WPA2_AUTH_UNSPECIFIED | WPA2_AUTH_FT;
- break;
#endif // endif
#if defined(WLFBT) && defined(WLAN_AKM_SUITE_FT_PSK)
case WLAN_AKM_SUITE_FT_PSK:
- val = WPA2_AUTH_PSK | WPA2_AUTH_FT;
- break;
-#endif // endif
-#ifdef BCMCCX
- case WLAN_AKM_SUITE_CCKM:
- val = WPA2_AUTH_CCKM;
- break;
#endif // endif
-#ifdef WL_FILS
case WLAN_AKM_SUITE_FILS_SHA256:
- val = WPA2_AUTH_FILS_SHA256;
- break;
case WLAN_AKM_SUITE_FILS_SHA384:
- val = WPA2_AUTH_FILS_SHA384;
- break;
-#endif /* WL_FILS */
+ case WLAN_AKM_SUITE_8021X_SUITE_B:
+ case WLAN_AKM_SUITE_8021X_SUITE_B_192:
+#ifdef WL_OWE
+ case WLAN_AKM_SUITE_OWE:
+#endif /* WL_OWE */
#ifdef WL_SAE
case WLAN_AKM_SUITE_SAE:
- val = WPA3_AUTH_SAE_PSK;
- break;
#endif /* WL_SAE */
+ case WLAN_AKM_SUITE_FT_8021X_SHA384:
+ val = wl_rsn_akm_wpa_auth_lookup(sme->crypto.akm_suites[0]);
+ break;
+ case WLAN_AKM_SUITE_FT_FILS_SHA256:
+ val = WPA2_AUTH_FILS_SHA256 | WPA2_AUTH_FT;
+ break;
+ case WLAN_AKM_SUITE_FT_FILS_SHA384:
+ val = WPA2_AUTH_FILS_SHA384 | WPA2_AUTH_FT;
+ break;
default:
WL_ERR(("invalid akm suite (0x%x)\n",
sme->crypto.akm_suites[0]));
(sec->cipher_pairwise & (WLAN_CIPHER_SUITE_WEP40 |
WLAN_CIPHER_SUITE_WEP104)))
{
- memset(&key, 0, sizeof(key));
+ bzero(&key, sizeof(key));
key.len = (u32) sme->key_len;
key.index = (u32) sme->key_idx;
if (unlikely(key.len > sizeof(key.data))) {
}
memcpy(key.data, sme->key, key.len);
key.flags = WL_PRIMARY_KEY;
- switch (sec->cipher_pairwise) {
- case WLAN_CIPHER_SUITE_WEP40:
- key.algo = CRYPTO_ALGO_WEP1;
- break;
- case WLAN_CIPHER_SUITE_WEP104:
- key.algo = CRYPTO_ALGO_WEP128;
- break;
- default:
+ if ((sec->cipher_pairwise == WLAN_CIPHER_SUITE_WEP40) ||
+ (sec->cipher_pairwise == WLAN_CIPHER_SUITE_WEP104)) {
+ key.algo = wl_rsn_cipher_wsec_key_algo_lookup(sec->cipher_pairwise);
+ } else {
WL_ERR(("Invalid algorithm (%d)\n",
sme->crypto.ciphers_pairwise[0]));
return -EINVAL;
chanspec = wl_chspec_driver_to_host(chanspec);
isvht80 = chanspec & WL_CHANSPEC_BW_80;
- WL_DBG(("%s: chanspec(%x:%d)\n", __FUNCTION__, chanspec, isvht80));
+ WL_DBG(("wl_get_chan_isvht80: chanspec(%x:%d)\n", chanspec, isvht80));
return isvht80;
}
int wait_cnt;
if (disassociate) {
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ BCM_REFERENCE(dhdp);
+ DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
+ dhd_net2idx(dhdp->info, dev), DOT11_RC_DISASSOC_LEAVING);
WL_ERR(("Disassociate previous connection!\n"));
wl_set_drv_status(cfg, DISCONNECTING, dev);
scbval.val = DOT11_RC_DISASSOC_LEAVING;
* for driver clean up.
*/
wl_clr_drv_status(cfg, DISCONNECTING, dev);
+ wl_clr_drv_status(cfg, CONNECTED, dev);
return BCME_NOTREADY;
}
return BCME_OK;
#ifdef ESCAN_CHANNEL_CACHE
chanspec_t chanspec_list[MAX_ROAM_CHANNEL];
#endif /* ESCAN_CHANNEL_CACHE */
-#if (defined(BCM4334_CHIP) || defined(BCM4359_CHIP) || !defined(ESCAN_RESULT_PATCH))
int wait_cnt;
-#endif // endif
+ char sec[32];
WL_DBG(("In\n"));
+ if (!dev) {
+ WL_ERR(("dev is null\n"));
+ return -EINVAL;
+ }
BCM_REFERENCE(dhdp);
+ DHD_STATLOG_CTRL(dhdp, ST(ASSOC_START), dhd_net2idx(dhdp->info, dev), 0);
#ifdef ESCAN_CHANNEL_CACHE
memset(chanspec_list, 0, (sizeof(chanspec_t) * MAX_ROAM_CHANNEL));
/* Connection attempted via linux-wireless */
wl_set_drv_status(cfg, CFG80211_CONNECT, dev);
+#ifdef DHDTCPSYNC_FLOOD_BLK
+ dhd_reset_tcpsync_info_by_dev(dev);
+#endif /* DHDTCPSYNC_FLOOD_BLK */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
-#if defined(WL_FW_OCE_AP_SELECT)
+#ifdef WL_SKIP_CONNECT_HINTS
+ skip_hints = true;
+#elif defined(WL_FW_OCE_AP_SELECT)
/* override bssid_hint for oce networks */
skip_hints = (fw_ap_select && wl_cfg80211_is_oce_ap(wiphy, sme->bssid_hint));
#endif // endif
/*
* Cancel ongoing scan to sync up with sme state machine of cfg80211.
*/
-#if (defined(BCM4359_CHIP) || !defined(ESCAN_RESULT_PATCH))
if (cfg->scan_request) {
WL_TRACE_HW4(("Aborting the scan! \n"));
wl_cfg80211_scan_abort(cfg);
OSL_SLEEP(WAIT_SCAN_ABORT_OSL_SLEEP_TIME);
}
if (wl_get_drv_status(cfg, SCANNING, dev)) {
- wl_notify_escan_complete(cfg, dev, true, true);
+ wl_cfg80211_cancel_scan(cfg);
}
}
-#endif // endif
#ifdef WL_SCHED_SCAN
/* Locks are taken in wl_cfg80211_sched_scan_stop()
* A start scan occuring during connect is unlikely
*/
if (cfg->sched_scan_req) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
- wl_cfg80211_sched_scan_stop(wiphy, bcmcfg_to_prmry_ndev(cfg), 0);
+ wl_cfg80211_sched_scan_stop(wiphy, bcmcfg_to_prmry_ndev(cfg),
+ cfg->sched_scan_req->reqid);
#else
wl_cfg80211_sched_scan_stop(wiphy, bcmcfg_to_prmry_ndev(cfg));
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */
wl_cfg80211_check_in4way(cfg, dev, WAIT_DISCONNECTED,
WL_EXT_STATUS_CONNECTING, NULL);
+ if (sme->bssid) {
+ wl_update_prof(cfg, dev, NULL, sme->bssid, WL_PROF_LATEST_BSSID);
+ } else {
+		wl_update_prof(cfg, dev, NULL, &ether_bcast, WL_PROF_LATEST_BSSID);
+ }
+#ifdef SUPPORT_AP_BWCTRL
+ if (dhdp->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ wl_restore_ap_bw(cfg);
+ }
+#endif /* SUPPORT_AP_BWCTRL */
/* 'connect' request received */
wl_set_drv_status(cfg, CONNECTING, dev);
/* clear nested connect bit on proceeding for connection */
bool is_roamtrig_reset = TRUE;
bool is_roam_env_ok = (wldev_iovar_setint(dev, "roam_env_detection",
AP_ENV_DETECT_NOT_USED) == BCME_OK);
-
#ifdef SKIP_ROAM_TRIGGER_RESET
roam_trigger[1] = WLC_BAND_2G;
is_roamtrig_reset =
}
memcpy(ssid.SSID, sme->ssid, sme->ssid_len);
- ssid.SSID_len = sme->ssid_len;
+ ssid.SSID_len = (uint32)sme->ssid_len;
chan_cnt = get_roam_channel_list(cfg->channel, chanspec_list,
MAX_ROAM_CHANNEL, &ssid, ioctl_version);
WL_DBG(("RCC channel count:%d \n", chan_cnt));
wl_clr_drv_status(cfg, CONNECTING, dev);
goto exit;
}
- ext_join_params->ssid.SSID_len = min(sizeof(ext_join_params->ssid.SSID), sme->ssid_len);
+ ext_join_params->ssid.SSID_len =
+ (uint32)min(sizeof(ext_join_params->ssid.SSID), sme->ssid_len);
memcpy(&ext_join_params->ssid.SSID, sme->ssid, ext_join_params->ssid.SSID_len);
wl_update_prof(cfg, dev, NULL, &ext_join_params->ssid, WL_PROF_SSID);
ext_join_params->ssid.SSID_len = htod32(ext_join_params->ssid.SSID_len);
wl_cfg80211_tdls_config(cfg, TDLS_STATE_CONNECT, false);
#endif /* WLTDLS */
#ifdef WL_EXT_IAPSTA
- wl_ext_iapsta_update_channel(dev, cfg->channel);
+ wl_ext_iapsta_update_channel(dhdp, dev, cfg->channel);
#endif
- err = wldev_iovar_setbuf_bsscfg(dev, "join", ext_join_params, join_params_size,
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+ wl_ext_get_sec(dev, 0, sec, sizeof(sec));
if (cfg->rcc_enabled) {
- WL_MSG(dev->name, "Connecting with " MACDBG " ssid \"%s\","
- " len (%d) with rcc channels. chan_cnt:%d \n\n",
+ WL_MSG(dev->name, "Connecting with " MACDBG " ssid \"%s\", len (%d), "
+ "sec=%s, with rcc channels. chan_cnt:%d \n\n",
MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)),
- ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len, chan_cnt);
+ ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len, sec, chan_cnt);
} else {
- WL_MSG(dev->name, "Connecting with " MACDBG " ssid \"%s\","
- "len (%d) channel=%d\n\n",
+ WL_MSG(dev->name, "Connecting with " MACDBG " ssid \"%s\", len (%d), "
+ "sec=%s, channel=%d\n\n",
MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)),
- ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len, cfg->channel);
+ ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len, sec,
+ cfg->channel);
}
SUPP_LOG(("[%s] Connecting with " MACDBG " ssid \"%s\","
"channel:%d rcc:%d\n",
dev->name, MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)),
ext_join_params->ssid.SSID, cfg->channel, cfg->rcc_enabled));
+ err = wldev_iovar_setbuf_bsscfg(dev, "join", ext_join_params, join_params_size,
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
MFREE(cfg->osh, ext_join_params, join_params_size);
if (err) {
wl_clr_drv_status(cfg, CONNECTING, dev);
dhd_dev_set_blacklist_bssid(dev, NULL, 0, true);
/* Flush whitelist ssid content */
dhd_dev_set_whitelist_ssid(dev, NULL, 0, true);
-#endif /* OEM_ANDROID && ROAMEXP_SUPPORT */
- memset(&join_params, 0, sizeof(join_params));
+#endif /* ROAMEXP_SUPPORT */
+ bzero(&join_params, sizeof(join_params));
join_params_size = sizeof(join_params.ssid);
- join_params.ssid.SSID_len = min(sizeof(join_params.ssid.SSID), sme->ssid_len);
+ join_params.ssid.SSID_len = (uint32)min(sizeof(join_params.ssid.SSID), sme->ssid_len);
memcpy(&join_params.ssid.SSID, sme->ssid, join_params.ssid.SSID_len);
join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
wl_update_prof(cfg, dev, NULL, &join_params.ssid, WL_PROF_SSID);
}
if (!err)
wl_cfg80211_check_in4way(cfg, dev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY,
- WL_EXT_STATUS_CONNECTING, NULL);
+ WL_EXT_STATUS_CONNECTING, NULL);
#ifdef DBG_PKT_MON
if ((dev == bcmcfg_to_prmry_ndev(cfg)) && !err) {
DHD_DBG_PKT_MON_START(dhdp);
return err;
}
-#define WAIT_FOR_DISCONNECT_MAX 10
+static void wl_cfg80211_disconnect_state_sync(struct bcm_cfg80211 *cfg, struct net_device *dev)
+{
+ struct wireless_dev *wdev;
+ uint8 wait_cnt;
+
+ if (!dev || !dev->ieee80211_ptr) {
+ WL_ERR(("wrong ndev\n"));
+ return;
+ }
+
+ wdev = dev->ieee80211_ptr;
+ wait_cnt = WAIT_FOR_DISCONNECT_STATE_SYNC;
+ while ((wdev->current_bss) && wait_cnt) {
+ WL_DBG(("Waiting for disconnect sync, wait_cnt: %d\n", wait_cnt));
+ wait_cnt--;
+ OSL_SLEEP(50);
+ }
+
+ if (wait_cnt == 0) {
+ /* state didn't get cleared within given timeout */
+ WL_INFORM_MEM(("cfg80211 state. wdev->current_bss non null\n"));
+ } else {
+ WL_MEM(("cfg80211 disconnect state sync done\n"));
+ }
+
+}
+
static void wl_cfg80211_wait_for_disconnection(struct bcm_cfg80211 *cfg, struct net_device *dev)
{
uint8 wait_cnt;
+ u32 status = 0;
wait_cnt = WAIT_FOR_DISCONNECT_MAX;
- while (wl_get_drv_status(cfg, DISCONNECTING, dev) && wait_cnt) {
+ while ((status = wl_get_drv_status(cfg, DISCONNECTING, dev)) && wait_cnt) {
WL_DBG(("Waiting for disconnection, wait_cnt: %d\n", wait_cnt));
wait_cnt--;
OSL_SLEEP(50);
}
+ WL_INFORM_MEM(("Wait for disconnection done. status:%d wait_cnt:%d\n", status, wait_cnt));
+ if (!wait_cnt && wl_get_drv_status(cfg, DISCONNECTING, dev)) {
+ /* No response from firmware. Indicate connect result
+ * to clear cfg80211 state machine
+ */
+ WL_INFORM_MEM(("force send connect result\n"));
+ CFG80211_CONNECT_RESULT(dev, NULL, NULL, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ wl_clr_drv_status(cfg, DISCONNECTING, dev);
+ }
return;
}
scb_val_t scbval;
bool act = false;
s32 err = 0;
- u8 *curbssid;
+ u8 *curbssid = NULL;
+ u8 null_bssid[ETHER_ADDR_LEN];
+ s32 bssidx = 0;
dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
WL_MSG(dev->name, "Reason %d\n", reason_code);
curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
BCM_REFERENCE(dhdp);
+ DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_START),
+ dhd_net2idx(dhdp->info, dev), reason_code);
+#ifdef DHD_4WAYM4_FAIL_DISCONNECT
+ dhd_cleanup_m4_state_work(dhdp, dhd_net2idx(dhdp->info, dev));
+#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
#ifdef ESCAN_RESULT_PATCH
- if (wl_get_drv_status(cfg, CONNECTING, dev) && curbssid &&
- (memcmp(curbssid, connect_req_bssid, ETHER_ADDR_LEN) == 0)) {
- WL_ERR(("Disconnecting from connecting device: " MACDBG "\n",
- MAC2STRDBG(curbssid)));
+ if (wl_get_drv_status(cfg, CONNECTING, dev)) {
+ if (curbssid) {
+ WL_ERR(("Disconnecting while CONNECTING status"
+ " connecting device: " MACDBG "\n", MAC2STRDBG(curbssid)));
+ } else {
+ WL_ERR(("Disconnecting while CONNECTING status \n"));
+ }
act = true;
}
#endif /* ESCAN_RESULT_PATCH */
+ if (!curbssid) {
+ WL_ERR(("Disconnecting while CONNECTING status %d\n", (int)sizeof(null_bssid)));
+ bzero(null_bssid, sizeof(null_bssid));
+ curbssid = null_bssid;
+ }
+
if (act) {
#ifdef DBG_PKT_MON
/* Stop packet monitor */
/*
* Cancel ongoing scan to sync up with sme state machine of cfg80211.
*/
-#if !defined(ESCAN_RESULT_PATCH)
/* Let scan aborted by F/W */
if (cfg->scan_request) {
WL_TRACE_HW4(("Aborting the scan! \n"));
- wl_notify_escan_complete(cfg, dev, true, true);
+ wl_cfg80211_cancel_scan(cfg);
}
-#endif /* ESCAN_RESULT_PATCH */
+ /* Set DISCONNECTING state. We are clearing this state in all exit paths */
+ wl_set_drv_status(cfg, DISCONNECTING, dev);
if (wl_get_drv_status(cfg, CONNECTING, dev) ||
wl_get_drv_status(cfg, CONNECTED, dev)) {
- wl_set_drv_status(cfg, DISCONNECTING, dev);
scbval.val = reason_code;
memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
scbval.val = htod32(scbval.val);
if (unlikely(err)) {
wl_clr_drv_status(cfg, DISCONNECTING, dev);
WL_ERR(("error (%d)\n", err));
- return err;
+ goto exit;
}
wl_cfg80211_check_in4way(cfg, dev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY|WAIT_DISCONNECTED,
WL_EXT_STATUS_DISCONNECTING, NULL);
}
#endif /* WPS_SYNC */
wl_cfg80211_wait_for_disconnection(cfg, dev);
+ } else {
+ /* Not in connecting or connected state. However since disconnect came
+ * from upper layer, indicate connect fail to clear any state mismatch
+ */
+ WL_INFORM_MEM(("act is false. report connect result fail.\n"));
+ CFG80211_CONNECT_RESULT(dev, NULL, NULL, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL);
}
#ifdef CUSTOM_SET_CPUCORE
/* set default cpucore */
cfg->rssi = 0; /* reset backup of rssi */
+exit:
+	/* Clear IEs for disassoc */
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find index failed\n"));
+ err = -EINVAL;
+ return err;
+ }
+ WL_ERR(("Clearing disconnect IEs \n"));
+ err = wl_cfg80211_set_mgmt_vndr_ies(cfg,
+ ndev_to_cfgdev(dev), bssidx, VNDR_IE_DISASSOC_FLAG, NULL, 0);
+
return err;
}
s32 bssidx;
s32 mode = wl_get_mode_by_netdev(cfg, dev);
+ WL_MSG(dev->name, "key index (%d)\n", key_idx);
if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
return BCME_ERROR;
}
- memset(&key, 0, sizeof(key));
+ bzero(&key, sizeof(key));
key.index = (u32) key_idx;
if (!ETHER_ISMULTI(mac_addr))
key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
key.iv_initialized = true;
}
-
- switch (params->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- key.algo = CRYPTO_ALGO_WEP1;
- WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
- break;
- case WLAN_CIPHER_SUITE_WEP104:
- key.algo = CRYPTO_ALGO_WEP128;
- WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- key.algo = CRYPTO_ALGO_TKIP;
- WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
- break;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- key.algo = CRYPTO_ALGO_AES_CCM;
- WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- key.algo = CRYPTO_ALGO_AES_CCM;
- WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
- break;
-#ifdef BCMWAPI_WPI
- case WLAN_CIPHER_SUITE_SMS4:
- key.algo = CRYPTO_ALGO_SMS4;
- WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
- break;
-#endif // endif
- default:
+ key.algo = wl_rsn_cipher_wsec_key_algo_lookup(params->cipher);
+ if (key.algo == CRYPTO_ALGO_OFF) { //not found.
WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
return -EINVAL;
}
struct net_info *iter, *next;
if (name == NULL) {
+ WL_ERR(("Iface name is not provided\n"));
return NULL;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
if (iter->ndev) {
if (strcmp(iter->ndev->name, name) == 0) {
return iter->ndev->ieee80211_ptr;
}
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+
WL_DBG(("Iface %s not found\n", name));
return NULL;
}
s32 bssidx = 0;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
s32 mode = wl_get_mode_by_netdev(cfg, dev);
- WL_DBG(("key index (%d)\n", key_idx));
+#ifdef WL_GCMP
+ uint32 algos = 0, mask = 0;
+#endif /* WL_GCMP */
+#if defined(WLAN_CIPHER_SUITE_PMK)
+ int j;
+ wsec_pmk_t pmk;
+ char keystring[WSEC_MAX_PSK_LEN + 1];
+ char* charptr = keystring;
+ u16 len;
+ struct wl_security *sec;
+#endif /* defined(WLAN_CIPHER_SUITE_PMK) */
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+ WL_INFORM_MEM(("key index (%d) (0x%x)\n", key_idx, params->cipher));
RETURN_EIO_IF_NOT_UP(cfg);
if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
wl_add_keyext(wiphy, dev, key_idx, mac_addr, params);
goto exit;
}
- memset(&key, 0, sizeof(key));
+
+ BCM_REFERENCE(dhdp);
+ DHD_STATLOG_CTRL(dhdp, ST(INSTALL_KEY), dhd_net2idx(dhdp->info, dev), 0);
+
+ bzero(&key, sizeof(key));
/* Clear any buffered wep key */
- memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key));
+ bzero(&cfg->wep_key, sizeof(struct wl_wsec_key));
key.len = (u32) params->key_len;
key.index = (u32) key_idx;
return -EINVAL;
}
memcpy(key.data, params->key, key.len);
+
key.flags = WL_PRIMARY_KEY;
+
+ key.algo = wl_rsn_cipher_wsec_key_algo_lookup(params->cipher);
+ val = wl_rsn_cipher_wsec_algo_lookup(params->cipher);
+ if (val == WSEC_NONE) {
+ WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+#if defined(WLAN_CIPHER_SUITE_PMK)
+ /* WLAN_CIPHER_SUITE_PMK is not NL80211 standard ,but BRCM proprietary cipher suite.
+ * so it doesn't have right algo type too. Just for now, bypass this check for
+ * backward compatibility.
+ * TODO: deprecate this proprietary way and replace to nl80211 set_pmk API.
+ */
+ if (params->cipher != WLAN_CIPHER_SUITE_PMK)
+#endif /* defined(WLAN_CIPHER_SUITE_PMK) */
+ return -EINVAL;
+ }
switch (params->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- key.algo = CRYPTO_ALGO_WEP1;
- val = WEP_ENABLED;
- WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
- break;
- case WLAN_CIPHER_SUITE_WEP104:
- key.algo = CRYPTO_ALGO_WEP128;
- val = WEP_ENABLED;
- WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
- break;
case WLAN_CIPHER_SUITE_TKIP:
- key.algo = CRYPTO_ALGO_TKIP;
- val = TKIP_ENABLED;
/* wpa_supplicant switches the third and fourth quarters of the TKIP key */
if (mode == WL_MODE_BSS) {
bcopy(&key.data[24], keybuf, sizeof(keybuf));
}
WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
break;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- key.algo = CRYPTO_ALGO_AES_CCM;
- val = AES_ENABLED;
- WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- key.algo = CRYPTO_ALGO_AES_CCM;
- val = AES_ENABLED;
- WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
- break;
-#ifdef BCMWAPI_WPI
- case WLAN_CIPHER_SUITE_SMS4:
- key.algo = CRYPTO_ALGO_SMS4;
- WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
- val = SMS4_ENABLED;
- break;
-#endif /* BCMWAPI_WPI */
#if defined(WLAN_CIPHER_SUITE_PMK)
- case WLAN_CIPHER_SUITE_PMK: {
- int j;
- wsec_pmk_t pmk;
- char keystring[WSEC_MAX_PSK_LEN + 1];
- char* charptr = keystring;
- uint len;
- struct wl_security *sec;
-
+ case WLAN_CIPHER_SUITE_PMK:
sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
- if (sec->wpa_auth == WLAN_AKM_SUITE_8021X) {
+
+ WL_MEM(("set_pmk: wpa_auth:%x akm:%x\n", sec->wpa_auth, params->cipher));
+ /* Avoid pmk set for SAE and OWE for external supplicant case. */
+ if (IS_AKM_SAE(sec->wpa_auth) || IS_AKM_OWE(sec->wpa_auth)) {
+ WL_INFORM_MEM(("skip pmk set for akm:%x\n", sec->wpa_auth));
+ break;
+ }
+
+ if ((sec->wpa_auth == WLAN_AKM_SUITE_8021X) ||
+ (sec->wpa_auth == WL_AKM_SUITE_SHA256_1X)) {
err = wldev_iovar_setbuf(dev, "okc_info_pmk", (const void *)params->key,
WSEC_MAX_PSK_LEN / 2, keystring, sizeof(keystring), NULL);
if (err) {
for (j = 0; j < (WSEC_MAX_PSK_LEN / 2); j++) {
charptr += snprintf(charptr, sizeof(keystring), "%02x", params->key[j]);
}
- len = strlen(keystring);
+ len = (u16)strlen(keystring);
pmk.key_len = htod16(len);
bcopy(keystring, pmk.key, len);
pmk.flags = htod16(WSEC_PASSPHRASE);
err = wldev_ioctl_set(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
- if (err)
+ if (err) {
return err;
+ }
/* Clear key length to delete key */
key.len = 0;
- } break;
+ break;
#endif /* WLAN_CIPHER_SUITE_PMK */
- default:
- WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
- return -EINVAL;
+#ifdef WL_GCMP
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ algos = KEY_ALGO_MASK(key.algo);
+ mask = algos | KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM);
+ break;
+#endif /* WL_GCMP */
+ default: /* No post processing required */
+ WL_DBG(("no post processing required (0x%x)\n", params->cipher));
+ break;
}
/* Set the new key/index */
WL_ERR(("set wsec error (%d)\n", err));
return err;
}
+#ifdef WL_GCMP
+ wl_set_wsec_info_algos(dev, algos, mask);
+#endif /* WL_GCMP */
wl_cfg80211_check_in4way(cfg, dev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY,
WL_EXT_STATUS_ADD_KEY, NULL);
-
return err;
}
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
s32 err = 0;
s32 bssidx;
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
#endif // endif
RETURN_EIO_IF_NOT_UP(cfg);
- memset(&key, 0, sizeof(key));
+ BCM_REFERENCE(dhdp);
+ DHD_STATLOG_CTRL(dhdp, ST(DELETE_KEY), dhd_net2idx(dhdp->info, dev), 0);
+ bzero(&key, sizeof(key));
key.flags = WL_PRIMARY_KEY;
key.algo = CRYPTO_ALGO_OFF;
}
WL_DBG(("key index (%d)\n", key_idx));
RETURN_EIO_IF_NOT_UP(cfg);
- memset(&key, 0, sizeof(key));
+ bzero(&key, sizeof(key));
key.index = key_idx;
swap_key_to_BE(&key);
-	memset(&params, 0, sizeof(params));
+	bzero(&params, sizeof(params));
params.key_len = (u8) min_t(u8, DOT11_MAX_KEY_SIZE, key.len);
params.key = key.data;
#endif /* MFP */
}
-static int
-wl_cfg80211_ifstats_counters_cb(void *ctx, const uint8 *data, uint16 type, uint16 len)
+static bool
+wl_check_assoc_state(struct bcm_cfg80211 *cfg, struct net_device *dev)
{
- switch (type) {
- case WL_IFSTATS_XTLV_IF_INDEX:
- WL_DBG(("Stats received on interface index: %d\n", *data));
- break;
- case WL_IFSTATS_XTLV_GENERIC: {
- if (len > sizeof(wl_if_stats_t)) {
- WL_INFORM(("type 0x%x: cntbuf length too long! %d > %d\n",
- type, len, (int)sizeof(wl_if_stats_t)));
- }
- memcpy(ctx, data, sizeof(wl_if_stats_t));
- break;
- }
- default:
- WL_DBG(("Unsupported counter type 0x%x\n", type));
- break;
+ wl_assoc_info_t asinfo;
+ uint32 state = 0;
+ int err;
+
+ err = wldev_iovar_getbuf_bsscfg(dev, "assoc_info",
+ NULL, 0, cfg->ioctl_buf, WLC_IOCTL_MEDLEN, 0, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("failed to get assoc_info : err=%d\n", err));
+ return FALSE;
+ } else {
+ memcpy(&asinfo, cfg->ioctl_buf, sizeof(wl_assoc_info_t));
+ state = dtoh32(asinfo.state);
+ WL_DBG(("assoc state=%d\n", state));
}
- return BCME_OK;
+ return (state > 0)? TRUE:FALSE;
}
-/* Parameters to if_counters iovar need to be converted to XTLV format
- * before sending to FW. The length of the top level XTLV container
- * containing parameters should not exceed 228 bytes
- */
-#define IF_COUNTERS_PARAM_CONTAINER_LEN_MAX 228
-
-int
-wl_cfg80211_ifstats_counters(struct net_device *dev, wl_if_stats_t *if_stats)
+static s32
+wl_cfg80211_get_rssi(struct net_device *dev, struct bcm_cfg80211 *cfg, s32 *rssi)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- uint8 *pbuf = NULL;
- bcm_xtlvbuf_t xtlvbuf, local_xtlvbuf;
- bcm_xtlv_t *xtlv;
- uint16 expected_resp_len;
- wl_stats_report_t *request = NULL, *response = NULL;
- int bsscfg_idx;
- int ret = BCME_OK;
+ s32 err = BCME_OK;
+ scb_val_t scb_val;
+#ifdef SUPPORT_RSSI_SUM_REPORT
+ wl_rssi_ant_mimo_t rssi_ant_mimo;
+#endif /* SUPPORT_RSSI_SUM_REPORT */
- pbuf = (uint8 *)MALLOCZ(dhdp->osh, WLC_IOCTL_MEDLEN);
- if (!pbuf) {
- WL_ERR(("Failed to allocate local pbuf\n"));
- return BCME_NOMEM;
+ if (dev == NULL || cfg == NULL) {
+ return BCME_ERROR;
}
- /* top level container length cannot exceed 228 bytes.
- * This is because the output buffer is 1535 bytes long.
- * Allow 1300 bytes for reporting stats coming in XTLV format
+ /* initialize rssi */
+ *rssi = 0;
+
+#ifdef SUPPORT_RSSI_SUM_REPORT
+ /* Query RSSI sum across antennas */
+ bzero(&rssi_ant_mimo, sizeof(rssi_ant_mimo));
+ err = wl_get_rssi_per_ant(dev, dev->name, NULL, &rssi_ant_mimo);
+ if (err) {
+ WL_ERR(("Could not get rssi sum (%d)\n", err));
+ /* set rssi to zero and do not return error,
+ * because iovar phy_rssi_ant could return BCME_UNSUPPORTED
+ * when bssid was null during roaming
+ */
+ err = BCME_OK;
+ } else {
+ cfg->rssi_sum_report = TRUE;
+ if ((*rssi = rssi_ant_mimo.rssi_sum) >= 0) {
+ *rssi = 0;
+ }
+ }
+#endif /* SUPPORT_RSSI_SUM_REPORT */
+
+ /* if SUPPORT_RSSI_SUM_REPORT works once, do not use legacy method anymore */
+ if (cfg->rssi_sum_report == FALSE) {
+ bzero(&scb_val, sizeof(scb_val));
+ scb_val.val = 0;
+ err = wldev_ioctl_get(dev, WLC_GET_RSSI, &scb_val,
+ sizeof(scb_val_t));
+ if (err) {
+ WL_ERR(("Could not get rssi (%d)\n", err));
+ return err;
+ }
+#if defined(RSSIOFFSET)
+ *rssi = wl_update_rssi_offset(dev, dtoh32(scb_val.val));
+#else
+ *rssi = dtoh32(scb_val.val);
+#endif
+ }
+
+ if (*rssi >= 0) {
+ /* check assoc status including roaming */
+ DHD_OS_WAKE_LOCK((dhd_pub_t *)(cfg->pub));
+ if (wl_get_drv_status(cfg, CONNECTED, dev) && wl_check_assoc_state(cfg, dev)) {
+ *rssi = cfg->rssi; /* use previous RSSI */
+ WL_DBG(("use previous RSSI %d dBm\n", cfg->rssi));
+ } else {
+ *rssi = 0;
+ }
+ DHD_OS_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
+ } else {
+ /* backup the current rssi */
+ cfg->rssi = *rssi;
+ }
+
+ return err;
+}
+
+static int
+wl_cfg80211_ifstats_counters_cb(void *ctx, const uint8 *data, uint16 type, uint16 len)
+{
+ switch (type) {
+ case WL_IFSTATS_XTLV_IF_INDEX:
+ WL_DBG(("Stats received on interface index: %d\n", *data));
+ break;
+ case WL_IFSTATS_XTLV_GENERIC: {
+ if (len > sizeof(wl_if_stats_t)) {
+ WL_INFORM(("type 0x%x: cntbuf length too long! %d > %d\n",
+ type, len, (int)sizeof(wl_if_stats_t)));
+ }
+ memcpy(ctx, data, sizeof(wl_if_stats_t));
+ break;
+ }
+ default:
+ WL_DBG(("Unsupported counter type 0x%x\n", type));
+ break;
+ }
+
+ return BCME_OK;
+}
+
+/* Parameters to if_counters iovar need to be converted to XTLV format
+ * before sending to FW. The length of the top level XTLV container
+ * containing parameters should not exceed 228 bytes
+ */
+#define IF_COUNTERS_PARAM_CONTAINER_LEN_MAX 228
+
+int
+wl_cfg80211_ifstats_counters(struct net_device *dev, wl_if_stats_t *if_stats)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ uint8 *pbuf = NULL;
+ bcm_xtlvbuf_t xtlvbuf, local_xtlvbuf;
+ bcm_xtlv_t *xtlv;
+ uint16 expected_resp_len;
+ wl_stats_report_t *request = NULL, *response = NULL;
+ int bsscfg_idx;
+ int ret = BCME_OK;
+
+ pbuf = (uint8 *)MALLOCZ(dhdp->osh, WLC_IOCTL_MEDLEN);
+ if (!pbuf) {
+ WL_ERR(("Failed to allocate local pbuf\n"));
+ return BCME_NOMEM;
+ }
+
+ /* top level container length cannot exceed 228 bytes.
+ * This is because the output buffer is 1535 bytes long.
+ * Allow 1300 bytes for reporting stats coming in XTLV format
*/
request = (wl_stats_report_t *)
MALLOCZ(dhdp->osh, IF_COUNTERS_PARAM_CONTAINER_LEN_MAX);
}
#undef IF_COUNTERS_PARAM_CONTAINER_LEN_MAX
-static bool
-wl_check_assoc_state(struct bcm_cfg80211 *cfg, struct net_device *dev)
-{
- wl_assoc_info_t asinfo;
- uint32 state = 0;
- int err;
-
- err = wldev_iovar_getbuf_bsscfg(dev, "assoc_info",
- NULL, 0, cfg->ioctl_buf, WLC_IOCTL_MEDLEN, 0, &cfg->ioctl_buf_sync);
- if (unlikely(err)) {
- WL_ERR(("failed to get assoc_info : err=%d\n", err));
- return FALSE;
- } else {
- memcpy(&asinfo, cfg->ioctl_buf, sizeof(wl_assoc_info_t));
- state = dtoh32(asinfo.state);
- WL_DBG(("assoc state=%d\n", state));
- }
-
- return (state > 0)? TRUE:FALSE;
-}
-
-static s32
-wl_cfg80211_get_rssi(struct net_device *dev, struct bcm_cfg80211 *cfg, s32 *rssi)
-{
- s32 err = BCME_OK;
- scb_val_t scb_val;
-#ifdef SUPPORT_RSSI_SUM_REPORT
- wl_rssi_ant_mimo_t rssi_ant_mimo;
-#endif /* SUPPORT_RSSI_SUM_REPORT */
-
- if (dev == NULL || cfg == NULL) {
- return BCME_ERROR;
- }
-
- /* initialize rssi */
- *rssi = 0;
-
-#ifdef SUPPORT_RSSI_SUM_REPORT
- /* Query RSSI sum across antennas */
- memset(&rssi_ant_mimo, 0, sizeof(rssi_ant_mimo));
- err = wl_get_rssi_per_ant(dev, dev->name, NULL, &rssi_ant_mimo);
- if (err) {
- WL_ERR(("Could not get rssi sum (%d)\n", err));
- /* set rssi to zero and do not return error,
- * because iovar phy_rssi_ant could return BCME_UNSUPPORTED
- * when bssid was null during roaming
- */
- err = BCME_OK;
- } else {
- cfg->rssi_sum_report = TRUE;
- if ((*rssi = rssi_ant_mimo.rssi_sum) >= 0) {
- *rssi = 0;
- }
- }
-#endif /* SUPPORT_RSSI_SUM_REPORT */
-
- /* if SUPPORT_RSSI_SUM_REPORT works once, do not use legacy method anymore */
- if (cfg->rssi_sum_report == FALSE) {
- memset(&scb_val, 0, sizeof(scb_val));
- scb_val.val = 0;
- err = wldev_ioctl_get(dev, WLC_GET_RSSI, &scb_val,
- sizeof(scb_val_t));
- if (err) {
- WL_ERR(("Could not get rssi (%d)\n", err));
- return err;
- }
-#if defined(RSSIOFFSET)
- *rssi = wl_update_rssi_offset(dev, dtoh32(scb_val.val));
-#else
- *rssi = dtoh32(scb_val.val);
-#endif
- }
-
- if (*rssi >= 0) {
- /* check assoc status including roaming */
- DHD_OS_WAKE_LOCK((dhd_pub_t *)(cfg->pub));
- if (wl_get_drv_status(cfg, CONNECTED, dev) && wl_check_assoc_state(cfg, dev)) {
- *rssi = cfg->rssi; /* use previous RSSI */
- WL_DBG(("use previous RSSI %d dBm\n", cfg->rssi));
- } else {
- *rssi = 0;
- }
- DHD_OS_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
- } else {
- /* backup the current rssi */
- cfg->rssi = *rssi;
- }
-
- return err;
-}
-
static s32
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
- const u8 *mac, struct station_info *sinfo)
+ const u8 *mac, struct station_info *sinfo)
#else
wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac, struct station_info *sinfo)
+ u8 *mac, struct station_info *sinfo)
#endif // endif
{
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
s32 rssi = 0;
- s32 rate;
+ s32 rate = 0;
s32 err = 0;
- sta_info_v4_t *sta;
- s32 mode;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) || defined(WL_COMPAT_WIRELESS)
+ u16 wl_iftype = 0;
+ u16 wl_mode = 0;
+ get_pktcnt_t pktcnt;
+ wl_if_stats_t *if_stats = NULL;
+ sta_info_v4_t *sta = NULL;
+ u8 *curmacp = NULL;
s8 eabuf[ETHER_ADDR_STR_LEN];
-#endif // endif
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
bool fw_assoc_state = FALSE;
u32 dhd_assoc_state = 0;
void *buf;
- static int err_cnt = 0;
RETURN_EIO_IF_NOT_UP(cfg);
- mode = wl_get_mode_by_netdev(cfg, dev);
- if (mode < 0) {
- return -ENODEV;
+ if (cfg80211_to_wl_iftype(dev->ieee80211_ptr->iftype, &wl_iftype, &wl_mode) < 0) {
+ return -EINVAL;
}
buf = MALLOC(cfg->osh, MAX(sizeof(wl_if_stats_t), WLC_IOCTL_SMLEN));
if (buf == NULL) {
- WL_ERR(("%s(%d): MALLOC failed\n", __FUNCTION__, __LINE__));
+ WL_ERR(("wl_cfg80211_get_station: MALLOC failed\n"));
goto error;
}
- if (mode == WL_MODE_AP) {
- err = wldev_iovar_getbuf(dev, "sta_info", (const void*)mac,
- ETHER_ADDR_LEN, buf, WLC_IOCTL_SMLEN, NULL);
- if (err < 0) {
- WL_ERR(("GET STA INFO failed, %d\n", err));
- goto error;
- }
- sinfo->filled = STA_INFO_BIT(INFO_INACTIVE_TIME);
- sta = (sta_info_v4_t *)buf;
- if (sta->ver != WL_STA_VER_4 && sta->ver != WL_STA_VER_5) {
- WL_ERR(("GET STA INFO version mismatch, %d\n", err));
- return BCME_VERSION;
- }
- sta->len = dtoh16(sta->len);
- sta->cap = dtoh16(sta->cap);
- sta->flags = dtoh32(sta->flags);
- sta->idle = dtoh32(sta->idle);
- sta->in = dtoh32(sta->in);
- sinfo->inactive_time = sta->idle * 1000;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) || defined(WL_COMPAT_WIRELESS)
- if (sta->flags & WL_STA_ASSOC) {
- sinfo->filled |= STA_INFO_BIT(INFO_CONNECTED_TIME);
- sinfo->connected_time = sta->in;
- }
- WL_INFORM_MEM(("[%s] STA %s : idle time : %d sec, connected time :%d ms\n",
- dev->name, bcm_ether_ntoa((const struct ether_addr *)mac, eabuf),
- sinfo->inactive_time, sta->idle * 1000));
-#endif // endif
- } else if ((mode == WL_MODE_BSS) || (mode == WL_MODE_IBSS)) {
- get_pktcnt_t pktcnt;
- wl_if_stats_t *if_stats = NULL;
- u8 *curmacp;
-
- if (cfg->roam_offload) {
- struct ether_addr bssid;
- memset(&bssid, 0, sizeof(bssid));
- err = wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
- if (err) {
- WL_ERR(("Failed to get current BSSID\n"));
- } else {
- if (memcmp(mac, &bssid.octet, ETHER_ADDR_LEN) != 0) {
- /* roaming is detected */
- err = wl_cfg80211_delayed_roam(cfg, dev, &bssid);
- if (err)
- WL_ERR(("Failed to handle the delayed roam, "
- "err=%d", err));
- mac = (u8 *)bssid.octet;
+
+ switch (wl_iftype) {
+ case WL_IF_TYPE_STA:
+ case WL_IF_TYPE_IBSS:
+ if (cfg->roam_offload) {
+ struct ether_addr bssid;
+ bzero(&bssid, sizeof(bssid));
+ err = wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
+ if (err) {
+ WL_ERR(("Failed to get current BSSID\n"));
+ } else {
+ if (memcmp(mac, &bssid.octet, ETHER_ADDR_LEN) != 0) {
+ /* roaming is detected */
+ err = wl_cfg80211_delayed_roam(cfg, dev, &bssid);
+ if (err)
+ WL_ERR(("Failed to handle the delayed"
+ " roam, err=%d", err));
+ mac = (u8 *)bssid.octet;
+ }
}
}
- }
- dhd_assoc_state = wl_get_drv_status(cfg, CONNECTED, dev);
- DHD_OS_WAKE_LOCK(dhd);
- fw_assoc_state = dhd_is_associated(dhd, 0, &err);
- if (dhd_assoc_state && !fw_assoc_state) {
- /* check roam (join) status */
- if (wl_check_assoc_state(cfg, dev)) {
- fw_assoc_state = TRUE;
- WL_DBG(("roam status\n"));
- }
- }
- DHD_OS_WAKE_UNLOCK(dhd);
- if (!dhd_assoc_state || !fw_assoc_state) {
- WL_ERR(("NOT assoc\n"));
- if (err == -ENODATA)
- goto error;
- if (!dhd_assoc_state) {
- WL_TRACE_HW4(("drv state is not connected \n"));
- }
- if (!fw_assoc_state) {
- WL_TRACE_HW4(("fw state is not associated \n"));
+ dhd_assoc_state = wl_get_drv_status(cfg, CONNECTED, dev);
+ DHD_OS_WAKE_LOCK(dhd);
+ fw_assoc_state = dhd_is_associated(dhd, 0, &err);
+ if (dhd_assoc_state && !fw_assoc_state) {
+ /* check roam (join) status */
+ if (wl_check_assoc_state(cfg, dev)) {
+ fw_assoc_state = TRUE;
+ WL_DBG(("roam status\n"));
+ }
}
- /* Disconnect due to fw is not associated for FW_ASSOC_WATCHDOG_TIME ms.
- * 'err == 0' of dhd_is_associated() and '!fw_assoc_state'
- * means that BSSID is null.
- */
- if (dhd_assoc_state && !fw_assoc_state && !err) {
- if (!fw_assoc_watchdog_started) {
- fw_assoc_watchdog_ms = OSL_SYSUPTIME();
- fw_assoc_watchdog_started = TRUE;
- WL_TRACE_HW4(("fw_assoc_watchdog_started \n"));
- } else {
- if (OSL_SYSUPTIME() - fw_assoc_watchdog_ms >
- FW_ASSOC_WATCHDOG_TIME) {
+ DHD_OS_WAKE_UNLOCK(dhd);
+ if (!dhd_assoc_state || !fw_assoc_state) {
+ WL_ERR(("NOT assoc\n"));
+ if (err == -ENODATA)
+ goto error;
+ if (!dhd_assoc_state) {
+ WL_TRACE_HW4(("drv state is not connected \n"));
+ }
+ if (!fw_assoc_state) {
+ WL_TRACE_HW4(("fw state is not associated \n"));
+ }
+ /* Disconnect due to fw is not associated for
+ * FW_ASSOC_WATCHDOG_TIME ms.
+ * 'err == 0' of dhd_is_associated() and '!fw_assoc_state'
+ * means that BSSID is null.
+ */
+ if (dhd_assoc_state && !fw_assoc_state && !err) {
+ if (!fw_assoc_watchdog_started) {
+ fw_assoc_watchdog_ms = OSL_SYSUPTIME();
+ fw_assoc_watchdog_started = TRUE;
+ WL_TRACE_HW4(("fw_assoc_watchdog_started \n"));
+ } else if (OSL_SYSUPTIME() - fw_assoc_watchdog_ms >
+ FW_ASSOC_WATCHDOG_TIME) {
fw_assoc_watchdog_started = FALSE;
err = -ENODEV;
WL_TRACE_HW4(("fw is not associated for %d ms \n",
goto get_station_err;
}
}
+ err = -ENODEV;
+ goto error;
}
- err = -ENODEV;
- goto error;
- }
- if (dhd_is_associated(dhd, 0, NULL)) {
- fw_assoc_watchdog_started = FALSE;
- }
- curmacp = wl_read_prof(cfg, dev, WL_PROF_BSSID);
- if (memcmp(mac, curmacp, ETHER_ADDR_LEN)) {
- WL_ERR(("Wrong Mac address: "MACDBG" != "MACDBG"\n",
- MAC2STRDBG(mac), MAC2STRDBG(curmacp)));
- }
-
- /* Report the current tx rate */
- rate = 0;
- err = wldev_ioctl_get(dev, WLC_GET_RATE, &rate, sizeof(rate));
- if (err) {
- WL_ERR(("Could not get rate (%d)\n", err));
- } else {
+ if (dhd_is_associated(dhd, 0, NULL)) {
+ fw_assoc_watchdog_started = FALSE;
+ }
+ curmacp = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+ if (memcmp(mac, curmacp, ETHER_ADDR_LEN)) {
+ WL_ERR(("Wrong Mac address: "MACDBG" != "MACDBG"\n",
+ MAC2STRDBG(mac), MAC2STRDBG(curmacp)));
+ }
+		/* fall through to gather additional information */
+ case WL_IF_TYPE_P2P_GC:
+ case WL_IF_TYPE_P2P_DISC:
+ if ((err = wl_cfg80211_get_rssi(dev, cfg, &rssi)) != BCME_OK) {
+ goto get_station_err;
+ }
+#if defined(RSSIAVG)
+ err = wl_update_connected_rssi_cache(dev, &cfg->g_connected_rssi_cache_ctrl, &rssi);
+ if (err) {
+ WL_ERR(("Could not get rssi (%d)\n", err));
+ goto get_station_err;
+ }
+ wl_delete_dirty_rssi_cache(&cfg->g_connected_rssi_cache_ctrl);
+ wl_reset_rssi_cache(&cfg->g_connected_rssi_cache_ctrl);
+#endif
+#if defined(RSSIOFFSET)
+ rssi = wl_update_rssi_offset(dev, rssi);
+#endif
+#if !defined(RSSIAVG) && !defined(RSSIOFFSET)
+ // terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
+ rssi = MIN(rssi, RSSI_MAXVAL);
+#endif
+ sinfo->filled |= STA_INFO_BIT(INFO_SIGNAL);
+ sinfo->signal = rssi;
+ WL_DBG(("RSSI %d dBm\n", rssi));
+		/* fall through to gather additional information */
+ case WL_IF_TYPE_P2P_GO:
+ /* Report the current tx rate */
+ rate = 0;
+ err = wldev_ioctl_get(dev, WLC_GET_RATE, &rate, sizeof(rate));
+ if (err) {
+ WL_ERR(("Could not get rate (%d)\n", err));
+ } else {
#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
- int rxpktglom;
+ int rxpktglom;
#endif // endif
- rate = dtoh32(rate);
- sinfo->filled |= STA_INFO_BIT(INFO_TX_BITRATE);
- sinfo->txrate.legacy = rate * 5;
- WL_DBG(("Rate %d Mbps\n", (rate / 2)));
+ rate = dtoh32(rate);
+ sinfo->filled |= STA_INFO_BIT(INFO_TX_BITRATE);
+ sinfo->txrate.legacy = rate * 5;
+ WL_DBG(("Rate %d Mbps\n", (rate / 2)));
#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
- rxpktglom = ((rate/2) > 150) ? 20 : 10;
-
- if (maxrxpktglom != rxpktglom) {
- maxrxpktglom = rxpktglom;
- WL_DBG(("Rate %d Mbps, update bus:maxtxpktglom=%d\n", (rate/2),
- maxrxpktglom));
- err = wldev_iovar_setbuf(dev, "bus:maxtxpktglom",
- (char*)&maxrxpktglom, 4, cfg->ioctl_buf,
- WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
- if (err < 0) {
- WL_ERR(("set bus:maxtxpktglom failed, %d\n", err));
+ rxpktglom = ((rate/2) > 150) ? 20 : 10;
+
+ if (maxrxpktglom != rxpktglom) {
+ maxrxpktglom = rxpktglom;
+ WL_DBG(("Rate %d Mbps, update bus:"
+ "maxtxpktglom=%d\n", (rate/2), maxrxpktglom));
+ err = wldev_iovar_setbuf(dev, "bus:maxtxpktglom",
+ (char*)&maxrxpktglom, 4, cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (err < 0) {
+ WL_ERR(("set bus:maxtxpktglom failed, %d\n", err));
+ }
}
- }
#endif // endif
- }
-
- if ((err = wl_cfg80211_get_rssi(dev, cfg, &rssi)) != BCME_OK) {
- goto get_station_err;
- }
-#if defined(RSSIAVG)
- err = wl_update_connected_rssi_cache(dev, &cfg->g_connected_rssi_cache_ctrl, &rssi);
- if (err) {
- WL_ERR(("Could not get rssi (%d)\n", err));
- goto get_station_err;
- }
- wl_delete_dirty_rssi_cache(&cfg->g_connected_rssi_cache_ctrl);
- wl_reset_rssi_cache(&cfg->g_connected_rssi_cache_ctrl);
-#endif
-#if defined(RSSIOFFSET)
- rssi = wl_update_rssi_offset(dev, rssi);
-#endif
-#if !defined(RSSIAVG) && !defined(RSSIOFFSET)
- // terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
- rssi = MIN(rssi, RSSI_MAXVAL);
-#endif
- sinfo->filled |= STA_INFO_BIT(INFO_SIGNAL);
- sinfo->signal = rssi;
- WL_DBG(("RSSI %d dBm\n", rssi));
-
- if_stats = (wl_if_stats_t *)buf;
- memset(if_stats, 0, sizeof(*if_stats));
-
- if (FW_SUPPORTED(dhd, ifst)) {
- err = wl_cfg80211_ifstats_counters(dev, if_stats);
- } else
- {
- err = wldev_iovar_getbuf(dev, "if_counters", NULL, 0,
- (char *)if_stats, sizeof(*if_stats), NULL);
- }
-
- if (err) {
- // WL_ERR(("if_counters not supported ret=%d\n", err));
- memset(&pktcnt, 0, sizeof(pktcnt));
- err = wldev_ioctl_get(dev, WLC_GET_PKTCNTS, &pktcnt,
- sizeof(pktcnt));
- if (!err) {
- sinfo->rx_packets = pktcnt.rx_good_pkt;
- sinfo->rx_dropped_misc = pktcnt.rx_bad_pkt;
- sinfo->tx_packets = pktcnt.tx_good_pkt;
- sinfo->tx_failed = pktcnt.tx_bad_pkt;
}
- } else {
- sinfo->rx_packets = (uint32)dtoh64(if_stats->rxframe);
- sinfo->rx_dropped_misc = 0;
- sinfo->tx_packets = (uint32)dtoh64(if_stats->txfrmsnt);
- sinfo->tx_failed = (uint32)dtoh64(if_stats->txnobuf) +
- (uint32)dtoh64(if_stats->txrunt) +
- (uint32)dtoh64(if_stats->txfail);
- }
-
- sinfo->filled |= (STA_INFO_BIT(INFO_RX_PACKETS) |
- STA_INFO_BIT(INFO_RX_DROP_MISC) |
- STA_INFO_BIT(INFO_TX_PACKETS) |
- STA_INFO_BIT(INFO_TX_FAILED));
+ if_stats = (wl_if_stats_t *)buf;
+ bzero(if_stats, sizeof(*if_stats));
+ if (FW_SUPPORTED(dhd, ifst)) {
+ err = wl_cfg80211_ifstats_counters(dev, if_stats);
+ } else
+ {
+ err = wldev_iovar_getbuf(dev, "if_counters", NULL, 0,
+ (char *)if_stats, sizeof(*if_stats), NULL);
+ }
+ if (err) {
+// WL_ERR(("if_counters not supported ret=%d\n", err));
+ bzero(&pktcnt, sizeof(pktcnt));
+ err = wldev_ioctl_get(dev, WLC_GET_PKTCNTS, &pktcnt,
+ sizeof(pktcnt));
+ if (!err) {
+ sinfo->rx_packets = pktcnt.rx_good_pkt;
+ sinfo->rx_dropped_misc = pktcnt.rx_bad_pkt;
+ sinfo->tx_packets = pktcnt.tx_good_pkt;
+ sinfo->tx_failed = pktcnt.tx_bad_pkt;
+ }
+ } else {
+ sinfo->rx_packets = (uint32)dtoh64(if_stats->rxframe);
+ sinfo->rx_dropped_misc = 0;
+ sinfo->tx_packets = (uint32)dtoh64(if_stats->txfrmsnt);
+ sinfo->tx_failed = (uint32)dtoh64(if_stats->txnobuf) +
+ (uint32)dtoh64(if_stats->txrunt) +
+ (uint32)dtoh64(if_stats->txfail);
+ }
+
+ sinfo->filled |= (STA_INFO_BIT(INFO_RX_PACKETS) |
+ STA_INFO_BIT(INFO_RX_DROP_MISC) |
+ STA_INFO_BIT(INFO_TX_PACKETS) |
+ STA_INFO_BIT(INFO_TX_FAILED));
get_station_err:
- if (err)
- err_cnt++;
- else
- err_cnt = 0;
- if (err_cnt >= 3 && (err != -ENODATA)) {
- /* Disconnect due to zero BSSID or error to get RSSI */
- scb_val_t scbval;
- scbval.val = htod32(DOT11_RC_DISASSOC_LEAVING);
- err = wldev_ioctl_set(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
- if (unlikely(err)) {
- WL_ERR(("disassoc error (%d)\n", err));
- }
+ if (err && (err != -ENODATA)) {
+ /* Disconnect due to zero BSSID or error to get RSSI */
+ scb_val_t scbval;
+ DHD_STATLOG_CTRL(dhd, ST(DISASSOC_INT_START),
+ dhd_net2idx(dhd->info, dev), DOT11_RC_DISASSOC_LEAVING);
+ scbval.val = htod32(DOT11_RC_DISASSOC_LEAVING);
+ err = wldev_ioctl_set(dev, WLC_DISASSOC, &scbval,
+ sizeof(scb_val_t));
+ if (unlikely(err)) {
+ WL_ERR(("disassoc error (%d)\n", err));
+ }
- WL_ERR(("force cfg80211_disconnected: %d\n", err));
- wl_clr_drv_status(cfg, CONNECTED, dev);
- CFG80211_DISCONNECTED(dev, 0, NULL, 0, false, GFP_KERNEL);
- wl_link_down(cfg);
- }
- }
- else {
- WL_ERR(("Invalid device mode %d\n", wl_get_mode_by_netdev(cfg, dev)));
+ WL_ERR(("force cfg80211_disconnected: %d\n", err));
+ wl_clr_drv_status(cfg, CONNECTED, dev);
+ DHD_STATLOG_CTRL(dhd, ST(DISASSOC_DONE),
+ dhd_net2idx(dhd->info, dev), DOT11_RC_DISASSOC_LEAVING);
+ CFG80211_DISCONNECTED(dev, 0, NULL, 0, false, GFP_KERNEL);
+ wl_link_down(cfg);
+ }
+ break;
+ case WL_IF_TYPE_AP:
+ err = wldev_iovar_getbuf(dev, "sta_info", (const void*)mac,
+ ETHER_ADDR_LEN, buf, WLC_IOCTL_SMLEN, NULL);
+ if (err < 0) {
+ WL_ERR(("GET STA INFO failed, %d\n", err));
+ goto error;
+ }
+ sinfo->filled = STA_INFO_BIT(INFO_INACTIVE_TIME);
+ sta = (sta_info_v4_t *)buf;
+ if (sta->ver != WL_STA_VER_4 && sta->ver != WL_STA_VER_5) {
+ WL_ERR(("GET STA INFO version mismatch, %d\n", err));
+ return BCME_VERSION;
+ }
+ sta->len = dtoh16(sta->len);
+ sta->cap = dtoh16(sta->cap);
+ sta->flags = dtoh32(sta->flags);
+ sta->idle = dtoh32(sta->idle);
+ sta->in = dtoh32(sta->in);
+ sinfo->inactive_time = sta->idle * 1000;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) || defined(WL_COMPAT_WIRELESS)
+ if (sta->flags & WL_STA_ASSOC) {
+ sinfo->filled |= STA_INFO_BIT(INFO_CONNECTED_TIME);
+ sinfo->connected_time = sta->in;
+ }
+#endif // endif
+ WL_INFORM_MEM(("STA %s, flags 0x%x, idle time %ds, connected time %ds\n",
+ bcm_ether_ntoa((const struct ether_addr *)mac, eabuf),
+ sta->flags, sta->idle, sta->in));
+ break;
+ default :
+ WL_ERR(("Invalid device mode %d\n", wl_get_mode_by_netdev(cfg, dev)));
}
error:
if (buf) {
MFREE(cfg->osh, buf, MAX(sizeof(wl_if_stats_t), WLC_IOCTL_SMLEN));
}
+
return err;
}
rtt_status_info_t *rtt_status;
#endif /* RTT_SUPPORT */
dhd_pub_t *dhd = cfg->pub;
-
RETURN_EIO_IF_NOT_UP(cfg);
WL_DBG(("Enter\n"));
err = wldev_ioctl_get(dev, WLC_GET_PM, &pm, sizeof(pm));
if (err)
- WL_ERR(("%s:error (%d)\n", __FUNCTION__, err));
+ WL_ERR(("error (%d)\n", err));
else if (pm != -1 && dev->ieee80211_ptr)
dev->ieee80211_ptr->ps = (pm == PM_OFF) ? false : true;
}
-void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
-
- if (strcmp(command, "SCAN-ACTIVE") == 0) {
- cfg->active_scan = 1;
- } else if (strcmp(command, "SCAN-PASSIVE") == 0) {
- cfg->active_scan = 0;
- } else
- WL_ERR(("Unknown command \n"));
- return;
-}
-
static __used u32 wl_find_msb(u16 bit16)
{
u32 ret = 0;
if (iter->ndev)
wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev);
}
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
if (cfg->scan_request) {
wl_notify_scan_done(cfg, true);
cfg->scan_request = NULL;
wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev);
}
}
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
for_each_ndev(cfg, iter, next) {
if (iter->ndev) {
if (wl_get_drv_status(cfg, CONNECTING, iter->ndev)) {
s32 err)
{
int i, j;
- u8 ioctl_buf[WLC_IOCTL_SMLEN];
- wl_wlc_version_t* cur_wl_ver = NULL;
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
- int npmkids = (cfg->pmk_list->pmkids.length - (sizeof(u16)*2)) / sizeof(pmkid_v2_t);
- uint16 wlc_ver = 0;
+ int npmkids = cfg->pmk_list->pmkids.count;
ASSERT(cfg->pmk_list->pmkids.length >= (sizeof(u16)*2));
if (!pmk_list) {
WL_DBG(("No of elements %d\n", npmkids));
for (i = 0; i < npmkids; i++) {
WL_DBG(("PMKID[%d]: %pM =\n", i,
- &pmk_list->pmkids.pmkid[i].BSSID));
+ &pmk_list->pmkids.pmkid[i].bssid));
for (j = 0; j < WPA2_PMKID_LEN; j++) {
- WL_DBG(("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]));
+ WL_DBG(("%02x\n", pmk_list->pmkids.pmkid[i].pmkid[j]));
}
}
- /* Check version first */
- err = wldev_iovar_getbuf(dev, "wlc_ver", NULL, 0,
- ioctl_buf, sizeof(ioctl_buf), NULL);
- if (likely(!err)) {
- cur_wl_ver = (wl_wlc_version_t *)ioctl_buf;
- wlc_ver = cur_wl_ver->wlc_ver_major;
- }
- else {
- WL_DBG(("Querying version error: %d\n", err));
-// return err;
- }
- if (wlc_ver >= WL_MIN_PMKID_LIST_V2_FW_MAJOR) {
- pmk_list->pmkids.version = PMKID_LIST_VER_2;
+ if (cfg->wlc_ver.wlc_ver_major >= MIN_PMKID_LIST_V3_FW_MAJOR) {
+ pmk_list->pmkids.version = PMKID_LIST_VER_3;
err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmk_list,
sizeof(*pmk_list), cfg->ioctl_buf,
WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
}
+ else if (cfg->wlc_ver.wlc_ver_major == MIN_PMKID_LIST_V2_FW_MAJOR) {
+ u32 v2_list_size = (u32)(sizeof(pmkid_list_v2_t) + npmkids*sizeof(pmkid_v2_t));
+ pmkid_list_v2_t *pmkid_v2_list = (pmkid_list_v2_t *)MALLOCZ(cfg->osh, v2_list_size);
+
+ if (pmkid_v2_list == NULL) {
+ WL_ERR(("failed to allocate pmkid list\n"));
+ return BCME_NOMEM;
+ }
+
+ pmkid_v2_list->version = PMKID_LIST_VER_2;
+ /* Account for version, length and pmkid_v2_t fields */
+ pmkid_v2_list->length = (npmkids * sizeof(pmkid_v2_t)) + (2 * sizeof(u16));
+
+ for (i = 0; i < npmkids; i++) {
+ /* memcpy_s return checks not needed as buffers are of same size */
+ (void)memcpy_s(&pmkid_v2_list->pmkid[i].BSSID,
+ ETHER_ADDR_LEN, &pmk_list->pmkids.pmkid[i].bssid,
+ ETHER_ADDR_LEN);
+
+ /* copy pmkid if available */
+ if (pmk_list->pmkids.pmkid[i].pmkid_len) {
+ (void)memcpy_s(pmkid_v2_list->pmkid[i].PMKID,
+ WPA2_PMKID_LEN,
+ pmk_list->pmkids.pmkid[i].pmkid,
+ pmk_list->pmkids.pmkid[i].pmkid_len);
+ }
+
+ if (pmk_list->pmkids.pmkid[i].pmk_len) {
+ (void)memcpy_s(pmkid_v2_list->pmkid[i].pmk,
+ pmk_list->pmkids.pmkid[i].pmk_len,
+ pmk_list->pmkids.pmkid[i].pmk,
+ pmk_list->pmkids.pmkid[i].pmk_len);
+ pmkid_v2_list->pmkid[i].pmk_len = pmk_list->pmkids.pmkid[i].pmk_len;
+ }
+
+ if (pmk_list->pmkids.pmkid[i].ssid_len) {
+ (void)memcpy_s(pmkid_v2_list->pmkid[i].ssid.ssid,
+ pmk_list->pmkids.pmkid[i].ssid_len,
+ pmk_list->pmkids.pmkid[i].ssid,
+ pmk_list->pmkids.pmkid[i].ssid_len);
+ pmkid_v2_list->pmkid[i].ssid.ssid_len
+ = pmk_list->pmkids.pmkid[i].ssid_len;
+ }
+
+ (void)memcpy_s(pmkid_v2_list->pmkid[i].fils_cache_id,
+ FILS_CACHE_ID_LEN, &pmk_list->pmkids.pmkid[i].fils_cache_id,
+ FILS_CACHE_ID_LEN);
+ pmkid_v2_list->pmkid[i].length = PMKID_ELEM_V2_LENGTH;
+ }
+ err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmkid_v2_list,
+ v2_list_size, cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("pmkid_info failed (%d)\n", err));
+ }
+
+ MFREE(cfg->osh, pmkid_v2_list, v2_list_size);
+ }
else {
- u32 v1_list_size = sizeof(pmkid_list_v1_t) + npmkids*sizeof(pmkid_v1_t);
+ u32 v1_list_size = (u32)(sizeof(pmkid_list_v1_t) + npmkids*sizeof(pmkid_v1_t));
pmkid_list_v1_t *pmkid_v1_list = (pmkid_list_v1_t *)MALLOCZ(cfg->osh, v1_list_size);
if (pmkid_v1_list == NULL) {
WL_ERR(("failed to allocate pmkid list\n"));
return BCME_NOMEM;
}
for (i = 0; i < npmkids; i++) {
- memcpy(&pmkid_v1_list->pmkid[i].BSSID, &pmk_list->pmkids.pmkid[i].BSSID,
- ETHER_ADDR_LEN);
- memcpy(pmkid_v1_list->pmkid[i].PMKID, pmk_list->pmkids.pmkid[i].PMKID,
- WPA2_PMKID_LEN);
+ /* memcpy_s return checks not needed as buffers are of same size */
+ (void)memcpy_s(&pmkid_v1_list->pmkid[i].BSSID,
+ ETHER_ADDR_LEN, &pmk_list->pmkids.pmkid[i].bssid,
+ ETHER_ADDR_LEN);
+ (void)memcpy_s(pmkid_v1_list->pmkid[i].PMKID,
+ WPA2_PMKID_LEN, pmk_list->pmkids.pmkid[i].pmkid,
+ WPA2_PMKID_LEN);
pmkid_v1_list->npmkid++;
}
err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmkid_v1_list,
return err;
}
+/* TODO: remove the temporary cfg->pmk_list list, and call wl_cfg80211_update_pmksa for
+ * single entry operation.
+ */
static s32
wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_pmksa *pmksa)
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
s32 err = 0;
int i;
- int npmkids = (cfg->pmk_list->pmkids.length - (sizeof(uint16) * 2)) / sizeof(pmkid_v2_t);
+ int npmkids = cfg->pmk_list->pmkids.count;
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
RETURN_EIO_IF_NOT_UP(cfg);
+ BCM_REFERENCE(dhdp);
+ DHD_STATLOG_CTRL(dhdp, ST(INSTALL_PMKSA), dhd_net2idx(dhdp->info, dev), 0);
for (i = 0; i < npmkids; i++) {
if (pmksa->bssid != NULL) {
- if (!memcmp(pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID,
+ if (!memcmp(pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].bssid,
ETHER_ADDR_LEN))
break;
}
#ifdef WL_FILS
else if (pmksa->ssid != NULL) {
- if (!memcmp(pmksa->ssid, &cfg->pmk_list->pmkids.pmkid[i].ssid.ssid,
+ if (!memcmp(pmksa->ssid, &cfg->pmk_list->pmkids.pmkid[i].ssid,
pmksa->ssid_len))
break;
}
}
if (i < WL_NUM_PMKIDS_MAX) {
if (pmksa->bssid != NULL) {
- memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID, pmksa->bssid,
+ memcpy(&cfg->pmk_list->pmkids.pmkid[i].bssid, pmksa->bssid,
ETHER_ADDR_LEN);
}
#ifdef WL_FILS
else if (pmksa->ssid != NULL) {
- cfg->pmk_list->pmkids.pmkid[i].pmk_len = pmksa->pmk_len;
- memcpy(&cfg->pmk_list->pmkids.pmkid[i].pmk, pmksa->pmk,
- pmksa->pmk_len);
- cfg->pmk_list->pmkids.pmkid[i].ssid.ssid_len = pmksa->ssid_len;
- memcpy(&cfg->pmk_list->pmkids.pmkid[i].ssid.ssid, pmksa->ssid,
+ cfg->pmk_list->pmkids.pmkid[i].ssid_len = pmksa->ssid_len;
+ memcpy(&cfg->pmk_list->pmkids.pmkid[i].ssid, pmksa->ssid,
pmksa->ssid_len);
memcpy(&cfg->pmk_list->pmkids.pmkid[i].fils_cache_id, pmksa->cache_id,
FILS_CACHE_ID_LEN);
}
#endif /* WL_FILS */
- memcpy(cfg->pmk_list->pmkids.pmkid[i].PMKID, pmksa->pmkid,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) || defined(WL_FILS))
+ if (pmksa->pmk_len) {
+ if (memcpy_s(&cfg->pmk_list->pmkids.pmkid[i].pmk, PMK_LEN_MAX, pmksa->pmk,
+ pmksa->pmk_len)) {
+ WL_ERR(("invalid pmk len = %lu", pmksa->pmk_len));
+ } else {
+ cfg->pmk_list->pmkids.pmkid[i].pmk_len = pmksa->pmk_len;
+ }
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) || defined(WL_FILS) */
+ /* return check not required as buffer lengths are same */
+ (void)memcpy_s(cfg->pmk_list->pmkids.pmkid[i].pmkid, WPA2_PMKID_LEN, pmksa->pmkid,
WPA2_PMKID_LEN);
- cfg->pmk_list->pmkids.pmkid[i].length = PMKID_ELEM_V2_LENGTH;
+ cfg->pmk_list->pmkids.pmkid[i].pmkid_len = WPA2_PMKID_LEN;
+
+	/* set the lifetime not to expire in firmware by default.
+	 * Currently, wpa_supplicant controls the PMKID lifetime on its end, e.g. sets 12 hours;
+	 * when it expires, wpa_supplicant should call set_pmksa/del_pmksa to update the
+	 * corresponding entry.
+	 */
+ cfg->pmk_list->pmkids.pmkid[i].time_left = KEY_PERM_PMK;
if (i == npmkids) {
- cfg->pmk_list->pmkids.length += sizeof(pmkid_v2_t);
+ cfg->pmk_list->pmkids.length += sizeof(pmkid_v3_t);
+ cfg->pmk_list->pmkids.count++;
}
} else {
err = -EINVAL;
#if (WL_DBG_LEVEL > 0)
if (pmksa->bssid != NULL) {
WL_DBG(("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
- &cfg->pmk_list->pmkids.pmkid[npmkids - 1].BSSID));
+ &cfg->pmk_list->pmkids.pmkid[npmkids - 1].bssid));
}
for (i = 0; i < WPA2_PMKID_LEN; i++) {
WL_DBG(("%02x\n",
cfg->pmk_list->pmkids.pmkid[npmkids - 1].
- PMKID[i]));
+ pmkid[i]));
}
#endif /* (WL_DBG_LEVEL > 0) */
return err;
}
+/* Send the pmkid_info IOVAR to manipulate the PMKID(PMKSA) list in firmware.
+ * input @pmksa: host-given single pmksa info.
+ * If it's NULL, the whole list is manipulated, e.g. flushing all PMKIDs in firmware.
+ * input @set: TRUE means an add-PMKSA operation; FALSE means delete.
+ * return: the internal BCME_XXX error is logged; callers convert it to a generic
+ * linux error code.
+ */
+static s32 wl_cfg80211_update_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa, bool set) {
+
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = 0;
+	pmkid_list_v3_t *pmk_list;
+	uint32 alloc_len;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+
+	/* the v3 pmkid_info IOVAR is only understood by sufficiently new firmware */
+	if (cfg->wlc_ver.wlc_ver_major < MIN_PMKID_LIST_V3_FW_MAJOR) {
+		WL_ERR(("wlc_ver_major not supported:%d\n", cfg->wlc_ver.wlc_ver_major));
+		return BCME_VERSION;
+	}
+
+	/* fixed header plus, for a single-entry operation, exactly one pmkid_v3_t */
+	alloc_len = (uint32) OFFSETOF(pmkid_list_v3_t, pmkid) + ((pmksa) ? sizeof(pmkid_v3_t) : 0);
+	pmk_list = (pmkid_list_v3_t *)MALLOCZ(cfg->osh, alloc_len);
+
+	if (pmk_list == NULL) {
+		return BCME_NOMEM;
+	}
+
+	pmk_list->version = PMKID_LIST_VER_3;
+	pmk_list->length = alloc_len;
+	pmk_list->count = (pmksa) ? 1 : 0; // 1 means single entry operation, 0 means whole list.
+
+	if (pmksa) {
+		/* control set/del action by the lifetime parameter accordingly.
+		 * if set == TRUE, it's a set PMKID action with a permanent lifetime.
+		 * if set == FALSE, it's a del PMKID action with a zero lifetime.
+		 * Only touch pmk_list->pmkid when a single entry was actually allocated
+		 * (pmksa != NULL); otherwise the store would write past the end of the
+		 * header-only buffer allocated above.
+		 */
+		pmk_list->pmkid->time_left = (set) ? KEY_PERM_PMK : 0;
+		if (pmksa->bssid) {
+			err = memcpy_s(&pmk_list->pmkid->bssid, sizeof(pmk_list->pmkid->bssid),
+				pmksa->bssid, ETHER_ADDR_LEN);
+			if (err) {
+				goto exit;
+			}
+		}
+		if (pmksa->pmkid) {
+			err = memcpy_s(&pmk_list->pmkid->pmkid, sizeof(pmk_list->pmkid->pmkid),
+				pmksa->pmkid, WPA2_PMKID_LEN);
+			if (err) {
+				goto exit;
+			}
+		}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+		if (pmksa->pmk) {
+			err = memcpy_s(&pmk_list->pmkid->pmk, sizeof(pmk_list->pmkid->pmk),
+				pmksa->pmk, pmksa->pmk_len);
+			if (err) {
+				goto exit;
+			}
+			pmk_list->pmkid->pmk_len = pmksa->pmk_len;
+		}
+		if (pmksa->ssid) {
+			err = memcpy_s(&pmk_list->pmkid->ssid, sizeof(pmk_list->pmkid->ssid),
+				pmksa->ssid, pmksa->ssid_len);
+			if (err) {
+				goto exit;
+			}
+			pmk_list->pmkid->ssid_len = pmksa->ssid_len;
+		}
+		if (pmksa->cache_id) {
+			pmk_list->pmkid->fils_cache_id = *(uint16 *)pmksa->cache_id;
+		}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
+	}
+	err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmk_list,
+		alloc_len, cfg->ioctl_buf,
+		WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+exit:
+	if (pmk_list) {
+		MFREE(cfg->osh, pmk_list, alloc_len);
+	}
+	return err;
+}
+
+/* TODO: remove the temporary cfg->pmk_list list, and call wl_cfg80211_update_pmksa for
+ * single entry operation.
+ */
static s32
wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_pmksa *pmksa)
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
s32 err = 0;
int i;
- int npmkids = (cfg->pmk_list->pmkids.length - sizeof(uint16)*2) / sizeof(pmkid_v2_t);
+ int npmkids = cfg->pmk_list->pmkids.count;
RETURN_EIO_IF_NOT_UP(cfg);
+ if (!pmksa) {
+ WL_ERR(("pmksa is not initialized\n"));
+ return BCME_ERROR;
+ }
+ if (!npmkids) {
+ /* nmpkids = 0, nothing to delete */
+ WL_DBG(("npmkids=0. Skip del\n"));
+ return BCME_OK;
+ }
+
#if (WL_DBG_LEVEL > 0)
if (pmksa->bssid) {
WL_DBG(("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
WL_DBG(("\n"));
}
#endif /* WL_FILS */
- for (i = 0; i < WPA2_PMKID_LEN; i++) {
- WL_DBG(("%02x\n", pmksa->pmkid[i]));
+ if (pmksa->pmkid) {
+ for (i = 0; i < WPA2_PMKID_LEN; i++) {
+ WL_DBG(("%02x\n", pmksa->pmkid[i]));
+ }
}
#endif /* (WL_DBG_LEVEL > 0) */
for (i = 0; i < npmkids; i++) {
if (pmksa->bssid) {
if (!memcmp
- (pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID,
+ (pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].bssid,
ETHER_ADDR_LEN)) {
break;
}
#ifdef WL_FILS
else if (pmksa->ssid) {
if (!memcmp
- (pmksa->ssid, &cfg->pmk_list->pmkids.pmkid[i].ssid.ssid,
+ (pmksa->ssid, &cfg->pmk_list->pmkids.pmkid[i].ssid,
pmksa->ssid_len)) {
break;
}
}
#endif /* WL_FILS */
}
-
if ((npmkids > 0) && (i < npmkids)) {
- memset(&cfg->pmk_list->pmkids.pmkid[i], 0, sizeof(pmkid_v2_t));
+ bzero(&cfg->pmk_list->pmkids.pmkid[i], sizeof(pmkid_v3_t));
for (; i < (npmkids - 1); i++) {
- memcpy(&cfg->pmk_list->pmkids.pmkid[i],
+ (void)memcpy_s(&cfg->pmk_list->pmkids.pmkid[i],
+ sizeof(pmkid_v3_t),
&cfg->pmk_list->pmkids.pmkid[i + 1],
- sizeof(pmkid_v2_t));
+ sizeof(pmkid_v3_t));
}
npmkids--;
- cfg->pmk_list->pmkids.length -= sizeof(pmkid_v2_t);
+ cfg->pmk_list->pmkids.length -= sizeof(pmkid_v3_t);
+ cfg->pmk_list->pmkids.count--;
+
} else {
err = -EINVAL;
}
+	/* current wl_update_pmklist() doesn't delete the corresponding PMKID entry
+	 * inside firmware, so we need to issue the delete action explicitly through
+	 * this function.
+	 */
+ err = wl_cfg80211_update_pmksa(wiphy, dev, pmksa, FALSE);
+	/* intentionally fall through even on error.
+	 * it should work on firmware at or above MIN_PMKID_LIST_V3_FW_MAJOR; otherwise ignore it.
+	 */
+
err = wl_update_pmklist(dev, cfg->pmk_list, err);
return err;
}
+/* TODO: remove the temporary cfg->pmk_list list, and call wl_cfg80211_update_pmksa for
+ * single entry operation.
+ */
static s32
wl_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev)
{
	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
	s32 err = 0;
	RETURN_EIO_IF_NOT_UP(cfg);
-	memset(cfg->pmk_list, 0, sizeof(*cfg->pmk_list));
-	cfg->pmk_list->pmkids.length = sizeof(pmkid_list_v2_t) - sizeof(pmkid_v2_t);
-	cfg->pmk_list->pmkids.version = PMKID_LIST_VER_2;
+	/* reset the host-side PMKID cache to an empty v3 list (header only, zero
+	 * entries), then hand it to wl_update_pmklist() which pushes it to firmware.
+	 */
+	bzero(cfg->pmk_list, sizeof(*cfg->pmk_list));
+	cfg->pmk_list->pmkids.length = OFFSETOF(pmkid_list_v3_t, pmkid);
+	cfg->pmk_list->pmkids.count = 0;
+	cfg->pmk_list->pmkids.version = PMKID_LIST_VER_3;
	err = wl_update_pmklist(dev, cfg->pmk_list, err);
	return err;
}
-static wl_scan_params_t *
-wl_cfg80211_scan_alloc_params(struct bcm_cfg80211 *cfg, int channel, int nprobes,
- int *out_params_size)
-{
- wl_scan_params_t *params;
- int params_size;
- int num_chans;
-
- *out_params_size = 0;
-
- /* Our scan params only need space for 1 channel and 0 ssids */
- params_size = WL_SCAN_PARAMS_FIXED_SIZE + 1 * sizeof(uint16);
- params = (wl_scan_params_t *)MALLOCZ(cfg->osh, params_size);
- if (params == NULL) {
- WL_ERR(("mem alloc failed (%d bytes)\n", params_size));
- return params;
- }
- memset(params, 0, params_size);
- params->nprobes = nprobes;
-
- num_chans = (channel == 0) ? 0 : 1;
-
- memcpy(¶ms->bssid, ðer_bcast, ETHER_ADDR_LEN);
- params->bss_type = DOT11_BSSTYPE_ANY;
- params->scan_type = DOT11_SCANTYPE_ACTIVE;
- params->nprobes = htod32(1);
- params->active_time = htod32(-1);
- params->passive_time = htod32(-1);
- params->home_time = htod32(10);
- if (channel == -1)
- params->channel_list[0] = htodchanspec(channel);
- else
- params->channel_list[0] = wl_ch_host_to_driver(channel);
-
- /* Our scan params have 1 channel and 0 ssids */
- params->channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
- (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
-
- *out_params_size = params_size; /* rtn size to the caller */
- return params;
-}
-
static s32
-#if defined(WL_CFG80211_P2P_DEV_IF)
-wl_cfg80211_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
- struct ieee80211_channel *channel, unsigned int duration, u64 *cookie)
-#else
wl_cfg80211_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
- struct ieee80211_channel * channel,
+ struct ieee80211_channel *channel,
+#if !defined(WL_CFG80211_P2P_DEV_IF)
enum nl80211_channel_type channel_type,
- unsigned int duration, u64 *cookie)
#endif /* WL_CFG80211_P2P_DEV_IF */
+ unsigned int duration, u64 *cookie)
{
s32 target_channel;
u32 id;
struct net_device *ndev = NULL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ RETURN_EIO_IF_NOT_UP(cfg);
#ifdef DHD_IFDEBUG
PRINT_WDEV_INFO(cfgdev);
#endif /* DHD_IFDEBUG */
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
-#ifdef WL_NAN
- if (wl_cfgnan_check_state(cfg)) {
- WL_ERR(("nan is enabled, nan + p2p concurrency not supported\n"));
- return BCME_UNSUPPORTED;
- }
-#endif /* WL_NAN */
-
mutex_lock(&cfg->usr_sync);
WL_DBG(("Enter, channel: %d, duration ms (%d) SCANNING ?? %s \n",
ieee80211_frequency_to_channel(channel->center_freq),
#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
if (wl_get_drv_status_all(cfg, SCANNING)) {
- wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+ wl_cfg80211_cancel_scan(cfg);
}
#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
if (wl_get_drv_status(cfg, SCANNING, ndev)) {
- struct timer_list *_timer;
+ timer_list_compat_t *_timer;
WL_DBG(("scan is running. go to fake listen state\n"));
if (duration > LONG_LISTEN_TIME) {
* without turning on P2P
*/
get_primary_mac(cfg, &primary_mac);
+#ifndef WL_P2P_USE_RANDMAC
wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
+#endif /* WL_P2P_USE_RANDMAC */
p2p_on(cfg) = true;
}
wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
} else {
- WL_ERR(("%s : ignore, request cookie(%llu) is not matched. (cur : %llu)\n",
- __FUNCTION__, cookie, cfg->last_roc_id));
+ WL_ERR(("wl_cfg80211_cancel_remain_on_channel: ignore, request cookie(%llu)"
+ " is not matched. (cur : %llu)\n",
+ cookie, cfg->last_roc_id));
}
return err;
bool search_channel; /* 1: search peer's channel to send af */
};
+#ifdef WL_DISABLE_HE_P2P
+/* Disable HE (11ax) on the P2P connection bsscfg when the peer does not
+ * advertise HE capability.
+ * @wiphy: wiphy whose cfg80211 BSS cache holds the peer's scan entry.
+ * @peer_mac: MAC address of the P2P peer to look up.
+ * Returns BCME_OK when nothing needed doing (peer supports HE, or no IEs),
+ * BCME_ERROR when the peer BSS entry, the connection ndev, or the bsscfg
+ * index cannot be found, otherwise the result of wl_cfg80211_set_he_mode().
+ */
+static s32
+wl_cfg80211_he_p2p_disable(struct wiphy *wiphy, struct ether_addr peer_mac)
+{
+	struct cfg80211_bss *bss;
+	u8 *ie = NULL;
+	u32 ie_len = 0;
+	struct net_device *ndev = NULL;
+	s32 bssidx = 0;
+	s32 err = BCME_OK;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	/* locate the peer's cached scan result to inspect its IEs */
+	bss = CFG80211_GET_BSS(wiphy, NULL, peer_mac.octet, NULL, 0);
+	if (!bss) {
+		WL_ERR(("Could not find the Peer device\n"));
+		return BCME_ERROR;
+	} else {
+		GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(WL_CFG80211_P2P_DEV_IF)
+		ie = (u8 *)bss->ies->data;
+		ie_len = bss->ies->len;
+#else
+		ie = bss->information_elements;
+		ie_len = bss->len_information_elements;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+		GCC_DIAGNOSTIC_POP();
+	}
+	if (ie) {
+		/* no HE capability extension IE in the peer's IEs -> turn HE off */
+		if ((bcm_parse_tlvs_dot11(ie, ie_len,
+			EXT_MNG_HE_CAP_ID, TRUE)) == NULL) {
+			WL_DBG(("Peer does not support HE capability\n"));
+			ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1);
+			/* bail out on a missing ndev too; the original "ndev && ..."
+			 * form fell through to set_he_mode(NULL, ...) when ndev was NULL
+			 */
+			if (!ndev || (bssidx =
+				wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr)) < 0) {
+				WL_ERR(("Find index failed\n"));
+				err = BCME_ERROR;
+			} else {
+				WL_DBG(("Disabling HE for P2P\n"));
+				err = wl_cfg80211_set_he_mode(ndev, cfg, bssidx,
+					WL_IF_TYPE_P2P_DISC, FALSE);
+				if (err < 0) {
+					WL_ERR(("failed to set he features, error=%d\n", err));
+				}
+			}
+		} else {
+			WL_DBG(("Peer supports HE capability\n"));
+		}
+	}
+	CFG80211_PUT_BSS(wiphy, bss);
+
+	return err;
+}
+#endif /* WL_DISABLE_HE_P2P */
+
static s32
wl_cfg80211_config_p2p_pub_af_tx(struct wiphy *wiphy,
wl_action_frame_t *action_frame, wl_af_params_t *af_params,
switch (act_frm->subtype) {
case P2P_PAF_GON_REQ: {
+ /* Disable he if peer does not support before starting GONEG */
+#ifdef WL_DISABLE_HE_P2P
+ wl_cfg80211_he_p2p_disable(wiphy, action_frame->da);
+#endif /* WL_DISABLE_HE_P2P */
WL_DBG(("P2P: GO_NEG_PHASE status set \n"));
wl_set_p2p_status(cfg, GO_NEG_PHASE);
/* if scan is ongoing, abort current scan. */
if (wl_get_drv_status_all(cfg, SCANNING)) {
- wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+ wl_cfg80211_cancel_scan(cfg);
}
/* Abort P2P listen */
* but after the check of piggyback algorithm.
* To take care of current piggback algo, lets abort the scan here itself.
*/
- wl_notify_escan_complete(cfg, dev, true, true);
+ wl_cfg80211_cancel_scan(cfg);
/* Suspend P2P discovery's search-listen to prevent it from
* starting a scan or changing the channel.
*/
#if defined(P2P_IE_MISSING_FIX)
if (!cfg->p2p_prb_noti) {
cfg->p2p_prb_noti = true;
- WL_DBG(("%s: TX 802_1X Probe Response first time.\n",
- __FUNCTION__));
+ WL_DBG(("wl_cfg80211_mgmt_tx: TX 802_1X Probe"
+ " Response first time.\n"));
}
#endif // endif
goto exit;
* And previous off-channel action frame must be ended before new af tx.
*/
#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
- wl_notify_escan_complete(cfg, dev, true, true);
+ wl_cfg80211_cancel_scan(cfg);
#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
}
return err;
}
+/* Query the maximum channel bandwidth supported on @band.
+ * @ndev: net device to issue the IOVARs on.
+ * @band: IEEE80211_BAND_2GHZ or IEEE80211_BAND_5GHZ.
+ * @bandwidth: out parameter; receives a WL_CHANSPEC_BW_* value.
+ * 2 GHz (and any band not handled below) reports 20 MHz. For 5 GHz the
+ * "bw_cap" IOVAR is consulted; when firmware answers BCME_UNSUPPORTED the
+ * legacy "mimo_bw_cap" IOVAR is used instead (20 or 40 MHz only).
+ * Returns BCME_OK, or the IOVAR error (note: a failed "mimo_bw_cap" read is
+ * logged and its error propagated while *bandwidth is still written).
+ */
+static int
+wl_get_bandwidth_cap(struct net_device *ndev, uint32 band, uint32 *bandwidth)
+{
+	u32 bw = WL_CHANSPEC_BW_20;
+	s32 err = BCME_OK;
+	s32 bw_cap = 0;
+	struct {
+		u32 band;
+		u32 bw_cap;
+	} param = {0, 0};
+	u8 ioctl_buf[WLC_IOCTL_SMLEN];
+
+	if (band == IEEE80211_BAND_5GHZ) {
+		param.band = WLC_BAND_5G;
+		err = wldev_iovar_getbuf(ndev, "bw_cap", &param, sizeof(param),
+			ioctl_buf, sizeof(ioctl_buf), NULL);
+		if (err) {
+			if (err != BCME_UNSUPPORTED) {
+				WL_ERR(("bw_cap failed, %d\n", err));
+				return err;
+			} else {
+				/* older firmware: fall back to the legacy capability IOVAR */
+				err = wldev_iovar_getint(ndev, "mimo_bw_cap", &bw_cap);
+				if (err) {
+					WL_ERR(("error get mimo_bw_cap (%d)\n", err));
+				}
+				if (bw_cap != WLC_N_BW_20ALL) {
+					bw = WL_CHANSPEC_BW_40;
+				}
+			}
+		} else {
+			/* first byte of the bw_cap reply carries the per-band bitmap */
+			if (WL_BW_CAP_80MHZ(ioctl_buf[0])) {
+				bw = WL_CHANSPEC_BW_80;
+			} else if (WL_BW_CAP_40MHZ(ioctl_buf[0])) {
+				bw = WL_CHANSPEC_BW_40;
+			} else {
+				bw = WL_CHANSPEC_BW_20;
+			}
+		}
+	} else if (band == IEEE80211_BAND_2GHZ) {
+		bw = WL_CHANSPEC_BW_20;
+	}
+
+	*bandwidth = bw;
+
+	return err;
+}
+
static s32
wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan,
chanspec_t chspec = 0;
chanspec_t fw_chspec = 0;
u32 bw = WL_CHANSPEC_BW_20;
-
s32 err = BCME_OK;
- s32 bw_cap = 0;
- struct {
- u32 band;
- u32 bw_cap;
- } param = {0, 0};
- u8 ioctl_buf[WLC_IOCTL_SMLEN];
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
-#if defined(CUSTOM_SET_CPUCORE) || defined(APSTA_RESTRICTED_CHANNEL)
+#if defined(CUSTOM_SET_CPUCORE) || defined(APSTA_RESTRICTED_CHANNEL) || defined(WL_EXT_IAPSTA)
dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
#endif /* CUSTOM_SET_CPUCORE || APSTA_RESTRICTED_CHANNEL */
dev = ndev_to_wlc_ndev(dev, cfg);
_chan = ieee80211_frequency_to_channel(chan->center_freq);
#ifdef WL_EXT_IAPSTA
- _chan = wl_ext_iapsta_update_channel(dev, _chan);
+ _chan = wl_ext_iapsta_update_channel(dhd, dev, _chan);
#endif
WL_MSG(dev->name, "netdev_ifidx(%d), chan_type(%d) target channel(%d) \n",
dev->ifindex, channel_type, _chan);
#endif /* NOT_YET */
#if defined(APSTA_RESTRICTED_CHANNEL)
-#define DEFAULT_2G_SOFTAP_CHANNEL 1
-#define DEFAULT_5G_SOFTAP_CHANNEL 149
if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP &&
DHD_OPMODE_STA_SOFTAP_CONCURR(dhd) &&
wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg))) {
}
}
}
-#undef DEFAULT_2G_SOFTAP_CHANNEL
-#undef DEFAULT_5G_SOFTAP_CHANNEL
#endif /* APSTA_RESTRICTED_CHANNEL */
- if (chan->band == IEEE80211_BAND_5GHZ) {
- param.band = WLC_BAND_5G;
- err = wldev_iovar_getbuf(dev, "bw_cap", ¶m, sizeof(param),
- ioctl_buf, sizeof(ioctl_buf), NULL);
- if (err) {
- if (err != BCME_UNSUPPORTED) {
- WL_ERR(("bw_cap failed, %d\n", err));
- return err;
- } else {
- err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
- if (err) {
- WL_ERR(("error get mimo_bw_cap (%d)\n", err));
- }
- if (bw_cap != WLC_N_BW_20ALL)
- bw = WL_CHANSPEC_BW_40;
- }
- } else {
- if (WL_BW_CAP_80MHZ(ioctl_buf[0]))
- bw = WL_CHANSPEC_BW_80;
- else if (WL_BW_CAP_40MHZ(ioctl_buf[0]))
- bw = WL_CHANSPEC_BW_40;
- else
- bw = WL_CHANSPEC_BW_20;
-
- }
+ err = wl_get_bandwidth_cap(dev, chan->band, &bw);
+ if (err < 0) {
+ WL_ERR(("Failed to get bandwidth information, err=%d\n", err));
+ return err;
+ }
- } else if (chan->band == IEEE80211_BAND_2GHZ)
- bw = WL_CHANSPEC_BW_20;
set_channel:
chspec = wf_channel2chspec(_chan, bw);
if (wf_chspec_valid(chspec)) {
wl_cfg80211_get_remain_on_channel_ndev(struct bcm_cfg80211 *cfg)
{
struct net_info *_net_info, *next;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (_net_info->ndev &&
test_bit(WL_STATUS_REMAINING_ON_CHANNEL, &_net_info->sme_state))
return _net_info->ndev;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+
return NULL;
}
#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
return 0;
}
+#define MAX_FILS_IND_IE_LEN 1024u
static s32
wl_validate_fils_ind_ie(struct net_device *dev, const bcm_tlv_t *filsindie, s32 bssidx)
{
s32 err = BCME_OK;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct bcm_cfg80211 *cfg = NULL;
bcm_iov_buf_t *iov_buf = NULL;
bcm_xtlv_t* pxtlv;
- int iov_buf_size = sizeof(bcm_iov_buf_t) + sizeof(bcm_xtlv_t) + filsindie->len - 1;
- if (filsindie == NULL)
+ int iov_buf_size = 0;
+
+ if (!dev || !filsindie) {
+ WL_ERR(("%s: dev/filsidie is null\n", __FUNCTION__));
+ goto exit;
+ }
+
+ cfg = wl_get_cfg(dev);
+ if (!cfg) {
+ WL_ERR(("%s: cfg is null\n", __FUNCTION__));
goto exit;
+ }
+
+ iov_buf_size = sizeof(bcm_iov_buf_t) + sizeof(bcm_xtlv_t) + filsindie->len - 1;
iov_buf = MALLOCZ(cfg->osh, iov_buf_size);
if (!iov_buf) {
WL_ERR(("%s: iov_buf alloc failed! %d bytes\n", __FUNCTION__, iov_buf_size));
pxtlv = (bcm_xtlv_t*)&iov_buf->data[0];
pxtlv->id = WL_FILS_XTLV_IND_IE;
pxtlv->len = filsindie->len;
- memcpy(pxtlv->data, filsindie->data, filsindie->len);
+ /* memcpy_s return check not required as buffer is allocated based on ie
+ * len
+ */
+ (void)memcpy_s(pxtlv->data, filsindie->len, filsindie->data, filsindie->len);
err = wldev_iovar_setbuf(dev, "fils", iov_buf, iov_buf_size,
cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
}
exit:
-
if (err < 0) {
WL_ERR(("FILS Ind setting error %d\n", err));
}
case RSN_AKM_FILS_SHA384:
wpa_auth |= WPA2_AUTH_FILS_SHA384;
break;
+#ifdef WL_SAE
+ case RSN_AKM_SAE_PSK:
+ wpa_auth |= WPA3_AUTH_SAE_PSK;
+ break;
+#endif /* WL_SAE */
#endif /* MFP */
default:
WL_ERR(("No Key Mgmt Info\n"));
ret = WPA_AUTH_PSK;
}
break;
+#ifdef WL_SAE
+ case RSN_AKM_SAE_PSK:
+ ret = WPA3_AUTH_SAE_PSK;
+ break;
+#endif /* WL_SAE */
default:
WL_ERR(("No Key Mgmt Info\n"));
}
if (bss->rsn_ie) {
MFREE(cfg->osh, bss->rsn_ie, bss->rsn_ie[1]
+ WPA_RSN_IE_TAG_FIXED_LEN);
- bss->rsn_ie = NULL;
}
if (bss->wpa_ie) {
MFREE(cfg->osh, bss->wpa_ie, bss->wpa_ie[1]
+ WPA_RSN_IE_TAG_FIXED_LEN);
- bss->wpa_ie = NULL;
}
if (bss->wps_ie) {
MFREE(cfg->osh, bss->wps_ie, bss->wps_ie[1] + 2);
- bss->wps_ie = NULL;
}
if (bss->fils_ind_ie) {
MFREE(cfg->osh, bss->fils_ind_ie, bss->fils_ind_ie[1]
+ FILS_INDICATION_IE_TAG_FIXED_LEN);
- bss->fils_ind_ie = NULL;
}
if (ies->wpa_ie != NULL) {
/* WPAIE */
+ WPA_RSN_IE_TAG_FIXED_LEN);
}
}
+#ifdef WL_FILS
+ if (ies->fils_ind_ie) {
+ bss->fils_ind_ie = MALLOCZ(cfg->osh,
+ ies->fils_ind_ie->len
+ + FILS_INDICATION_IE_TAG_FIXED_LEN);
+ if (bss->fils_ind_ie) {
+ memcpy(bss->fils_ind_ie, ies->fils_ind_ie,
+ ies->fils_ind_ie->len
+ + FILS_INDICATION_IE_TAG_FIXED_LEN);
+ }
+ }
+#endif /* WL_FILS */
#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
}
#endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */
WL_DBG(("SSID (%s) len:%zd \n", info->ssid, info->ssid_len));
if (dev_role == NL80211_IFTYPE_AP) {
/* Store the hostapd SSID */
- memset(cfg->hostapd_ssid.SSID, 0x00, DOT11_MAX_SSID_LEN);
+ bzero(cfg->hostapd_ssid.SSID, DOT11_MAX_SSID_LEN);
memcpy(cfg->hostapd_ssid.SSID, info->ssid, info->ssid_len);
- cfg->hostapd_ssid.SSID_len = info->ssid_len;
+ cfg->hostapd_ssid.SSID_len = (uint32)info->ssid_len;
} else {
/* P2P GO */
- memset(cfg->p2p->ssid.SSID, 0x00, DOT11_MAX_SSID_LEN);
+ bzero(cfg->p2p->ssid.SSID, DOT11_MAX_SSID_LEN);
memcpy(cfg->p2p->ssid.SSID, info->ssid, info->ssid_len);
- cfg->p2p->ssid.SSID_len = info->ssid_len;
+ cfg->p2p->ssid.SSID_len = (uint32)info->ssid_len;
}
}
- if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE) {
- if ((err = wldev_iovar_setint(dev, "closednet", 1)) < 0)
- WL_ERR(("failed to set hidden : %d\n", err));
- WL_DBG(("hidden_ssid_enum_val: %d \n", info->hidden_ssid));
- }
-
return err;
}
#endif /* LINUX_VERSION >= VERSION(3,4,0) || WL_COMPAT_WIRELESS */
{
s32 err = BCME_OK;
- memset(ies, 0, sizeof(struct parsed_ies));
+ bzero(ies, sizeof(struct parsed_ies));
/* find the WPSIE */
if ((ies->wps_ie = wl_cfgp2p_find_wpsie(ptr, len)) != NULL) {
}
-bool
-wl_legacy_chip_check(struct bcm_cfg80211 *cfg)
-{
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
- uint chip;
-
- chip = dhd_conf_get_chip(dhd);
-
- if (chip == BCM43362_CHIP_ID || chip == BCM4330_CHIP_ID ||
- chip == BCM43430_CHIP_ID || chip == BCM43012_CHIP_ID ||
- chip == BCM4334_CHIP_ID || chip == BCM43340_CHIP_ID ||
- chip == BCM43341_CHIP_ID || chip == BCM4324_CHIP_ID ||
- chip == BCM4335_CHIP_ID || chip == BCM4339_CHIP_ID ||
- chip == BCM4345_CHIP_ID || chip == BCM43454_CHIP_ID ||
- chip == BCM4354_CHIP_ID || chip == BCM4356_CHIP_ID ||
- chip == BCM4371_CHIP_ID || chip == BCM4359_CHIP_ID ||
- chip == BCM43143_CHIP_ID || chip == BCM43242_CHIP_ID ||
- chip == BCM43569_CHIP_ID) {
- return true;
- }
-
- return false;
-}
-
static s32
wl_cfg80211_set_ap_role(
struct bcm_cfg80211 *cfg,
}
if (!ap) {
+#ifdef WLEASYMESH
+ WL_ERR(("ap should not be 0\n"));
+#endif
/* AP mode switch not supported. Try setting up AP explicitly */
err = wldev_iovar_getint(dev, "apsta", (s32 *)&apsta);
if (unlikely(err)) {
return err;
}
if (apsta == 0) {
+#ifdef WLEASYMESH
+ WL_ERR(("apsta should not be 0\n"));
+#endif
/* If apsta is not set, set it */
/* Check for any connected interfaces before wl down */
WL_ERR(("WLC_DOWN error %d\n", err));
return err;
}
- err = wldev_iovar_setint(dev, "apsta", 1);
+ err = wldev_iovar_setint(dev, "apsta", 0);
if (err < 0) {
WL_ERR(("wl apsta 0 error %d\n", err));
return err;
WL_ERR(("wl apsta 0 error %d\n", err));
return err;
}
- if ((err = wldev_ioctl_set(dev, WLC_SET_AP, &ap, sizeof(s32))) < 0) {
+ ap = 1;
+ if ((err = wldev_ioctl_set(dev, WLC_SET_AP, &ap, sizeof(s32))) < 0) {
WL_ERR(("setting AP mode failed %d \n", err));
return err;
}
#endif /* SOFTAP_UAPSD_OFF */
s32 err = BCME_OK;
s32 is_rsdb_supported = BCME_ERROR;
- u32 timeout;
+ long timeout;
dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ char sec[32];
is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE);
if (is_rsdb_supported < 0)
/* Do abort scan before creating GO */
wl_cfg80211_scan_abort(cfg);
+ wl_ext_get_sec(dev, 0, sec, sizeof(sec));
+ WL_MSG(dev->name, "Creating AP/GO with sec=%s\n", sec);
if (dev_role == NL80211_IFTYPE_P2P_GO) {
is_bssup = wl_cfg80211_bss_isup(dev, bssidx);
if (!is_bssup && (ies->wpa2_ie != NULL)) {
WL_DBG(("Bss is already up\n"));
} else if (dev_role == NL80211_IFTYPE_AP) {
- if (!wl_get_drv_status(cfg, AP_CREATED, dev)) {
+// if (!wl_get_drv_status(cfg, AP_CREATING, dev)) {
/* Make sure fw is in proper state */
err = wl_cfg80211_set_ap_role(cfg, dev);
if (unlikely(err)) {
WL_ERR(("set ap role failed!\n"));
goto exit;
}
- }
+// }
/* Device role SoftAP */
WL_DBG(("Creating AP bssidx:%d dev_role:%d\n", bssidx, dev_role));
}
#endif /* DISABLE_11H_SOFTAP */
+#ifdef WL_DISABLE_HE_SOFTAP
+ err = wl_cfg80211_set_he_mode(dev, cfg, bssidx, WL_IF_TYPE_AP, FALSE);
+ if (err < 0) {
+ WL_ERR(("failed to set he features, error=%d\n", err));
+ }
+#endif /* WL_DISABLE_HE_SOFTAP */
+
#ifdef SOFTAP_UAPSD_OFF
err = wldev_iovar_setbuf_bsscfg(dev, "wme_apsd", &wme_apsd, sizeof(wme_apsd),
cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
sizeof(struct wl_wsec_key), cfg->ioctl_buf,
WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
/* clear the key after use */
- memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key));
+ bzero(&cfg->wep_key, sizeof(struct wl_wsec_key));
if (unlikely(err)) {
WL_ERR(("WLC_SET_KEY error (%d)\n", err));
goto exit;
}
#endif /* MFP */
- memset(&join_params, 0, sizeof(join_params));
+ bzero(&join_params, sizeof(join_params));
/* join parameters starts with ssid */
join_params_size = sizeof(join_params.ssid);
join_params.ssid.SSID_len = MIN(cfg->hostapd_ssid.SSID_len,
err = -ERESTARTSYS;
goto exit;
}
+ if (dhd_query_bus_erros(dhdp)) {
+ err = -ENODEV;
+ goto exit;
+ }
+ dhdp->iface_op_failed = TRUE;
#if defined(DHD_DEBUG) && defined(DHD_FW_COREDUMP)
if (dhdp->memdump_enabled) {
dhdp->memdump_type = DUMP_TYPE_AP_LINKUP_FAILURE;
exit:
if (cfg->wep_key.len) {
- memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key));
+ bzero(&cfg->wep_key, sizeof(struct wl_wsec_key));
}
#ifdef MFP
}
vndr = (const u8 *)info->proberesp_ies;
- vndr_ie_len = info->proberesp_ies_len;
+ vndr_ie_len = (uint32)info->proberesp_ies_len;
if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
/* SoftAP mode */
mgmt = (const struct ieee80211_mgmt *)info->probe_resp;
if (mgmt != NULL) {
vndr = (const u8 *)&mgmt->u.probe_resp.variable;
- vndr_ie_len = info->probe_resp_len -
- offsetof(const struct ieee80211_mgmt, u.probe_resp.variable);
+ vndr_ie_len = (uint32)(info->probe_resp_len -
+ offsetof(const struct ieee80211_mgmt, u.probe_resp.variable));
}
}
/* Parse Probe Response IEs */
}
vndr = (const u8 *)info->proberesp_ies;
- vndr_ie_len = info->proberesp_ies_len;
+ vndr_ie_len = (uint32)info->proberesp_ies_len;
if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
/* SoftAP mode */
mgmt = (const struct ieee80211_mgmt *)info->probe_resp;
if (mgmt != NULL) {
vndr = (const u8 *)&mgmt->u.probe_resp.variable;
- vndr_ie_len = info->probe_resp_len -
- offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+ vndr_ie_len = (uint32)(info->probe_resp_len -
+ offsetof(struct ieee80211_mgmt, u.probe_resp.variable));
}
}
update_bss = true;
MFREE(cfg->osh, bss->rsn_ie,
bss->rsn_ie[1] + WPA_RSN_IE_TAG_FIXED_LEN);
- bss->rsn_ie = NULL;
bss->wpa_ie = MALLOCZ(cfg->osh,
ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN);
if (bss->wpa_ie) {
if (err < 0) {
WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON err %d\n", err));
}
- WL_INFORM_MEM(("Disconnect STA : " MACDBG " scb_val.val %d\n",
+ WL_MSG(dev->name, "Disconnect STA : " MACDBG " scb_val.val %d\n",
MAC2STRDBG(bcm_ether_ntoa((const struct ether_addr *)mac_addr,
- eabuf)), scb_val.val));
+ eabuf)), scb_val.val);
}
#ifdef CUSTOM_BLOCK_DEAUTH_AT_EAP_FAILURE
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
struct station_parameters *params)
#endif // endif
{
- int err;
-#if defined(DHD_LOSSLESS_ROAMING) || defined(WL_ENABLE_P2P_IF)
+ int err = BCME_OK;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
-#endif // endif
struct net_device *ndev = ndev_to_wlc_ndev(dev, cfg);
WL_DBG(("SCB_AUTHORIZE mac_addr:"MACDBG" sta_flags_mask:0x%x "
"sta_flags_set:0x%x iface:%s \n", MAC2STRDBG(mac),
params->sta_flags_mask, params->sta_flags_set, ndev->name));
+ if ((wl_get_mode_by_netdev(cfg, dev) == WL_MODE_BSS) &&
+ !(wl_get_drv_status(cfg, CONNECTED, dev))) {
+ /* Return error indicating not in connected state */
+ WL_ERR(("Ignore SCB_AUTHORIZE/DEAUTHORIZE in non connected state\n"));
+ return -ENOTSUPP;
+ }
+
/* Processing only authorize/de-authorize flag for now */
if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))) {
WL_ERR(("WLC_SCB_AUTHORIZE sta_flags_mask not set \n"));
#ifdef DHD_LOSSLESS_ROAMING
wl_del_roam_timeout(cfg);
#endif // endif
+
return err;
}
#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0)) */
dev_role = NL80211_IFTYPE_AP;
dhd->op_mode |= DHD_FLAG_HOSTAP_MODE;
err = dhd_ndo_enable(dhd, FALSE);
- WL_DBG(("%s: Disabling NDO on Hostapd mode %d\n", __FUNCTION__, err));
+ WL_DBG(("Disabling NDO on Hostapd mode %d\n", err));
if (err) {
- WL_ERR(("%s: Disabling NDO Failed %d\n", __FUNCTION__, err));
+ WL_ERR(("Disabling NDO Failed %d\n", err));
}
#ifdef PKT_FILTER_SUPPORT
/* Disable packet filter */
if (dhd->early_suspended) {
WL_ERR(("Disable pkt_filter\n"));
dhd_enable_packet_filter(0, dhd);
+#ifdef APF
+ dhd_dev_apf_disable_filter(dhd_linux_get_primary_netdev(dhd));
+#endif /* APF */
}
#endif /* PKT_FILTER_SUPPORT */
#ifdef ARP_OFFLOAD_SUPPORT
// goto fail;
}
+ wl_set_drv_status(cfg, CONNECTED, dev);
WL_DBG(("** AP/GO Created **\n"));
#ifdef WL_CFG80211_ACL
/* Set IEs to FW */
if ((err = wl_cfg80211_set_ies(dev, &info->beacon, bssidx)) < 0)
WL_ERR(("Set IEs failed \n"));
+
+#ifdef WLDWDS
+ if (dev->ieee80211_ptr->use_4addr) {
+ if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
+ VNDR_IE_ASSOCRSP_FLAG, (const u8 *)info->beacon.assocresp_ies,
+ info->beacon.assocresp_ies_len)) < 0) {
+ WL_ERR(("Set ASSOC RESP IE Failed\n"));
+ }
+ }
+#endif /* WLDWDS */
/* Enable Probe Req filter, WPS-AP certification 4.2.13 */
if ((dev_role == NL80211_IFTYPE_AP) && (ies.wps_ie != NULL)) {
}
}
+ /* Configure hidden SSID */
+ if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE) {
+ if ((err = wldev_iovar_setint(dev, "closednet", 1)) < 0)
+ WL_ERR(("failed to set hidden : %d\n", err));
+ WL_DBG(("hidden_ssid_enum_val: %d \n", info->hidden_ssid));
+ }
+
#ifdef SUPPORT_AP_RADIO_PWRSAVE
if (dev_role == NL80211_IFTYPE_AP) {
if (!wl_set_ap_rps(dev, FALSE, dev->name)) {
}
}
#endif /* SUPPORT_AP_RADIO_PWRSAVE */
-#ifdef WL_IRQSET
- dhd_irq_set_affinity(dhd);
-#endif /* WL_IRQSET */
-
fail:
if (err) {
WL_ERR(("ADD/SET beacon failed\n"));
if (dhd->early_suspended) {
WL_ERR(("Enable pkt_filter\n"));
dhd_enable_packet_filter(1, dhd);
+#ifdef APF
+ dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd));
+#endif /* APF */
}
#endif /* PKT_FILTER_SUPPORT */
#ifdef ARP_OFFLOAD_SUPPORT
WL_DBG(("Enter \n"));
+ if (wl_cfg80211_get_bus_state(cfg)) {
+ /* since bus is down, iovar will fail. recovery path will bringup the bus. */
+ WL_ERR(("bus is not ready\n"));
+ return BCME_OK;
+ }
is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE);
if (is_rsdb_supported < 0)
return (-ENODEV);
if (dhd->early_suspended) {
WL_ERR(("Enable pkt_filter\n"));
dhd_enable_packet_filter(1, dhd);
+#ifdef APF
+ dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd));
+#endif /* APF */
}
#endif /* PKT_FILTER_SUPPORT */
#ifdef ARP_OFFLOAD_SUPPORT
}
}
+#ifdef WL_DISABLE_HE_SOFTAP
+ if (wl_cfg80211_set_he_mode(dev, cfg, bssidx, WL_IF_TYPE_AP, TRUE) != BCME_OK) {
+ WL_ERR(("failed to set he features\n"));
+ }
+#endif /* WL_DISABLE_HE_SOFTAP */
+
wl_cfg80211_clear_per_bss_ies(cfg, dev->ieee80211_ptr);
#ifdef SUPPORT_AP_RADIO_PWRSAVE
if (!wl_set_ap_rps(dev, FALSE, dev->name)) {
DOT11_MNG_SSID_ID)) != NULL) {
if (dev_role == NL80211_IFTYPE_AP) {
/* Store the hostapd SSID */
- memset(&cfg->hostapd_ssid.SSID[0], 0x00, DOT11_MAX_SSID_LEN);
+ bzero(&cfg->hostapd_ssid.SSID[0], DOT11_MAX_SSID_LEN);
cfg->hostapd_ssid.SSID_len = MIN(ssid_ie->len, DOT11_MAX_SSID_LEN);
memcpy(&cfg->hostapd_ssid.SSID[0], ssid_ie->data,
cfg->hostapd_ssid.SSID_len);
} else {
/* P2P GO */
- memset(&cfg->p2p->ssid.SSID[0], 0x00, DOT11_MAX_SSID_LEN);
+ bzero(&cfg->p2p->ssid.SSID[0], DOT11_MAX_SSID_LEN);
cfg->p2p->ssid.SSID_len = MIN(ssid_ie->len, DOT11_MAX_SSID_LEN);
memcpy(cfg->p2p->ssid.SSID, ssid_ie->data,
cfg->p2p->ssid.SSID_len);
/* Set GC/STA SCB expiry timings. */
if ((err = wl_cfg80211_set_scb_timings(cfg, dev))) {
WL_ERR(("scb setting failed \n"));
+ if (err == BCME_UNSUPPORTED)
+ err = 0;
// goto fail;
}
}
WL_DBG(("** ADD/SET beacon done **\n"));
+ wl_set_drv_status(cfg, CONNECTED, dev);
fail:
if (err) {
}
#endif /* LINUX_VERSION < VERSION(3,4,0) || WL_COMPAT_WIRELESS */
-#ifdef WL_SCHED_SCAN
-#define PNO_TIME 30
-#define PNO_REPEAT 4
-#define PNO_FREQ_EXPO_MAX 2
-static bool
-is_ssid_in_list(struct cfg80211_ssid *ssid, struct cfg80211_ssid *ssid_list, int count)
-{
- int i;
-
- if (!ssid || !ssid_list)
- return FALSE;
-
- for (i = 0; i < count; i++) {
- if (ssid->ssid_len == ssid_list[i].ssid_len) {
- if (strncmp(ssid->ssid, ssid_list[i].ssid, ssid->ssid_len) == 0)
- return TRUE;
- }
- }
- return FALSE;
-}
-
-static int
-wl_cfg80211_sched_scan_start(struct wiphy *wiphy,
- struct net_device *dev,
- struct cfg80211_sched_scan_request *request)
-{
- ushort pno_time = PNO_TIME;
- int pno_repeat = PNO_REPEAT;
- int pno_freq_expo_max = PNO_FREQ_EXPO_MAX;
- wlc_ssid_ext_t ssids_local[MAX_PFN_LIST_COUNT];
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- struct cfg80211_ssid *ssid = NULL;
- struct cfg80211_ssid *hidden_ssid_list = NULL;
- log_conn_event_t *event_data = NULL;
- tlv_log *tlv_data = NULL;
- u32 alloc_len, tlv_len;
- u32 payload_len;
- int ssid_cnt = 0;
- int i;
- int ret = 0;
- unsigned long flags;
-
- if (!request) {
- WL_ERR(("Sched scan request was NULL\n"));
- return -EINVAL;
- }
-
- WL_DBG(("Enter \n"));
- WL_PNO((">>> SCHED SCAN START\n"));
- WL_PNO(("Enter n_match_sets:%d n_ssids:%d \n",
- request->n_match_sets, request->n_ssids));
- WL_PNO(("ssids:%d pno_time:%d pno_repeat:%d pno_freq:%d \n",
- request->n_ssids, pno_time, pno_repeat, pno_freq_expo_max));
-
- if (!request->n_ssids || !request->n_match_sets) {
- WL_ERR(("Invalid sched scan req!! n_ssids:%d \n", request->n_ssids));
- return -EINVAL;
- }
-
- memset(&ssids_local, 0, sizeof(ssids_local));
-
- if (request->n_ssids > 0) {
- hidden_ssid_list = request->ssids;
- }
-
- if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
- alloc_len = sizeof(log_conn_event_t) + DOT11_MAX_SSID_LEN;
- event_data = (log_conn_event_t *)MALLOC(cfg->osh, alloc_len);
- if (!event_data) {
- WL_ERR(("%s: failed to allocate log_conn_event_t with "
- "length(%d)\n", __func__, alloc_len));
- return -ENOMEM;
- }
- memset(event_data, 0, alloc_len);
- event_data->tlvs = NULL;
- tlv_len = sizeof(tlv_log);
- event_data->tlvs = (tlv_log *)MALLOC(cfg->osh, tlv_len);
- if (!event_data->tlvs) {
- WL_ERR(("%s: failed to allocate log_tlv with "
- "length(%d)\n", __func__, tlv_len));
- MFREE(cfg->osh, event_data, alloc_len);
- return -ENOMEM;
- }
- }
- for (i = 0; i < request->n_match_sets && ssid_cnt < MAX_PFN_LIST_COUNT; i++) {
- ssid = &request->match_sets[i].ssid;
- /* No need to include null ssid */
- if (ssid->ssid_len) {
- ssids_local[ssid_cnt].SSID_len = MIN(ssid->ssid_len,
- (uint32)DOT11_MAX_SSID_LEN);
- memcpy(ssids_local[ssid_cnt].SSID, ssid->ssid,
- ssids_local[ssid_cnt].SSID_len);
- if (is_ssid_in_list(ssid, hidden_ssid_list, request->n_ssids)) {
- ssids_local[ssid_cnt].hidden = TRUE;
- WL_PNO((">>> PNO hidden SSID (%s) \n", ssid->ssid));
- } else {
- ssids_local[ssid_cnt].hidden = FALSE;
- WL_PNO((">>> PNO non-hidden SSID (%s) \n", ssid->ssid));
- }
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 15, 0))
- if (request->match_sets[i].rssi_thold != NL80211_SCAN_RSSI_THOLD_OFF) {
- ssids_local[ssid_cnt].rssi_thresh =
- (int8)request->match_sets[i].rssi_thold;
- }
-#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 15, 0)) */
- ssid_cnt++;
- }
- }
-
- if (ssid_cnt) {
- if ((ret = dhd_dev_pno_set_for_ssid(dev, ssids_local, ssid_cnt,
- pno_time, pno_repeat, pno_freq_expo_max, NULL, 0)) < 0) {
- WL_ERR(("PNO setup failed!! ret=%d \n", ret));
- ret = -EINVAL;
- goto exit;
- }
-
- if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
- for (i = 0; i < ssid_cnt; i++) {
- payload_len = sizeof(log_conn_event_t);
- event_data->event = WIFI_EVENT_DRIVER_PNO_ADD;
- tlv_data = event_data->tlvs;
- /* ssid */
- tlv_data->tag = WIFI_TAG_SSID;
- tlv_data->len = ssids_local[i].SSID_len;
- memcpy(tlv_data->value, ssids_local[i].SSID,
- ssids_local[i].SSID_len);
- payload_len += TLV_LOG_SIZE(tlv_data);
-
- dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
- event_data, payload_len);
- }
- }
-
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
- cfg->sched_scan_req = request;
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
- } else {
- ret = -EINVAL;
- }
-exit:
- if (event_data) {
- MFREE(cfg->osh, event_data->tlvs, tlv_len);
- MFREE(cfg->osh, event_data, alloc_len);
- }
- return ret;
-}
-
-static int
-wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
- , u64 reqid
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */
-)
-{
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- unsigned long flags;
-
- WL_DBG(("Enter \n"));
- WL_PNO((">>> SCHED SCAN STOP\n"));
-
- if (dhd_dev_pno_stop_for_ssid(dev) < 0) {
- WL_ERR(("PNO Stop for SSID failed"));
- } else {
- DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_REMOVE);
- }
-
- if (cfg->scan_request && cfg->sched_scan_running) {
- WL_PNO((">>> Sched scan running. Aborting it..\n"));
- wl_notify_escan_complete(cfg, dev, true, true);
- }
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
- cfg->sched_scan_req = NULL;
- cfg->sched_scan_running = FALSE;
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
- return 0;
-}
-#endif /* WL_SCHED_SCAN */
-
#ifdef WL_SUPPORT_ACS
/*
* Currently the dump_obss IOVAR is returning string as output so we need to
/* This should be enabled from kernel version which supports this */
.update_connect_params = wl_cfg80211_update_connect_params,
#endif /* WL_FILS */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0))
+ .set_pmk = wl_cfg80211_set_pmk,
+ .del_pmk = wl_cfg80211_del_pmk,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+ .channel_switch = wl_cfg80211_channel_switch,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) */
};
s32 wl_mode_to_nl80211_iftype(s32 mode)
struct wiphy *wiphy = wdev->wiphy;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
if (cfg->nan_enable) {
- ret = wl_cfgnan_disable(cfg, NAN_COUNTRY_CODE_CHANGE);
+ mutex_lock(&cfg->if_sync);
+ cfg->nancfg.disable_reason = NAN_COUNTRY_CODE_CHANGE;
+ ret = wl_cfgnan_disable(cfg);
+ mutex_unlock(&cfg->if_sync);
if (ret != BCME_OK) {
WL_ERR(("failed to disable nan, error[%d]\n", ret));
return ret;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) */
#endif /* CONFIG_PM */
+int wl_features_set(u8 *array, uint8 len, u32 ftidx)
+{
+ u8* ft_byte;
+
+ if ((ftidx / 8u) >= len)
+ return BCME_BADARG;
+
+ ft_byte = &array[ftidx / 8u];
+ *ft_byte |= BIT(ftidx % 8u);
+ return BCME_OK;
+}
+
static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev, dhd_pub_t *context)
{
s32 err = 0;
wdev->wiphy->max_sched_scan_ssids = MAX_PFN_LIST_COUNT;
wdev->wiphy->max_match_sets = MAX_PFN_LIST_COUNT;
wdev->wiphy->max_sched_scan_ie_len = WL_SCAN_IE_LEN_MAX;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
- wdev->wiphy->max_sched_scan_plan_interval = PNO_SCAN_MAX_FW_SEC;
-#else
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */
+#endif /* LINUX_VER < 4.12 */
#endif /* WL_SCHED_SCAN */
wdev->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION)
WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS |
#endif // endif
WIPHY_FLAG_4ADDR_STATION;
-#if ((defined(ROAM_ENABLE) || defined(BCMFW_ROAM_ENABLE)) && (LINUX_VERSION_CODE >= \
- KERNEL_VERSION(3, 2, 0)))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
/*
- * If FW ROAM flag is advertised, upper layer wouldn't provide
- * the bssid & freq in the connect command. This will result a
- * delay in initial connection time due to firmware doing a full
- * channel scan to figure out the channel & bssid. However kernel
- * ver >= 3.15, provides bssid_hint & freq_hint and hence kernel
- * ver >= 3.15 won't have any issue. So if this flags need to be
- * advertised for kernel < 3.15, suggest to use RCC along with it
- * to avoid the initial connection delay.
+ * If FW ROAM flag is advertised, upper layer doesn't provide the
+ * bssid & freq in the connect command. However, kernel ver >= 3.15,
+ * provides bssid_hint & freq_hint which can be used by the firmware.
+ * fw_ap_select variable determines whether FW selects the AP or the
+ * user space selects the target AP within the given ESS.
*/
wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
-#endif
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) || defined(WL_COMPAT_WIRELESS)
wdev->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_OFFCHAN_TX;
wdev->wiphy->wowlan = &brcm_wowlan_support;
/* If this is not provided cfg stack will get disconnect
* during suspend.
+ * Note: wiphy->wowlan_config is freed by cfg80211 layer.
+ * so use malloc instead of MALLOC(osh) to avoid false alarm.
*/
- brcm_wowlan_config = MALLOC(dhd->osh, sizeof(struct cfg80211_wowlan));
+ brcm_wowlan_config = kmalloc(sizeof(struct cfg80211_wowlan), GFP_KERNEL);
if (brcm_wowlan_config) {
brcm_wowlan_config->disconnect = true;
brcm_wowlan_config->gtk_rekey_failure = true;
WL_DBG(("Registering custom regulatory)\n"));
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
- wdev->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
+ wdev->wiphy->regulatory_flags |=
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+ REGULATORY_IGNORE_STALE_KICKOFF |
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) */
+ REGULATORY_CUSTOM_REG;
#else
wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
-#endif // endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
wiphy_apply_custom_regulatory(wdev->wiphy, &brcm_regdom);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
WL_ERR(("Couldn not attach vendor commands (%d)\n", err));
}
#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
+#ifdef WL_FILS
+ wiphy_ext_feature_set(wdev->wiphy, NL80211_EXT_FEATURE_FILS_SK_OFFLOAD);
+#endif /* WL_FILS */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+ wdev->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ wdev->wiphy->max_num_csa_counters = WL_MAX_NUM_CSA_COUNTERS;
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(3, 12, 0) */
+
/* Now we can register wiphy with cfg80211 module */
err = wiphy_register(wdev->wiphy);
if (unlikely(err < 0)) {
#ifdef WL_SAE
wdev->wiphy->features |= NL80211_FEATURE_SAE;
#endif /* WL_SAE */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)) && defined(BCMSUP_4WAY_HANDSHAKE)
+ if (FW_SUPPORTED(dhd, idsup)) {
+ err = wiphy_ext_feature_set(wdev->wiphy,
+ NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK);
+ if (err) {
+ return err;
+ }
+ err = wiphy_ext_feature_set(wdev->wiphy,
+ NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X);
+ if (err) {
+ return err;
+ }
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) && defined(BCMSUP_4WAY_HANDSHAKE) */
+#ifdef WL_SCAN_TYPE
+ /* These scan types will be mapped to default scan on non-supported chipset */
+ /* Advertise scan type capability. */
+ wiphy_ext_feature_set(wdev->wiphy, NL80211_EXT_FEATURE_LOW_SPAN_SCAN);
+ wiphy_ext_feature_set(wdev->wiphy, NL80211_EXT_FEATURE_LOW_POWER_SCAN);
+ wiphy_ext_feature_set(wdev->wiphy, NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN);
+ wdev->wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN;
+#endif /* WL_SCAN_TYPE */
return err;
}
#if defined(CONFIG_PM) && defined(WL_CFG80211_P2P_DEV_IF)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
/* Reset wowlan & wowlan_config before Unregister to avoid Kernel Panic */
- WL_DBG(("wl_free_wdev Clearing wowlan Config \n"));
- if (wdev->wiphy->wowlan_config) {
- MFREE(cfg->osh, wdev->wiphy->wowlan_config,
- sizeof(struct cfg80211_wowlan));
- wdev->wiphy->wowlan_config = NULL;
- }
+ WL_DBG(("clear wowlan\n"));
wdev->wiphy->wowlan = NULL;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
#endif /* CONFIG_PM && WL_CFG80211_P2P_DEV_IF */
*/
}
-static s32 wl_inform_bss(struct bcm_cfg80211 *cfg)
+s32 wl_inform_bss(struct bcm_cfg80211 *cfg)
{
struct wl_scan_results *bss_list;
wl_bss_info_t *bi = NULL; /* must be initialized */
return err;
}
-static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, wl_bss_info_t *bi, bool roam)
+static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, wl_bss_info_t *bi, bool update_ssid)
{
struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
struct ieee80211_mgmt *mgmt;
u32 freq;
s32 err = 0;
gfp_t aflags;
+ u8 tmp_buf[IEEE80211_MAX_SSID_LEN + 1];
chanspec_t chanspec;
if (unlikely(dtoh32(bi->length) > WL_BSS_INFO_MAX)) {
WL_DBG(("Beacon is larger than buffer. Discarding\n"));
return err;
}
+
+ if (bi->SSID_len > IEEE80211_MAX_SSID_LEN) {
+ WL_ERR(("wrong SSID len:%d\n", bi->SSID_len));
+ return -EINVAL;
+ }
+
aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
notif_bss_info = (struct wl_cfg80211_bss_info *)MALLOCZ(cfg->osh,
sizeof(*notif_bss_info) + sizeof(*mgmt) - sizeof(u8) + WL_BSS_INFO_MAX);
beacon_proberesp->beacon_int = cpu_to_le16(bi->beacon_period);
beacon_proberesp->capab_info = cpu_to_le16(bi->capability);
wl_rst_ie(cfg);
- wl_update_hidden_ap_ie(bi, ((u8 *) bi) + bi->ie_offset, &bi->ie_length, roam);
+ wl_update_hidden_ap_ie(bi, ((u8 *) bi) + bi->ie_offset, &bi->ie_length, update_ssid);
wl_mrg_ie(cfg, ((u8 *) bi) + bi->ie_offset, bi->ie_length);
wl_cp_ie(cfg, beacon_proberesp->variable, WL_BSS_INFO_MAX -
offsetof(struct wl_cfg80211_bss_info, frame_buf));
return -EINVAL;
}
channel = ieee80211_get_channel(wiphy, freq);
- WL_SCAN(("BSSID %pM, channel %2d(%2d %sMHz), rssi %3d, capa 0x04%x, mgmt_type %d, "
+ memcpy(tmp_buf, bi->SSID, bi->SSID_len);
+ tmp_buf[bi->SSID_len] = '\0';
+ WL_SCAN(("BSSID %pM, channel %3d(%3d %sMHz), rssi %3d, capa 0x04%x, mgmt_type %d, "
"frame_len %d, SSID \"%s\"\n",
&bi->BSSID, notif_bss_info->channel, CHSPEC_CHANNEL(chanspec),
CHSPEC_IS20(chanspec)?"20":
CHSPEC_IS40(chanspec)?"40":
CHSPEC_IS80(chanspec)?"80":"160",
notif_bss_info->rssi, mgmt->u.beacon.capab_info, mgmt_type,
- notif_bss_info->frame_len, bi->SSID));
+ notif_bss_info->frame_len, tmp_buf));
if (unlikely(!channel)) {
WL_ERR(("ieee80211_get_channel error, freq=%d, channel=%d\n",
freq, notif_bss_info->channel));
signal = notif_bss_info->rssi * 100;
if (!mgmt->u.probe_resp.timestamp) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
- struct timespec ts;
- get_monotonic_boottime(&ts);
+ struct osl_timespec ts;
+ osl_get_monotonic_boottime(&ts);
mgmt->u.probe_resp.timestamp = ((u64)ts.tv_sec*1000000)
+ ts.tv_nsec / 1000;
#else
- struct timeval tv;
- do_gettimeofday(&tv);
+ struct osl_timespec tv;
+ osl_do_gettimeofday(&tv);
mgmt->u.probe_resp.timestamp = ((u64)tv.tv_sec*1000000)
+ tv.tv_usec;
#endif // endif
(cfg->sched_scan_req && !cfg->scan_request)) {
alloc_len = sizeof(log_conn_event_t) + IEEE80211_MAX_SSID_LEN + sizeof(uint16) +
sizeof(int16);
- event_data = (log_conn_event_t *)MALLOCZ(cfg->osh, alloc_len);
+ event_data = (log_conn_event_t *)MALLOCZ(dhdp->osh, alloc_len);
if (!event_data) {
WL_ERR(("%s: failed to allocate the log_conn_event_t with "
"length(%d)\n", __func__, alloc_len));
return false;
}
-#ifdef WL_LASTEVT
-static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e, void *data)
-{
- u32 event = ntoh32(e->event_type);
- u16 flags = ntoh16(e->flags);
- wl_last_event_t *last_event = (wl_last_event_t *)data;
- u32 len = ntoh32(e->datalen);
-
- if (event == WLC_E_DEAUTH_IND ||
- event == WLC_E_DISASSOC_IND ||
- event == WLC_E_DISASSOC ||
- event == WLC_E_DEAUTH) {
- WL_ERR(("Link down Reason : %s\n", bcmevent_get_name(event)));
- return true;
- } else if (event == WLC_E_LINK) {
- if (!(flags & WLC_EVENT_MSG_LINK)) {
- if (last_event && len > 0) {
- u32 current_time = last_event->current_time;
- u32 timestamp = last_event->timestamp;
- u32 event_type = last_event->event.event_type;
- u32 status = last_event->event.status;
- u32 reason = last_event->event.reason;
-
- WL_ERR(("Last roam event before disconnection : current_time %d,"
- " time %d, type %d, status %d, reason %d\n",
- current_time, timestamp, event_type, status, reason));
- }
- WL_ERR(("Link down Reason : %s\n", bcmevent_get_name(event)));
- return true;
- }
- }
-
- return false;
-}
-#else
static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e)
{
u32 event = ntoh32(e->event_type);
return false;
}
-#endif /* WL_LASTEVT */
static bool wl_is_nonetwork(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e)
{
return true;
if (event == WLC_E_SET_SSID && status != WLC_E_STATUS_SUCCESS)
return true;
+ if (event == WLC_E_ASSOC_RESP_IE && status != WLC_E_STATUS_SUCCESS)
+ return true;
return false;
}
-/* The mainline kernel >= 3.2.0 has support for indicating new/del station
- * to AP/P2P GO via events. If this change is backported to kernel for which
- * this driver is being built, then define WL_CFG80211_STA_EVENT. You
- * should use this new/del sta event mechanism for BRCM supplicant >= 22.
- */
+#ifdef WL_SAE
+/*
+ * wl_cfg80211_event_sae_key() - forward SAE key material to userspace.
+ *
+ * Packages the peer MAC plus PMK/PMKID from @sae_key into a
+ * BRCM_VENDOR_EVENT_SAE_KEY cfg80211 vendor event on @ndev's wiphy.
+ * Returns BCME_OK on success or BCME_NOMEM if the event skb cannot be
+ * allocated.
+ */
static s32
-wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev,
-	const wl_event_msg_t *e, void *data)
+wl_cfg80211_event_sae_key(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	wl_sae_key_info_t *sae_key)
{
-	s32 err = 0;
-	u32 event = ntoh32(e->event_type);
-	u32 reason = ntoh32(e->reason);
+	struct sk_buff *skb;
+	gfp_t kflags;
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	int err = BCME_OK;
+
+	/* May run in atomic (event-dispatch) context; choose GFP flags to match. */
+	kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+	LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+	skb = cfg80211_vendor_event_alloc(wiphy, ndev_to_wdev(ndev), BRCM_SAE_VENDOR_EVENT_BUF_LEN,
+		BRCM_VENDOR_EVENT_SAE_KEY, kflags);
+#else
+	skb = cfg80211_vendor_event_alloc(wiphy, BRCM_SAE_VENDOR_EVENT_BUF_LEN,
+		BRCM_VENDOR_EVENT_SAE_KEY, kflags);
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+	/* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+	if (!skb) {
+		WL_ERR(("skb alloc failed"));
+		err = BCME_NOMEM;
+		goto done;
+	}
+
+	WL_INFORM_MEM(("Received Sae Key event for "MACDBG" key length %x %x",
+		MAC2STRDBG(sae_key->peer_mac), sae_key->pmk_len, sae_key->pmkid_len));
+	/* NOTE(review): nla_put() return values are ignored; attributes would be
+	 * silently dropped if BRCM_SAE_VENDOR_EVENT_BUF_LEN were ever too small
+	 * for MAC + PMK + PMKID -- confirm the buffer size always suffices.
+	 */
+	nla_put(skb, BRCM_SAE_KEY_ATTR_PEER_MAC, ETHER_ADDR_LEN, sae_key->peer_mac);
+	nla_put(skb, BRCM_SAE_KEY_ATTR_PMK, sae_key->pmk_len, sae_key->pmk);
+	nla_put(skb, BRCM_SAE_KEY_ATTR_PMKID, sae_key->pmkid_len, sae_key->pmkid);
+	cfg80211_vendor_event(skb, kflags);
+
+done:
+	return err;
+}
+
+/*
+ * wl_bss_handle_sae_auth() - handle a successful SAE authentication event.
+ *
+ * Validates the wl_auth_event_t payload version, extracts the mandatory
+ * PMK and PMKID XTLVs and forwards them to userspace via
+ * wl_cfg80211_event_sae_key(). Returns BCME_OK on success, BCME_VERSION
+ * for an unknown payload version, or BCME_NOTFOUND when a mandatory TLV
+ * is missing.
+ */
+static s32
+wl_bss_handle_sae_auth(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *event, void *data)
+{
+	int err = BCME_OK;
+	uint status = ntoh32(event->status);
+	wl_auth_event_t *auth_data;
+	wl_sae_key_info_t sae_key;
+	uint16 tlv_buf_len;
+
+	if (status == WLC_E_STATUS_SUCCESS) {
+		auth_data = (wl_auth_event_t *)data;
+		/* Validate the version before trusting auth_data->length below. */
+		if (auth_data->version != WL_AUTH_EVENT_DATA_V1) {
+			WL_ERR(("unknown auth event data version %x\n",
+				auth_data->version));
+			err = BCME_VERSION;
+			goto done;
+		}
+
+		tlv_buf_len = auth_data->length - WL_AUTH_EVENT_FIXED_LEN_V1;
+
+		/* check if PMK info present */
+		sae_key.pmk = bcm_get_data_from_xtlv_buf(auth_data->xtlvs, tlv_buf_len,
+			WL_AUTH_PMK_TLV_ID, &(sae_key.pmk_len), BCM_XTLV_OPTION_ALIGN32);
+		if (!sae_key.pmk || !sae_key.pmk_len) {
+			WL_ERR(("Mandatory PMK info not present"));
+			err = BCME_NOTFOUND;
+			goto done;
+		}
+		/* check if PMKID info present */
+		sae_key.pmkid = bcm_get_data_from_xtlv_buf(auth_data->xtlvs, tlv_buf_len,
+			WL_AUTH_PMKID_TLV_ID, &(sae_key.pmkid_len), BCM_XTLV_OPTION_ALIGN32);
+		if (!sae_key.pmkid || !sae_key.pmkid_len) {
+			WL_ERR(("Mandatory PMKID info not present\n"));
+			err = BCME_NOTFOUND;
+			goto done;
+		}
+		memcpy_s(sae_key.peer_mac, ETHER_ADDR_LEN, event->addr.octet, ETHER_ADDR_LEN);
+		err = wl_cfg80211_event_sae_key(cfg, ndev, &sae_key);
+		if (err) {
+			WL_ERR(("Failed to event sae key info\n"));
+		}
+	} else {
+		/* NOTE(review): a failed SAE auth is only logged here; err stays
+		 * BCME_OK for the caller -- confirm that is intentional.
+		 */
+		WL_ERR(("sae auth status failure:%d\n", status));
+	}
+done:
+	return err;
+}
+#endif /* WL_SAE */
+
+/*
+ * wl_get_auth_assoc_status() - record the reason code of the latest
+ * WLC_E_ASSOC / WLC_E_AUTH / WLC_E_AUTH_IND event in the per-netdev
+ * security profile (auth_assoc_res_status). For SAE auth events the
+ * payload also carries key material, which is handed to
+ * wl_bss_handle_sae_auth(). Always returns 0.
+ */
+static s32
+wl_get_auth_assoc_status(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	u32 reason = ntoh32(e->reason);
+	u32 event = ntoh32(e->event_type);
+#ifdef WL_SAE
+	uint auth_type = ntoh32(e->auth_type);
+#endif /* WL_SAE */
+	struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+	WL_DBG(("event type : %d, reason : %d\n", event, reason));
+
+	if (sec) {
+		switch (event) {
+		case WLC_E_ASSOC:
+		case WLC_E_AUTH:
+		case WLC_E_AUTH_IND:
+			sec->auth_assoc_res_status = reason;
+#ifdef WL_SAE
+			/* WLC_E_AUTH_IND covers the AP-side SAE exchange. */
+			if ((event == WLC_E_AUTH || event == WLC_E_AUTH_IND) &&
+				auth_type == DOT11_SAE) {
+				wl_bss_handle_sae_auth(cfg, ndev, e, data);
+			}
+#endif /* WL_SAE */
+			break;
+		default:
+			break;
+		}
+	} else {
+		WL_ERR(("sec is NULL\n"));
+	}
+	return 0;
+}
+
+/* The mainline kernel >= 3.2.0 has support for indicating new/del station
+ * to AP/P2P GO via events. If this change is backported to kernel for which
+ * this driver is being built, then define WL_CFG80211_STA_EVENT. You
+ * should use this new/del sta event mechanism for BRCM supplicant >= 22.
+ */
+static s32
+wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+ u32 event = ntoh32(e->event_type);
+ u32 reason = ntoh32(e->reason);
u32 len = ntoh32(e->datalen);
u32 status = ntoh32(e->status);
WL_INFORM_MEM(("[%s] Mode AP/GO. Event:%d status:%d reason:%d\n",
ndev->name, event, ntoh32(e->status), reason));
+
+ if (event == WLC_E_AUTH_IND) {
+ wl_get_auth_assoc_status(cfg, ndev, e, data);
+ return 0;
+ }
/* if link down, bsscfg is disabled. */
if (event == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS &&
wl_get_p2p_status(cfg, IF_DELETING) && (ndev != bcmcfg_to_prmry_ndev(cfg))) {
/* AP/GO brought up successfull in firmware */
WL_MSG(ndev->name, "AP/GO Link up\n");
wl_set_drv_status(cfg, AP_CREATED, ndev);
+ OSL_SMP_WMB();
wake_up_interruptible(&cfg->netif_change_event);
+#ifdef WL_BCNRECV
+ /* check fakeapscan is in progress, if progress then abort */
+ wl_android_bcnrecv_stop(ndev, WL_BCNRECV_CONCURRENCY);
+#endif /* WL_BCNRECV */
wl_cfg80211_check_in4way(cfg, ndev, 0, WL_EXT_STATUS_AP_ENABLED, NULL);
return 0;
}
}
if (event == WLC_E_DISASSOC_IND || event == WLC_E_DEAUTH_IND || event == WLC_E_DEAUTH) {
- WL_DBG(("event %s(%d) status %d reason %d\n",
- bcmevent_get_name(event), event, ntoh32(e->status), reason));
+ WL_MSG(ndev->name, "event %s(%d) status %d reason %d\n",
+ bcmevent_get_name(event), event, ntoh32(e->status), reason);
}
#if !defined(WL_CFG80211_STA_EVENT) && !defined(WL_COMPAT_WIRELESS) && \
if (len) {
body = (u8 *)MALLOCZ(cfg->osh, len);
if (body == NULL) {
- WL_ERR(("wl_notify_connect_status: Failed to allocate body\n"));
+ WL_ERR(("Failed to allocate body\n"));
return WL_INVALID;
}
}
- memset(&bssid, 0, ETHER_ADDR_LEN);
+ bzero(&bssid, ETHER_ADDR_LEN);
WL_DBG(("Enter event %d ndev %p\n", event, ndev));
if (wl_get_mode_by_netdev(cfg, ndev) == WL_INVALID) {
MFREE(cfg->osh, body, len);
wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
NULL, 0, ioctl_buf, sizeof(ioctl_buf), bsscfgidx, NULL);
memcpy(da.octet, ioctl_buf, ETHER_ADDR_LEN);
- memset(&bssid, 0, sizeof(bssid));
+ bzero(&bssid, sizeof(bssid));
err = wldev_ioctl_get(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
switch (event) {
case WLC_E_ASSOC_IND:
fc = 0;
goto exit;
}
- memset(&ci, 0, sizeof(ci));
+ bzero(&ci, sizeof(ci));
if ((err = wldev_ioctl_get(ndev, WLC_GET_CHANNEL, &ci, sizeof(ci)))) {
MFREE(cfg->osh, body, len);
return err;
MFREE(cfg->osh, body, body_len);
}
#else /* LINUX_VERSION < VERSION(3,2,0) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS */
+ memset(&sinfo, 0, sizeof(struct station_info));
sinfo.filled = 0;
if (((event == WLC_E_ASSOC_IND) || (event == WLC_E_REASSOC_IND)) &&
reason == DOT11_SC_SUCCESS) {
return err;
}
-#ifdef WL_SAE
-static s32
-wl_cfg80211_event_sae_key(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- wl_sae_key_info_t *sae_key)
-{
- struct sk_buff *skb;
- gfp_t kflags;
- struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
- int err = BCME_OK;
-
- kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
-#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
- LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
- skb = cfg80211_vendor_event_alloc(wiphy, ndev_to_wdev(ndev), BRCM_SAE_VENDOR_EVENT_BUF_LEN,
- BRCM_VENDOR_EVENT_SAE_KEY, kflags);
-#else
- skb = cfg80211_vendor_event_alloc(wiphy, BRCM_SAE_VENDOR_EVENT_BUF_LEN,
- BRCM_VENDOR_EVENT_SAE_KEY, kflags);
-#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
- /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
- if (!skb) {
- WL_ERR(("skb alloc failed"));
- err = BCME_NOMEM;
- goto done;
- }
-
- WL_INFORM_MEM(("Received Sae Key event for "MACDBG" key length %x %x",
- MAC2STRDBG(sae_key->bssid), sae_key->pmk_len, sae_key->pmkid_len));
- nla_put(skb, BRCM_SAE_KEY_ATTR_BSSID, ETHER_ADDR_LEN, sae_key->bssid);
- nla_put(skb, BRCM_SAE_KEY_ATTR_PMK, sae_key->pmk_len, sae_key->pmk);
- nla_put(skb, BRCM_SAE_KEY_ATTR_PMKID, sae_key->pmkid_len, sae_key->pmkid);
- cfg80211_vendor_event(skb, kflags);
-
-done:
- return err;
-}
-
-static s32
-wl_bss_handle_sae_auth(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- const wl_event_msg_t *event, void *data)
-{
- int err = BCME_OK;
- uint status = ntoh32(event->status);
- wl_auth_event_t *auth_data;
- wl_sae_key_info_t sae_key;
- uint16 tlv_buf_len;
-
- if (status == WLC_E_STATUS_SUCCESS) {
- auth_data = (wl_auth_event_t *)data;
- tlv_buf_len = auth_data->length - WL_AUTH_EVENT_FIXED_LEN_V1;
- if (auth_data->version != WL_AUTH_EVENT_DATA_V1) {
- WL_ERR(("unknown auth event data version %x\n",
- auth_data->version));
- err = BCME_VERSION;
- goto done;
- }
- /* check if PMK info present */
- sae_key.pmk = bcm_get_data_from_xtlv_buf(auth_data->xtlvs, tlv_buf_len,
- WL_AUTH_PMK_TLV_ID, &(sae_key.pmk_len), BCM_XTLV_OPTION_ALIGN32);
- if (!sae_key.pmk || !sae_key.pmk_len) {
- WL_ERR(("Mandatory PMK info not present"));
- err = BCME_NOTFOUND;
- goto done;
- }
- /* check if PMKID info present */
- sae_key.pmkid = bcm_get_data_from_xtlv_buf(auth_data->xtlvs, tlv_buf_len,
- WL_AUTH_PMKID_TLV_ID, &(sae_key.pmkid_len), BCM_XTLV_OPTION_ALIGN32);
- if (!sae_key.pmkid || !sae_key.pmkid_len) {
- WL_ERR(("Mandatory PMKID info not present\n"));
- err = BCME_NOTFOUND;
- goto done;
- }
- memcpy(sae_key.bssid, event->addr.octet, ETHER_ADDR_LEN);
- err = wl_cfg80211_event_sae_key(cfg, ndev, &sae_key);
- if (err) {
- WL_ERR(("Failed to event sae key info\n"));
- }
- }
-done:
- return err;
-}
-#endif /* WL_SAE */
-
-static s32
-wl_get_auth_assoc_status(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- const wl_event_msg_t *e, void *data)
-{
- u32 reason = ntoh32(e->reason);
- u32 event = ntoh32(e->event_type);
-#ifdef WL_SAE
- uint auth_type = ntoh32(e->auth_type);
-#endif /* WL_SAE */
- struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
- WL_DBG(("event type : %d, reason : %d\n", event, reason));
-
- if (sec) {
- switch (event) {
- case WLC_E_ASSOC:
- case WLC_E_AUTH:
- sec->auth_assoc_res_status = reason;
-#ifdef WL_SAE
- if (event == WLC_E_AUTH && auth_type == DOT11_SAE) {
- wl_bss_handle_sae_auth(cfg, ndev, e, data);
- }
-#endif /* WL_SAE */
- default:
- break;
- }
- } else
- WL_ERR(("sec is NULL\n"));
- return 0;
-}
-
static s32
wl_notify_connect_status_ibss(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e, void *data)
return err;
}
+/*
+ * wl_cfg80211_disassoc() - issue WLC_DISASSOC to firmware using the
+ * caller-supplied 802.11 reason code (previously hard-coded to
+ * WLAN_REASON_DEAUTH_LEAVING).
+ */
-void wl_cfg80211_disassoc(struct net_device *ndev)
+void wl_cfg80211_disassoc(struct net_device *ndev, uint32 reason)
{
	scb_val_t scbval;
	s32 err;
+	struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+	BCM_REFERENCE(cfg);
+	BCM_REFERENCE(dhdp);
+	/* NOTE(review): the stat-log still records WLAN_REASON_DEAUTH_LEAVING
+	 * even though the ioctl below now sends the caller's 'reason' --
+	 * confirm this is intentional.
+	 */
+	DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
+		dhd_net2idx(dhdp->info, ndev), WLAN_REASON_DEAUTH_LEAVING);
-	memset(&scbval, 0x0, sizeof(scb_val_t));
-	scbval.val = htod32(WLAN_REASON_DEAUTH_LEAVING);
+	memset_s(&scbval, sizeof(scb_val_t), 0x0, sizeof(scb_val_t));
+	scbval.val = htod32(reason);
	err = wldev_ioctl_set(ndev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
	if (err < 0) {
		WL_ERR(("WLC_DISASSOC error %d\n", err));
	}
}
-
-static s32
-wl_notify_connect_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
-	const wl_event_msg_t *e, void *data)
+/*
+ * wl_cfg80211_del_all_sta() - deauthenticate every associated STA with the
+ * given 802.11 reason code via a broadcast SCB deauth. Suspends P2P
+ * discovery search-listen first so it cannot switch channels while the
+ * deauth is in flight, and waits briefly when stations were associated so
+ * the deauth frames can go out on air.
+ */
+void wl_cfg80211_del_all_sta(struct net_device *ndev, uint32 reason)
{
-	bool act;
-	struct net_device *ndev = NULL;
-	s32 err = 0;
-	u32 event = ntoh32(e->event_type);
-	struct wiphy *wiphy = NULL;
-	struct cfg80211_bss *bss = NULL;
-	struct wlc_ssid *ssid = NULL;
-	u8 *bssid = 0;
-	dhd_pub_t *dhdp;
-	u32 mode;
-	int vndr_oui_num = 0;
+	struct net_device *dev;
+	struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+	scb_val_t scb_val;
+	int err;
+	char mac_buf[MAX_NUM_OF_ASSOCIATED_DEV *
+		sizeof(struct ether_addr) + sizeof(uint)] = {0};
+	struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+	int num_associated = 0;
+
+	dev = ndev_to_wlc_ndev(ndev, cfg);
+
+	if (p2p_is_on(cfg)) {
+		/* Suspend P2P discovery search-listen to prevent it from changing the
+		 * channel.
+		 */
+		if ((wl_cfgp2p_discover_enable_search(cfg, false)) < 0) {
+			WL_ERR(("Can not disable discovery mode\n"));
+			return;
+		}
+	}
+
+	assoc_maclist->count = MAX_NUM_OF_ASSOCIATED_DEV;
+	err = wldev_ioctl_get(ndev, WLC_GET_ASSOCLIST,
+		assoc_maclist, sizeof(mac_buf));
+	if (err < 0)
+		WL_ERR(("WLC_GET_ASSOCLIST error %d\n", err));
+	else
+		num_associated = assoc_maclist->count;
+
+	/* Broadcast ether address deauthenticates all STAs in one shot.
+	 * (The dead store of DOT11_RC_DEAUTH_LEAVING that was immediately
+	 * overwritten by htod32(reason) has been removed.)
+	 */
+	memset(scb_val.ea.octet, 0xff, ETHER_ADDR_LEN);
+	scb_val.val = htod32(reason);
+	err = wldev_ioctl_set(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val,
+		sizeof(scb_val_t));
+	if (err < 0) {
+		WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON err %d\n", err));
+	}
+
+	/* Give firmware time to transmit the deauth frames before returning. */
+	if (num_associated > 0)
+		wl_delay(400);
+
+	return;
+}
+/* API to handle a deauth indication from the AP.
+ * For now we delete the PMKID cache entry in DHD/FW when the current
+ * connection used SAE authentication.
+ */
+static s32
+wl_cfg80211_handle_deauth_ind(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	int err = BCME_OK;
+#ifdef WL_SAE
+	uint8 bssid[ETHER_ADDR_LEN];
+	struct cfg80211_pmksa pmksa;
+	s32 val = 0;
+
+	err = wldev_iovar_getint(ndev, "wpa_auth", &val);
+	if (unlikely(err)) {
+		WL_ERR(("could not get wpa_auth (%d)\n", err));
+		goto done;
+	}
+	if (val == WPA3_AUTH_SAE_PSK) {
+		(void)memcpy_s(bssid, ETHER_ADDR_LEN,
+			(const uint8*)&e->addr, ETHER_ADDR_LEN);
+		memset_s(&pmksa, sizeof(pmksa), 0, sizeof(pmksa));
+		/* pmksa.bssid points at the stack copy above; it is only valid
+		 * for the duration of the wl_cfg80211_del_pmksa() call.
+		 */
+		pmksa.bssid = bssid;
+		WL_INFORM_MEM(("Deleting the PMKSA for SAE AP "MACDBG,
+			MAC2STRDBG(e->addr.octet)));
+		wl_cfg80211_del_pmksa(cfg->wdev->wiphy, ndev, &pmksa);
+	}
+done:
+#endif /* WL_SAE */
+	/* Without WL_SAE this function is a no-op returning BCME_OK. */
+	return err;
+}
+
+/*
+ * wl_cache_assoc_resp_ies() - copy the assoc-response IEs delivered with a
+ * WLC_E_ASSOC_RESP_IE event into the connection-info cache
+ * (conn_info->resp_ie / resp_ie_len). Payloads outside
+ * (VNDR_IE_MIN_LEN, VNDR_IE_MAX_LEN) or with no data are silently ignored.
+ */
+static void
+wl_cache_assoc_resp_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct wl_connect_info *conn_info = wl_to_conn(cfg);
+	u32 datalen = ntoh32(e->datalen);
+	u32 event_type = ntoh32(e->event_type);
+
+	if (datalen > VNDR_IE_MIN_LEN &&
+		datalen < VNDR_IE_MAX_LEN &&
+		data) {
+		conn_info->resp_ie_len = datalen;
+		WL_DBG((" assoc resp IES len = %d\n", conn_info->resp_ie_len));
+		bzero(conn_info->resp_ie, sizeof(conn_info->resp_ie));
+		/* memcpy_s bounds the copy by the destination buffer size. */
+		(void)memcpy_s(conn_info->resp_ie, sizeof(conn_info->resp_ie),
+			data, datalen);
+
+		/* NOTE(review): the log text says "sent to upper layer" but this
+		 * function only caches the IEs; presumably a later event path
+		 * reports them -- verify against the caller.
+		 */
+		WL_INFORM_MEM(("[%s] copied assoc resp ies, sent to upper layer:"
+			"event %d reason=%d ie_len=%d from " MACDBG "\n",
+			ndev->name, event_type, ntoh32(e->reason), datalen,
+			MAC2STRDBG((const u8*)(&e->addr))));
+	}
+}
+
+static s32
+wl_notify_connect_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ bool act;
+ struct net_device *ndev = NULL;
+ s32 err = 0;
+ u32 event = ntoh32(e->event_type);
+ u32 datalen = ntoh32(e->datalen);
+ struct wiphy *wiphy = NULL;
+ struct cfg80211_bss *bss = NULL;
+ struct wlc_ssid *ssid = NULL;
+ u8 *bssid = 0;
+ s32 bssidx = 0;
+ u8 *ie_ptr = NULL;
+ uint32 ie_len = 0;
+#ifdef WL_ANALYTICS
+ struct parsed_vndr_ies disco_vndr_ie;
+ struct parsed_vndr_ie_info *vndrie_info = NULL;
+ uint32 i = 0;
+#endif /* WL_ANALYTICS */
+
+ dhd_pub_t *dhdp;
+ u32 mode;
+ int vndr_oui_num = 0;
char vndr_oui[MAX_VNDR_OUI_STR_LEN] = {0, };
bool loc_gen = false;
+#ifdef DHD_LOSSLESS_ROAMING
+ struct wl_security *sec;
+#endif /* DHD_LOSSLESS_ROAMING */
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+#ifdef DHD_LOSSLESS_ROAMING
+ sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+#endif /* DHD_LOSSLESS_ROAMING */
dhdp = (dhd_pub_t *)(cfg->pub);
BCM_REFERENCE(dhdp);
wl_get_auth_assoc_status(cfg, ndev, e, data);
return 0;
}
+ if (event == WLC_E_ASSOC_RESP_IE) {
+ if (ntoh32(e->status) != WLC_E_STATUS_SUCCESS) {
+ wl_cache_assoc_resp_ies(cfg, ndev, e, data);
+ }
+ return 0;
+ }
+
DHD_DISABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub);
if (wl_is_linkup(cfg, e, ndev)) {
wl_link_up(cfg);
if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
WL_INFORM_MEM(("[%s] link up for bssid " MACDBG "\n",
ndev->name, MAC2STRDBG((const u8*)(&e->addr))));
-
if ((event == WLC_E_LINK) &&
(ntoh16(e->flags) & WLC_EVENT_MSG_LINK) &&
!wl_get_drv_status(cfg, CONNECTED, ndev) &&
!wl_get_drv_status(cfg, CONNECTING, ndev)) {
WL_INFORM_MEM(("link up in non-connected/"
"non-connecting state\n"));
- wl_cfg80211_disassoc(ndev);
+ wl_cfg80211_disassoc(ndev, WLAN_REASON_DEAUTH_LEAVING);
return BCME_OK;
}
}
#endif /* WL_WPS_SYNC */
- if (((event == WLC_E_ROAM) || (event == WLC_E_BSSID)) &&
- !wl_get_drv_status(cfg, CONNECTED, ndev)) {
- /* Roam event in disconnected state. DHD-FW state
- * mismatch. Issue disassoc to clear fw state
- */
- WL_INFORM_MEM(("Roam even in disconnected state."
- " clear fw state\n"));
- wl_cfg80211_disassoc(ndev);
- return BCME_OK;
- }
if (event == WLC_E_LINK &&
#ifdef DHD_LOSSLESS_ROAMING
!cfg->roam_offload &&
+ !IS_AKM_SUITE_FT(sec) &&
#endif /* DHD_LOSSLESS_ROAMING */
wl_get_drv_status(cfg, CONNECTED, ndev)) {
wl_bss_roaming_done(cfg, ndev, e, data);
+ /* Arm pkt logging timer */
+ dhd_dump_mod_pkt_timer(dhdp, PKT_CNT_RSN_ROAM);
} else {
/* Initial Association */
+ wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT);
wl_bss_connect_done(cfg, ndev, e, data, true);
if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
vndr_oui_num = wl_vndr_ies_get_vendor_oui(cfg,
ndev->name, vndr_oui));
}
}
+ if (event == WLC_E_LINK) {
+ /* Arm pkt logging timer */
+ dhd_dump_mod_pkt_timer(dhdp, PKT_CNT_RSN_CONNECT);
+ }
WL_DBG(("joined in BSS network \"%s\"\n",
- ((struct wlc_ssid *)
- wl_read_prof(cfg, ndev,
- WL_PROF_SSID))->SSID));
+ ((struct wlc_ssid *)wl_read_prof(cfg, ndev,
+ WL_PROF_SSID))->SSID));
}
}
wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT);
wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID);
- } else if (WL_IS_LINKDOWN(cfg, e, data) ||
+ } else if (wl_is_linkdown(cfg, e) ||
((event == WLC_E_SET_SSID) &&
(ntoh32(e->status) != WLC_E_STATUS_SUCCESS) &&
(wl_get_drv_status(cfg, CONNECTED, ndev)))) {
+ if (wl_is_linkdown(cfg, e)) {
+ /* Clear IEs for disaasoc */
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg,
+ ndev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find index failed\n"));
+ } else {
+ WL_ERR(("link down--clearing disconnect IEs\n"));
+ if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg,
+ ndev_to_cfgdev(ndev), bssidx, VNDR_IE_DISASSOC_FLAG,
+ NULL, 0)) != BCME_OK) {
+ WL_ERR(("Failed to clear ies err = %d\n", err));
+ }
+ }
+ }
WL_INFORM_MEM(("link down. connection state bit status: [%u:%u:%u:%u]\n",
wl_get_drv_status(cfg, CONNECTING, ndev),
return 0;
}
- wl_flush_fw_log_buffer(bcmcfg_to_prmry_ndev(cfg),
- FW_LOGSET_MASK_ALL);
+ if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+ wl_flush_fw_log_buffer(bcmcfg_to_prmry_ndev(cfg),
+ FW_LOGSET_MASK_ALL);
+ }
#ifdef DHD_LOSSLESS_ROAMING
wl_del_roam_timeout(cfg);
#endif // endif
"changed 0xFF\n", event, reason));
reason = WLC_E_DEAUTH_MAX_REASON;
}
+ wl_cfg80211_handle_deauth_ind(cfg, ndev, e, data);
}
#ifdef SET_SSID_FAIL_CUSTOM_RC
- if (event == WLC_E_SET_SSID) {
+ if ((event == WLC_E_SET_SSID) &&
+ (ntoh32(e->status) == WLC_E_STATUS_TIMEOUT)) {
reason = SET_SSID_FAIL_CUSTOM_RC;
}
#endif /* SET_SSID_FAIL_CUSTOM_RC */
/* roam offload does not sync BSSID always, get it from dongle */
if (cfg->roam_offload) {
- memset(&bssid_dongle, 0, sizeof(bssid_dongle));
+ bzero(&bssid_dongle, sizeof(bssid_dongle));
if (wldev_ioctl_get(ndev, WLC_GET_BSSID, &bssid_dongle,
sizeof(bssid_dongle)) == BCME_OK) {
/* if not roam case, it would return null bssid */
wl_clr_drv_status(cfg, CONNECTED, ndev);
if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
+ DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
+ dhd_net2idx(dhdp->info, ndev),
+ WLAN_REASON_DEAUTH_LEAVING);
/* To make sure disconnect, explictly send dissassoc
* for BSSID 00:00:00:00:00:00 issue
*/
ndev->name, event, ntoh32(e->reason),
MAC2STRDBG((const u8*)(&e->addr))));
+ DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_DONE),
+ dhd_net2idx(dhdp->info, ndev), reason);
/* Send up deauth and clear states */
- CFG80211_DISCONNECTED(ndev, reason, NULL, 0,
- loc_gen, GFP_KERNEL);
+
+ /*
+ * FW sends body and body len as a part of deauth
+ * and disassoc events (WLC_E_DISASSOC_IND, WLC_E_DEAUTH_IND)
+ * The VIEs sits after reason code in the body. Reason code is
+ * 2 bytes long.
+ */
+ WL_DBG(("recv disconnect ies ie_len = %d\n", ie_len));
+ if (event == WLC_E_DISASSOC_IND || event == WLC_E_DEAUTH_IND) {
+ if ((datalen > DOT11_DISCONNECT_RC) &&
+ datalen < (VNDR_IE_MAX_LEN + DOT11_DISCONNECT_RC) &&
+ data) {
+ ie_ptr = (uchar*)data + DOT11_DISCONNECT_RC;
+ ie_len = datalen - DOT11_DISCONNECT_RC;
+ }
+ } else if (event == WLC_E_LINK &&
+ ntoh32(e->reason) == WLC_E_LINK_BCN_LOSS) {
+#ifdef WL_ANALYTICS
+ /*
+ * In case of linkdown, FW sends prb rsp IEs. Disco VIE
+ * are appended with prb rsp ies. Remove prb rsp IES and
+ * send disco vie to upper layer.
+ * Disco VIE has fixed len of 11 octets.
+ * As per SS spec.(2 octet header + 9 octet VIE)
+ */
+ if (datalen < (VNDR_IE_MAX_LEN + DOT11_DISCONNECT_RC) &&
+ datalen >= DOT11_DISCONNECT_RC &&
+ ((err = wl_cfg80211_parse_vndr_ies(
+ (const u8 *)data, datalen,
+ &disco_vndr_ie)) == BCME_OK)) {
+ for (i = 0; i < disco_vndr_ie.count; i++) {
+ vndrie_info = &disco_vndr_ie.ie_info[i];
+ if ((vndrie_info->vndrie.id ==
+ 0xDD) && (!memcmp(
+ vndrie_info->vndrie.oui,
+ SSE_OUI, DOT11_OUI_LEN)) &&
+ (vndrie_info->vndrie.data[0] ==
+ VENDOR_ENTERPRISE_STA_OUI_TYPE)) {
+ ie_ptr = (u8 *)vndrie_info->ie_ptr;
+ ie_len = vndrie_info->ie_len;
+ }
+ }
+ }
+#endif /* WL_ANALYTICS */
+ }
+
+ CFG80211_DISCONNECTED(ndev, reason, ie_ptr, ie_len,
+ loc_gen, GFP_KERNEL);
+ WL_INFORM_MEM(("[%s] Disconnect event sent to upper layer"
+ "event:%d reason=%d ie_len=%d from " MACDBG "\n",
+ ndev->name, event, ntoh32(e->reason), ie_len,
+ MAC2STRDBG((const u8*)(&e->addr))));
+
+ /* Wait for status to be cleared to prevent race condition
+ * issues with connect context
+ */
+ wl_cfg80211_disconnect_state_sync(cfg, ndev);
wl_link_down(cfg);
wl_init_prof(cfg, ndev);
}
else if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
+ DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
+ dhd_net2idx(dhdp->info, ndev), 0);
WL_INFORM_MEM(("link down, during connecting\n"));
- /* Issue WLC_DISASSOC to prevent FW roam attempts */
- err = wldev_ioctl_set(ndev, WLC_DISASSOC, NULL, 0);
- if (err < 0) {
- WL_ERR(("CONNECTING state, WLC_DISASSOC error %d\n", err));
- err = 0;
- }
- WL_DBG(("Clear drv CONNECTING status\n"));
- wl_clr_drv_status(cfg, CONNECTING, ndev);
+ /* Issue WLC_DISASSOC to prevent FW roam attempts.
+ * Do not issue WLC_DISASSOC again if the linkdown is
+ * generated due to local disassoc, to avoid connect-disconnect
+ * loop.
+ */
+ if (!((event == WLC_E_LINK) &&
+ (ntoh32(e->reason) == WLC_E_LINK_DISASSOC) &&
+ (ntoh32(e->status) == WLC_E_STATUS_SUCCESS))) {
+ err = wldev_ioctl_set(ndev, WLC_DISASSOC, NULL, 0);
+ if (err < 0) {
+ WL_ERR(("CONNECTING state,"
+ " WLC_DISASSOC error %d\n",
+ err));
+ err = 0;
+ }
#ifdef ESCAN_RESULT_PATCH
- if ((memcmp(connect_req_bssid, broad_bssid, ETHER_ADDR_LEN) == 0) ||
- (memcmp(&e->addr, broad_bssid, ETHER_ADDR_LEN) == 0) ||
- (memcmp(&e->addr, connect_req_bssid, ETHER_ADDR_LEN) == 0))
- /* In case this event comes while associating another AP */
+ if ((memcmp(connect_req_bssid, broad_bssid,
+ ETHER_ADDR_LEN) == 0) ||
+ (memcmp(&e->addr, broad_bssid,
+ ETHER_ADDR_LEN) == 0) ||
+ (memcmp(&e->addr, connect_req_bssid,
+ ETHER_ADDR_LEN) == 0))
+ /* In case this event comes while associating
+ * another AP
+ */
#endif /* ESCAN_RESULT_PATCH */
- wl_bss_connect_done(cfg, ndev, e, data, false);
+ wl_bss_connect_done(cfg, ndev, e, data, false);
+ }
}
wl_clr_drv_status(cfg, DISCONNECTING, ndev);
wl_cfg80211_check_in4way(cfg, ndev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY|WAIT_DISCONNECTED,
/* Dump FW preserve buffer content */
wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
- if (wl_get_drv_status(cfg, DISCONNECTING, ndev) &&
- wl_get_drv_status(cfg, CONNECTING, ndev)) {
- wl_clr_drv_status(cfg, DISCONNECTING, ndev);
- wl_clr_drv_status(cfg, CONNECTING, ndev);
- wl_cfg80211_scan_abort(cfg);
- DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub);
- return err;
- }
/* Clean up any pending scan request */
wl_cfg80211_cancel_scan(cfg);
if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
- WL_INFORM_MEM(("Issuing Dissassoc to prevent FW retries\n"));
- err = wldev_ioctl_set(ndev, WLC_DISASSOC, NULL, 0);
- if (err < 0) {
- WL_ERR(("CONNECTING state, WLC_DISASSOC error %d\n", err));
- err = 0;
+ if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
+ WL_INFORM_MEM(("wl dissassoc\n"));
+ err = wldev_ioctl_set(ndev, WLC_DISASSOC, NULL, 0);
+ if (err < 0) {
+ WL_ERR(("WLC_DISASSOC error %d\n", err));
+ err = 0;
+ }
+ } else {
+ WL_DBG(("connect fail. clear disconnecting bit\n"));
+ wl_clr_drv_status(cfg, DISCONNECTING, ndev);
}
- WL_DBG(("Clear drv CONNECTING status\n"));
wl_bss_connect_done(cfg, ndev, e, data, false);
wl_clr_drv_status(cfg, CONNECTING, ndev);
+ WL_INFORM_MEM(("connect fail reported\n"));
}
} else {
WL_DBG(("%s nothing\n", __FUNCTION__));
}
DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub);
- }
- else {
- printf("wl_notify_connect_status : Invalid %s mode %d event %d status %d\n",
- ndev->name, wl_get_mode_by_netdev(cfg, ndev), ntoh32(e->event_type),
+ } else {
+ WL_MSG(ndev->name, "Invalid mode %d event %d status %d\n",
+ wl_get_mode_by_netdev(cfg, ndev), ntoh32(e->event_type),
ntoh32(e->status));
}
return err;
#ifdef DHD_LOSSLESS_ROAMING
struct wl_security *sec;
#endif // endif
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
WL_DBG(("Enter \n"));
+ BCM_REFERENCE(dhdp);
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
if ((!cfg->disable_roam_event) && (event == WLC_E_BSSID)) {
*/
if (IS_AKM_SUITE_FT(sec)) {
wl_bss_roaming_done(cfg, ndev, e, data);
+ /* Arm pkt logging timer */
+ dhd_dump_mod_pkt_timer(dhdp, PKT_CNT_RSN_ROAM);
}
/* Roam timer is deleted mostly from wl_cfg80211_change_station
* after roaming is finished successfully. We need to delete
wl_del_roam_timeout(cfg);
}
#else
+#if !defined(DHD_NONFT_ROAMING)
wl_bss_roaming_done(cfg, ndev, e, data);
+#endif /* !DHD_NONFT_ROAMING */
#endif /* DHD_LOSSLESS_ROAMING */
} else {
wl_bss_connect_done(cfg, ndev, e, data, true);
#ifdef CUSTOM_EVENT_PM_WAKE
uint32 last_dpm_upd_time = 0; /* ms */
-#define DPM_UPD_LMT_TIME (CUSTOM_EVENT_PM_WAKE + 5) * 1000 * 4 /* ms */
+#define DPM_UPD_LMT_TIME ((CUSTOM_EVENT_PM_WAKE + (5)) * (1000) * (4)) /* ms */
#define DPM_UPD_LMT_RSSI -85 /* dbm */
static s32
}
err = wldev_iovar_getbuf_bsscfg(ndev, "dump",
- "pm", strlen("pm"), pbuf, WLC_IOCTL_MEDLEN, 0, &cfg->ioctl_buf_sync);
+ "pm", strlen("pm"), pbuf, WLC_IOCTL_MEDLEN,
+ 0, &cfg->ioctl_buf_sync);
if (err) {
WL_ERR(("dump ioctl err = %d", err));
cur_dpm_upd_time = OSL_SYSUPTIME();
if (cur_dpm_upd_time - last_dpm_upd_time < DPM_UPD_LMT_TIME) {
scb_val_t scbval;
+ DHD_STATLOG_CTRL(dhd, ST(DISASSOC_INT_START),
+ dhd_net2idx(dhd->info, ndev), 0);
bzero(&scbval, sizeof(scb_val_t));
- err = wldev_ioctl_set(ndev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
+ err = wldev_ioctl_set(ndev, WLC_DISASSOC,
+ &scbval, sizeof(scb_val_t));
if (err < 0) {
- WL_ERR(("%s: Disassoc error %d\n", __FUNCTION__, err));
+ WL_ERR(("Disassoc error %d\n", err));
return err;
}
- WL_ERR(("%s: Force Disassoc due to updated DPM event.\n", __FUNCTION__));
+ WL_ERR(("Force Disassoc due to updated DPM event.\n"));
last_dpm_upd_time = 0;
} else {
WL_ERR(("Attempting roam with reason code : %d\n", reason));
}
+#ifdef CONFIG_SILENT_ROAM
+ if (dhdp->in_suspend && reason == WLC_E_REASON_SILENT_ROAM) {
+ dhdp->sroamed = TRUE;
+ }
+#endif /* CONFIG_SILENT_ROAM */
+
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
#ifdef DBG_PKT_MON
assoc_info.req_len = htod32(assoc_info.req_len);
assoc_info.resp_len = htod32(assoc_info.resp_len);
assoc_info.flags = htod32(assoc_info.flags);
-
- if (assoc_info.req_len > (MAX_REQ_LINE + sizeof(struct dot11_assoc_req) +
- ((assoc_info.flags & WLC_ASSOC_REQ_IS_REASSOC) ? ETHER_ADDR_LEN : 0))) {
- err = BCME_BADLEN;
- goto exit;
- }
- if ((assoc_info.req_len > 0) &&
- (assoc_info.req_len < (sizeof(struct dot11_assoc_req) +
- ((assoc_info.flags & WLC_ASSOC_REQ_IS_REASSOC) ? ETHER_ADDR_LEN : 0)))) {
- err = BCME_BADLEN;
- goto exit;
- }
- if (assoc_info.resp_len > (MAX_REQ_LINE + sizeof(struct dot11_assoc_resp))) {
- err = BCME_BADLEN;
- goto exit;
- }
- if ((assoc_info.resp_len > 0) && (assoc_info.resp_len < sizeof(struct dot11_assoc_resp))) {
- err = BCME_BADLEN;
- goto exit;
- }
-
if (conn_info->req_ie_len) {
conn_info->req_ie_len = 0;
bzero(conn_info->req_ie, sizeof(conn_info->req_ie));
assoc_info.req_len, NULL);
if (unlikely(err)) {
WL_ERR(("could not get assoc req (%d)\n", err));
- goto exit;
+ return err;
}
if (assoc_info.req_len < sizeof(struct dot11_assoc_req)) {
WL_ERR(("req_len %d lessthan %d \n", assoc_info.req_len,
(int)sizeof(struct dot11_assoc_req)));
return BCME_BADLEN;
}
- conn_info->req_ie_len = assoc_info.req_len - sizeof(struct dot11_assoc_req);
+ conn_info->req_ie_len = (uint32)(assoc_info.req_len
+ - sizeof(struct dot11_assoc_req));
if (assoc_info.flags & WLC_ASSOC_REQ_IS_REASSOC) {
conn_info->req_ie_len -= ETHER_ADDR_LEN;
}
- memcpy(conn_info->req_ie, cfg->extra_buf, conn_info->req_ie_len);
+ if (conn_info->req_ie_len <= MAX_REQ_LINE)
+ memcpy(conn_info->req_ie, cfg->extra_buf, conn_info->req_ie_len);
+ else {
+ WL_ERR(("IE size %d above max %d size \n",
+ conn_info->req_ie_len, MAX_REQ_LINE));
+ return err;
+ }
+ } else {
+ conn_info->req_ie_len = 0;
}
if (assoc_info.resp_len) {
assoc_info.resp_len, NULL);
if (unlikely(err)) {
WL_ERR(("could not get assoc resp (%d)\n", err));
- goto exit;
+ return err;
}
if (assoc_info.resp_len < sizeof(struct dot11_assoc_resp)) {
WL_ERR(("resp_len %d is lessthan %d \n", assoc_info.resp_len,
(int)sizeof(struct dot11_assoc_resp)));
+ return BCME_BADLEN;
+ }
+ conn_info->resp_ie_len = assoc_info.resp_len -
+ (uint32)sizeof(struct dot11_assoc_resp);
+ if (conn_info->resp_ie_len <= MAX_REQ_LINE) {
+ memcpy(conn_info->resp_ie, cfg->extra_buf, conn_info->resp_ie_len);
+ } else {
+ WL_ERR(("IE size %d above max %d size \n",
+ conn_info->resp_ie_len, MAX_REQ_LINE));
+ return err;
}
- conn_info->resp_ie_len = assoc_info.resp_len - sizeof(struct dot11_assoc_resp);
- memcpy(conn_info->resp_ie, cfg->extra_buf, conn_info->resp_ie_len);
+
#ifdef QOS_MAP_SET
/* find qos map set ie */
if ((qos_map_ie = bcm_parse_tlvs(conn_info->resp_ie, conn_info->resp_ie_len,
wl_set_up_table(cfg->up_table, qos_map_ie);
} else {
MFREE(cfg->osh, cfg->up_table, UP_TABLE_MAX);
- cfg->up_table = NULL;
}
#endif /* QOS_MAP_SET */
+ } else {
+ conn_info->resp_ie_len = 0;
}
+ WL_DBG(("req len (%d) resp len (%d)\n", conn_info->req_ie_len,
+ conn_info->resp_ie_len));
-exit:
- if (err) {
- WL_ERR(("err:%d, assoc_info-req:%u,resp:%u conn_info-req:%u,resp:%u\n",
- err, assoc_info.req_len, assoc_info.resp_len,
- conn_info->req_ie_len, conn_info->resp_ie_len));
- }
return err;
}
return 0;
}
-static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam)
+static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ bool update_ssid)
{
struct cfg80211_bss *bss;
wl_bss_info_t *bi;
err = -EIO;
goto update_bss_info_out;
}
- err = wl_inform_single_bss(cfg, bi, roam);
+ err = wl_inform_single_bss(cfg, bi, update_ssid);
if (unlikely(err))
goto update_bss_info_out;
struct wl_fils_info *fils_info = wl_to_fils_info(cfg);
struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
#endif // endif
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+ dhd_if_t *ifp = NULL;
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
#ifdef WLFBT
uint32 data_len = 0;
if (data)
data_len = ntoh32(e->datalen);
#endif /* WLFBT */
+ BCM_REFERENCE(dhdp);
curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
channel = (u32 *)wl_read_prof(cfg, ndev, WL_PROF_CHAN);
#ifdef BCM4359_CHIP
* Also clear timer roam_timeout.
* Only used on BCM4359 devices.
*/
- memset(&ci, 0, sizeof(ci));
+ bzero(&ci, sizeof(ci));
if ((wldev_ioctl_get(ndev, WLC_GET_CHANNEL, &ci,
sizeof(ci))) < 0) {
WL_ERR(("Failed to get current channel !"));
#endif /* BCM4359 CHIP */
if ((err = wl_get_assoc_ies(cfg, ndev)) != BCME_OK) {
+ DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
+ dhd_net2idx(dhdp->info, ndev), WLAN_REASON_DEAUTH_LEAVING);
WL_ERR(("Fetching Assoc IEs failed, Skipping roamed event to"
" upper layer\n"));
/* To make sure disconnect, and fw sync, explictly send dissassoc
* for BSSID 00:00:00:00:00:00 issue
*/
- memset(&scbval, 0, sizeof(scb_val_t));
+ bzero(&scbval, sizeof(scb_val_t));
scbval.val = WLAN_REASON_DEAUTH_LEAVING;
memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
scbval.val = htod32(scbval.val);
WL_ERR(("CUSTOM_LONG_RETRY_LIMIT set fail!\n"));
}
#endif /* CUSTOM_LONG_RETRY_LIMIT */
+ DHD_STATLOG_CTRL(dhdp, ST(REASSOC_INFORM),
+ dhd_net2idx(dhdp->info, ndev), 0);
WL_ERR(("Report roam event to upper layer. " MACDBG " (ch:%d)\n",
MAC2STRDBG((const u8*)(&e->addr)), *channel));
wl_cfg80211_check_in4way(cfg, ndev, 0, WL_EXT_STATUS_CONNECTED, NULL);
memcpy(&cfg->last_roamed_addr, &e->addr, ETHER_ADDR_LEN);
wl_set_drv_status(cfg, CONNECTED, ndev);
+#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
+ ifp = dhd_get_ifp(dhdp, e->ifidx);
+ if (ifp) {
+ ifp->post_roam_evt = TRUE;
+ }
+#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
+
return err;
fail:
return ret;
}
-static void wl_notify_scan_done(struct bcm_cfg80211 *cfg, bool aborted)
-{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
- struct cfg80211_scan_info info;
-
- memset(&info, 0, sizeof(struct cfg80211_scan_info));
- info.aborted = aborted;
- cfg80211_scan_done(cfg->scan_request, &info);
-#else
- cfg80211_scan_done(cfg->scan_request, aborted);
-#endif // endif
-}
-
#ifdef WL_FILS
static s32
wl_get_fils_connect_params(struct bcm_cfg80211 *cfg, struct net_device *ndev)
fils_info->fils_erp_next_seq_num = *(const u16 *)data;
break;
case WL_FILS_XTLV_KEK:
- memcpy(fils_info->fils_kek, data, len);
+ if (memcpy_s(fils_info->fils_kek,
+ WL_MAX_FILS_KEY_LEN, data, len) < 0) {
+ err = BCME_BADARG;
+ goto exit;
+ }
fils_info->fils_kek_len = len;
break;
case WL_FILS_XTLV_PMK:
- memcpy(fils_info->fils_pmk, data, len);
+ if (memcpy_s(fils_info->fils_pmk,
+ WL_MAX_FILS_KEY_LEN, data, len) < 0) {
+ err = BCME_BADARG;
+ goto exit;
+ }
fils_info->fils_pmk_len = len;
break;
case WL_FILS_XTLV_PMKID:
- memcpy(fils_info->fils_pmkid, data, len);
+ if (memcpy_s(fils_info->fils_pmkid,
+ WL_MAX_FILS_KEY_LEN, data, len) < 0) {
+ err = BCME_BADARG;
+ goto exit;
+ }
break;
default:
WL_ERR(("%s: wrong XTLV code\n", __func__));
#endif /* WL_FILS */
u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
u32 event_type = ntoh32(e->event_type);
-#ifdef WL_IRQSET
- int delta_time = 0;
-#endif /* WL_IRQSET */
struct cfg80211_bss *bss = NULL;
dhd_pub_t *dhdp;
dhdp = (dhd_pub_t *)(cfg->pub);
WL_ERR(("sec is NULL\n"));
return -ENODEV;
}
-
WL_DBG((" enter\n"));
#ifdef ESCAN_RESULT_PATCH
if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
if (memcmp(curbssid, connect_req_bssid, ETHER_ADDR_LEN) == 0) {
- WL_INFORM_MEM((" Connected event of connected "
- "device e=%d s=%d, ignore it\n",
+ WL_INFORM_MEM((" Connected event of connected device "
+ "e=%d s=%d, ignore it\n",
ntoh32(e->event_type), ntoh32(e->status)));
return err;
}
}
#else
if (cfg->scan_request) {
- wl_notify_escan_complete(cfg, ndev, true, true);
+ wl_cfg80211_cancel_scan(cfg);
}
#endif /* ESCAN_RESULT_PATCH */
if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
wl_cfg80211_scan_abort(cfg);
- wl_clr_drv_status(cfg, CONNECTING, ndev);
if (completed) {
wl_get_assoc_ies(cfg, ndev);
wl_update_prof(cfg, ndev, NULL, (const void *)(e->addr.octet),
WL_PROF_BSSID);
curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
- wl_update_bss_info(cfg, ndev, false);
+ /*
+ * CFG layer relies on cached IEs (from probe/beacon) to fetch matching bss.
+			 * For cases where there is no match available,
+ * need to update the cache based on bss info from fw.
+ */
+ wl_update_bss_info(cfg, ndev, true);
wl_update_pmklist(ndev, cfg->pmk_list, err);
wl_set_drv_status(cfg, CONNECTED, ndev);
#if defined(ROAM_ENABLE) && defined(ROAM_AP_ENV_DETECTION)
WL_ERR(("CUSTOM_LONG_RETRY_LIMIT set fail!\n"));
}
#endif /* CUSTOM_LONG_RETRY_LIMIT */
-#ifdef WL_IRQSET
- dhd_irq_set_affinity(dhdp);
- delta_time = IRQ_SET_DURATION - local_clock() / USEC_PER_SEC;
- if (delta_time > 0) {
- schedule_delayed_work(&cfg->irq_set_work,
- msecs_to_jiffies((const unsigned int)delta_time));
- }
-#endif /* WL_IRQSET */
- memset(&cfg->last_roamed_addr, 0, ETHER_ADDR_LEN);
+ bzero(&cfg->last_roamed_addr, ETHER_ADDR_LEN);
}
+ wl_clr_drv_status(cfg, CONNECTING, ndev);
if (completed && (wl_cfg80211_verify_bss(cfg, ndev, &bss) != true)) {
/* If bss entry is not available in the cfg80211 bss cache
resp_params.req_ie_len = conn_info->req_ie_len;
resp_params.resp_ie = conn_info->resp_ie;
resp_params.resp_ie_len = conn_info->resp_ie_len;
-#ifdef WL_FILS_ROAM_OFFLD
+#if defined(WL_FILS_ROAM_OFFLD) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0))
resp_params.fils.kek = fils_info->fils_kek;
resp_params.fils.kek_len = fils_info->fils_kek_len;
resp_params.fils.update_erp_next_seq_num = true;
"connection succeeded\n");
wl_cfg80211_check_in4way(cfg, ndev, 0, WL_EXT_STATUS_CONNECTED, NULL);
} else {
- WL_ERR(("[%s] Report connect result - connection failed\n", ndev->name));
+ WL_MSG(ndev->name, "Report connect result - connection failed\n");
wl_cfg80211_check_in4way(cfg, ndev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY|WAIT_DISCONNECTED,
WL_EXT_STATUS_DISCONNECTED, NULL);
}
}
#endif /* BT_WIFI_HANDOVER */
-#ifdef PNO_SUPPORT
static s32
-wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data)
+wl_frame_get_mgmt(struct bcm_cfg80211 *cfg, u16 fc,
+ const struct ether_addr *da, const struct ether_addr *sa,
+ const struct ether_addr *bssid, u8 **pheader, u32 *body_len, u8 *pbody)
{
- struct net_device *ndev = NULL;
-#ifdef GSCAN_SUPPORT
- void *ptr;
- int send_evt_bytes = 0;
- u32 event = be32_to_cpu(e->event_type);
- struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
-#endif /* GSCAN_SUPPORT */
-
- WL_INFORM_MEM((">>> PNO Event\n"));
+ struct dot11_management_header *hdr;
+ u32 totlen = 0;
+ s32 err = 0;
+ u8 *offset;
+ u32 prebody_len = *body_len;
+ switch (fc) {
+ case FC_ASSOC_REQ:
+ /* capability , listen interval */
+ totlen = DOT11_ASSOC_REQ_FIXED_LEN;
+ *body_len += DOT11_ASSOC_REQ_FIXED_LEN;
+ break;
- if (!data) {
- WL_ERR(("Data received is NULL!\n"));
- return 0;
+ case FC_REASSOC_REQ:
+ /* capability, listen inteval, ap address */
+ totlen = DOT11_REASSOC_REQ_FIXED_LEN;
+ *body_len += DOT11_REASSOC_REQ_FIXED_LEN;
+ break;
}
-
- ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
-#ifdef GSCAN_SUPPORT
- ptr = dhd_dev_process_epno_result(ndev, data, event, &send_evt_bytes);
- if (ptr) {
- wl_cfgvendor_send_async_event(wiphy, ndev,
- GOOGLE_SCAN_EPNO_EVENT, ptr, send_evt_bytes);
- MFREE(cfg->osh, ptr, send_evt_bytes);
+ totlen += DOT11_MGMT_HDR_LEN + prebody_len;
+ *pheader = (u8 *)MALLOCZ(cfg->osh, totlen);
+ if (*pheader == NULL) {
+ WL_ERR(("memory alloc failed \n"));
+ return -ENOMEM;
}
- if (!dhd_dev_is_legacy_pno_enabled(ndev))
- return 0;
-#endif /* GSCAN_SUPPORT */
-
-#ifndef WL_SCHED_SCAN
- mutex_lock(&cfg->usr_sync);
- /* TODO: Use cfg80211_sched_scan_results(wiphy); */
- CFG80211_DISCONNECTED(ndev, 0, NULL, 0, false, GFP_KERNEL);
- mutex_unlock(&cfg->usr_sync);
-#else
- /* If cfg80211 scheduled scan is supported, report the pno results via sched
- * scan results
- */
- wl_notify_sched_scan_results(cfg, ndev, e, data);
-#endif /* WL_SCHED_SCAN */
- return 0;
-}
-#endif /* PNO_SUPPORT */
-
-#ifdef GSCAN_SUPPORT
-static s32
-wl_notify_gscan_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data)
-{
- s32 err = 0;
- u32 event = be32_to_cpu(e->event_type);
- void *ptr = NULL;
- int send_evt_bytes = 0;
- int event_type;
- struct net_device *ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
- u32 len = ntoh32(e->datalen);
- u32 buf_len = 0;
-
- switch (event) {
- case WLC_E_PFN_BEST_BATCHING:
- err = dhd_dev_retrieve_batch_scan(ndev);
- if (err < 0) {
- WL_ERR(("Batch retrieval already in progress %d\n", err));
- } else {
- event_type = WIFI_SCAN_THRESHOLD_NUM_SCANS;
- if (data && len) {
- event_type = *((int *)data);
- }
- wl_cfgvendor_send_async_event(wiphy, ndev,
- GOOGLE_GSCAN_BATCH_SCAN_EVENT,
- &event_type, sizeof(int));
- }
- break;
- case WLC_E_PFN_SCAN_COMPLETE:
- event_type = WIFI_SCAN_COMPLETE;
- wl_cfgvendor_send_async_event(wiphy, ndev,
- GOOGLE_SCAN_COMPLETE_EVENT,
- &event_type, sizeof(int));
- break;
- case WLC_E_PFN_BSSID_NET_FOUND:
- ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes,
- HOTLIST_FOUND, &buf_len);
- if (ptr) {
- wl_cfgvendor_send_hotlist_event(wiphy, ndev,
- ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT);
- dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_FOUND);
- } else {
- err = -ENOMEM;
- }
- break;
- case WLC_E_PFN_BSSID_NET_LOST:
- /* WLC_E_PFN_BSSID_NET_LOST is conflict shared with WLC_E_PFN_SCAN_ALLGONE
- * We currently do not use WLC_E_PFN_SCAN_ALLGONE, so if we get it, ignore
- */
- if (len) {
- ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes,
- HOTLIST_LOST, &buf_len);
- if (ptr) {
- wl_cfgvendor_send_hotlist_event(wiphy, ndev,
- ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_LOST_EVENT);
- dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_LOST);
- MFREE(cfg->osh, ptr, buf_len);
- } else {
- err = -ENOMEM;
- }
- } else {
- err = -EINVAL;
- }
- break;
- case WLC_E_PFN_GSCAN_FULL_RESULT:
- ptr = dhd_dev_process_full_gscan_result(ndev, data, len, &send_evt_bytes);
- if (ptr) {
- wl_cfgvendor_send_async_event(wiphy, ndev,
- GOOGLE_SCAN_FULL_RESULTS_EVENT, ptr, send_evt_bytes);
- MFREE(cfg->osh, ptr, send_evt_bytes);
- } else {
- err = -ENOMEM;
- }
- break;
- case WLC_E_PFN_SSID_EXT:
- ptr = dhd_dev_process_epno_result(ndev, data, event, &send_evt_bytes);
- if (ptr) {
- wl_cfgvendor_send_async_event(wiphy, ndev,
- GOOGLE_SCAN_EPNO_EVENT, ptr, send_evt_bytes);
- MFREE(cfg->osh, ptr, send_evt_bytes);
- } else {
- err = -ENOMEM;
- }
- break;
- default:
- WL_ERR(("Unknown event %d\n", event));
- break;
- }
- return err;
-}
-#endif /* GSCAN_SUPPORT */
-
-static s32
-wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data)
-{
- struct channel_info channel_inform;
- struct wl_scan_results *bss_list;
- struct net_device *ndev = NULL;
- u32 len = WL_SCAN_BUF_MAX;
- s32 err = 0;
- unsigned long flags;
-
- WL_DBG(("Enter \n"));
- if (!wl_get_drv_status(cfg, SCANNING, ndev)) {
- WL_DBG(("scan is not ready \n"));
- return err;
- }
- ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
-
- mutex_lock(&cfg->usr_sync);
- wl_clr_drv_status(cfg, SCANNING, ndev);
- memset(&channel_inform, 0, sizeof(channel_inform));
- err = wldev_ioctl_get(ndev, WLC_GET_CHANNEL, &channel_inform,
- sizeof(channel_inform));
- if (unlikely(err)) {
- WL_ERR(("scan busy (%d)\n", err));
- goto scan_done_out;
- }
- channel_inform.scan_channel = dtoh32(channel_inform.scan_channel);
- if (unlikely(channel_inform.scan_channel)) {
-
- WL_DBG(("channel_inform.scan_channel (%d)\n",
- channel_inform.scan_channel));
- }
- cfg->bss_list = cfg->scan_results;
- bss_list = cfg->bss_list;
- memset(bss_list, 0, len);
- bss_list->buflen = htod32(len);
- err = wldev_ioctl_get(ndev, WLC_SCAN_RESULTS, bss_list, len);
- if (unlikely(err) && unlikely(!cfg->scan_suppressed)) {
- WL_ERR(("%s Scan_results error (%d)\n", ndev->name, err));
- err = -EINVAL;
- goto scan_done_out;
- }
- bss_list->buflen = dtoh32(bss_list->buflen);
- bss_list->version = dtoh32(bss_list->version);
- bss_list->count = dtoh32(bss_list->count);
-
- err = wl_inform_bss(cfg);
-
-scan_done_out:
- del_timer_sync(&cfg->scan_timeout);
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
- if (cfg->scan_request) {
- wl_notify_scan_done(cfg, false);
- cfg->scan_request = NULL;
- }
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
- WL_DBG(("cfg80211_scan_done\n"));
- mutex_unlock(&cfg->usr_sync);
- return err;
-}
-
-static s32
-wl_frame_get_mgmt(struct bcm_cfg80211 *cfg, u16 fc,
- const struct ether_addr *da, const struct ether_addr *sa,
- const struct ether_addr *bssid, u8 **pheader, u32 *body_len, u8 *pbody)
-{
- struct dot11_management_header *hdr;
- u32 totlen = 0;
- s32 err = 0;
- u8 *offset;
- u32 prebody_len = *body_len;
- switch (fc) {
- case FC_ASSOC_REQ:
- /* capability , listen interval */
- totlen = DOT11_ASSOC_REQ_FIXED_LEN;
- *body_len += DOT11_ASSOC_REQ_FIXED_LEN;
- break;
-
- case FC_REASSOC_REQ:
- /* capability, listen inteval, ap address */
- totlen = DOT11_REASSOC_REQ_FIXED_LEN;
- *body_len += DOT11_REASSOC_REQ_FIXED_LEN;
- break;
- }
- totlen += DOT11_MGMT_HDR_LEN + prebody_len;
- *pheader = (u8 *)MALLOCZ(cfg->osh, totlen);
- if (*pheader == NULL) {
- WL_ERR(("memory alloc failed \n"));
- return -ENOMEM;
- }
- hdr = (struct dot11_management_header *) (*pheader);
- hdr->fc = htol16(fc);
- hdr->durid = 0;
- hdr->seq = 0;
- offset = (u8*)(hdr + 1) + (totlen - DOT11_MGMT_HDR_LEN - prebody_len);
- bcopy((const char*)da, (u8*)&hdr->da, ETHER_ADDR_LEN);
- bcopy((const char*)sa, (u8*)&hdr->sa, ETHER_ADDR_LEN);
- bcopy((const char*)bssid, (u8*)&hdr->bssid, ETHER_ADDR_LEN);
- if ((pbody != NULL) && prebody_len)
- bcopy((const char*)pbody, offset, prebody_len);
- *body_len = totlen;
- return err;
-}
+ hdr = (struct dot11_management_header *) (*pheader);
+ hdr->fc = htol16(fc);
+ hdr->durid = 0;
+ hdr->seq = 0;
+ offset = (u8*)(hdr + 1) + (totlen - DOT11_MGMT_HDR_LEN - prebody_len);
+ bcopy((const char*)da, (u8*)&hdr->da, ETHER_ADDR_LEN);
+ bcopy((const char*)sa, (u8*)&hdr->sa, ETHER_ADDR_LEN);
+ bcopy((const char*)bssid, (u8*)&hdr->bssid, ETHER_ADDR_LEN);
+ if ((pbody != NULL) && prebody_len)
+ bcopy((const char*)pbody, offset, prebody_len);
+ *body_len = totlen;
+ return err;
+}
#ifdef WL_CFG80211_GON_COLLISION
static void
WL_ERR(("wrong datalen:%d\n", ntoh32(e->datalen)));
return -EINVAL;
}
- mgmt_frame_len = ntoh32(e->datalen) - sizeof(wl_event_rx_frame_data_t);
+ mgmt_frame_len = ntoh32(e->datalen) - (uint32)sizeof(wl_event_rx_frame_data_t);
event = ntoh32(e->event_type);
bsscfgidx = e->bsscfgidx;
rxframe = (wl_event_rx_frame_data_t *)data;
return -EINVAL;
}
channel = (ntoh16(rxframe->channel) & WL_CHANSPEC_CHAN_MASK);
- memset(&bssid, 0, ETHER_ADDR_LEN);
+ bzero(&bssid, ETHER_ADDR_LEN);
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
if ((ndev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) &&
(event == WLC_E_PROBREQ_MSG)) {
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic push")
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#endif // endif
struct net_info *iter, *next;
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
if (iter->ndev && iter->wdev &&
iter->wdev->iftype == NL80211_IFTYPE_AP) {
ndev = iter->ndev;
break;
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic pop")
-#endif // endif
}
if (channel <= CH_MAX_2G_CHANNEL)
#ifdef TDLS_MSG_ONLY_WFD
if (!dhdp->tdls_mode) {
WL_DBG((" TDLS Frame filtered \n"));
- return 0;
+ goto exit;
}
#else
if (mgmt_frame[DOT11_MGMT_HDR_LEN + 1] == TDLS_ACTION_SETUP_RESP) {
wl_set_up_table(cfg->up_table, qos_map_ie);
} else {
MFREE(cfg->osh, cfg->up_table, UP_TABLE_MAX);
- cfg->up_table = NULL;
}
#endif /* QOS_MAP_SET */
} else {
if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM)) {
if (cfg->next_af_subtype == act_frm->subtype) {
- WL_DBG(("Abort wait for next frame, Recieved frame (%d) "
- "Next action frame(%d)\n",
- act_frm->subtype, cfg->next_af_subtype));
+ WL_DBG(("We got a right next frame!(%d)\n",
+ act_frm->subtype));
wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
if (cfg->next_af_subtype == P2P_PAF_GON_CONF) {
return err;
}
-#ifdef WL_SCHED_SCAN
-/* If target scan is not reliable, set the below define to "1" to do a
- * full escan
- */
-#define FULL_ESCAN_ON_PFN_NET_FOUND 0
-static s32
-wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- const wl_event_msg_t *e, void *data)
-{
- wl_pfn_net_info_v1_t *netinfo, *pnetinfo;
- wl_pfn_net_info_v2_t *netinfo_v2, *pnetinfo_v2;
- struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- int err = 0;
- struct cfg80211_scan_request *request = NULL;
- struct cfg80211_ssid ssid[MAX_PFN_LIST_COUNT];
- struct ieee80211_channel *channel = NULL;
- int channel_req = 0;
- int band = 0;
- wl_pfn_scanresults_v1_t *pfn_result_v1 = (wl_pfn_scanresults_v1_t *)data;
- wl_pfn_scanresults_v2_t *pfn_result_v2 = (wl_pfn_scanresults_v2_t *)data;
- int n_pfn_results = 0;
- log_conn_event_t *event_data = NULL;
- tlv_log *tlv_data = NULL;
- u32 alloc_len, tlv_len;
- u32 payload_len;
-
- WL_DBG(("Enter\n"));
-
- /* These static asserts guarantee v1/v2 net_info and subnet_info are compatible
- * in size and SSID offset, allowing v1 to be used below except for the results
- * fields themselves (status, count, offset to netinfo).
- */
- STATIC_ASSERT(sizeof(wl_pfn_net_info_v1_t) == sizeof(wl_pfn_net_info_v2_t));
- STATIC_ASSERT(sizeof(wl_pfn_lnet_info_v1_t) == sizeof(wl_pfn_lnet_info_v2_t));
- STATIC_ASSERT(sizeof(wl_pfn_subnet_info_v1_t) == sizeof(wl_pfn_subnet_info_v2_t));
- STATIC_ASSERT(OFFSETOF(wl_pfn_subnet_info_v1_t, SSID) ==
- OFFSETOF(wl_pfn_subnet_info_v2_t, u.SSID));
-
- /* Extract the version-specific items */
- if (pfn_result_v1->version == PFN_SCANRESULT_VERSION_V1) {
- n_pfn_results = pfn_result_v1->count;
- pnetinfo = pfn_result_v1->netinfo;
- WL_INFORM_MEM(("PFN NET FOUND event. count:%d \n", n_pfn_results));
-
- if (n_pfn_results > 0) {
- int i;
-
- if (n_pfn_results > MAX_PFN_LIST_COUNT)
- n_pfn_results = MAX_PFN_LIST_COUNT;
-
- memset(&ssid, 0x00, sizeof(ssid));
-
- request = (struct cfg80211_scan_request *)MALLOCZ(cfg->osh,
- sizeof(*request) + sizeof(*request->channels) * n_pfn_results);
- channel = (struct ieee80211_channel *)MALLOCZ(cfg->osh,
- (sizeof(struct ieee80211_channel) * n_pfn_results));
- if (!request || !channel) {
- WL_ERR(("No memory"));
- err = -ENOMEM;
- goto out_err;
- }
-
- request->wiphy = wiphy;
-
- if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
- alloc_len = sizeof(log_conn_event_t) + DOT11_MAX_SSID_LEN +
- sizeof(uint16) + sizeof(int16);
- event_data = (log_conn_event_t *)MALLOC(cfg->osh, alloc_len);
- if (!event_data) {
- WL_ERR(("%s: failed to allocate the log_conn_event_t with "
- "length(%d)\n", __func__, alloc_len));
- goto out_err;
- }
- tlv_len = 3 * sizeof(tlv_log);
- event_data->tlvs = (tlv_log *)MALLOC(cfg->osh, tlv_len);
- if (!event_data->tlvs) {
- WL_ERR(("%s: failed to allocate the tlv_log with "
- "length(%d)\n", __func__, tlv_len));
- goto out_err;
- }
- }
-
- for (i = 0; i < n_pfn_results; i++) {
- netinfo = &pnetinfo[i];
- if (!netinfo) {
- WL_ERR(("Invalid netinfo ptr. index:%d", i));
- err = -EINVAL;
- goto out_err;
- }
- WL_PNO((">>> SSID:%s Channel:%d \n",
- netinfo->pfnsubnet.SSID, netinfo->pfnsubnet.channel));
- /* PFN result doesn't have all the info which are required by
- * the supplicant. (For e.g IEs) Do a target Escan so that
- * sched scan results are reported via wl_inform_single_bss in
- * the required format. Escan does require the scan request in
- * the form of cfg80211_scan_request. For timebeing, create
- * cfg80211_scan_request one out of the received PNO event.
- */
-
- ssid[i].ssid_len = MIN(DOT11_MAX_SSID_LEN,
- netinfo->pfnsubnet.SSID_len);
- memcpy(ssid[i].ssid, netinfo->pfnsubnet.SSID,
- ssid[i].ssid_len);
- request->n_ssids++;
-
- channel_req = netinfo->pfnsubnet.channel;
- band = (channel_req <= CH_MAX_2G_CHANNEL) ? NL80211_BAND_2GHZ
- : NL80211_BAND_5GHZ;
- channel[i].center_freq =
- ieee80211_channel_to_frequency(channel_req, band);
- channel[i].band = band;
- channel[i].flags |= IEEE80211_CHAN_NO_HT40;
- request->channels[i] = &channel[i];
- request->n_channels++;
-
- if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
- payload_len = sizeof(log_conn_event_t);
- event_data->event = WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND;
- tlv_data = event_data->tlvs;
-
- /* ssid */
- tlv_data->tag = WIFI_TAG_SSID;
- tlv_data->len = netinfo->pfnsubnet.SSID_len;
- memcpy(tlv_data->value, ssid[i].ssid, ssid[i].ssid_len);
- payload_len += TLV_LOG_SIZE(tlv_data);
- tlv_data = TLV_LOG_NEXT(tlv_data);
-
- /* channel */
- tlv_data->tag = WIFI_TAG_CHANNEL;
- tlv_data->len = sizeof(uint16);
- memcpy(tlv_data->value, &channel_req, sizeof(uint16));
- payload_len += TLV_LOG_SIZE(tlv_data);
- tlv_data = TLV_LOG_NEXT(tlv_data);
-
- /* rssi */
- tlv_data->tag = WIFI_TAG_RSSI;
- tlv_data->len = sizeof(int16);
- memcpy(tlv_data->value, &netinfo->RSSI, sizeof(int16));
- payload_len += TLV_LOG_SIZE(tlv_data);
- tlv_data = TLV_LOG_NEXT(tlv_data);
-
- dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
- &event_data->event, payload_len);
- }
- }
-
- /* assign parsed ssid array */
- if (request->n_ssids)
- request->ssids = &ssid[0];
-
- if (wl_get_drv_status_all(cfg, SCANNING)) {
- /* Abort any on-going scan */
- wl_notify_escan_complete(cfg, ndev, true, true);
- }
-
- if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
- WL_PNO((">>> P2P discovery was ON. Disabling it\n"));
- err = wl_cfgp2p_discover_enable_search(cfg, false);
- if (unlikely(err)) {
- wl_clr_drv_status(cfg, SCANNING, ndev);
- goto out_err;
- }
- p2p_scan(cfg) = false;
- }
- wl_set_drv_status(cfg, SCANNING, ndev);
-#if FULL_ESCAN_ON_PFN_NET_FOUND
- WL_PNO((">>> Doing Full ESCAN on PNO event\n"));
- err = wl_do_escan(cfg, wiphy, ndev, NULL);
-#else
- WL_PNO((">>> Doing targeted ESCAN on PNO event\n"));
- err = wl_do_escan(cfg, wiphy, ndev, request);
-#endif // endif
- if (err) {
- wl_clr_drv_status(cfg, SCANNING, ndev);
- goto out_err;
- }
- DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED);
- cfg->sched_scan_running = TRUE;
- }
- else {
- WL_ERR(("FALSE PNO Event. (pfn_count == 0) \n"));
- }
-
- } else if (pfn_result_v2->version == PFN_SCANRESULT_VERSION_V2) {
- n_pfn_results = pfn_result_v2->count;
- pnetinfo_v2 = (wl_pfn_net_info_v2_t *)pfn_result_v2->netinfo;
-
- if (e->event_type == WLC_E_PFN_NET_LOST) {
- WL_PNO(("Do Nothing %d\n", e->event_type));
- return 0;
- }
-
- WL_INFORM_MEM(("PFN NET FOUND event. count:%d \n", n_pfn_results));
-
- if (n_pfn_results > 0) {
- int i;
-
- if (n_pfn_results > MAX_PFN_LIST_COUNT)
- n_pfn_results = MAX_PFN_LIST_COUNT;
-
- memset(&ssid, 0x00, sizeof(ssid));
-
- request = (struct cfg80211_scan_request *)MALLOCZ(cfg->osh,
- sizeof(*request) + sizeof(*request->channels) * n_pfn_results);
- channel = (struct ieee80211_channel *)MALLOCZ(cfg->osh,
- (sizeof(struct ieee80211_channel) * n_pfn_results));
- if (!request || !channel) {
- WL_ERR(("No memory"));
- err = -ENOMEM;
- goto out_err;
- }
-
- request->wiphy = wiphy;
-
- if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
- alloc_len = sizeof(log_conn_event_t) + DOT11_MAX_SSID_LEN +
- sizeof(uint16) + sizeof(int16);
- event_data = (log_conn_event_t *)MALLOC(cfg->osh, alloc_len);
- if (!event_data) {
- WL_ERR(("%s: failed to allocate the log_conn_event_t with "
- "length(%d)\n", __func__, alloc_len));
- goto out_err;
- }
- tlv_len = 3 * sizeof(tlv_log);
- event_data->tlvs = (tlv_log *)MALLOC(cfg->osh, tlv_len);
- if (!event_data->tlvs) {
- WL_ERR(("%s: failed to allocate the tlv_log with "
- "length(%d)\n", __func__, tlv_len));
- goto out_err;
- }
- }
-
- for (i = 0; i < n_pfn_results; i++) {
- netinfo_v2 = &pnetinfo_v2[i];
- if (!netinfo_v2) {
- WL_ERR(("Invalid netinfo ptr. index:%d", i));
- err = -EINVAL;
- goto out_err;
- }
- WL_PNO((">>> SSID:%s Channel:%d \n",
- netinfo_v2->pfnsubnet.u.SSID,
- netinfo_v2->pfnsubnet.channel));
- /* PFN result doesn't have all the info which are required by the
- * supplicant. (For e.g IEs) Do a target Escan so that sched scan
- * results are reported via wl_inform_single_bss in the required
- * format. Escan does require the scan request in the form of
- * cfg80211_scan_request. For timebeing, create
- * cfg80211_scan_request one out of the received PNO event.
- */
- ssid[i].ssid_len = MIN(DOT11_MAX_SSID_LEN,
- netinfo_v2->pfnsubnet.SSID_len);
- memcpy(ssid[i].ssid, netinfo_v2->pfnsubnet.u.SSID,
- ssid[i].ssid_len);
- request->n_ssids++;
-
- channel_req = netinfo_v2->pfnsubnet.channel;
- band = (channel_req <= CH_MAX_2G_CHANNEL) ? NL80211_BAND_2GHZ
- : NL80211_BAND_5GHZ;
- channel[i].center_freq =
- ieee80211_channel_to_frequency(channel_req, band);
- channel[i].band = band;
- channel[i].flags |= IEEE80211_CHAN_NO_HT40;
- request->channels[i] = &channel[i];
- request->n_channels++;
-
- if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
- payload_len = sizeof(log_conn_event_t);
- event_data->event = WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND;
- tlv_data = event_data->tlvs;
-
- /* ssid */
- tlv_data->tag = WIFI_TAG_SSID;
- tlv_data->len = netinfo_v2->pfnsubnet.SSID_len;
- memcpy(tlv_data->value, ssid[i].ssid, ssid[i].ssid_len);
- payload_len += TLV_LOG_SIZE(tlv_data);
- tlv_data = TLV_LOG_NEXT(tlv_data);
-
- /* channel */
- tlv_data->tag = WIFI_TAG_CHANNEL;
- tlv_data->len = sizeof(uint16);
- memcpy(tlv_data->value, &channel_req, sizeof(uint16));
- payload_len += TLV_LOG_SIZE(tlv_data);
- tlv_data = TLV_LOG_NEXT(tlv_data);
-
- /* rssi */
- tlv_data->tag = WIFI_TAG_RSSI;
- tlv_data->len = sizeof(int16);
- memcpy(tlv_data->value, &netinfo_v2->RSSI, sizeof(int16));
- payload_len += TLV_LOG_SIZE(tlv_data);
- tlv_data = TLV_LOG_NEXT(tlv_data);
-
- dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
- &event_data->event, payload_len);
- }
- }
-
- /* assign parsed ssid array */
- if (request->n_ssids)
- request->ssids = &ssid[0];
-
- if (wl_get_drv_status_all(cfg, SCANNING)) {
- /* Abort any on-going scan */
- wl_notify_escan_complete(cfg, ndev, true, true);
- }
-
- if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
- WL_PNO((">>> P2P discovery was ON. Disabling it\n"));
- err = wl_cfgp2p_discover_enable_search(cfg, false);
- if (unlikely(err)) {
- wl_clr_drv_status(cfg, SCANNING, ndev);
- goto out_err;
- }
- p2p_scan(cfg) = false;
- }
-
- wl_set_drv_status(cfg, SCANNING, ndev);
-#if FULL_ESCAN_ON_PFN_NET_FOUND
- WL_PNO((">>> Doing Full ESCAN on PNO event\n"));
- err = wl_do_escan(cfg, wiphy, ndev, NULL);
-#else
- WL_PNO((">>> Doing targeted ESCAN on PNO event\n"));
- err = wl_do_escan(cfg, wiphy, ndev, request);
-#endif // endif
- if (err) {
- wl_clr_drv_status(cfg, SCANNING, ndev);
- goto out_err;
- }
- DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED);
- cfg->sched_scan_running = TRUE;
- }
- else {
- WL_ERR(("FALSE PNO Event. (pfn_count == 0) \n"));
- }
- } else {
- WL_ERR(("Unsupported version %d, expected %d or %d\n", pfn_result_v1->version,
- PFN_SCANRESULT_VERSION_V1, PFN_SCANRESULT_VERSION_V2));
- return 0;
- }
-out_err:
- if (request) {
- MFREE(cfg->osh, request,
- sizeof(*request) + sizeof(*request->channels) * n_pfn_results);
- }
- if (channel) {
- MFREE(cfg->osh, channel,
- (sizeof(struct ieee80211_channel) * n_pfn_results));
- }
-
- if (event_data) {
- if (event_data->tlvs) {
- MFREE(cfg->osh, event_data->tlvs, tlv_len);
- }
- MFREE(cfg->osh, event_data, alloc_len);
- }
- return err;
-}
-#endif /* WL_SCHED_SCAN */
-
static void wl_init_conf(struct wl_conf *conf)
{
WL_DBG(("Enter \n"));
unsigned long flags;
struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
- memset(profile, 0, sizeof(struct wl_profile));
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+ if (!profile) {
+ WL_ERR(("profile null\n"));
+ return;
+ }
+
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+ bzero(profile, sizeof(struct wl_profile));
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
}
static void wl_init_event_handler(struct bcm_cfg80211 *cfg)
{
- memset(cfg->evt_handler, 0, sizeof(cfg->evt_handler));
+ bzero(cfg->evt_handler, sizeof(cfg->evt_handler));
cfg->evt_handler[WLC_E_SCAN_COMPLETE] = wl_notify_scan_status;
cfg->evt_handler[WLC_E_AUTH] = wl_notify_connect_status;
cfg->evt_handler[WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE] = wl_cfgp2p_action_tx_complete;
cfg->evt_handler[WLC_E_JOIN] = wl_notify_connect_status;
cfg->evt_handler[WLC_E_START] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_AUTH_IND] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_ASSOC_RESP_IE] = wl_notify_connect_status;
#ifdef PNO_SUPPORT
cfg->evt_handler[WLC_E_PFN_NET_FOUND] = wl_notify_pfn_status;
#endif /* PNO_SUPPORT */
cfg->evt_handler[WLC_E_ROAM_PREP] = wl_notify_roam_prep_status;
#endif /* DHD_LOSSLESS_ROAMING || DBG_PKT_MON */
cfg->evt_handler[WLC_E_ROAM_START] = wl_notify_roam_start_status;
+ cfg->evt_handler[WLC_E_PSK_SUP] = wl_cfg80211_sup_event_handler;
+#ifdef WL_BCNRECV
+ cfg->evt_handler[WLC_E_BCNRECV_ABORTED] = wl_bcnrecv_aborted_event_handler;
+#endif /* WL_BCNRECV */
#ifdef WL_MBO
cfg->evt_handler[WLC_E_MBO] = wl_mbo_event_handler;
-#endif /* WL_MBO */
+#endif /* WL_MBO */
+#ifdef WL_CAC_TS
+ cfg->evt_handler[WLC_E_ADDTS_IND] = wl_cfg80211_cac_event_handler;
+ cfg->evt_handler[WLC_E_DELTS_IND] = wl_cfg80211_cac_event_handler;
+#endif /* WL_CAC_TS */
+#if defined(WL_MBO) || defined(WL_OCE)
+ cfg->evt_handler[WLC_E_PRUNE] = wl_bssid_prune_event_handler;
+#endif /* WL_MBO || WL_OCE */
+#ifdef RTT_SUPPORT
+ cfg->evt_handler[WLC_E_PROXD] = wl_cfg80211_rtt_event_handler;
+#endif // endif
+#ifdef WL_CHAN_UTIL
+ cfg->evt_handler[WLC_E_BSS_LOAD] = wl_cfg80211_bssload_report_event_handler;
+#endif /* WL_CHAN_UTIL */
}
#if defined(STATIC_WL_PRIV_STRUCT)
static void wl_deinit_priv_mem(struct bcm_cfg80211 *cfg)
{
MFREE(cfg->osh, cfg->scan_results, WL_SCAN_BUF_MAX);
- cfg->scan_results = NULL;
MFREE(cfg->osh, cfg->conf, sizeof(*cfg->conf));
- cfg->conf = NULL;
MFREE(cfg->osh, cfg->scan_req_int, sizeof(*cfg->scan_req_int));
- cfg->scan_req_int = NULL;
MFREE(cfg->osh, cfg->ioctl_buf, WLC_IOCTL_MAXLEN);
- cfg->ioctl_buf = NULL;
MFREE(cfg->osh, cfg->escan_ioctl_buf, WLC_IOCTL_MAXLEN);
- cfg->escan_ioctl_buf = NULL;
MFREE(cfg->osh, cfg->extra_buf, WL_EXTRA_BUF_MAX);
- cfg->extra_buf = NULL;
MFREE(cfg->osh, cfg->pmk_list, sizeof(*cfg->pmk_list));
- cfg->pmk_list = NULL;
#if defined(STATIC_WL_PRIV_STRUCT)
MFREE(cfg->osh, cfg->conn_info, sizeof(*cfg->conn_info));
- cfg->conn_info = NULL;
MFREE(cfg->osh, cfg->ie, sizeof(*cfg->ie));
- cfg->ie = NULL;
wl_deinit_escan_result_buf(cfg);
#endif /* STATIC_WL_PRIV_STRUCT */
if (cfg->afx_hdl) {
cancel_work_sync(&cfg->afx_hdl->work);
MFREE(cfg->osh, cfg->afx_hdl, sizeof(*cfg->afx_hdl));
- cfg->afx_hdl = NULL;
}
}
}
}
-static void wl_scan_timeout(
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- struct timer_list *t
-#else
- unsigned long data
-#endif
-)
+#ifdef DHD_LOSSLESS_ROAMING
+static void wl_del_roam_timeout(struct bcm_cfg80211 *cfg)
{
- wl_event_msg_t msg;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- struct bcm_cfg80211 *cfg = from_timer(cfg, t, scan_timeout);
-#else
- struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
-#endif
- struct wireless_dev *wdev = NULL;
- struct net_device *ndev = NULL;
- struct wl_scan_results *bss_list;
- wl_bss_info_t *bi = NULL;
- s32 i;
- u32 channel;
- u64 cur_time = OSL_SYSUPTIME_US();
dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
-#if 0
- uint32 prev_memdump_mode = dhdp->memdump_enabled;
-#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
- if (!(cfg->scan_request)) {
- WL_ERR(("timer expired but no scan request\n"));
- return;
+ /* restore prec_map to ALLPRIO */
+ dhdp->dequeue_prec_map = ALLPRIO;
+ if (timer_pending(&cfg->roam_timeout)) {
+ del_timer_sync(&cfg->roam_timeout);
}
- WL_ERR(("***SCAN event timeout. WQ state:0x%x enq_time:%llu "
- "deq_time:%llu evt_hdlr_entry_time:%llu evt_hdlr_exit_time:%llu "
- "current_time:%llu\n",
- work_busy(&cfg->event_work), cfg->scan_enq_time, cfg->wl_evt_deq_time,
- cfg->wl_evt_hdlr_entry_time, cfg->wl_evt_hdlr_exit_time, cur_time));
- if (cfg->scan_enq_time) {
- WL_ERR(("Elapsed time: %llu\n", (cur_time - cfg->scan_enq_time)));
- }
- WL_ERR(("lock_states:[%d:%d:%d:%d:%d:%d]\n",
- mutex_is_locked(&cfg->if_sync),
- mutex_is_locked(&cfg->usr_sync),
- mutex_is_locked(&cfg->pm_sync),
- mutex_is_locked(&cfg->scan_complete),
- spin_is_locked(&cfg->cfgdrv_lock),
- spin_is_locked(&cfg->eq_lock)));
- dhd_bus_intr_count_dump(dhdp);
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) && !defined(CONFIG_MODULES)
- /* Print WQ states. Enable only for in-built drivers as the symbol is not exported */
- show_workqueue_state();
-#endif /* LINUX_VER >= 4.1 && !CONFIG_MODULES */
-
- bss_list = wl_escan_get_buf(cfg, FALSE);
- if (!bss_list) {
- WL_ERR(("bss_list is null. Didn't receive any partial scan results\n"));
- } else {
- WL_ERR(("Dump scan buffer:\n"
- "scanned AP count (%d)\n", bss_list->count));
+}
- bi = next_bss(bss_list, bi);
- for_each_bss(bss_list, bi, i) {
- channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
- WL_ERR(("SSID :%s Channel :%d\n", bi->SSID, channel));
- }
- }
+static void wl_roam_timeout(unsigned long data)
+{
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
- if (cfg->scan_request->dev)
- wdev = cfg->scan_request->dev->ieee80211_ptr;
-#else
- wdev = cfg->scan_request->wdev;
-#endif /* LINUX_VERSION < KERNEL_VERSION(3, 6, 0) */
- if (!wdev) {
- WL_ERR(("No wireless_dev present\n"));
- return;
- }
- ndev = wdev_to_wlc_ndev(wdev, cfg);
+ WL_ERR(("roam timer expired\n"));
- bzero(&msg, sizeof(wl_event_msg_t));
- WL_ERR(("timer expired\n"));
-#if 0
- if (dhdp->memdump_enabled) {
- dhdp->memdump_enabled = DUMP_MEMFILE;
- dhdp->memdump_type = DUMP_TYPE_SCAN_TIMEOUT;
- dhd_bus_mem_dump(dhdp);
- dhdp->memdump_enabled = prev_memdump_mode;
- }
-#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
- msg.event_type = hton32(WLC_E_ESCAN_RESULT);
- msg.status = hton32(WLC_E_STATUS_TIMEOUT);
- msg.reason = 0xFFFFFFFF;
- wl_cfg80211_event(ndev, &msg, NULL);
+ /* restore prec_map to ALLPRIO */
+ dhdp->dequeue_prec_map = ALLPRIO;
}
-#ifdef DHD_LOSSLESS_ROAMING
-static void wl_del_roam_timeout(struct bcm_cfg80211 *cfg)
+#endif /* DHD_LOSSLESS_ROAMING */
+
+#if defined(CONFIG_WLAN_BEYONDX) || defined(CONFIG_SEC_5GMODEL)
+#define CP_CHAN_INFO_RAT_MODE_LTE 3
+#define CP_CHAN_INFO_RAT_MODE_NR5G 7
+int g_mhs_chan_for_cpcoex = 0;
+
+struct __packed cam_cp_noti_info {
+ u8 rat;
+ u32 band;
+ u32 channel;
+};
+
+int
+wl_cfg80211_send_msg_to_ril()
{
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ int id, buf = 1;
- /* restore prec_map to ALLPRIO */
- dhdp->dequeue_prec_map = ALLPRIO;
- if (timer_pending(&cfg->roam_timeout)) {
- del_timer_sync(&cfg->roam_timeout);
- }
+ id = IPC_SYSTEM_CP_CHANNEL_INFO;
+ dev_ril_bridge_send_msg(id, sizeof(int), &buf);
+ WL_ERR(("[BeyondX] send message to ril.\n"));
+ OSL_SLEEP(500);
+ return 0;
}
-static void wl_roam_timeout(
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- struct timer_list *t
-#else
- unsigned long data
-#endif
-)
+int
+wl_cfg80211_ril_bridge_notifier_call(struct notifier_block *nb,
+ unsigned long size, void *buf)
{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- struct bcm_cfg80211 *cfg = from_timer(cfg, t, roam_timeout);
-#else
- struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
-#endif
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ struct dev_ril_bridge_msg *msg;
+ struct cam_cp_noti_info *cp_noti_info;
+ static int mhs_channel_for_4g, mhs_channel_for_5g;
+ static int recv_msg_4g, recv_msg_5g;
- WL_ERR(("roam timer expired\n"));
+ WL_ERR(("[BeyondX] receive message from ril.\n"));
+ msg = (struct dev_ril_bridge_msg *)buf;
- /* restore prec_map to ALLPRIO */
- dhdp->dequeue_prec_map = ALLPRIO;
+ if (msg->dev_id == IPC_SYSTEM_CP_CHANNEL_INFO &&
+ msg->data_len <= sizeof(struct cam_cp_noti_info)) {
+ u8 rat;
+ u32 band;
+ u32 channel;
+
+ cp_noti_info = (struct cam_cp_noti_info *)msg->data;
+ rat = cp_noti_info->rat;
+ band = cp_noti_info->band;
+ channel = cp_noti_info->channel;
+
+ /* LTE/5G Band/Freq information => Mobile Hotspot channel mapping.
+ * LTE/B40: 38650~39649 => Ch.11
+ * LTE/B41: 39650~41589 => Ch.1
+ * 5G/N41: 499200~537999 => Ch.1
+ */
+ if (rat == CP_CHAN_INFO_RAT_MODE_LTE) {
+ recv_msg_4g = 1;
+ if (channel >= 38650 && channel <= 39649) {
+ mhs_channel_for_4g = 11;
+ } else if (channel >= 39650 && channel <= 41589) {
+ mhs_channel_for_4g = 1;
+ }
+ }
+ if (rat == CP_CHAN_INFO_RAT_MODE_NR5G) {
+ recv_msg_5g = 1;
+ if (channel >= 499200 && channel <= 537999) {
+ mhs_channel_for_5g = 1;
+ }
+ }
+
+ WL_DBG(("[BeyondX] rat: %u, band: %u, channel: %u, mhs_channel_for_4g: %u, "
+ "mhs_channel_for_5g: %u\n", rat, band, channel,
+ mhs_channel_for_4g, mhs_channel_for_5g));
+
+ if (recv_msg_4g && recv_msg_5g) {
+ if (mhs_channel_for_4g && mhs_channel_for_5g) {
+ /* if 4G/B40 + 5G/N41, select channel 6 for MHS */
+ if (mhs_channel_for_4g == 11 && mhs_channel_for_5g == 1) {
+ g_mhs_chan_for_cpcoex = 6;
+ /* if 4G(except for B40) + 5G/N41, select channel 1 for MHS */
+ } else {
+ g_mhs_chan_for_cpcoex = 1;
+ }
+ } else {
+ g_mhs_chan_for_cpcoex = mhs_channel_for_4g ? mhs_channel_for_4g :
+ mhs_channel_for_5g ? mhs_channel_for_5g : 0;
+ }
+ mhs_channel_for_4g = mhs_channel_for_5g = 0;
+ recv_msg_4g = recv_msg_5g = 0;
+ }
+ }
+
+ return 0;
}
-#endif /* DHD_LOSSLESS_ROAMING */
+static struct notifier_block wl_cfg80211_ril_bridge_notifier = {
+ .notifier_call = wl_cfg80211_ril_bridge_notifier_call,
+};
+
+static bool wl_cfg80211_ril_bridge_notifier_registered = FALSE;
+#endif /* CONFIG_WLAN_BEYONDX || defined(CONFIG_SEC_5GMODEL) */
static s32
wl_cfg80211_netdev_notifier_call(struct notifier_block * nb,
* wdev_cleanup_work call WARN_ON and make the scan done forcibly.
*/
if (wl_get_drv_status(cfg, SCANNING, dev))
- wl_notify_escan_complete(cfg, dev, true, true);
+ wl_cfg80211_cancel_scan(cfg);
break;
}
return NOTIFY_DONE;
*/
static bool wl_cfg80211_netdev_notifier_registered = FALSE;
-static void wl_cfg80211_cancel_scan(struct bcm_cfg80211 *cfg)
+static void wl_cfg80211_concurrent_roam(struct bcm_cfg80211 *cfg, int enable)
{
- struct wireless_dev *wdev = NULL;
- struct net_device *ndev = NULL;
-
- if (!cfg->scan_request)
- return;
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
- if (cfg->scan_request->dev)
- wdev = cfg->scan_request->dev->ieee80211_ptr;
-#else
- wdev = cfg->scan_request->wdev;
-#endif /* LINUX_VERSION < KERNEL_VERSION(3, 6, 0) */
+ u32 connected_cnt = wl_get_drv_status_all(cfg, CONNECTED);
+ bool p2p_connected = wl_cfgp2p_vif_created(cfg);
+ struct net_info *iter, *next;
- if (!wdev) {
- WL_ERR(("No wireless_dev present\n"));
+ if (!(cfg->roam_flags & WL_ROAM_OFF_ON_CONCURRENT))
return;
- }
- ndev = wdev_to_wlc_ndev(wdev, cfg);
- wl_notify_escan_complete(cfg, ndev, true, true);
- WL_INFORM_MEM(("Scan aborted! \n"));
-}
+ WL_DBG(("roam off:%d p2p_connected:%d connected_cnt:%d \n",
+ enable, p2p_connected, connected_cnt));
+ /* Disable FW roam when we have a concurrent P2P connection */
+ if (enable && p2p_connected && connected_cnt > 1) {
-void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg)
-{
- wl_scan_params_t *params = NULL;
- s32 params_size = 0;
- s32 err = BCME_OK;
- struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
- if (!in_atomic()) {
- /* Our scan params only need space for 1 channel and 0 ssids */
- params = wl_cfg80211_scan_alloc_params(cfg, -1, 0, ¶ms_size);
- if (params == NULL) {
- WL_ERR(("scan params allocation failed \n"));
- err = -ENOMEM;
- } else {
- /* Do a scan abort to stop the driver's scan engine */
- err = wldev_ioctl_set(dev, WLC_SCAN, params, params_size);
- if (err < 0) {
- /* scan abort can fail if there is no outstanding scan */
- WL_DBG(("scan abort failed \n"));
+ /* Mark the roam-off override so it can be reverted when concurrency ends */
+ cfg->roam_flags |= WL_ROAM_REVERT_STATUS;
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ if (iter->ndev && iter->wdev &&
+ iter->wdev->iftype == NL80211_IFTYPE_STATION) {
+ if (wldev_iovar_setint(iter->ndev, "roam_off", TRUE)
+ == BCME_OK) {
+ iter->roam_off = TRUE;
+ }
+ else {
+ WL_ERR(("error to enable roam_off\n"));
+ }
}
- MFREE(cfg->osh, params, params_size);
}
}
-#ifdef WLTDLS
- if (cfg->tdls_mgmt_frame) {
- MFREE(cfg->osh, cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len);
- cfg->tdls_mgmt_frame = NULL;
- cfg->tdls_mgmt_frame_len = 0;
- }
-#endif /* WLTDLS */
-}
-
-static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
- struct net_device *ndev,
- bool aborted, bool fw_abort)
-{
- s32 err = BCME_OK;
- unsigned long flags;
- struct net_device *dev;
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
-
- WL_DBG(("Enter \n"));
- BCM_REFERENCE(dhdp);
-
- mutex_lock(&cfg->scan_complete);
- if (!ndev) {
- WL_ERR(("ndev is null\n"));
- err = BCME_ERROR;
- goto out;
- }
-
- if (cfg->escan_info.ndev != ndev) {
- WL_ERR(("Outstanding scan req ndev not matching (%p:%p)\n",
- cfg->escan_info.ndev, ndev));
- err = BCME_ERROR;
- goto out;
- }
-
- if (cfg->scan_request) {
- dev = bcmcfg_to_prmry_ndev(cfg);
-#if defined(WL_ENABLE_P2P_IF)
- if (cfg->scan_request->dev != cfg->p2p_net)
- dev = cfg->scan_request->dev;
-#elif defined(WL_CFG80211_P2P_DEV_IF)
- if (cfg->scan_request->wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)
- dev = cfg->scan_request->wdev->netdev;
-#endif // endif
- }
- else {
- WL_DBG(("cfg->scan_request is NULL. Internal scan scenario."
- "doing scan_abort for ndev %p primary %p",
- ndev, bcmcfg_to_prmry_ndev(cfg)));
- dev = ndev;
- }
- if (fw_abort && !in_atomic())
- wl_cfg80211_scan_abort(cfg);
- if (timer_pending(&cfg->scan_timeout))
- del_timer_sync(&cfg->scan_timeout);
- cfg->scan_enq_time = 0;
-#if defined(ESCAN_RESULT_PATCH)
- if (likely(cfg->scan_request)) {
- cfg->bss_list = wl_escan_get_buf(cfg, aborted);
- wl_inform_bss(cfg);
- }
-#endif /* ESCAN_RESULT_PATCH */
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
-#ifdef WL_SCHED_SCAN
- if (cfg->sched_scan_req && !cfg->scan_request) {
- if (!aborted) {
- WL_INFORM_MEM(("[%s] Report sched scan done.\n", dev->name));
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
- cfg80211_sched_scan_results(cfg->sched_scan_req->wiphy, 0);
-#else
- cfg80211_sched_scan_results(cfg->sched_scan_req->wiphy);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */
+ else if (!enable && (cfg->roam_flags & WL_ROAM_REVERT_STATUS)) {
+ cfg->roam_flags &= ~WL_ROAM_REVERT_STATUS;
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ if (iter->ndev && iter->wdev &&
+ iter->wdev->iftype == NL80211_IFTYPE_STATION) {
+ if (iter->roam_off != WL_INVALID) {
+ if (wldev_iovar_setint(iter->ndev, "roam_off", FALSE)
+ == BCME_OK) {
+ iter->roam_off = FALSE;
+ }
+ else {
+ WL_ERR(("error to disable roam_off\n"));
+ }
+ }
+ }
}
-
- DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_SCAN_COMPLETE);
- cfg->sched_scan_running = FALSE;
- }
-#endif /* WL_SCHED_SCAN */
- if (likely(cfg->scan_request)) {
- WL_INFORM_MEM(("[%s] Report scan done.\n", dev->name));
- wl_notify_scan_done(cfg, aborted);
- cfg->scan_request = NULL;
}
- if (p2p_is_on(cfg))
- wl_clr_p2p_status(cfg, SCANNING);
- wl_clr_drv_status(cfg, SCANNING, dev);
-
- DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
- DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub));
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
-out:
- mutex_unlock(&cfg->scan_complete);
- return err;
+ return;
}
-#ifdef ESCAN_BUF_OVERFLOW_MGMT
-#ifndef WL_DRV_AVOID_SCANCACHE
-static void
-wl_cfg80211_find_removal_candidate(wl_bss_info_t *bss, removal_element_t *candidate)
-{
- int idx;
- for (idx = 0; idx < BUF_OVERFLOW_MGMT_COUNT; idx++) {
- int len = BUF_OVERFLOW_MGMT_COUNT - idx - 1;
- if (bss->RSSI < candidate[idx].RSSI) {
- if (len)
- memcpy(&candidate[idx + 1], &candidate[idx],
- sizeof(removal_element_t) * len);
- candidate[idx].RSSI = bss->RSSI;
- candidate[idx].length = bss->length;
- memcpy(&candidate[idx].BSSID, &bss->BSSID, ETHER_ADDR_LEN);
- return;
- }
- }
-}
+static void wl_cfg80211_determine_vsdb_mode(struct bcm_cfg80211 *cfg)
+{
+ struct net_info *iter, *next;
+ u32 ctl_chan = 0;
+ u32 chanspec = 0;
+ u32 pre_ctl_chan = 0;
+ u32 connected_cnt = wl_get_drv_status_all(cfg, CONNECTED);
+ cfg->vsdb_mode = false;
-static void
-wl_cfg80211_remove_lowRSSI_info(wl_scan_results_t *list, removal_element_t *candidate,
- wl_bss_info_t *bi)
-{
- int idx1, idx2;
- int total_delete_len = 0;
- for (idx1 = 0; idx1 < BUF_OVERFLOW_MGMT_COUNT; idx1++) {
- int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
- wl_bss_info_t *bss = NULL;
- if (candidate[idx1].RSSI >= bi->RSSI)
- continue;
- for (idx2 = 0; idx2 < list->count; idx2++) {
- bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length)) :
- list->bss_info;
- if (!bcmp(&candidate[idx1].BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
- candidate[idx1].RSSI == bss->RSSI &&
- candidate[idx1].length == dtoh32(bss->length)) {
- u32 delete_len = dtoh32(bss->length);
- WL_DBG(("delete scan info of " MACDBG " to add new AP\n",
- MAC2STRDBG(bss->BSSID.octet)));
- if (idx2 < list->count -1) {
- memmove((u8 *)bss, (u8 *)bss + delete_len,
- list->buflen - cur_len - delete_len);
+ if (connected_cnt <= 1) {
+ return;
+ }
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ /* p2p discovery iface ndev could be null */
+ if (iter->ndev) {
+ chanspec = 0;
+ ctl_chan = 0;
+ if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
+ if (wldev_iovar_getint(iter->ndev, "chanspec",
+ (s32 *)&chanspec) == BCME_OK) {
+ chanspec = wl_chspec_driver_to_host(chanspec);
+ ctl_chan = wf_chspec_ctlchan(chanspec);
+ wl_update_prof(cfg, iter->ndev, NULL,
+ &ctl_chan, WL_PROF_CHAN);
}
- list->buflen -= delete_len;
- list->count--;
- total_delete_len += delete_len;
- /* if delete_len is greater than or equal to result length */
- if (total_delete_len >= bi->length) {
- return;
+ if (!cfg->vsdb_mode) {
+ if (!pre_ctl_chan && ctl_chan)
+ pre_ctl_chan = ctl_chan;
+ else if (pre_ctl_chan && (pre_ctl_chan != ctl_chan)) {
+ cfg->vsdb_mode = true;
+ }
}
- break;
}
- cur_len += dtoh32(bss->length);
}
}
+ WL_MSG("wlan", "%s concurrency is enabled\n", cfg->vsdb_mode ? "Multi Channel" : "Same Channel");
+ return;
}
-#endif /* WL_DRV_AVOID_SCANCACHE */
-#endif /* ESCAN_BUF_OVERFLOW_MGMT */
-#ifdef WL_DRV_AVOID_SCANCACHE
-static u32 wl_p2p_find_peer_channel(struct bcm_cfg80211 *cfg, s32 status, wl_bss_info_t *bi,
- u32 bi_length)
+int
+wl_cfg80211_determine_p2p_rsdb_mode(struct bcm_cfg80211 *cfg)
{
- u32 ret;
- u8 *p2p_dev_addr = NULL;
+ struct net_info *iter, *next;
+ u32 chanspec = 0;
+ u32 band = 0;
+ u32 pre_band = 0;
+ bool is_rsdb_supported = FALSE;
+ bool rsdb_mode = FALSE;
- ret = wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL);
- if (!ret) {
- return ret;
+ is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE);
+
+ if (!is_rsdb_supported) {
+ return 0;
}
- if (status == WLC_E_STATUS_PARTIAL) {
- p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length);
- if (p2p_dev_addr && !memcmp(p2p_dev_addr,
- cfg->afx_hdl->tx_dst_addr.octet, ETHER_ADDR_LEN)) {
- s32 channel = wf_chspec_ctlchan(
- wl_chspec_driver_to_host(bi->chanspec));
- if ((channel > MAXCHANNEL) || (channel <= 0)) {
- channel = WL_INVALID;
- } else {
- WL_ERR(("ACTION FRAME SCAN : Peer " MACDBG " found,"
- " channel : %d\n",
- MAC2STRDBG(cfg->afx_hdl->tx_dst_addr.octet),
- channel));
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ /* p2p discovery iface ndev could be null */
+ if (iter->ndev) {
+ chanspec = 0;
+ band = 0;
+ if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
+ if (wldev_iovar_getint(iter->ndev, "chanspec",
+ (s32 *)&chanspec) == BCME_OK) {
+ chanspec = wl_chspec_driver_to_host(chanspec);
+ band = CHSPEC_BAND(chanspec);
+ }
+
+ if (!pre_band && band) {
+ pre_band = band;
+ } else if (pre_band && (pre_band != band)) {
+ rsdb_mode = TRUE;
+ }
}
- wl_clr_p2p_status(cfg, SCANNING);
- cfg->afx_hdl->peer_chan = channel;
- complete(&cfg->act_frm_scan);
}
- } else {
- WL_INFORM_MEM(("ACTION FRAME SCAN DONE\n"));
- wl_clr_p2p_status(cfg, SCANNING);
- wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
- if (cfg->afx_hdl->peer_chan == WL_INVALID)
- complete(&cfg->act_frm_scan);
}
+ WL_DBG(("RSDB mode is %s\n", rsdb_mode ? "enabled" : "disabled"));
- return ret;
+ return rsdb_mode;
}
-static s32 wl_escan_without_scan_cache(struct bcm_cfg80211 *cfg, wl_escan_result_t *escan_result,
- struct net_device *ndev, const wl_event_msg_t *e, s32 status)
+static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info,
+ enum wl_status state, bool set)
{
+ s32 pm = PM_FAST;
s32 err = BCME_OK;
- wl_bss_info_t *bi;
- u32 bi_length;
- bool aborted = false;
- bool fw_abort = false;
- bool notify_escan_complete = false;
-
- if (wl_escan_check_sync_id(status, escan_result->sync_id,
- cfg->escan_info.cur_sync_id) < 0) {
- goto exit;
- }
-
- wl_escan_print_sync_id(status, escan_result->sync_id,
- cfg->escan_info.cur_sync_id);
-
- if (!(status == WLC_E_STATUS_TIMEOUT) || !(status == WLC_E_STATUS_PARTIAL)) {
- cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
- }
-
- if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
- notify_escan_complete = true;
- }
+ u32 mode;
+ u32 chan = 0;
+ struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
+ dhd_pub_t *dhd = cfg->pub;
+#ifdef RTT_SUPPORT
+ rtt_status_info_t *rtt_status;
+#endif /* RTT_SUPPORT */
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ WL_ERR(("busstate is DHD_BUS_DOWN!\n"));
+ return 0;
+ }
+ WL_DBG(("Enter state %d set %d _net_info->pm_restore %d iface %s\n",
+ state, set, _net_info->pm_restore, _net_info->ndev->name));
- if (status == WLC_E_STATUS_PARTIAL) {
- WL_DBG(("WLC_E_STATUS_PARTIAL \n"));
- DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND);
- if ((!escan_result) || (dtoh16(escan_result->bss_count) != 1)) {
- WL_ERR(("Invalid escan result (NULL pointer) or invalid bss_count\n"));
- goto exit;
+ if (state != WL_STATUS_CONNECTED)
+ return 0;
+ mode = wl_get_mode_by_netdev(cfg, _net_info->ndev);
+ if (set) {
+ wl_cfg80211_concurrent_roam(cfg, 1);
+ wl_cfg80211_determine_vsdb_mode(cfg);
+ if (mode == WL_MODE_AP) {
+ if (wl_add_remove_eventmsg(primary_dev, WLC_E_P2P_PROBREQ_MSG, false))
+ WL_ERR((" failed to unset WLC_E_P2P_PROPREQ_MSG\n"));
}
+ pm = PM_OFF;
+ if ((err = wldev_ioctl_set(_net_info->ndev, WLC_SET_PM, &pm,
+ sizeof(pm))) != 0) {
+ if (err == -ENODEV)
+ WL_DBG(("%s:netdev not ready\n",
+ _net_info->ndev->name));
+ else
+ WL_ERR(("%s:error (%d)\n",
+ _net_info->ndev->name, err));
- bi = escan_result->bss_info;
- bi_length = dtoh32(bi->length);
- if ((!bi) ||
- (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE))) {
- WL_ERR(("Invalid escan bss info (NULL pointer)"
- "or invalid bss_info length\n"));
- goto exit;
+ wl_cfg80211_update_power_mode(_net_info->ndev);
}
-
- if (!(bcmcfg_to_wiphy(cfg)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) {
- if (dtoh16(bi->capability) & DOT11_CAP_IBSS) {
- WL_DBG(("Ignoring IBSS result\n"));
- goto exit;
- }
+ wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_SHORT);
+#if defined(WLTDLS)
+ if (wl_cfg80211_is_concurrent_mode(primary_dev)) {
+ err = wldev_iovar_setint(primary_dev, "tdls_enable", 0);
}
+#endif /* defined(WLTDLS) */
- if (wl_p2p_find_peer_channel(cfg, status, bi, bi_length)) {
- goto exit;
- } else {
- if (scan_req_match(cfg)) {
- /* p2p scan && allow only probe response */
- if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) &&
- (bi->flags & WL_BSS_FLAGS_FROM_BEACON))
- goto exit;
- }
-#ifdef ROAM_CHANNEL_CACHE
- add_roam_cache(cfg, bi);
-#endif /* ROAM_CHANNEL_CACHE */
- err = wl_inform_single_bss(cfg, bi, false);
-#ifdef ROAM_CHANNEL_CACHE
- /* print_roam_cache(); */
- update_roam_cache(cfg, ioctl_version);
-#endif /* ROAM_CHANNEL_CACHE */
-
- /*
- * !Broadcast && number of ssid = 1 && number of channels =1
- * means specific scan to association
- */
- if (wl_cfgp2p_is_p2p_specific_scan(cfg->scan_request)) {
- WL_ERR(("P2P assoc scan fast aborted.\n"));
- aborted = false;
- fw_abort = true;
- }
- /* Directly exit from function here and
- * avoid sending notify completion to cfg80211
- */
- goto exit;
+#ifdef DISABLE_FRAMEBURST_VSDB
+ if (!DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_HOSTAP_MODE) &&
+ wl_cfg80211_is_concurrent_mode(primary_dev) &&
+ !wl_cfg80211_determine_p2p_rsdb_mode(cfg)) {
+ wl_cfg80211_set_frameburst(cfg, FALSE);
}
- } else if (status == WLC_E_STATUS_SUCCESS) {
- if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
- goto exit;
+#endif /* DISABLE_FRAMEBURST_VSDB */
+#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
+ if (DHD_OPMODE_STA_SOFTAP_CONCURR(dhd) &&
+ wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg))) {
+ /* Enable frameburst for
+ * STA/SoftAP concurrent mode
+ */
+ wl_cfg80211_set_frameburst(cfg, TRUE);
}
- WL_INFORM_MEM(("ESCAN COMPLETED\n"));
- DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_COMPLETE);
-
- /* Update escan complete status */
- aborted = false;
- fw_abort = false;
-
- } else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) ||
-#ifdef BCMCCX
- (status == WLC_E_STATUS_CCXFASTRM) ||
-#endif /* BCMCCX */
- (status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) ||
- (status == WLC_E_STATUS_NEWASSOC)) {
- /* Handle all cases of scan abort */
+#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
+ } else { /* clear */
+ chan = 0;
+ /* clear chan information when the net device is disconnected */
+ wl_update_prof(cfg, _net_info->ndev, NULL, &chan, WL_PROF_CHAN);
+ wl_cfg80211_determine_vsdb_mode(cfg);
+ if (primary_dev == _net_info->ndev) {
+ pm = PM_FAST;
+#ifdef RTT_SUPPORT
+ rtt_status = GET_RTTSTATE(dhd);
+ if (rtt_status->status != RTT_ENABLED) {
+#endif /* RTT_SUPPORT */
+ if (dhd_conf_get_pm(dhd) >= 0)
+ pm = dhd_conf_get_pm(dhd);
+ if ((err = wldev_ioctl_set(_net_info->ndev, WLC_SET_PM, &pm,
+ sizeof(pm))) != 0) {
+ if (err == -ENODEV)
+ WL_DBG(("%s:netdev not ready\n",
+ _net_info->ndev->name));
+ else
+ WL_ERR(("%s:error (%d)\n",
+ _net_info->ndev->name, err));
- WL_DBG(("ESCAN ABORT reason: %d\n", status));
- if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
- goto exit;
+ wl_cfg80211_update_power_mode(_net_info->ndev);
+ }
+#ifdef RTT_SUPPORT
+ }
+#endif /* RTT_SUPPORT */
}
- WL_INFORM_MEM(("ESCAN ABORTED\n"));
-
- /* Update escan complete status */
- aborted = true;
- fw_abort = false;
-
- } else if (status == WLC_E_STATUS_TIMEOUT) {
- WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request));
- WL_ERR(("reason[0x%x]\n", e->reason));
- if (e->reason == 0xFFFFFFFF) {
- /* Update escan complete status */
- aborted = true;
- fw_abort = true;
+ wl_cfg80211_concurrent_roam(cfg, 0);
+#if defined(WLTDLS)
+ if (!wl_cfg80211_is_concurrent_mode(primary_dev)) {
+ err = wldev_iovar_setint(primary_dev, "tdls_enable", 1);
}
- } else {
- WL_ERR(("unexpected Escan Event %d : abort\n", status));
+#endif /* defined(WLTDLS) */
- if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
- goto exit;
+#if defined(DISABLE_FRAMEBURST_VSDB)
+ if (!DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_HOSTAP_MODE)) {
+ wl_cfg80211_set_frameburst(cfg, TRUE);
}
- /* Update escan complete status */
- aborted = true;
- fw_abort = false;
- }
-
- /* Notify escan complete status */
- if (notify_escan_complete) {
- wl_notify_escan_complete(cfg, ndev, aborted, fw_abort);
+#endif /* DISABLE_FRAMEBURST_VSDB */
+#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
+ if (DHD_OPMODE_STA_SOFTAP_CONCURR(dhd) &&
+ (cfg->ap_oper_channel <= CH_MAX_2G_CHANNEL)) {
+ /* Disable frameburst for stand-alone 2GHz SoftAP */
+ wl_cfg80211_set_frameburst(cfg, FALSE);
+ }
+#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
}
-
-exit:
return err;
-
}
-#endif /* WL_DRV_AVOID_SCANCACHE */
-#ifdef WL_BCNRECV
-/* Beacon recv results handler sending to upper layer */
-static s32
-wl_bcnrecv_result_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- wl_bss_info_v109_2_t *bi, uint32 scan_status)
+#ifdef DHD_LOSSLESS_ROAMING
+static s32 wl_init_roam_timeout(struct bcm_cfg80211 *cfg)
{
- s32 err = BCME_OK;
- struct wiphy *wiphy = NULL;
- wl_bcnrecv_result_t *bcn_recv = NULL;
- struct timespec ts;
- if (!bi) {
- WL_ERR(("%s: bi is NULL\n", __func__));
- err = BCME_NORESOURCE;
- goto exit;
- }
- if ((bi->length - bi->ie_length) < sizeof(wl_bss_info_v109_2_t)) {
- WL_ERR(("bi info version doesn't support bcn_recv attributes\n"));
- goto exit;
- }
+ int err = 0;
- if (scan_status == WLC_E_STATUS_RXBCN) {
- wiphy = cfg->wdev->wiphy;
- if (!wiphy) {
- WL_ERR(("wiphy is NULL\n"));
- err = BCME_NORESOURCE;
- goto exit;
- }
- bcn_recv = (wl_bcnrecv_result_t *)MALLOCZ(cfg->osh, sizeof(*bcn_recv));
- if (unlikely(!bcn_recv)) {
- WL_ERR(("Failed to allocate memory\n"));
- return -ENOMEM;
- }
- memcpy((char *)bcn_recv->SSID, (char *)bi->SSID, DOT11_MAX_SSID_LEN);
- memcpy(&bcn_recv->BSSID, &bi->BSSID, ETH_ALEN);
- bcn_recv->channel = bi->chanspec;
- bcn_recv->beacon_interval = bi->beacon_period;
+ /* Init roam timer */
+ init_timer_compat(&cfg->roam_timeout, wl_roam_timeout, cfg);
- /* kernal timestamp */
- get_monotonic_boottime(&ts);
- bcn_recv->system_time = ((u64)ts.tv_sec*1000000)
- + ts.tv_nsec / 1000;
- bcn_recv->timestamp[0] = bi->timestamp[0];
- bcn_recv->timestamp[1] = bi->timestamp[1];
- if (bcn_recv) {
- /* sending an req info to upper layer */
- wl_cfgvendor_send_async_event(wiphy, bcmcfg_to_prmry_ndev(cfg),
- BRCM_VENDOR_EVENT_BEACON_RECV, bcn_recv, sizeof(*bcn_recv));
- }
- } else {
- WL_DBG(("Ignoring Escan Event:%d \n", scan_status));
- }
-exit:
- if (bcn_recv) {
- MFREE(cfg->osh, bcn_recv, sizeof(*bcn_recv));
- }
return err;
}
-#endif /* WL_BCNRECV */
+#endif /* DHD_LOSSLESS_ROAMING */
-static s32 wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data)
+static s32 wl_init_priv(struct bcm_cfg80211 *cfg)
{
- s32 err = BCME_OK;
- s32 status = ntoh32(e->status);
- wl_escan_result_t *escan_result;
- struct net_device *ndev = NULL;
-#ifndef WL_DRV_AVOID_SCANCACHE
- wl_bss_info_t *bi;
- u32 bi_length;
- const wifi_p2p_ie_t * p2p_ie;
- const u8 *p2p_dev_addr = NULL;
- wl_scan_results_t *list;
- wl_bss_info_t *bss = NULL;
- u32 i;
-#endif /* WL_DRV_AVOID_SCANCACHE */
- u16 channel;
- struct ieee80211_supported_band *band;
-
- WL_DBG((" enter event type : %d, status : %d \n",
- ntoh32(e->event_type), ntoh32(e->status)));
-
- ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ s32 err = 0;
- mutex_lock(&cfg->usr_sync);
- /* P2P SCAN is coming from primary interface */
- if (wl_get_p2p_status(cfg, SCANNING)) {
- if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM))
- ndev = cfg->afx_hdl->dev;
- else
- ndev = cfg->escan_info.ndev;
- }
- escan_result = (wl_escan_result_t *)data;
+ cfg->scan_request = NULL;
+ cfg->pwr_save = !!(wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT);
+#ifdef DISABLE_BUILTIN_ROAM
+ cfg->roam_on = false;
+#else
+ cfg->roam_on = true;
+#endif /* DISABLE_BUILTIN_ROAM */
+ cfg->active_scan = true;
+ cfg->rf_blocked = false;
+ cfg->vsdb_mode = false;
+#if defined(BCMSDIO) || defined(BCMDBUS)
+ cfg->wlfc_on = false;
+#endif /* BCMSDIO || BCMDBUS */
+ cfg->roam_flags |= WL_ROAM_OFF_ON_CONCURRENT;
+ cfg->disable_roam_event = false;
+ /* register interested state */
+ set_bit(WL_STATUS_CONNECTED, &cfg->interrested_state);
+ spin_lock_init(&cfg->cfgdrv_lock);
+ mutex_init(&cfg->ioctl_buf_sync);
+ init_waitqueue_head(&cfg->netif_change_event);
+ init_waitqueue_head(&cfg->wps_done_event);
+ init_completion(&cfg->send_af_done);
+ init_completion(&cfg->iface_disable);
+ mutex_init(&cfg->usr_sync);
+ mutex_init(&cfg->event_sync);
+ mutex_init(&cfg->if_sync);
+ mutex_init(&cfg->scan_sync);
+ mutex_init(&cfg->pm_sync);
+ mutex_init(&cfg->in4way_sync);
+#ifdef WLTDLS
+ mutex_init(&cfg->tdls_sync);
+#endif /* WLTDLS */
#ifdef WL_BCNRECV
- if (cfg->bcnrecv_info.bcnrecv_state == BEACON_RECV_STARTED) {
- /* handle beacon recv scan results */
- wl_bss_info_v109_2_t *bi_info;
- bi_info = (wl_bss_info_v109_2_t *)escan_result->bss_info;
- err = wl_bcnrecv_result_handler(cfg, cfgdev, bi_info, status);
- goto exit;
- }
+ mutex_init(&cfg->bcn_sync);
#endif /* WL_BCNRECV */
- if (!ndev || (!wl_get_drv_status(cfg, SCANNING, ndev) && !cfg->sched_scan_running)) {
- WL_ERR_RLMT(("escan is not ready. ndev:%p drv_scan_status 0x%x"
- " e_type %d e_states %d\n",
- ndev, wl_get_drv_status(cfg, SCANNING, ndev),
- ntoh32(e->event_type), ntoh32(e->status)));
- goto exit;
+#ifdef WL_WPS_SYNC
+ wl_init_wps_reauth_sm(cfg);
+#endif /* WL_WPS_SYNC */
+ wl_init_eq(cfg);
+ err = wl_init_priv_mem(cfg);
+ if (err)
+ return err;
+ if (wl_create_event_handler(cfg))
+ return -ENOMEM;
+ wl_init_event_handler(cfg);
+ err = wl_init_scan(cfg);
+ if (err)
+ return err;
+#ifdef DHD_LOSSLESS_ROAMING
+ err = wl_init_roam_timeout(cfg);
+ if (err) {
+ return err;
}
+#endif /* DHD_LOSSLESS_ROAMING */
+ wl_init_conf(cfg->conf);
+ wl_init_prof(cfg, ndev);
+ wl_link_down(cfg);
+ DNGL_FUNC(dhd_cfg80211_init, (cfg));
+#ifdef WL_NAN
+ cfg->nan_dp_state = NAN_DP_STATE_DISABLED;
+ init_waitqueue_head(&cfg->ndp_if_change_event);
+ mutex_init(&cfg->nancfg.nan_sync);
+ init_waitqueue_head(&cfg->nancfg.nan_event_wait);
+#endif /* WL_NAN */
+ cfg->pmk_list->pmkids.length = OFFSETOF(pmkid_list_v3_t, pmkid);
+ cfg->pmk_list->pmkids.count = 0;
+ cfg->pmk_list->pmkids.version = PMKID_LIST_VER_3;
+ return err;
+}
-#ifndef WL_DRV_AVOID_SCANCACHE
- if (status == WLC_E_STATUS_PARTIAL) {
- WL_DBG(("WLC_E_STATUS_PARTIAL \n"));
- DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND);
- if (!escan_result) {
- WL_ERR(("Invalid escan result (NULL pointer)\n"));
- goto exit;
- }
- if ((dtoh32(escan_result->buflen) > (int)ESCAN_BUF_SIZE) ||
- (dtoh32(escan_result->buflen) < sizeof(wl_escan_result_t))) {
- WL_ERR(("Invalid escan buffer len:%d\n", dtoh32(escan_result->buflen)));
- goto exit;
- }
- if (dtoh16(escan_result->bss_count) != 1) {
- WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count));
- goto exit;
- }
- bi = escan_result->bss_info;
- if (!bi) {
- WL_ERR(("Invalid escan bss info (NULL pointer)\n"));
- goto exit;
- }
- bi_length = dtoh32(bi->length);
- if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) {
- WL_ERR(("Invalid bss_info length %d: ignoring\n", bi_length));
- goto exit;
- }
+static void wl_deinit_priv(struct bcm_cfg80211 *cfg)
+{
+ DNGL_FUNC(dhd_cfg80211_deinit, (cfg));
+ wl_destroy_event_handler(cfg);
+ wl_flush_eq(cfg);
+ wl_link_down(cfg);
+ del_timer_sync(&cfg->scan_timeout);
+#ifdef DHD_LOSSLESS_ROAMING
+ del_timer_sync(&cfg->roam_timeout);
+#endif // endif
+ wl_deinit_priv_mem(cfg);
+ if (wl_cfg80211_netdev_notifier_registered) {
+ wl_cfg80211_netdev_notifier_registered = FALSE;
+ unregister_netdevice_notifier(&wl_cfg80211_netdev_notifier);
+ }
+}
- /* +++++ terence 20130524: skip invalid bss */
- channel =
- bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(wl_chspec_driver_to_host(bi->chanspec));
- if (channel <= CH_MAX_2G_CHANNEL)
- band = bcmcfg_to_wiphy(cfg)->bands[IEEE80211_BAND_2GHZ];
- else
- band = bcmcfg_to_wiphy(cfg)->bands[IEEE80211_BAND_5GHZ];
- if (!band) {
- WL_ERR(("No valid band\n"));
- goto exit;
- }
- if (!dhd_conf_match_channel(cfg->pub, channel))
- goto exit;
- /* ----- terence 20130524: skip invalid bss */
-
- if (wl_escan_check_sync_id(status, escan_result->sync_id,
- cfg->escan_info.cur_sync_id) < 0)
- goto exit;
-
- if (!(bcmcfg_to_wiphy(cfg)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) {
- if (dtoh16(bi->capability) & DOT11_CAP_IBSS) {
- WL_DBG(("Ignoring IBSS result\n"));
- goto exit;
- }
- }
-
- if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
- p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length);
- if (p2p_dev_addr && !memcmp(p2p_dev_addr,
- cfg->afx_hdl->tx_dst_addr.octet, ETHER_ADDR_LEN)) {
- s32 channel = wf_chspec_ctlchan(
- wl_chspec_driver_to_host(bi->chanspec));
-
- if ((channel > MAXCHANNEL) || (channel <= 0))
- channel = WL_INVALID;
- else
- WL_ERR(("ACTION FRAME SCAN : Peer " MACDBG " found,"
- " channel : %d\n",
- MAC2STRDBG(cfg->afx_hdl->tx_dst_addr.octet),
- channel));
-
- wl_clr_p2p_status(cfg, SCANNING);
- cfg->afx_hdl->peer_chan = channel;
- complete(&cfg->act_frm_scan);
- goto exit;
- }
+#if defined(WL_ENABLE_P2P_IF)
+static s32 wl_cfg80211_attach_p2p(struct bcm_cfg80211 *cfg)
+{
+ WL_TRACE(("Enter \n"));
- } else {
- int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
-#ifdef ESCAN_BUF_OVERFLOW_MGMT
- removal_element_t candidate[BUF_OVERFLOW_MGMT_COUNT];
- int remove_lower_rssi = FALSE;
+ if (wl_cfgp2p_register_ndev(cfg) < 0) {
+ WL_ERR(("P2P attach failed. \n"));
+ return -ENODEV;
+ }
- bzero(candidate, sizeof(removal_element_t)*BUF_OVERFLOW_MGMT_COUNT);
-#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+ return 0;
+}
- list = wl_escan_get_buf(cfg, FALSE);
- if (scan_req_match(cfg)) {
-#ifdef WL_HOST_BAND_MGMT
- s32 channel_band = 0;
- chanspec_t chspec;
-#endif /* WL_HOST_BAND_MGMT */
- /* p2p scan && allow only probe response */
- if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) &&
- (bi->flags & WL_BSS_FLAGS_FROM_BEACON))
- goto exit;
- if ((p2p_ie = wl_cfgp2p_find_p2pie(((u8 *) bi) + bi->ie_offset,
- bi->ie_length)) == NULL) {
- WL_ERR(("Couldn't find P2PIE in probe"
- " response/beacon\n"));
- goto exit;
- }
-#ifdef WL_HOST_BAND_MGMT
- chspec = wl_chspec_driver_to_host(bi->chanspec);
- channel_band = CHSPEC2WLC_BAND(chspec);
-
- if ((cfg->curr_band == WLC_BAND_5G) &&
- (channel_band == WLC_BAND_2G)) {
- /* Avoid sending the GO results in band conflict */
- if (wl_cfgp2p_retreive_p2pattrib(p2p_ie,
- P2P_SEID_GROUP_ID) != NULL)
- goto exit;
- }
-#endif /* WL_HOST_BAND_MGMT */
- }
-#ifdef ESCAN_BUF_OVERFLOW_MGMT
- if (bi_length > ESCAN_BUF_SIZE - list->buflen)
- remove_lower_rssi = TRUE;
-#endif /* ESCAN_BUF_OVERFLOW_MGMT */
-
- WL_SCAN(("%s("MACDBG") RSSI %d flags 0x%x length %d\n", bi->SSID,
- MAC2STRDBG(bi->BSSID.octet), bi->RSSI, bi->flags, bi->length));
- for (i = 0; i < list->count; i++) {
- bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length))
- : list->bss_info;
- if (!bss) {
- WL_ERR(("bss is NULL\n"));
- goto exit;
- }
-#ifdef ESCAN_BUF_OVERFLOW_MGMT
- WL_SCAN(("%s("MACDBG"), i=%d bss: RSSI %d list->count %d\n",
- bss->SSID, MAC2STRDBG(bss->BSSID.octet),
- i, bss->RSSI, list->count));
-
- if (remove_lower_rssi)
- wl_cfg80211_find_removal_candidate(bss, candidate);
-#endif /* ESCAN_BUF_OVERFLOW_MGMT */
-
- if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
- (CHSPEC_BAND(wl_chspec_driver_to_host(bi->chanspec))
- == CHSPEC_BAND(wl_chspec_driver_to_host(bss->chanspec))) &&
- bi->SSID_len == bss->SSID_len &&
- !bcmp(bi->SSID, bss->SSID, bi->SSID_len)) {
-
- /* do not allow beacon data to update
- *the data recd from a probe response
- */
- if (!(bss->flags & WL_BSS_FLAGS_FROM_BEACON) &&
- (bi->flags & WL_BSS_FLAGS_FROM_BEACON))
- goto exit;
+static s32 wl_cfg80211_detach_p2p(struct bcm_cfg80211 *cfg)
+{
+ struct wireless_dev *wdev;
- WL_SCAN(("%s("MACDBG"), i=%d prev: RSSI %d"
- " flags 0x%x, new: RSSI %d flags 0x%x\n",
- bss->SSID, MAC2STRDBG(bi->BSSID.octet), i,
- bss->RSSI, bss->flags, bi->RSSI, bi->flags));
+ WL_DBG(("Enter \n"));
+ if (!cfg) {
+ WL_ERR(("Invalid Ptr\n"));
+ return -EINVAL;
+ }
+ else {
+ wdev = cfg->p2p_wdev;
+ if (!wdev) {
+ WL_ERR(("Invalid Ptr\n"));
+ return -EINVAL;
+ }
+ }
- if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) ==
- (bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL)) {
- /* preserve max RSSI if the measurements are
- * both on-channel or both off-channel
- */
- WL_SCAN(("%s("MACDBG"), same onchan"
- ", RSSI: prev %d new %d\n",
- bss->SSID, MAC2STRDBG(bi->BSSID.octet),
- bss->RSSI, bi->RSSI));
- bi->RSSI = MAX(bss->RSSI, bi->RSSI);
- } else if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) &&
- (bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) == 0) {
- /* preserve the on-channel rssi measurement
- * if the new measurement is off channel
- */
- WL_SCAN(("%s("MACDBG"), prev onchan"
- ", RSSI: prev %d new %d\n",
- bss->SSID, MAC2STRDBG(bi->BSSID.octet),
- bss->RSSI, bi->RSSI));
- bi->RSSI = bss->RSSI;
- bi->flags |= WL_BSS_FLAGS_RSSI_ONCHANNEL;
- }
- if (dtoh32(bss->length) != bi_length) {
- u32 prev_len = dtoh32(bss->length);
-
- WL_SCAN(("bss info replacement"
- " is occured(bcast:%d->probresp%d)\n",
- bss->ie_length, bi->ie_length));
- WL_SCAN(("%s("MACDBG"), replacement!(%d -> %d)\n",
- bss->SSID, MAC2STRDBG(bi->BSSID.octet),
- prev_len, bi_length));
-
- if (list->buflen - prev_len + bi_length
- > ESCAN_BUF_SIZE) {
- WL_ERR(("Buffer is too small: keep the"
- " previous result of this AP\n"));
- /* Only update RSSI */
- bss->RSSI = bi->RSSI;
- bss->flags |= (bi->flags
- & WL_BSS_FLAGS_RSSI_ONCHANNEL);
- goto exit;
- }
+ wl_cfgp2p_unregister_ndev(cfg);
- if (i < list->count - 1) {
- /* memory copy required by this case only */
- memmove((u8 *)bss + bi_length,
- (u8 *)bss + prev_len,
- list->buflen - cur_len - prev_len);
- }
- list->buflen -= prev_len;
- list->buflen += bi_length;
- }
- list->version = dtoh32(bi->version);
- memcpy((u8 *)bss, (u8 *)bi, bi_length);
- goto exit;
- }
- cur_len += dtoh32(bss->length);
- }
- if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
-#ifdef ESCAN_BUF_OVERFLOW_MGMT
- wl_cfg80211_remove_lowRSSI_info(list, candidate, bi);
- if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
- WL_DBG(("RSSI(" MACDBG ") is too low(%d) to add Buffer\n",
- MAC2STRDBG(bi->BSSID.octet), bi->RSSI));
- goto exit;
- }
-#else
- WL_ERR(("Buffer is too small: ignoring\n"));
- goto exit;
-#endif /* ESCAN_BUF_OVERFLOW_MGMT */
- }
+ cfg->p2p_wdev = NULL;
+ cfg->p2p_net = NULL;
+ WL_DBG(("Freeing 0x%p \n", wdev));
+ kfree(wdev);
- memcpy(&(((char *)list)[list->buflen]), bi, bi_length);
- list->version = dtoh32(bi->version);
- list->buflen += bi_length;
- list->count++;
+ return 0;
+}
+#endif
- /*
- * !Broadcast && number of ssid = 1 && number of channels =1
- * means specific scan to association
- */
- if (wl_cfgp2p_is_p2p_specific_scan(cfg->scan_request)) {
- WL_ERR(("P2P assoc scan fast aborted.\n"));
- wl_notify_escan_complete(cfg, cfg->escan_info.ndev, false, true);
- goto exit;
- }
- }
+static s32 wl_cfg80211_attach_post(struct net_device *ndev)
+{
+ struct bcm_cfg80211 * cfg;
+ s32 err = 0;
+ s32 ret = 0;
+ WL_TRACE(("In\n"));
+ if (unlikely(!ndev)) {
+ WL_ERR(("ndev is invaild\n"));
+ return -ENODEV;
+ }
+ cfg = wl_get_cfg(ndev);
+ if (unlikely(!cfg)) {
+ WL_ERR(("cfg is invaild\n"));
+ return -EINVAL;
}
- else if (status == WLC_E_STATUS_SUCCESS) {
- cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
- wl_escan_print_sync_id(status, cfg->escan_info.cur_sync_id,
- escan_result->sync_id);
+ if (!wl_get_drv_status(cfg, READY, ndev)) {
+ if (cfg->wdev) {
+ ret = wl_cfgp2p_supported(cfg, ndev);
+ if (ret > 0) {
+#if !defined(WL_ENABLE_P2P_IF)
+ cfg->wdev->wiphy->interface_modes |=
+ (BIT(NL80211_IFTYPE_P2P_CLIENT)|
+ BIT(NL80211_IFTYPE_P2P_GO));
+#endif /* !WL_ENABLE_P2P_IF */
+ if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
+ goto fail;
- if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
- WL_DBG(("ACTION FRAME SCAN DONE\n"));
- wl_clr_p2p_status(cfg, SCANNING);
- wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
- if (cfg->afx_hdl->peer_chan == WL_INVALID)
- complete(&cfg->act_frm_scan);
- } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
- WL_SCAN(("ESCAN COMPLETED\n"));
- DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_COMPLETE);
- cfg->bss_list = wl_escan_get_buf(cfg, FALSE);
- if (!scan_req_match(cfg)) {
- WL_MSG(ndev->name, "SCAN COMPLETED: scanned AP count=%d\n",
- cfg->bss_list->count);
- }
- wl_inform_bss(cfg);
- wl_notify_escan_complete(cfg, ndev, false, false);
- }
- wl_escan_increment_sync_id(cfg, SCAN_BUF_NEXT);
- } else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) ||
-#ifdef BCMCCX
- (status == WLC_E_STATUS_CCXFASTRM) ||
-#endif /* BCMCCX */
- (status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) ||
- (status == WLC_E_STATUS_NEWASSOC)) {
- /* Dump FW preserve buffer content */
- if (status == WLC_E_STATUS_ABORT) {
- wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
- }
- /* Handle all cases of scan abort */
- cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
- wl_escan_print_sync_id(status, escan_result->sync_id,
- cfg->escan_info.cur_sync_id);
- WL_DBG(("ESCAN ABORT reason: %d\n", status));
- if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
- WL_DBG(("ACTION FRAME SCAN DONE\n"));
- wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
- wl_clr_p2p_status(cfg, SCANNING);
- if (cfg->afx_hdl->peer_chan == WL_INVALID)
- complete(&cfg->act_frm_scan);
- } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
- WL_INFORM_MEM(("ESCAN ABORTED\n"));
- cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
- if (!scan_req_match(cfg)) {
- WL_TRACE_HW4(("SCAN ABORTED: scanned AP count=%d\n",
- cfg->bss_list->count));
- }
-#ifdef DUAL_ESCAN_RESULT_BUFFER
- if (escan_result->sync_id != cfg->escan_info.cur_sync_id) {
- /* If sync_id is not matching, then the abort might have
- * come for the old scan req or for the in-driver initiated
- * scan. So do abort for scan_req for which sync_id is
- * matching.
- */
- WL_INFORM_MEM(("sync_id mismatch (%d != %d). "
- "Ignore the scan abort event.\n",
- escan_result->sync_id, cfg->escan_info.cur_sync_id));
- goto exit;
+#if defined(WL_ENABLE_P2P_IF)
+ if (cfg->p2p_net) {
+ /* Update MAC addr for p2p0 interface here. */
+ memcpy(cfg->p2p_net->dev_addr, ndev->dev_addr, ETH_ALEN);
+ cfg->p2p_net->dev_addr[0] |= 0x02;
+ WL_MSG(cfg->p2p_net->name, "p2p_dev_addr="MACDBG "\n",
+ MAC2STRDBG(cfg->p2p_net->dev_addr));
+ } else {
+ WL_ERR(("p2p_net not yet populated."
+ " Couldn't update the MAC Address for p2p0 \n"));
+ return -ENODEV;
+ }
+#endif /* WL_ENABLE_P2P_IF */
+ cfg->p2p_supported = true;
+ } else if (ret == 0) {
+ if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
+ goto fail;
} else {
- /* sync id is matching, abort the scan */
- WL_INFORM_MEM(("scan aborted for sync_id: %d \n",
- cfg->escan_info.cur_sync_id));
- wl_inform_bss(cfg);
- wl_notify_escan_complete(cfg, ndev, true, false);
- }
-#else
- wl_inform_bss(cfg);
- wl_notify_escan_complete(cfg, ndev, true, false);
-#endif /* DUAL_ESCAN_RESULT_BUFFER */
- } else {
- /* If there is no pending host initiated scan, do nothing */
- WL_DBG(("ESCAN ABORT: No pending scans. Ignoring event.\n"));
- }
- wl_escan_increment_sync_id(cfg, SCAN_BUF_CNT);
- } else if (status == WLC_E_STATUS_TIMEOUT) {
- WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request));
- WL_ERR(("reason[0x%x]\n", e->reason));
- if (e->reason == 0xFFFFFFFF) {
- wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
- }
- } else {
- WL_ERR(("unexpected Escan Event %d : abort\n", status));
- cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
- wl_escan_print_sync_id(status, escan_result->sync_id,
- cfg->escan_info.cur_sync_id);
- if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
- WL_DBG(("ACTION FRAME SCAN DONE\n"));
- wl_clr_p2p_status(cfg, SCANNING);
- wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
- if (cfg->afx_hdl->peer_chan == WL_INVALID)
- complete(&cfg->act_frm_scan);
- } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
- cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
- if (!scan_req_match(cfg)) {
- WL_TRACE_HW4(("SCAN ABORTED(UNEXPECTED): "
- "scanned AP count=%d\n",
- cfg->bss_list->count));
+ /* SDIO bus timeout */
+ err = -ENODEV;
+ goto fail;
}
- wl_inform_bss(cfg);
- wl_notify_escan_complete(cfg, ndev, true, false);
}
- wl_escan_increment_sync_id(cfg, 2);
}
-#else /* WL_DRV_AVOID_SCANCACHE */
- err = wl_escan_without_scan_cache(cfg, escan_result, ndev, e, status);
-#endif /* WL_DRV_AVOID_SCANCACHE */
-exit:
- mutex_unlock(&cfg->usr_sync);
+ wl_set_drv_status(cfg, READY, ndev);
+fail:
return err;
}
-static void wl_cfg80211_concurrent_roam(struct bcm_cfg80211 *cfg, int enable)
+struct bcm_cfg80211 *wl_get_cfg(struct net_device *ndev)
{
- u32 connected_cnt = wl_get_drv_status_all(cfg, CONNECTED);
- bool p2p_connected = wl_cfgp2p_vif_created(cfg);
- struct net_info *iter, *next;
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
- if (!(cfg->roam_flags & WL_ROAM_OFF_ON_CONCURRENT))
- return;
+ if (!wdev || !wdev->wiphy)
+ return NULL;
- WL_DBG(("roam off:%d p2p_connected:%d connected_cnt:%d \n",
- enable, p2p_connected, connected_cnt));
- /* Disable FW roam when we have a concurrent P2P connection */
- if (enable && p2p_connected && connected_cnt > 1) {
+ return wiphy_priv(wdev->wiphy);
+}
- /* Mark it as to be reverted */
- cfg->roam_flags |= WL_ROAM_REVERT_STATUS;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic push")
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#endif // endif
- for_each_ndev(cfg, iter, next) {
- if (iter->ndev && iter->wdev &&
- iter->wdev->iftype == NL80211_IFTYPE_STATION) {
- if (wldev_iovar_setint(iter->ndev, "roam_off", TRUE)
- == BCME_OK) {
- iter->roam_off = TRUE;
- }
- else {
- WL_ERR(("error to enable roam_off\n"));
- }
- }
- }
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic pop")
-#endif // endif
+s32
+wl_cfg80211_net_attach(struct net_device *primary_ndev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(primary_ndev);
+
+ if (!cfg) {
+ WL_ERR(("cfg null\n"));
+ return BCME_ERROR;
}
- else if (!enable && (cfg->roam_flags & WL_ROAM_REVERT_STATUS)) {
- cfg->roam_flags &= ~WL_ROAM_REVERT_STATUS;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic push")
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#endif // endif
- for_each_ndev(cfg, iter, next) {
- if (iter->ndev && iter->wdev &&
- iter->wdev->iftype == NL80211_IFTYPE_STATION) {
- if (iter->roam_off != WL_INVALID) {
- if (wldev_iovar_setint(iter->ndev, "roam_off", FALSE)
- == BCME_OK) {
- iter->roam_off = FALSE;
- }
- else {
- WL_ERR(("error to disable roam_off\n"));
- }
- }
- }
- }
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic pop")
-#endif // endif
+#ifdef WL_STATIC_IF
+ /* Register dummy n/w iface. FW init will happen only from dev_open */
+ if (wl_cfg80211_register_static_if(cfg, NL80211_IFTYPE_STATION,
+ WL_STATIC_IFNAME_PREFIX) == NULL) {
+ WL_ERR(("static i/f registration failed!\n"));
+ return BCME_ERROR;
}
-
- return;
+#endif /* WL_STATIC_IF */
+ return BCME_OK;
}
-static void wl_cfg80211_determine_vsdb_mode(struct bcm_cfg80211 *cfg)
+s32 wl_cfg80211_attach(struct net_device *ndev, void *context)
{
- struct net_info *iter, *next;
- u32 ctl_chan = 0;
- u32 chanspec = 0;
- u32 pre_ctl_chan = 0;
- u32 connected_cnt = wl_get_drv_status_all(cfg, CONNECTED);
- cfg->vsdb_mode = false;
+ struct wireless_dev *wdev;
+ struct bcm_cfg80211 *cfg;
+ s32 err = 0;
+ struct device *dev;
+ u16 bssidx = 0;
+ u16 ifidx = 0;
+ dhd_pub_t *dhd = (struct dhd_pub *)(context);
- if (connected_cnt <= 1) {
- return;
- }
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic push")
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#endif // endif
- for_each_ndev(cfg, iter, next) {
- /* p2p discovery iface ndev could be null */
- if (iter->ndev) {
- chanspec = 0;
- ctl_chan = 0;
- if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
- if (wldev_iovar_getint(iter->ndev, "chanspec",
- (s32 *)&chanspec) == BCME_OK) {
- chanspec = wl_chspec_driver_to_host(chanspec);
- ctl_chan = wf_chspec_ctlchan(chanspec);
- wl_update_prof(cfg, iter->ndev, NULL,
- &ctl_chan, WL_PROF_CHAN);
- }
- if (!cfg->vsdb_mode) {
- if (!pre_ctl_chan && ctl_chan)
- pre_ctl_chan = ctl_chan;
- else if (pre_ctl_chan && (pre_ctl_chan != ctl_chan)) {
- cfg->vsdb_mode = true;
- }
- }
- }
- }
- }
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic pop")
-#endif // endif
- WL_MSG("wlan", "%s concurrency is enabled\n", cfg->vsdb_mode ? "Multi Channel" : "Same Channel");
- return;
-}
-
-int
-wl_cfg80211_determine_p2p_rsdb_mode(struct bcm_cfg80211 *cfg)
-{
- struct net_info *iter, *next;
- u32 chanspec = 0;
- u32 band = 0;
- u32 pre_band = 0;
- bool is_rsdb_supported = FALSE;
- bool rsdb_mode = FALSE;
-
- is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE);
-
- if (!is_rsdb_supported) {
- return 0;
- }
-
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic push")
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#endif // endif
- for_each_ndev(cfg, iter, next) {
- /* p2p discovery iface ndev could be null */
- if (iter->ndev) {
- chanspec = 0;
- band = 0;
- if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
- if (wldev_iovar_getint(iter->ndev, "chanspec",
- (s32 *)&chanspec) == BCME_OK) {
- chanspec = wl_chspec_driver_to_host(chanspec);
- band = CHSPEC_BAND(chanspec);
- }
-
- if (!pre_band && band) {
- pre_band = band;
- } else if (pre_band && (pre_band != band)) {
- rsdb_mode = TRUE;
- }
- }
- }
- }
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic pop")
-#endif // endif
- WL_DBG(("RSDB mode is %s\n", rsdb_mode ? "enabled" : "disabled"));
-
- return rsdb_mode;
-}
-
-static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info,
- enum wl_status state, bool set)
-{
- s32 pm = PM_FAST;
- s32 err = BCME_OK;
- u32 mode;
- u32 chan = 0;
- struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
- dhd_pub_t *dhd = cfg->pub;
-#ifdef RTT_SUPPORT
- rtt_status_info_t *rtt_status;
-#endif /* RTT_SUPPORT */
- if (dhd->busstate == DHD_BUS_DOWN) {
- WL_ERR(("%s : busstate is DHD_BUS_DOWN!\n", __FUNCTION__));
- return 0;
- }
- WL_DBG(("Enter state %d set %d _net_info->pm_restore %d iface %s\n",
- state, set, _net_info->pm_restore, _net_info->ndev->name));
-
- if (state != WL_STATUS_CONNECTED)
- return 0;
- mode = wl_get_mode_by_netdev(cfg, _net_info->ndev);
- if (set) {
- wl_cfg80211_concurrent_roam(cfg, 1);
- wl_cfg80211_determine_vsdb_mode(cfg);
- if (mode == WL_MODE_AP) {
- if (wl_add_remove_eventmsg(primary_dev, WLC_E_P2P_PROBREQ_MSG, false))
- WL_ERR((" failed to unset WLC_E_P2P_PROPREQ_MSG\n"));
- }
- pm = PM_OFF;
- if ((err = wldev_ioctl_set(_net_info->ndev, WLC_SET_PM, &pm,
- sizeof(pm))) != 0) {
- if (err == -ENODEV)
- WL_DBG(("%s:netdev not ready\n",
- _net_info->ndev->name));
- else
- WL_ERR(("%s:error (%d)\n",
- _net_info->ndev->name, err));
-
- wl_cfg80211_update_power_mode(_net_info->ndev);
- }
- wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_SHORT);
-#if defined(WLTDLS)
- if (wl_cfg80211_is_concurrent_mode(primary_dev)) {
- err = wldev_iovar_setint(primary_dev, "tdls_enable", 0);
- }
-#endif /* defined(WLTDLS) */
-
-#ifdef DISABLE_FRAMEBURST_VSDB
- if (!DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_HOSTAP_MODE) &&
- wl_cfg80211_is_concurrent_mode(primary_dev) &&
- !wl_cfg80211_determine_p2p_rsdb_mode(cfg)) {
- wl_cfg80211_set_frameburst(cfg, FALSE);
- }
-#endif /* DISABLE_FRAMEBURST_VSDB */
-#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
- if (DHD_OPMODE_STA_SOFTAP_CONCURR(dhd)) {
- /* Enable frameburst for
- * STA/SoftAP concurrent mode
- */
- wl_cfg80211_set_frameburst(cfg, TRUE);
- }
-#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
- } else { /* clear */
- chan = 0;
- /* clear chan information when the net device is disconnected */
- wl_update_prof(cfg, _net_info->ndev, NULL, &chan, WL_PROF_CHAN);
- wl_cfg80211_determine_vsdb_mode(cfg);
- if (primary_dev == _net_info->ndev) {
- pm = PM_FAST;
-#ifdef RTT_SUPPORT
- rtt_status = GET_RTTSTATE(dhd);
- if (rtt_status->status != RTT_ENABLED) {
-#endif /* RTT_SUPPORT */
- if (dhd_conf_get_pm(dhd) >= 0)
- pm = dhd_conf_get_pm(dhd);
- if ((err = wldev_ioctl_set(_net_info->ndev, WLC_SET_PM, &pm,
- sizeof(pm))) != 0) {
- if (err == -ENODEV)
- WL_DBG(("%s:netdev not ready\n",
- _net_info->ndev->name));
- else
- WL_ERR(("%s:error (%d)\n",
- _net_info->ndev->name, err));
-
- wl_cfg80211_update_power_mode(_net_info->ndev);
- }
-#ifdef RTT_SUPPORT
- }
-#endif /* RTT_SUPPORT */
- }
- wl_cfg80211_concurrent_roam(cfg, 0);
-#if defined(WLTDLS)
- if (!wl_cfg80211_is_concurrent_mode(primary_dev)) {
- err = wldev_iovar_setint(primary_dev, "tdls_enable", 1);
- }
-#endif /* defined(WLTDLS) */
-
-#if defined(DISABLE_FRAMEBURST_VSDB)
- if (!DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_HOSTAP_MODE)) {
- wl_cfg80211_set_frameburst(cfg, TRUE);
- }
-#endif /* DISABLE_FRAMEBURST_VSDB */
-#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
- if (DHD_OPMODE_STA_SOFTAP_CONCURR(dhd) &&
- (cfg->ap_oper_channel <= CH_MAX_2G_CHANNEL)) {
- /* Disable frameburst for stand-alone 2GHz SoftAP */
- wl_cfg80211_set_frameburst(cfg, FALSE);
- }
-#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
- }
- return err;
-}
-static s32 wl_init_scan(struct bcm_cfg80211 *cfg)
-{
- int err = 0;
-
- cfg->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler;
- cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
- wl_escan_init_sync_id(cfg);
-
- /* Init scan_timeout timer */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- timer_setup(&cfg->scan_timeout, wl_scan_timeout, 0);
-#else
- init_timer(&cfg->scan_timeout);
- cfg->scan_timeout.data = (unsigned long) cfg;
- cfg->scan_timeout.function = wl_scan_timeout;
-#endif
-
- return err;
-}
-
-#ifdef DHD_LOSSLESS_ROAMING
-static s32 wl_init_roam_timeout(struct bcm_cfg80211 *cfg)
-{
- int err = 0;
-
- /* Init roam timer */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- timer_setup(&cfg->roam_timeout, wl_roam_timeout, 0);
-#else
- init_timer(&cfg->roam_timeout);
- cfg->roam_timeout.data = (unsigned long) cfg;
- cfg->roam_timeout.function = wl_roam_timeout;
-#endif
-
- return err;
-}
-#endif /* DHD_LOSSLESS_ROAMING */
-
-static s32 wl_init_priv(struct bcm_cfg80211 *cfg)
-{
- struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
- struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
- s32 err = 0;
-
- cfg->scan_request = NULL;
- cfg->pwr_save = !!(wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT);
-#ifdef DISABLE_BUILTIN_ROAM
- cfg->roam_on = false;
-#else
- cfg->roam_on = true;
-#endif /* DISABLE_BUILTIN_ROAM */
- cfg->active_scan = true;
- cfg->rf_blocked = false;
- cfg->vsdb_mode = false;
-#if defined(BCMSDIO) || defined(BCMDBUS)
- cfg->wlfc_on = false;
-#endif /* BCMSDIO || BCMDBUS */
- cfg->roam_flags |= WL_ROAM_OFF_ON_CONCURRENT;
- cfg->disable_roam_event = false;
- /* register interested state */
- set_bit(WL_STATUS_CONNECTED, &cfg->interrested_state);
- spin_lock_init(&cfg->cfgdrv_lock);
- mutex_init(&cfg->ioctl_buf_sync);
- init_waitqueue_head(&cfg->netif_change_event);
- init_waitqueue_head(&cfg->wps_done_event);
- init_completion(&cfg->send_af_done);
- init_completion(&cfg->iface_disable);
- mutex_init(&cfg->usr_sync);
- mutex_init(&cfg->event_sync);
- mutex_init(&cfg->scan_complete);
- mutex_init(&cfg->if_sync);
- mutex_init(&cfg->pm_sync);
- mutex_init(&cfg->in4way_sync);
-#ifdef WLTDLS
- mutex_init(&cfg->tdls_sync);
-#endif /* WLTDLS */
-#ifdef WL_BCNRECV
- mutex_init(&cfg->bcn_sync);
-#endif /* WL_BCNRECV */
-#ifdef WL_WPS_SYNC
- wl_init_wps_reauth_sm(cfg);
-#endif /* WL_WPS_SYNC */
- wl_init_eq(cfg);
- err = wl_init_priv_mem(cfg);
- if (err)
- return err;
- if (wl_create_event_handler(cfg))
- return -ENOMEM;
- wl_init_event_handler(cfg);
- err = wl_init_scan(cfg);
- if (err)
- return err;
-#ifdef DHD_LOSSLESS_ROAMING
- err = wl_init_roam_timeout(cfg);
- if (err) {
- return err;
- }
-#endif /* DHD_LOSSLESS_ROAMING */
- wl_init_conf(cfg->conf);
- wl_init_prof(cfg, ndev);
- wl_link_down(cfg);
- DNGL_FUNC(dhd_cfg80211_init, (cfg));
-#ifdef WL_NAN
- cfg->nan_dp_state = NAN_DP_STATE_DISABLED;
- init_waitqueue_head(&cfg->ndp_if_change_event);
-#endif /* WL_NAN */
-#ifdef WL_NAN
- mutex_init(&cfg->nancfg.nan_sync);
- init_waitqueue_head(&cfg->nancfg.nan_event_wait);
-#endif /* WL_NAN */
- cfg->pmk_list->pmkids.length = sizeof(pmkid_list_v2_t) - sizeof(pmkid_v2_t);
- cfg->pmk_list->pmkids.version = PMKID_LIST_VER_2;
- return err;
-}
-
-static void wl_deinit_priv(struct bcm_cfg80211 *cfg)
-{
- DNGL_FUNC(dhd_cfg80211_deinit, (cfg));
- wl_destroy_event_handler(cfg);
- wl_flush_eq(cfg);
- wl_link_down(cfg);
- if (cfg->scan_timeout.function)
- del_timer_sync(&cfg->scan_timeout);
-#ifdef DHD_LOSSLESS_ROAMING
- if (cfg->roam_timeout.function)
- del_timer_sync(&cfg->roam_timeout);
-#endif // endif
- wl_deinit_priv_mem(cfg);
- if (wl_cfg80211_netdev_notifier_registered) {
- wl_cfg80211_netdev_notifier_registered = FALSE;
- unregister_netdevice_notifier(&wl_cfg80211_netdev_notifier);
- }
-}
-
-#if defined(WL_ENABLE_P2P_IF)
-static s32 wl_cfg80211_attach_p2p(struct bcm_cfg80211 *cfg)
-{
- WL_TRACE(("Enter \n"));
-
- if (wl_cfgp2p_register_ndev(cfg) < 0) {
- WL_ERR(("P2P attach failed. \n"));
- return -ENODEV;
- }
-
- return 0;
-}
-
-static s32 wl_cfg80211_detach_p2p(struct bcm_cfg80211 *cfg)
-{
- struct wireless_dev *wdev;
-
- WL_DBG(("Enter \n"));
- if (!cfg) {
- WL_ERR(("Invalid Ptr\n"));
- return -EINVAL;
- }
- else {
- wdev = cfg->p2p_wdev;
- if (!wdev) {
- WL_ERR(("Invalid Ptr\n"));
- return -EINVAL;
- }
- }
-
- wl_cfgp2p_unregister_ndev(cfg);
-
- cfg->p2p_wdev = NULL;
- cfg->p2p_net = NULL;
- WL_DBG(("Freeing 0x%p \n", wdev));
- kfree(wdev);
-
- return 0;
-}
-#endif
-
-static s32 wl_cfg80211_attach_post(struct net_device *ndev)
-{
- struct bcm_cfg80211 * cfg;
- s32 err = 0;
- s32 ret = 0;
- WL_INFORM_MEM(("In\n"));
- if (unlikely(!ndev)) {
- WL_ERR(("ndev is invaild\n"));
- return -ENODEV;
- }
- cfg = wl_get_cfg(ndev);
- if (unlikely(!cfg)) {
- WL_ERR(("cfg is invaild\n"));
- return -EINVAL;
- }
- if (!wl_get_drv_status(cfg, READY, ndev)) {
- if (cfg->wdev) {
- ret = wl_cfgp2p_supported(cfg, ndev);
- if (ret > 0) {
-#if !defined(WL_ENABLE_P2P_IF)
- cfg->wdev->wiphy->interface_modes |=
- (BIT(NL80211_IFTYPE_P2P_CLIENT)|
- BIT(NL80211_IFTYPE_P2P_GO));
-#endif /* !WL_ENABLE_P2P_IF */
- if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
- goto fail;
-
-#if defined(WL_ENABLE_P2P_IF)
- if (cfg->p2p_net) {
- /* Update MAC addr for p2p0 interface here. */
- memcpy(cfg->p2p_net->dev_addr, ndev->dev_addr, ETH_ALEN);
- cfg->p2p_net->dev_addr[0] |= 0x02;
- WL_MSG(cfg->p2p_net->name, "p2p_dev_addr="MACDBG "\n",
- MAC2STRDBG(cfg->p2p_net->dev_addr));
- } else {
- WL_ERR(("p2p_net not yet populated."
- " Couldn't update the MAC Address for p2p0 \n"));
- return -ENODEV;
- }
-#endif /* WL_ENABLE_P2P_IF */
- cfg->p2p_supported = true;
- } else if (ret == 0) {
- if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
- goto fail;
- } else {
- /* SDIO bus timeout */
- err = -ENODEV;
- goto fail;
- }
- }
- }
- wl_set_drv_status(cfg, READY, ndev);
-fail:
- return err;
-}
-
-struct bcm_cfg80211 *wl_get_cfg(struct net_device *ndev)
-{
- struct wireless_dev *wdev = ndev->ieee80211_ptr;
-
- if (!wdev || !wdev->wiphy)
- return NULL;
-
- return wiphy_priv(wdev->wiphy);
-}
-
-s32 wl_cfg80211_attach(struct net_device *ndev, void *context)
-{
- struct wireless_dev *wdev;
- struct bcm_cfg80211 *cfg;
- s32 err = 0;
- struct device *dev;
- u16 bssidx = 0;
- u16 ifidx = 0;
- dhd_pub_t *dhd = (struct dhd_pub *)(context);
-
- WL_TRACE(("In\n"));
- if (!ndev) {
- WL_ERR(("ndev is invaild\n"));
- return -ENODEV;
+ WL_TRACE(("In\n"));
+ if (!ndev) {
+ WL_ERR(("ndev is invaild\n"));
+ return -ENODEV;
}
WL_DBG(("func %p\n", wl_cfg80211_get_parent_dev()));
dev = wl_cfg80211_get_parent_dev();
#endif
INIT_DELAYED_WORK(&cfg->pm_enable_work, wl_cfg80211_work_handler);
-#ifdef WL_IRQSET
- INIT_DELAYED_WORK(&cfg->irq_set_work, wl_irq_set_work_handler);
-#endif /* WL_IRQSET */
- wl_cfg80211_set_bcmcfg(cfg);
+#ifdef WL_NAN
+ WL_DBG(("NAN: Armed wl_cfgnan_delayed_disable work\n"));
+ INIT_DELAYED_WORK(&cfg->nan_disable, wl_cfgnan_delayed_disable);
+#endif /* WL_NAN */
cfg->rssi_sum_report = FALSE;
-#ifdef WL_STATIC_IF
- /* Register dummy n/w iface. FW init will happen only from dev_open */
- if (wl_cfg80211_register_static_if(cfg, WL_IF_TYPE_AP,
- WL_STATIC_IFNAME_PREFIX) == NULL) {
- WL_ERR(("static i/f registration failed!\n"));
- err = -ENODEV;
- goto cfg80211_attach_out;
- }
-#endif /* WL_STATIC_IF */
-
return err;
cfg80211_attach_out:
return;
}
wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
-#ifdef WL_IRQSET
- cancel_delayed_work_sync(&cfg->irq_set_work);
-#endif /* WL_IRQSET */
#if defined(COEX_DHCP)
wl_cfg80211_btcoex_deinit();
WL_DBG(("Exit\n"));
}
+#if defined(CONFIG_WLAN_BEYONDX) || defined(CONFIG_SEC_5GMODEL)
+/* Register the RIL-bridge event notifier exactly once. On registration
+ * failure the 'registered' flag is rolled back so a later call can retry.
+ * Note: '(void)' is used instead of '()' — an empty parameter list is an
+ * old-style unspecified-args declarator and trips -Wstrict-prototypes.
+ */
+void wl_cfg80211_register_dev_ril_bridge_event_notifier(void)
+{
+	WL_DBG(("Enter\n"));
+	if (!wl_cfg80211_ril_bridge_notifier_registered) {
+		s32 err = 0;
+		/* Set the flag before registering to guard against re-entry;
+		 * cleared again below if registration fails.
+		 */
+		wl_cfg80211_ril_bridge_notifier_registered = TRUE;
+		err = register_dev_ril_bridge_event_notifier(&wl_cfg80211_ril_bridge_notifier);
+		if (err) {
+			wl_cfg80211_ril_bridge_notifier_registered = FALSE;
+			WL_ERR(("Failed to register ril_notifier! %d\n", err));
+		}
+	}
+}
+
+/* Counterpart of the register call above: unregister only when currently
+ * registered, clearing the flag first.
+ */
+void wl_cfg80211_unregister_dev_ril_bridge_event_notifier(void)
+{
+	WL_DBG(("Enter\n"));
+	if (wl_cfg80211_ril_bridge_notifier_registered) {
+		wl_cfg80211_ril_bridge_notifier_registered = FALSE;
+		unregister_dev_ril_bridge_event_notifier(&wl_cfg80211_ril_bridge_notifier);
+	}
+}
+#endif /* CONFIG_WLAN_BEYONDX || defined(CONFIG_SEC_5GMODEL) */
+
static void wl_print_event_data(struct bcm_cfg80211 *cfg,
uint32 event_type, const wl_event_msg_t *e)
{
WL_DBG(("Enter \n"));
BCM_SET_CONTAINER_OF(cfg, work_data, struct bcm_cfg80211, event_work);
- cfg->wl_evt_hdlr_entry_time = OSL_SYSUPTIME_US();
+ cfg->wl_evt_hdlr_entry_time = OSL_LOCALTIME_NS();
DHD_EVENT_WAKE_LOCK(cfg->pub);
while ((e = wl_deq_event(cfg))) {
- cfg->wl_evt_deq_time = OSL_SYSUPTIME_US();
+ s32 status = ntoh32(e->emsg.status);
+ u32 event_type = ntoh32(e->emsg.event_type);
+ bool scan_cmplt_evt = (event_type == WLC_E_ESCAN_RESULT) &&
+ ((status == WLC_E_STATUS_SUCCESS) || (status == WLC_E_STATUS_ABORT));
+
+ cfg->wl_evt_deq_time = OSL_LOCALTIME_NS();
+ if (scan_cmplt_evt) {
+ cfg->scan_deq_time = OSL_LOCALTIME_NS();
+ }
/* Print only critical events to avoid too many prints */
wl_print_event_data(cfg, e->etype, &e->emsg);
ntoh32(e->emsg.reason)));
cfg->evt_handler[e->etype](cfg, wdev_to_cfgdev(wdev),
&e->emsg, e->edata);
+ if (scan_cmplt_evt) {
+ cfg->scan_hdlr_cmplt_time = OSL_LOCALTIME_NS();
+ }
}
} else {
WL_DBG(("Unknown Event (%d): ignoring\n", e->etype));
mutex_unlock(&cfg->if_sync);
fail:
wl_put_event(cfg, e);
- cfg->wl_evt_hdlr_exit_time = OSL_SYSUPTIME_US();
+ if (scan_cmplt_evt) {
+ cfg->scan_cmplt_time = OSL_LOCALTIME_NS();
+ }
+ cfg->wl_evt_hdlr_exit_time = OSL_LOCALTIME_NS();
}
DHD_EVENT_WAKE_UNLOCK(cfg->pub);
}
+/*
+* Generic API to handle critical events that don't need
+* cfg enqueuing or sleepable API calls, i.e. events that are safe
+* to service directly on the event-dispatch path.
+*
+* Returns BCME_OK only when the event was consumed here; the caller
+* (wl_cfg80211_event) then skips normal event queuing. Any other
+* return value means "not handled, proceed with regular dispatch".
+*/
+s32
+wl_cfg80211_handle_critical_events(struct bcm_cfg80211 *cfg,
+	const wl_event_msg_t * e)
+{
+	s32 ret = BCME_ERROR;
+	u32 event_type = ntoh32(e->event_type);
+
+	/* Reject event codes outside the known firmware event range. */
+	if (event_type >= WLC_E_LAST) {
+		return BCME_ERROR;
+	}
+
+	switch (event_type) {
+		case WLC_E_NAN_CRITICAL: {
+#ifdef WL_NAN
+			/* Only the NAN stop sub-event is critical enough to
+			 * answer synchronously with an async-disable response.
+			 */
+			if (ntoh32(e->reason) == WL_NAN_EVENT_STOP) {
+				ret =
+				wl_cfgvendor_nan_send_async_disable_resp(cfg->static_ndev->ieee80211_ptr);
+			}
+#endif /* WL_NAN */
+			break;
+		}
+		default:
+			ret = BCME_ERROR;
+	}
+	return ret;
+}
+
void
wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t * e, void *data)
{
return;
}
- if (wl_get_p2p_status(cfg, IF_CHANGING) || wl_get_p2p_status(cfg, IF_ADDING)) {
- WL_ERR(("during IF change, ignore event %d\n", event_type));
- return;
- }
-
if (event_type == WLC_E_IF) {
/* Don't process WLC_E_IF events in wl_cfg80211 layer */
return;
return;
}
+ /* Handle wl_cfg80211_critical_events */
+ if (wl_cfg80211_handle_critical_events(cfg, e) == BCME_OK) {
+ return;
+ }
+
if (event_type == WLC_E_PFN_NET_FOUND) {
WL_DBG((" PNOEVENT: PNO_NET_FOUND\n"));
}
if ((event_type == WLC_E_ESCAN_RESULT) &&
((status == WLC_E_STATUS_SUCCESS) ||
(status == WLC_E_STATUS_ABORT))) {
- cfg->scan_enq_time = OSL_SYSUPTIME_US();
+ cfg->scan_enq_time = OSL_LOCALTIME_NS();
WL_INFORM_MEM(("Enqueing escan completion (%d). WQ state:0x%x \n",
status, work_busy(&cfg->event_work)));
}
data_len = 0;
if (data)
data_len = ntoh32(msg->datalen);
- evtq_size = sizeof(struct wl_event_q) + data_len;
+ evtq_size = (uint32)(sizeof(struct wl_event_q) + data_len);
e = (struct wl_event_q *)MALLOCZ(cfg->osh, evtq_size);
if (unlikely(!e)) {
WL_ERR(("event alloc failed\n"));
infra = 1;
break;
case WL_IF_TYPE_MONITOR:
- case WL_IF_TYPE_AWDL:
case WL_IF_TYPE_NAN:
/* Intentionall fall through */
default:
u8 *pbuf = NULL;
bool dfs_radar_disabled = FALSE;
-#define LOCAL_BUF_LEN 1024
+#define LOCAL_BUF_LEN 2048
pbuf = (u8 *)MALLOCZ(cfg->osh, LOCAL_BUF_LEN);
if (pbuf == NULL) {
WL_ERR(("failed to allocate local buf\n"));
s32 cur_band = -1;
struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS] = {NULL, };
- memset(bandlist, 0, sizeof(bandlist));
+ bzero(bandlist, sizeof(bandlist));
err = wldev_ioctl_get(dev, WLC_GET_BANDLIST, bandlist,
sizeof(bandlist));
if (unlikely(err)) {
/* AMPDU length limit, support max 1MB (2 ^ (13 + 7)) */
bands[index]->vht_cap.cap |=
(7 << VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT);
- WL_DBG(("%s band[%d] vht_enab=%d vht_cap=%08x "
+ WL_DBG(("__wl_update_wiphybands band[%d] vht_enab=%d vht_cap=%08x "
"vht_rx_mcs_map=%04x vht_tx_mcs_map=%04x\n",
- __FUNCTION__, index,
+ index,
bands[index]->vht_cap.vht_supported,
bands[index]->vht_cap.cap,
bands[index]->vht_cap.vht_mcs.rx_mcs_map,
#ifdef WL_HOST_BAND_MGMT
s32 ret = 0;
#endif /* WL_HOST_BAND_MGMT */
- struct net_info *netinfo = NULL;
struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
struct wireless_dev *wdev = ndev->ieee80211_ptr;
+#if defined(WL_NANP2P)
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif // endif
#ifdef WLTDLS
u32 tdls;
#endif /* WLTDLS */
u16 wl_iftype = 0;
u16 wl_mode = 0;
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
WL_DBG(("In\n"));
if (unlikely(err))
return err;
+#if 0
+	/* terence 20180108: this patch causes a kernel panic with the
+	 * steps below in Android 4.4 with kernel 3.4:
+ * insmod bcmdhd.ko; hostapd /data/misc/wifi/hostapd.conf
+ */
/* Always bring up interface in STA mode.
* Did observe , if previous SofAP Bringup/cleanup
* is not done properly, iftype is stuck with AP mode.
* So during next wlan0 up, forcing the type to STA
*/
netinfo = wl_get_netinfo_by_wdev(cfg, wdev);
+ if (!netinfo) {
+ WL_ERR(("there is no netinfo\n"));
+ return -ENODEV;
+ }
ndev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
netinfo->iftype = WL_IF_TYPE_STA;
+#endif
if (cfg80211_to_wl_iftype(wdev->iftype, &wl_iftype, &wl_mode) < 0) {
return -EINVAL;
}
}
+ err = wldev_iovar_getbuf(ndev, "wlc_ver", NULL, 0,
+ &cfg->wlc_ver, sizeof(wl_wlc_version_t), NULL);
+ if (likely(!err)) {
+ WL_INFORM(("wl version. Major: %d\n",
+ cfg->wlc_ver.wlc_ver_major));
+ if ((cfg->wlc_ver.wlc_ver_major >= MIN_ESCAN_PARAM_V2_FW_MAJOR) &&
+ (wldev_iovar_getbuf(ndev, "scan_ver", NULL, 0,
+ ioctl_buf, sizeof(ioctl_buf), NULL) == BCME_OK)) {
+ WL_INFORM_MEM(("scan_params v2\n"));
+ /* use scan_params ver2 */
+ cfg->scan_params_v2 = true;
+ }
+ } else {
+ if (err == BCME_UNSUPPORTED) {
+ /* Ignore on unsupported chips */
+ err = BCME_OK;
+ } else {
+ WL_ERR(("wlc_ver query failed. err: %d\n", err));
+ return err;
+ }
+ }
#ifdef DHD_LOSSLESS_ROAMING
if (timer_pending(&cfg->roam_timeout)) {
del_timer_sync(&cfg->roam_timeout);
cfg->tdls_supported = true;
}
#endif /* WLTDLS */
-#ifdef WL_FILS
- if (wl_is_fils_supported(wdev->netdev)) {
- wiphy_ext_feature_set(wdev->wiphy, NL80211_EXT_FEATURE_FILS_SK_OFFLOAD);
- }
-#endif /* WL_FILS */
-
- if (!fw_ap_select) {
- /*
- * If FW ROAM flag is advertised, upper layer doesn't provide the
- * bssid & freq in the connect command. However, kernel ver >= 3.15,
- * provides bssid_hint & freq_hint which can be used by the firmware.
- * fw_ap_select variable determines whether FW selects the AP or the
- * user space selects the target AP within the given ESS.
- */
- wdev->wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_FW_ROAM;
+#ifdef WL_IFACE_MGMT
+#ifdef CUSTOM_IF_MGMT_POLICY
+ cfg->iface_data.policy = CUSTOM_IF_MGMT_POLICY;
+#else
+ cfg->iface_data.policy = WL_IF_POLICY_DEFAULT;
+#endif /* CUSTOM_IF_MGMT_POLICY */
+#endif /* WL_IFACE_MGMT */
+#ifdef WL_NAN
+#ifdef WL_NANP2P
+ if (FW_SUPPORTED(dhd, nanp2p)) {
+ /* Enable NANP2P concurrent support */
+ cfg->conc_disc = WL_NANP2P_CONC_SUPPORT;
+ WL_INFORM_MEM(("nan + p2p conc discovery is supported\n"));
+ cfg->nan_p2p_supported = true;
}
+#endif /* WL_NANP2P */
+#endif /* WL_NAN */
INIT_DELAYED_WORK(&cfg->pm_enable_work, wl_cfg80211_work_handler);
wl_set_drv_status(cfg, READY, ndev);
static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg)
{
s32 err = 0;
- unsigned long flags;
struct net_info *iter, *next;
struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
#if defined(WL_CFG80211) && (defined(WL_ENABLE_P2P_IF) || \
/* Delete pm_enable_work */
wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
-#ifdef WL_IRQSET
- cancel_delayed_work_sync(&cfg->irq_set_work);
-#endif /* WL_IRQSET */
if (cfg->p2p_supported) {
wl_clr_p2p_status(cfg, GO_NEG_PHASE);
#endif /* PROP_TXSTATUS_VSDB */
}
+#ifdef WL_NAN
+ mutex_lock(&cfg->if_sync);
+ /* Cancel pending nan disable work if any */
+ if (delayed_work_pending(&cfg->nan_disable)) {
+ WL_DBG(("Unarm the nan_disable work\n"));
+ cancel_delayed_work_sync(&cfg->nan_disable);
+ }
+ cfg->nancfg.disable_reason = NAN_BUS_IS_DOWN;
+ wl_cfgnan_disable(cfg);
+ mutex_unlock(&cfg->if_sync);
+#endif /* WL_NAN */
+
if (!dhd_download_fw_on_driverload) {
/* For built-in drivers/other drivers that do reset on
* "ifconfig <primary_iface> down", cleanup any left
/* Clear used mac addr mask */
cfg->vif_macaddr_mask = 0;
-#ifdef WL_NAN
- err = wl_cfgnan_disable(cfg, NAN_BUS_IS_DOWN);
- if (err != BCME_OK) {
- WL_ERR(("failed to disable nan, error[%d]\n", err));
- }
-#endif /* WL_NAN */
-
if (dhd->up)
{
/* If primary BSS is operational (for e.g SoftAP), bring it down */
}
/* clear all the security setting on primary Interface */
- wl_cfg80211_clear_security(cfg);
- }
-
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic push")
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#endif // endif
+ wl_cfg80211_clear_security(cfg);
+ }
+
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
if (iter->ndev) /* p2p discovery iface is null */
wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev);
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic pop")
-#endif // endif
#ifdef P2P_LISTEN_OFFLOADING
wl_cfg80211_p2plo_deinit(cfg);
#endif /* P2P_LISTEN_OFFLOADING */
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
- if (cfg->scan_request) {
- wl_notify_scan_done(cfg, true);
- cfg->scan_request = NULL;
- }
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic push")
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#endif // endif
+ /* cancel and notify scan complete, if scan request is pending */
+ wl_cfg80211_cancel_scan(cfg);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
/* p2p discovery iface ndev ptr could be null */
if (iter->ndev == NULL)
continue;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ WL_INFORM_MEM(("wl_cfg80211_down. connection state bit status: [%u:%u:%u:%u]\n",
+ wl_get_drv_status(cfg, CONNECTING, ndev),
+ wl_get_drv_status(cfg, CONNECTED, ndev),
+ wl_get_drv_status(cfg, DISCONNECTING, ndev),
+ wl_get_drv_status(cfg, NESTED_CONNECT, ndev)));
+
if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
CFG80211_DISCONNECTED(iter->ndev, 0, NULL, 0, false, GFP_KERNEL);
}
+
+ if ((iter->ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION) &&
+ wl_get_drv_status(cfg, CONNECTING, iter->ndev)) {
+
+ u8 *latest_bssid = wl_read_prof(cfg, ndev, WL_PROF_LATEST_BSSID);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ struct cfg80211_bss *bss = CFG80211_GET_BSS(wiphy, NULL, latest_bssid,
+ wdev->ssid, wdev->ssid_len);
+
+ BCM_REFERENCE(bss);
+
+ CFG80211_CONNECT_RESULT(ndev,
+ latest_bssid, bss, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ }
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) */
wl_clr_drv_status(cfg, READY, iter->ndev);
wl_clr_drv_status(cfg, SCANNING, iter->ndev);
wl_clr_drv_status(cfg, NESTED_CONNECT, iter->ndev);
wl_clr_drv_status(cfg, CFG80211_CONNECT, iter->ndev);
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic pop")
-#endif // endif
bcmcfg_to_prmry_ndev(cfg)->ieee80211_ptr->iftype =
NL80211_IFTYPE_STATION;
#if defined(WL_CFG80211) && (defined(WL_ENABLE_P2P_IF) || \
WL_ERR(("Set pm_bcnrx returned (%d)\n", interr));
}
#endif /* DISABLE_PM_BCNRX */
+#ifdef WL_CHAN_UTIL
+ interr = wl_cfg80211_start_bssload_report(net);
+ if (unlikely(interr)) {
+ WL_ERR(("%s: Failed to start bssload_report eventing, err=%d\n",
+ __FUNCTION__, interr));
+ }
+#endif /* WL_CHAN_UTIL */
mutex_unlock(&cfg->usr_sync);
#ifdef WLAIBSS_MCHAN
bcm_cfg80211_add_ibss_if(cfg->wdev->wiphy, IBSS_IF_NAME);
#endif /* WLAIBSS_MCHAN */
-
-#ifdef DUAL_STA_STATIC_IF
- /* Static Interface support is currently supported only for STA only builds (without P2P) */
- wl_cfg80211_create_iface(cfg->wdev->wiphy, WL_IF_TYPE_STA, NULL, "wlan%d");
-#endif /* DUAL_STA_STATIC_IF */
-
return err;
}
dhd = (dhd_pub_t *)(cfg->pub);
if ((dhd->hang_reason <= HANG_REASON_MASK) || (dhd->hang_reason >= HANG_REASON_MAX)) {
- WL_ERR(("%s, Invalid hang reason 0x%x\n",
- __FUNCTION__, dhd->hang_reason));
+ WL_ERR(("wl_cfg80211_hang, Invalid hang reason 0x%x\n",
+ dhd->hang_reason));
dhd->hang_reason = HANG_REASON_UNKNOWN;
}
WL_ERR(("In : chip crash eventing, reason=0x%x\n", (uint32)(dhd->hang_reason)));
return err;
}
-static void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item)
+/*
+ * STA interface teardown helper: cancels any pending scan and, on
+ * kernels >= 3.14, reports a disconnect to cfg80211 for a connected
+ * STA interface so the upper-layer state stays in sync with the
+ * interface going down.
+ */
+void
+wl_cfg80211_sta_ifdown(struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+	WL_DBG(("In\n"));
+
+	/* cfg can be NULL if the cfg80211 context was never attached. */
+	if (cfg) {
+		/* cancel scan if anything pending */
+		wl_cfg80211_cancel_scan(cfg);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+		/* Notify disconnect only for a currently-connected STA iface. */
+		if ((dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION) &&
+			wl_get_drv_status(cfg, CONNECTED, dev)) {
+			CFG80211_DISCONNECTED(dev, 0, NULL, 0, false, GFP_KERNEL);
+		}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) */
+	}
+}
+
+void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item)
{
unsigned long flags;
void *rptr = NULL;
if (!profile)
return NULL;
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
switch (item) {
case WL_PROF_SEC:
rptr = &profile->sec;
case WL_PROF_CHAN:
rptr = &profile->channel;
break;
+ case WL_PROF_LATEST_BSSID:
+ rptr = profile->latest_bssid;
+ break;
}
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
if (!rptr)
WL_ERR(("invalid item (%d)\n", item));
return rptr;
if (!profile)
return WL_INVALID;
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+ WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
switch (item) {
case WL_PROF_SSID:
ssid = (const wlc_ssid_t *) data;
- memset(profile->ssid.SSID, 0,
+ bzero(profile->ssid.SSID,
sizeof(profile->ssid.SSID));
profile->ssid.SSID_len = MIN(ssid->SSID_len, DOT11_MAX_SSID_LEN);
memcpy(profile->ssid.SSID, ssid->SSID, profile->ssid.SSID_len);
if (data)
memcpy(profile->bssid, data, ETHER_ADDR_LEN);
else
- memset(profile->bssid, 0, ETHER_ADDR_LEN);
+ bzero(profile->bssid, ETHER_ADDR_LEN);
break;
case WL_PROF_SEC:
memcpy(&profile->sec, data, sizeof(profile->sec));
case WL_PROF_CHAN:
profile->channel = *(const u32*)data;
break;
+ case WL_PROF_LATEST_BSSID:
+ if (data) {
+ memcpy_s(profile->latest_bssid, sizeof(profile->latest_bssid),
+ data, ETHER_ADDR_LEN);
+ } else {
+ memset_s(profile->latest_bssid, sizeof(profile->latest_bssid),
+ 0, ETHER_ADDR_LEN);
+ }
+ break;
default:
err = -EOPNOTSUPP;
break;
}
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+ WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
if (err == -EOPNOTSUPP)
WL_ERR(("unsupported item (%d)\n", item));
struct wl_ie *ie = wl_to_ie(cfg);
ie->offset = 0;
+ bzero(ie->buf, sizeof(ie->buf));
}
static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v)
}
static void wl_update_hidden_ap_ie(wl_bss_info_t *bi, const u8 *ie_stream, u32 *ie_size,
- bool roam)
+ bool update_ssid)
{
u8 *ssidie;
int32 ssid_len = MIN(bi->SSID_len, DOT11_MAX_SSID_LEN);
int32 remaining_ie_buf_len, available_buffer_len, unused_buf_len;
/* cfg80211_find_ie defined in kernel returning const u8 */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic push")
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
ssidie = (u8 *)cfg80211_find_ie(WLAN_EID_SSID, ie_stream, *ie_size);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic pop")
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
/* ERROR out if
* 1. No ssid IE is FOUND or
remaining_ie_buf_len = available_buffer_len - (int)ssidie[1];
unused_buf_len = WL_EXTRA_BUF_MAX - (4 + bi->length + *ie_size);
if (ssidie[1] > available_buffer_len) {
- WL_ERR_MEM(("%s: skip wl_update_hidden_ap_ie : overflow\n", __FUNCTION__));
+ WL_ERR_MEM(("wl_update_hidden_ap_ie: skip wl_update_hidden_ap_ie : overflow\n"));
return;
}
if (ssidie[1] != ssid_len) {
if (ssidie[1]) {
- WL_INFORM_MEM(("%s: Wrong SSID len: %d != %d\n",
- __FUNCTION__, ssidie[1], bi->SSID_len));
+ WL_ERR_RLMT(("wl_update_hidden_ap_ie: Wrong SSID len: %d != %d\n",
+ ssidie[1], bi->SSID_len));
}
- if ((roam && (ssid_len > ssidie[1])) && (unused_buf_len > ssid_len)) {
+ /*
+ * The bss info in firmware gets updated from beacon and probe resp.
+ * In case of hidden network, the bss_info that got updated by beacon,
+ * will not carry SSID and this can result in cfg80211_get_bss not finding a match.
+ * so include the SSID element.
+ */
+ if ((update_ssid && (ssid_len > ssidie[1])) && (unused_buf_len > ssid_len)) {
WL_INFORM_MEM(("Changing the SSID Info.\n"));
memmove(ssidie + ssid_len + 2,
(ssidie + 2) + ssidie[1],
*ie_size = *ie_size + ssid_len - ssidie[1];
ssidie[1] = ssid_len;
} else if (ssid_len < ssidie[1]) {
- WL_ERR_MEM(("%s: Invalid SSID len: %d < %d\n",
- __FUNCTION__, bi->SSID_len, ssidie[1]));
+ WL_ERR_MEM(("wl_update_hidden_ap_ie: Invalid SSID len: %d < %d\n",
+ bi->SSID_len, ssidie[1]));
}
return;
}
{
unsigned long flags;
- spin_lock_irqsave(&cfg->eq_lock, flags);
+ WL_CFG_EQ_LOCK(&cfg->eq_lock, flags);
return flags;
}
static void wl_unlock_eq(struct bcm_cfg80211 *cfg, unsigned long flags)
{
- spin_unlock_irqrestore(&cfg->eq_lock, flags);
+ WL_CFG_EQ_UNLOCK(&cfg->eq_lock, flags);
}
static void wl_init_eq_lock(struct bcm_cfg80211 *cfg)
return -1;
if (!p2p_is_on(cfg)) {
get_primary_mac(cfg, &primary_mac);
+#ifndef WL_P2P_USE_RANDMAC
wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
+#endif /* WL_P2P_USE_RANDMAC */
memcpy((void *)&p2pdev_addr, (void *)&primary_mac, ETHER_ADDR_LEN);
} else {
memcpy(p2pdev_addr->octet, wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE).octet,
}
#ifdef WLTDLS
-static s32
+s32
wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data) {
case WLC_E_TDLS_PEER_DISCONNECTED :
if (cfg->tdls_mgmt_frame) {
MFREE(cfg->osh, cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len);
- cfg->tdls_mgmt_frame = NULL;
cfg->tdls_mgmt_frame_len = 0;
cfg->tdls_mgmt_freq = 0;
}
#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
{
s32 ret = 0;
-#ifdef WLTDLS
+#if defined(TDLS_MSG_ONLY_WFD) && defined(WLTDLS)
struct bcm_cfg80211 *cfg;
tdls_wfd_ie_iovar_t info;
- memset(&info, 0, sizeof(tdls_wfd_ie_iovar_t));
+ bzero(&info, sizeof(info));
cfg = wl_get_cfg(dev);
#if defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)
* using tdls_wfd_ie iovar
*/
case WLAN_TDLS_SET_PROBE_WFD_IE:
- WL_ERR(("%s WLAN_TDLS_SET_PROBE_WFD_IE\n", __FUNCTION__));
+ WL_ERR(("wl_cfg80211_tdls_mgmt: WLAN_TDLS_SET_PROBE_WFD_IE\n"));
info.mode = TDLS_WFD_PROBE_IE_TX;
+
+ if (len > sizeof(info.data)) {
+ return -EINVAL;
+ }
memcpy(&info.data, buf, len);
info.length = len;
break;
case WLAN_TDLS_SET_SETUP_WFD_IE:
- WL_ERR(("%s WLAN_TDLS_SET_SETUP_WFD_IE\n", __FUNCTION__));
+ WL_ERR(("wl_cfg80211_tdls_mgmt: WLAN_TDLS_SET_SETUP_WFD_IE\n"));
info.mode = TDLS_WFD_IE_TX;
+
+ if (len > sizeof(info.data)) {
+ return -EINVAL;
+ }
memcpy(&info.data, buf, len);
info.length = len;
break;
case WLAN_TDLS_SET_WFD_ENABLED:
- WL_ERR(("%s WLAN_TDLS_SET_MODE_WFD_ENABLED\n", __FUNCTION__));
+ WL_ERR(("wl_cfg80211_tdls_mgmt: WLAN_TDLS_SET_MODE_WFD_ENABLED\n"));
dhd_tdls_set_mode((dhd_pub_t *)(cfg->pub), true);
goto out;
case WLAN_TDLS_SET_WFD_DISABLED:
- WL_ERR(("%s WLAN_TDLS_SET_MODE_WFD_DISABLED\n", __FUNCTION__));
+ WL_ERR(("wl_cfg80211_tdls_mgmt: WLAN_TDLS_SET_MODE_WFD_DISABLED\n"));
dhd_tdls_set_mode((dhd_pub_t *)(cfg->pub), false);
goto out;
default:
}
out:
-#endif /* WLTDLS */
+#endif /* TDLS_MSG_ONLY_WFD && WLTDLS */
return ret;
}
dhd_pub_t *dhdp;
bool tdls_auto_mode = false;
dhdp = (dhd_pub_t *)(cfg->pub);
- memset(&info, 0, sizeof(tdls_iovar_t));
+ bzero(&info, sizeof(tdls_iovar_t));
if (peer) {
memcpy(&info.ea, peer, ETHER_ADDR_LEN);
} else {
*/
if (memcmp(peer, (const uint8 *)BSSID_BROADCAST, ETHER_ADDR_LEN) == 0) {
info.mode = TDLS_MANUAL_EP_WFD_TPQ;
- WL_ERR(("%s TDLS TUNNELED PRBOBE REQUEST\n", __FUNCTION__));
+ WL_ERR(("wl_cfg80211_tdls_oper: TDLS TUNNELED PRBOBE REQUEST\n"));
} else {
info.mode = TDLS_MANUAL_EP_DISCOVERY;
}
if (!cfg->p2p->on) {
/* Turn on Discovery interface */
get_primary_mac(cfg, &primary_mac);
+#ifndef WL_P2P_USE_RANDMAC
wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
+#endif /* WL_P2P_USE_RANDMAC */
p2p_on(cfg) = true;
ret = wl_cfgp2p_enable_discovery(cfg, ndev, NULL, 0);
if (unlikely(ret)) {
ret = BCME_OK;
goto done;
}
- ret = wl_notify_escan_complete(cfg, ndev, true, true);
- if (ret < 0) {
- WL_ERR(("set scan abort failed, error = %d\n", ret));
- ret = BCME_OK; // terence 20140115: fix escan_complete error
- goto done;
- }
+
+ wl_cfg80211_cancel_scan(cfg);
done:
return ret;
struct bcm_cfg80211 *cfg = NULL;
struct net_device *ndev = NULL;
- memset(cmd, 0, total_len);
+ bzero(cmd, total_len);
cfg = wl_get_cfg(dev);
buf = (u8 *)MALLOC(cfg->osh, CHANSPEC_BUF_SIZE);
uint i, tokens, log_on = 0;
size_t minsize = min_t(size_t, (sizeof(tbuf) - 1), count);
- memset(tbuf, 0, sizeof(tbuf));
- memset(sublog, 0, sizeof(sublog));
+ bzero(tbuf, sizeof(tbuf));
+ bzero(sublog, sizeof(sublog));
if (copy_from_user(&tbuf, userbuf, minsize)) {
return -EFAULT;
}
if (colon != NULL)
*colon = '\0';
while ((token = strsep(¶ms, " ")) != NULL) {
- memset(sublog, 0, sizeof(sublog));
+ bzero(sublog, sizeof(sublog));
if (token == NULL || !*token)
break;
if (*token == '\0')
char *param;
char tbuf[SUBLOGLEVELZ * ARRAYSIZE(sublogname_map)];
uint i;
- memset(tbuf, 0, sizeof(tbuf));
+ bzero(tbuf, sizeof(tbuf));
param = &tbuf[0];
for (i = 0; i < ARRAYSIZE(sublogname_map); i++) {
param += snprintf(param, sizeof(tbuf) - 1, "%s:%d ",
0, NULL) == BCME_OK) {
memcpy(mac->octet, ioctl_buf, ETHER_ADDR_LEN);
} else {
- memset(mac->octet, 0, ETHER_ADDR_LEN);
+ bzero(mac->octet, ETHER_ADDR_LEN);
}
}
static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role)
}
#endif /* WL_SUPPORT_BACKPORTED_PATCHES || KERNEL >= 3.2.0 */
-#ifdef WL11U
-static bcm_tlv_t *
-wl_cfg80211_find_interworking_ie(const u8 *parse, u32 len)
-{
- bcm_tlv_t *ie;
-
-/* unfortunately it's too much work to dispose the const cast - bcm_parse_tlvs
- * is used everywhere and changing its prototype to take const qualifier needs
- * a massive change to all its callers...
- */
-
- if ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_INTERWORKING_ID))) {
- return ie;
- }
- return NULL;
-}
-
-static s32
-wl_cfg80211_clear_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx)
-{
- ie_setbuf_t ie_setbuf;
-
- WL_DBG(("clear interworking IE\n"));
-
- memset(&ie_setbuf, 0, sizeof(ie_setbuf_t));
-
- ie_setbuf.ie_buffer.iecount = htod32(1);
- ie_setbuf.ie_buffer.ie_list[0].ie_data.id = DOT11_MNG_INTERWORKING_ID;
- ie_setbuf.ie_buffer.ie_list[0].ie_data.len = 0;
-
- return wldev_iovar_setbuf_bsscfg(ndev, "ie", &ie_setbuf, sizeof(ie_setbuf),
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
-}
-
-static s32
-wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
- uint8 ie_id, uint8 *data, uint8 data_len)
-{
- s32 err = BCME_OK;
- s32 buf_len;
- ie_setbuf_t *ie_setbuf;
- ie_getbuf_t ie_getbufp;
- char getbuf[WLC_IOCTL_SMLEN];
-
- if (ie_id != DOT11_MNG_INTERWORKING_ID) {
- WL_ERR(("unsupported (id=%d)\n", ie_id));
- return BCME_UNSUPPORTED;
- }
-
- /* access network options (1 octet) is the mandatory field */
- if (!data || data_len == 0 || data_len > IW_IES_MAX_BUF_LEN) {
- WL_ERR(("wrong interworking IE (len=%d)\n", data_len));
- return BCME_BADARG;
- }
-
- /* Validate the pktflag parameter */
- if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
- VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
- VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG|
- VNDR_IE_CUSTOM_FLAG))) {
- WL_ERR(("invalid packet flag 0x%x\n", pktflag));
- return BCME_BADARG;
- }
-
- buf_len = sizeof(ie_setbuf_t) + data_len - 1;
-
- ie_getbufp.id = DOT11_MNG_INTERWORKING_ID;
- if (wldev_iovar_getbuf_bsscfg(ndev, "ie", (void *)&ie_getbufp,
- sizeof(ie_getbufp), getbuf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync)
- == BCME_OK) {
- if (!memcmp(&getbuf[TLV_HDR_LEN], data, data_len)) {
- WL_DBG(("skip to set interworking IE\n"));
- return BCME_OK;
- }
- }
-
- /* if already set with previous values, delete it first */
- if (cfg->wl11u) {
- if ((err = wl_cfg80211_clear_iw_ie(cfg, ndev, bssidx)) != BCME_OK) {
- return err;
- }
- }
-
- ie_setbuf = (ie_setbuf_t *)MALLOCZ(cfg->osh, buf_len);
- if (!ie_setbuf) {
- WL_ERR(("Error allocating buffer for IE\n"));
- return -ENOMEM;
- }
- strncpy(ie_setbuf->cmd, "add", sizeof(ie_setbuf->cmd));
- ie_setbuf->cmd[sizeof(ie_setbuf->cmd) - 1] = '\0';
-
- /* Buffer contains only 1 IE */
- ie_setbuf->ie_buffer.iecount = htod32(1);
- /* use VNDR_IE_CUSTOM_FLAG flags for none vendor IE . currently fixed value */
- ie_setbuf->ie_buffer.ie_list[0].pktflag = htod32(pktflag);
-
- /* Now, add the IE to the buffer */
- ie_setbuf->ie_buffer.ie_list[0].ie_data.id = DOT11_MNG_INTERWORKING_ID;
- ie_setbuf->ie_buffer.ie_list[0].ie_data.len = data_len;
- memcpy((uchar *)&ie_setbuf->ie_buffer.ie_list[0].ie_data.data[0], data, data_len);
-
- if ((err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len,
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync))
- == BCME_OK) {
- WL_DBG(("set interworking IE\n"));
- cfg->wl11u = TRUE;
- err = wldev_iovar_setint_bsscfg(ndev, "grat_arp", 1, bssidx);
- }
-
- MFREE(cfg->osh, ie_setbuf, buf_len);
- return err;
-}
-#endif /* WL11U */
-
#ifdef WL_HOST_BAND_MGMT
s32
wl_cfg80211_set_band(struct net_device *ndev, int band)
return -EINVAL;
}
if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ BCM_REFERENCE(dhdp);
+ DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
+ dhd_net2idx(dhdp->info, ndev), 0);
ret = wldev_ioctl_set(ndev, WLC_DISASSOC, NULL, 0);
if (ret < 0) {
WL_ERR(("WLC_DISASSOC error %d\n", ret));
return err;
}
-int wl_cfg80211_scan_stop(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev)
-{
- struct net_device *ndev = NULL;
- unsigned long flags;
- int clear_flag = 0;
- int ret = 0;
-
- WL_TRACE(("Enter\n"));
-
- if (!cfg || !cfgdev)
- return -EINVAL;
-
- ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
-
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
-#ifdef WL_CFG80211_P2P_DEV_IF
- if (cfg->scan_request && cfg->scan_request->wdev == cfgdev)
-#else
- if (cfg->scan_request && cfg->scan_request->dev == cfgdev)
-#endif // endif
- {
- wl_notify_scan_done(cfg, true);
- cfg->scan_request = NULL;
- clear_flag = 1;
- }
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
-
- if (clear_flag)
- wl_clr_drv_status(cfg, SCANNING, ndev);
-
- return ret;
-}
-
bool wl_cfg80211_is_concurrent_mode(struct net_device *dev)
{
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
if (!cfg) {
+	/* When the interface is created using wl,
+	 * ndev->ieee80211_ptr will be NULL.
+	 */
return NULL;
}
curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
dhd_pub_t *dhd;
BCM_SET_CONTAINER_OF(cfg, work, struct bcm_cfg80211, pm_enable_work.work);
WL_DBG(("Enter \n"));
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic push")
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
/* p2p discovery iface ndev could be null */
if (iter->ndev) {
if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev) ||
}
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic pop")
-#endif // endif
DHD_PM_WAKE_UNLOCK(cfg->pub);
}
int bytes_written = -1;
if (total_len < FBT_KEYLEN) {
- WL_ERR(("%s: Insufficient buffer \n", __FUNCTION__));
+ WL_ERR(("wl_cfg80211_get_fbt_key: Insufficient buffer \n"));
goto end;
}
if (cfg) {
memcpy(key, cfg->fbt_key, FBT_KEYLEN);
bytes_written = FBT_KEYLEN;
} else {
- memset(key, 0, FBT_KEYLEN);
- WL_ERR(("%s: Failed to copy KCK and KEK \n", __FUNCTION__));
+ bzero(key, FBT_KEYLEN);
+ WL_ERR(("wl_cfg80211_get_fbt_key: Failed to copy KCK and KEK \n"));
}
prhex("KCK, KEK", (uchar *)key, FBT_KEYLEN);
end:
const bcm_tlv_t *ie;
struct parsed_vndr_ie_info *parsed_info;
u32 count = 0;
- s32 remained_len;
+ u32 remained_len;
- remained_len = (s32)len;
- memset(vndr_ies, 0, sizeof(*vndr_ies));
+ remained_len = len;
+ bzero(vndr_ies, sizeof(*vndr_ies));
WL_DBG(("---> len %d\n", len));
ie = (const bcm_tlv_t *) parse;
while (ie) {
if (count >= MAX_VNDR_IE_NUMBER)
break;
- if (ie->id == DOT11_MNG_VS_ID) {
+ if (ie->id == DOT11_MNG_VS_ID || (ie->id == DOT11_MNG_ID_EXT_ID)) {
vndrie = (const vndr_ie_t *) ie;
- /* len should be bigger than OUI length + one data length at least */
- if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) {
- WL_ERR(("%s: invalid vndr ie. length is too small %d\n",
- __FUNCTION__, vndrie->len));
- goto end;
- }
- /* if wpa or wme ie, do not add ie */
- if (!bcmp(vndrie->oui, (u8*)WPA_OUI, WPA_OUI_LEN) &&
- ((vndrie->data[0] == WPA_OUI_TYPE) ||
- (vndrie->data[0] == WME_OUI_TYPE))) {
- CFGP2P_DBG(("Found WPA/WME oui. Do not add it\n"));
- goto end;
+ if (ie->id == DOT11_MNG_ID_EXT_ID) {
+ /* len should be bigger than sizeof ID extn field at least */
+ if (vndrie->len < MIN_VENDOR_EXTN_IE_LEN) {
+ WL_ERR(("%s: invalid vndr extn ie."
+ " length %d\n",
+ __FUNCTION__, vndrie->len));
+ goto end;
+ }
+ } else {
+ /* len should be bigger than OUI length +
+ * one data length at least
+ */
+ if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) {
+ WL_ERR(("wl_cfg80211_parse_vndr_ies:"
+ " invalid vndr ie. length is too small %d\n",
+ vndrie->len));
+ goto end;
+ }
+
+ /* if wpa or wme ie, do not add ie */
+ if (!bcmp(vndrie->oui, (u8*)WPA_OUI, WPA_OUI_LEN) &&
+ ((vndrie->data[0] == WPA_OUI_TYPE) ||
+ (vndrie->data[0] == WME_OUI_TYPE))) {
+ CFGP2P_DBG(("Found WPA/WME oui. Do not add it\n"));
+ goto end;
+ }
}
parsed_info = &vndr_ies->ie_info[count++];
parsed_info->ie_len = (vndrie->len + TLV_HDR_LEN);
memcpy(&parsed_info->vndrie, vndrie, sizeof(vndr_ie_t));
vndr_ies->count = count;
-
- WL_DBG(("\t ** OUI "MACOUIDBG", type 0x%02x len:%d\n",
- MACOUI2STRDBG(parsed_info->vndrie.oui),
- parsed_info->vndrie.data[0], parsed_info->ie_len));
+ if (ie->id == DOT11_MNG_ID_EXT_ID) {
+ WL_DBG(("\t ** Vendor Extension ie id: 0x%02x, len:%d\n",
+ ie->id, parsed_info->ie_len));
+ } else {
+ WL_DBG(("\t ** OUI "MACOUIDBG", type 0x%02x len:%d\n",
+ MACOUI2STRDBG(parsed_info->vndrie.oui),
+ parsed_info->vndrie.data[0], parsed_info->ie_len));
+ }
}
end:
ie = bcm_next_tlv(ie, &remained_len);
wl_vndr_oui_entry_t *oui_entry = NULL;
unsigned long flags;
- spin_lock_irqsave(&cfg->vndr_oui_sync, flags);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ WL_CFG_VNDR_OUI_SYNC_LOCK(&cfg->vndr_oui_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry(oui_entry, &cfg->vndr_oui_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (!memcmp(oui_entry->oui, vndr_info->vndrie.oui, DOT11_OUI_LEN)) {
- spin_unlock_irqrestore(&cfg->vndr_oui_sync, flags);
+ WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
return TRUE;
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- spin_unlock_irqrestore(&cfg->vndr_oui_sync, flags);
+ WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
return FALSE;
}
wl_vndr_oui_entry_t *oui_entry = NULL;
unsigned long flags;
- oui_entry = (wl_vndr_oui_entry_t *)MALLOC(cfg->osh, sizeof(*oui_entry));
+ oui_entry = kmalloc(sizeof(*oui_entry), GFP_KERNEL);
if (oui_entry == NULL) {
WL_ERR(("alloc failed\n"));
return FALSE;
memcpy(oui_entry->oui, vndr_info->vndrie.oui, DOT11_OUI_LEN);
INIT_LIST_HEAD(&oui_entry->list);
- spin_lock_irqsave(&cfg->vndr_oui_sync, flags);
+ WL_CFG_VNDR_OUI_SYNC_LOCK(&cfg->vndr_oui_sync, flags);
list_add_tail(&oui_entry->list, &cfg->vndr_oui_list);
- spin_unlock_irqrestore(&cfg->vndr_oui_sync, flags);
+ WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
return TRUE;
}
wl_vndr_oui_entry_t *oui_entry = NULL;
unsigned long flags;
- spin_lock_irqsave(&cfg->vndr_oui_sync, flags);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ WL_CFG_VNDR_OUI_SYNC_LOCK(&cfg->vndr_oui_sync, flags);
while (!list_empty(&cfg->vndr_oui_list)) {
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
oui_entry = list_entry(cfg->vndr_oui_list.next, wl_vndr_oui_entry_t, list);
+ GCC_DIAGNOSTIC_POP();
if (oui_entry) {
list_del(&oui_entry->list);
- MFREE(cfg->osh, oui_entry, sizeof(*oui_entry));
+ kfree(oui_entry);
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- spin_unlock_irqrestore(&cfg->vndr_oui_sync, flags);
+ WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
}
static int
}
if (vndr_oui) {
- spin_lock_irqsave(&cfg->vndr_oui_sync, flags);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ WL_CFG_VNDR_OUI_SYNC_LOCK(&cfg->vndr_oui_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry(oui_entry, &cfg->vndr_oui_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (remained_buf_len < VNDR_OUI_STR_LEN) {
- spin_unlock_irqrestore(&cfg->vndr_oui_sync, flags);
+ WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
return BCME_ERROR;
}
pos += snprintf(pos, VNDR_OUI_STR_LEN, "%02X-%02X-%02X ",
oui_entry->oui[0], oui_entry->oui[1], oui_entry->oui[2]);
remained_buf_len -= VNDR_OUI_STR_LEN;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- spin_unlock_irqrestore(&cfg->vndr_oui_sync, flags);
+ WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
}
return vndr_oui_num;
struct net_info *iter, *next;
WL_DBG(("clear management vendor IEs \n"));
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic push")
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
wl_cfg80211_clear_per_bss_ies(cfg, iter->wdev);
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic pop")
-#endif // endif
return 0;
}
struct net_info *netinfo;
struct wireless_dev *wdev;
+ if (!cfgdev) {
+ WL_ERR(("cfgdev is NULL\n"));
+ return -EINVAL;
+ }
+
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
wdev = cfgdev_to_wdev(cfgdev);
}
/* Clear the global buffer */
- memset(g_mgmt_ie_buf, 0, sizeof(g_mgmt_ie_buf));
+ bzero(g_mgmt_ie_buf, sizeof(g_mgmt_ie_buf));
curr_ie_buf = g_mgmt_ie_buf;
ies = &netinfo->bss.ies;
mgmt_ie_len = &ies->assoc_req_ie_len;
mgmt_ie_buf_len = sizeof(ies->assoc_req_ie);
break;
+ case VNDR_IE_DISASSOC_FLAG :
+ mgmt_ie_buf = ies->disassoc_ie;
+ mgmt_ie_len = &ies->disassoc_ie_len;
+ mgmt_ie_buf_len = sizeof(ies->disassoc_ie);
+ break;
default:
mgmt_ie_buf = NULL;
mgmt_ie_len = NULL;
}
#endif /* WL_MBO || WL_OCE */
- WL_DBG(("DELETED ID : %d, Len: %d , OUI:"MACOUIDBG"\n",
- vndrie_info->vndrie.id, vndrie_info->vndrie.len,
- MACOUI2STRDBG(vndrie_info->vndrie.oui)));
+ if (vndrie_info->vndrie.id == DOT11_MNG_ID_EXT_ID) {
+ WL_DBG(("DELETED VENDOR EXTN ID : %d, TYPE: %d Len: %d\n",
+ vndrie_info->vndrie.id, vndrie_info->vndrie.oui[0],
+ vndrie_info->vndrie.len));
+ } else {
+ WL_DBG(("DELETED ID : %d, Len: %d , OUI:"MACOUIDBG"\n",
+ vndrie_info->vndrie.id, vndrie_info->vndrie.len,
+ MACOUI2STRDBG(vndrie_info->vndrie.oui)));
+ }
del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
pktflag, vndrie_info->vndrie.oui,
}
}
#endif /* WL_MBO || WL_OCE */
- WL_DBG(("ADDED ID : %d, Len: %d(%d), OUI:"MACOUIDBG"\n",
- vndrie_info->vndrie.id, vndrie_info->vndrie.len,
- vndrie_info->ie_len - 2,
- MACOUI2STRDBG(vndrie_info->vndrie.oui)));
+ if (vndrie_info->vndrie.id == DOT11_MNG_ID_EXT_ID) {
+ WL_DBG(("ADDED VENDOR EXTN ID : %d, TYPE = %d, Len: %d\n",
+ vndrie_info->vndrie.id, vndrie_info->vndrie.oui[0],
+ vndrie_info->vndrie.len));
+ } else {
+ WL_DBG(("ADDED ID : %d, Len: %d(%d), OUI:"MACOUIDBG"\n",
+ vndrie_info->vndrie.id, vndrie_info->vndrie.len,
+ vndrie_info->ie_len - 2,
+ MACOUI2STRDBG(vndrie_info->vndrie.oui)));
+ }
del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
pktflag, vndrie_info->vndrie.oui,
/* if acl == NULL, macmode is still disabled.. */
if (macmode == MACLIST_MODE_DISABLED) {
if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, NULL)) != 0)
- WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
+ WL_ERR(("wl_cfg80211_set_mac_acl: Setting MAC list"
+ " failed error=%d\n", ret));
return ret;
}
macnum = acl->n_acl_entries;
if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) {
- WL_ERR(("%s : invalid number of MAC address entries %d\n",
- __FUNCTION__, macnum));
+ WL_ERR(("wl_cfg80211_set_mac_acl: invalid number of MAC address entries %d\n",
+ macnum));
return -1;
}
list = (struct maclist *)MALLOC(cfg->osh, sizeof(int) +
sizeof(struct ether_addr) * macnum);
if (!list) {
- WL_ERR(("%s : failed to allocate memory\n", __FUNCTION__));
+ WL_ERR(("wl_cfg80211_set_mac_acl: failed to allocate memory\n"));
return -1;
}
}
/* set the list */
if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, list)) != 0)
- WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
+ WL_ERR(("wl_cfg80211_set_mac_acl: Setting MAC list failed error=%d\n", ret));
MFREE(cfg->osh, list, sizeof(int) +
sizeof(struct ether_addr) * macnum);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
cfg80211_chandef_create(chandef, chan, chan_type);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, \
- \
\
0)))
chaninfo->freq = freq;
u32 freq;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
struct cfg80211_chan_def chandef;
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, \
- \
- \
- 0)))
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, 0)))
struct chan_info chaninfo;
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
return;
}
#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION (3, 18, 0)) */
+
#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
if (wl_chspec_chandef(chanspec, &chandef, wiphy))
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, \
- \
- \
- 0)))
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, 0)))
if (wl_chspec_chandef(chanspec, &chaninfo, wiphy))
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
freq = chandef.chan ? chandef.chan->center_freq : chandef.center_freq1;
cfg80211_ch_switch_notify(dev, &chandef);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, \
- \
- \
- 0)))
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, 0)))
freq = chan_info.freq;
cfg80211_ch_switch_notify(dev, freq, chan_info.chan_type);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
- WL_ERR(("Channel switch notification for freq: %d chanspec: 0x%x\n", freq, chanspec));
+ WL_MSG(dev->name, "Channel switch notification for freq: %d chanspec: 0x%x\n",
+ freq, chanspec);
return;
}
#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */
WL_INFORM_MEM(("(%s) AP channel:%d chspec:0x%x \n",
ndev->name, channel, chanspec));
+
+#ifdef SUPPORT_AP_BWCTRL
+ wl_update_apchan_bwcap(cfg, ndev, chanspec);
+#endif /* SUPPORT_AP_BWCTRL */
+
if (cfg->ap_oper_channel && (cfg->ap_oper_channel != channel)) {
/*
* If cached channel is different from the channel indicated
int error = 0;
u32 chanspec = 0;
struct net_device *ndev = NULL;
+ struct ether_addr bssid;
WL_DBG(("Enter\n"));
if (unlikely(e->status)) {
if (likely(cfgdev)) {
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ /* Get association state if not AP and then query chanspec */
+ if (!((wl_get_mode_by_netdev(cfg, ndev)) == WL_MODE_AP)) {
+ error = wldev_ioctl_get(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
+ if (error) {
+ WL_ERR(("CSA on %s. Not associated. error=%d\n",
+ ndev->name, error));
+ return BCME_ERROR;
+ }
+ }
+
error = wldev_iovar_getint(ndev, "chanspec", &chanspec);
if (unlikely(error)) {
WL_ERR(("Get chanspec error: %d \n", error));
/* For AP/GO role */
wl_ap_channel_ind(cfg, ndev, chanspec);
} else {
+ /* STA/GC roles */
+ if (!wl_get_drv_status(cfg, CONNECTED, ndev)) {
+ WL_ERR(("CSA on %s. Not associated.\n", ndev->name));
+ return BCME_ERROR;
+ }
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
wl_cfg80211_ch_switch_notify(ndev, chanspec, bcmcfg_to_wiphy(cfg));
#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */
wdev = cfg->p2p_wdev;
}
- if (wdev && cfg->down_disc_if) {
+ if (wdev) {
wl_cfgp2p_del_p2p_disc_if(wdev, cfg);
- cfg->down_disc_if = FALSE;
}
}
#endif /* WL_CFG80211_P2P_DEV_IF */
return err;
}
- if ((err = wldev_iovar_setbuf(dev, "gtk_key_info", &keyinfo, sizeof(keyinfo),
- cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync)) < 0) {
- WL_ERR(("seting gtk_key_info failed code=%d\n", err));
- return err;
+ if ((err = wldev_iovar_setbuf(dev, "gtk_key_info", &keyinfo, sizeof(keyinfo),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync)) < 0) {
+		WL_ERR(("setting gtk_key_info failed code=%d\n", err));
+ return err;
+ }
+
+ WL_DBG(("Exit\n"));
+ return err;
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0) */
+#endif /* GTK_OFFLOAD_SUPPORT */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0))
+static int wl_cfg80211_set_pmk(struct wiphy *wiphy, struct net_device *dev,
+ const struct cfg80211_pmk_conf *conf)
+{
+ int ret = 0;
+ wsec_pmk_t pmk;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct wl_security *sec;
+ s32 bssidx;
+
+ pmk.key_len = conf->pmk_len;
+ if (pmk.key_len > sizeof(pmk.key)) {
+ ret = -EINVAL;
+ return ret;
+ }
+ pmk.flags = 0;
+ ret = memcpy_s(&pmk.key, sizeof(pmk.key), conf->pmk, conf->pmk_len);
+ if (ret) {
+ ret = -EINVAL;
+ return ret;
+ }
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find index failed\n"));
+ ret = -EINVAL;
+ return ret;
+ }
+
+ sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+ if ((sec->wpa_auth == WLAN_AKM_SUITE_8021X) ||
+ (sec->wpa_auth == WL_AKM_SUITE_SHA256_1X)) {
+ ret = wldev_iovar_setbuf_bsscfg(dev, "okc_info_pmk", pmk.key, pmk.key_len,
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (ret) {
+ /* could fail in case that 'okc' is not supported */
+ WL_INFORM_MEM(("okc_info_pmk failed, err=%d (ignore)\n", ret));
+ }
+ }
+
+ ret = wldev_ioctl_set(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
+ if (ret) {
+ WL_ERR(("wl_cfg80211_set_pmk error:%d", ret));
+ ret = -EINVAL;
+ return ret;
+ }
+ return 0;
+}
+
+static int wl_cfg80211_del_pmk(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *aa)
+{
+ int err = BCME_OK;
+ struct cfg80211_pmksa pmksa;
+
+ /* build up cfg80211_pmksa structure to use existing wl_cfg80211_update_pmksa API */
+ bzero(&pmksa, sizeof(pmksa));
+ pmksa.bssid = aa;
+
+ err = wl_cfg80211_update_pmksa(wiphy, dev, &pmksa, FALSE);
+
+ if (err) {
+ WL_ERR(("wl_cfg80211_update_pmksa err:%d\n", err));
+ err = -EINVAL;
}
- WL_DBG(("Exit\n"));
return err;
}
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0) */
-#endif /* GTK_OFFLOAD_SUPPORT */
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) */
#if defined(WL_SUPPORT_AUTO_CHANNEL)
int
}
#ifdef WLTDLS
-static s32
+s32
wl_cfg80211_tdls_config(struct bcm_cfg80211 *cfg, enum wl_tdls_config state, bool auto_mode)
{
struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
err = dhd_tdls_enable(ndev, false, auto_mode, NULL);
goto exit;
} else if ((state == TDLS_STATE_AP_CREATE) ||
- (state == TDLS_STATE_NDI_CREATE)) {
+ (state == TDLS_STATE_NMI_CREATE)) {
/* We don't support tdls while AP/GO/NAN is operational */
update_reqd = true;
enable = false;
* Verify whether its a STA interface before
* we enable back tdls.
*/
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
- if ((iter->ndev) &&
- (wl_get_drv_status(cfg, CONNECTED, ndev)) &&
+ GCC_DIAGNOSTIC_POP();
+ if ((iter->ndev) && (wl_get_drv_status(cfg, CONNECTED, ndev)) &&
(ndev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION)) {
- WL_DBG(("Non STA iface operational. cfg_iftype:%d "
- "Can't enable tdls.\n",
+ WL_DBG(("Non STA iface operational. cfg_iftype:%d"
+ " Can't enable tdls.\n",
ndev->ieee80211_ptr->iftype));
err = -ENOTSUPP;
goto exit;
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
/* No AP/GO found. Enable back tdls */
update_reqd = true;
enable = true;
struct net_info *iter, *next;
struct net_device *ndev = NULL;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
if (iter->ndev) {
if (strncmp(iter->ndev->name, ifname, IFNAMSIZ) == 0) {
if (iter->ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
}
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
return ndev;
}
struct net_info *iter, *next;
struct net_device *ndev = NULL;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
if (iter->ndev) {
if (strncmp(iter->ndev->name, ifname, IFNAMSIZ) == 0) {
ndev = iter->ndev;
}
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- return ndev;
-}
+ return ndev;
+}
+
+#ifdef SUPPORT_AP_HIGHER_BEACONRATE
+#define WLC_RATE_FLAG 0x80
+#define RATE_MASK 0x7f
+
+int wl_set_ap_beacon_rate(struct net_device *dev, int val, char *ifname)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhdp;
+ wl_rateset_args_t rs;
+ int error = BCME_ERROR, i;
+ struct net_device *ndev = NULL;
+
+ dhdp = (dhd_pub_t *)(cfg->pub);
+
+ if (dhdp && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ WL_ERR(("Not Hostapd mode\n"));
+ return BCME_NOTAP;
+ }
+
+ ndev = wl_get_ap_netdev(cfg, ifname);
+
+ if (ndev == NULL) {
+ WL_ERR(("No softAP interface named %s\n", ifname));
+ return BCME_NOTAP;
+ }
+
+ bzero(&rs, sizeof(wl_rateset_args_t));
+ error = wldev_iovar_getbuf(ndev, "rateset", NULL, 0,
+ &rs, sizeof(wl_rateset_args_t), NULL);
+ if (error < 0) {
+ WL_ERR(("get rateset failed = %d\n", error));
+ return error;
+ }
+
+ if (rs.count < 1) {
+ WL_ERR(("Failed to get rate count\n"));
+ return BCME_ERROR;
+ }
+
+ /* Host delivers target rate in the unit of 500kbps */
+ /* To make it to 1mbps unit, atof should be implemented for 5.5mbps basic rate */
+ for (i = 0; i < rs.count && i < WL_NUMRATES; i++)
+ if (rs.rates[i] & WLC_RATE_FLAG)
+ if ((rs.rates[i] & RATE_MASK) == val)
+ break;
+
+ /* Valid rate has been delivered as an argument */
+ if (i < rs.count && i < WL_NUMRATES) {
+ error = wldev_iovar_setint(ndev, "force_bcn_rspec", val);
+ if (error < 0) {
+ WL_ERR(("set beacon rate failed = %d\n", error));
+ return BCME_ERROR;
+ }
+ } else {
+ WL_ERR(("Rate is invalid"));
+ return BCME_BADARG;
+ }
+
+ return BCME_OK;
+}
+
+int
+wl_get_ap_basic_rate(struct net_device *dev, char* command, char *ifname, int total_len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhdp;
+ wl_rateset_args_t rs;
+ int error = BCME_ERROR;
+ int i, bytes_written = 0;
+ struct net_device *ndev = NULL;
+
+ dhdp = (dhd_pub_t *)(cfg->pub);
+
+ if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ WL_ERR(("Not Hostapd mode\n"));
+ return BCME_NOTAP;
+ }
+
+ ndev = wl_get_ap_netdev(cfg, ifname);
+
+ if (ndev == NULL) {
+ WL_ERR(("No softAP interface named %s\n", ifname));
+ return BCME_NOTAP;
+ }
+
+ bzero(&rs, sizeof(wl_rateset_args_t));
+ error = wldev_iovar_getbuf(ndev, "rateset", NULL, 0,
+ &rs, sizeof(wl_rateset_args_t), NULL);
+ if (error < 0) {
+ WL_ERR(("get rateset failed = %d\n", error));
+ return error;
+ }
+
+ if (rs.count < 1) {
+ WL_ERR(("Failed to get rate count\n"));
+ return BCME_ERROR;
+ }
+
+ /* Delivers basic rate in the unit of 500kbps to host */
+ for (i = 0; i < rs.count && i < WL_NUMRATES; i++)
+ if (rs.rates[i] & WLC_RATE_FLAG)
+ bytes_written += snprintf(command + bytes_written, total_len,
+ "%d ", rs.rates[i] & RATE_MASK);
+
+ /* Remove last space in the command buffer */
+ if (bytes_written && (bytes_written < total_len)) {
+ command[bytes_written - 1] = '\0';
+ bytes_written--;
+ }
+
+ return bytes_written;
+
+}
+#endif /* SUPPORT_AP_HIGHER_BEACONRATE */
+
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+#define MSEC_PER_MIN (60000L)
+
+static int
+_wl_update_ap_rps_params(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = NULL;
+ rpsnoa_iovar_params_t iovar;
+ u8 smbuf[WLC_IOCTL_SMLEN];
+
+ if (!dev)
+ return BCME_BADARG;
+
+ cfg = wl_get_cfg(dev);
+
+ bzero(&iovar, sizeof(iovar));
+ bzero(smbuf, sizeof(smbuf));
+
+ iovar.hdr.ver = RADIO_PWRSAVE_VERSION;
+ iovar.hdr.subcmd = WL_RPSNOA_CMD_PARAMS;
+ iovar.hdr.len = sizeof(iovar);
+ iovar.param->band = WLC_BAND_ALL;
+ iovar.param->level = cfg->ap_rps_info.level;
+ iovar.param->stas_assoc_check = cfg->ap_rps_info.sta_assoc_check;
+ iovar.param->pps = cfg->ap_rps_info.pps;
+ iovar.param->quiet_time = cfg->ap_rps_info.quiet_time;
+
+ if (wldev_iovar_setbuf(dev, "rpsnoa", &iovar, sizeof(iovar),
+ smbuf, sizeof(smbuf), NULL)) {
+ WL_ERR(("Failed to set rpsnoa params"));
+ return BCME_ERROR;
+ }
+
+ return BCME_OK;
+}
+
+int
+wl_get_ap_rps(struct net_device *dev, char* command, char *ifname, int total_len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhdp;
+ int error = BCME_ERROR;
+ int bytes_written = 0;
+ struct net_device *ndev = NULL;
+ rpsnoa_iovar_status_t iovar;
+ u8 smbuf[WLC_IOCTL_SMLEN];
+ u32 chanspec = 0;
+ u8 idx = 0;
+ u16 state;
+ u32 sleep;
+ u32 time_since_enable;
+
+ dhdp = (dhd_pub_t *)(cfg->pub);
+
+ if (!dhdp) {
+ error = BCME_NOTUP;
+ goto fail;
+ }
+
+ if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ WL_ERR(("Not Hostapd mode\n"));
+ error = BCME_NOTAP;
+ goto fail;
+ }
+
+ ndev = wl_get_ap_netdev(cfg, ifname);
+
+ if (ndev == NULL) {
+ WL_ERR(("No softAP interface named %s\n", ifname));
+ error = BCME_NOTAP;
+ goto fail;
+ }
+
+ bzero(&iovar, sizeof(iovar));
+ bzero(smbuf, sizeof(smbuf));
+
+ iovar.hdr.ver = RADIO_PWRSAVE_VERSION;
+ iovar.hdr.subcmd = WL_RPSNOA_CMD_STATUS;
+ iovar.hdr.len = sizeof(iovar);
+ iovar.stats->band = WLC_BAND_ALL;
+
+ error = wldev_iovar_getbuf(ndev, "rpsnoa", &iovar, sizeof(iovar),
+ smbuf, sizeof(smbuf), NULL);
+ if (error < 0) {
+ WL_ERR(("get ap radio pwrsave failed = %d\n", error));
+ goto fail;
+ }
+
+ /* RSDB event doesn't seem to be handled correctly.
+ * So check chanspec of AP directly from the firmware
+ */
+ error = wldev_iovar_getint(ndev, "chanspec", (s32 *)&chanspec);
+ if (error < 0) {
+ WL_ERR(("get chanspec from AP failed = %d\n", error));
+ goto fail;
+ }
+
+ chanspec = wl_chspec_driver_to_host(chanspec);
+ if (CHSPEC_IS2G(chanspec))
+ idx = 0;
+ else if (CHSPEC_IS5G(chanspec))
+ idx = 1;
+ else {
+ error = BCME_BADCHAN;
+ goto fail;
+ }
+
+ state = ((rpsnoa_iovar_status_t *)smbuf)->stats[idx].state;
+ sleep = ((rpsnoa_iovar_status_t *)smbuf)->stats[idx].sleep_dur;
+ time_since_enable = ((rpsnoa_iovar_status_t *)smbuf)->stats[idx].sleep_avail_dur;
+
+	/* Convert ms to minutes, round down only */
+ sleep = DIV_U64_BY_U32(sleep, MSEC_PER_MIN);
+ time_since_enable = DIV_U64_BY_U32(time_since_enable, MSEC_PER_MIN);
-#ifdef SUPPORT_AP_HIGHER_BEACONRATE
-#define WLC_RATE_FLAG 0x80
-#define RATE_MASK 0x7f
+ bytes_written += snprintf(command + bytes_written, total_len,
+ "state=%d sleep=%d time_since_enable=%d", state, sleep, time_since_enable);
+ error = bytes_written;
-int wl_set_ap_beacon_rate(struct net_device *dev, int val, char *ifname)
+fail:
+ return error;
+}
+
+int
+wl_set_ap_rps(struct net_device *dev, bool enable, char *ifname)
{
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
dhd_pub_t *dhdp;
- wl_rateset_args_t rs;
- int error = BCME_ERROR, i;
struct net_device *ndev = NULL;
+ rpsnoa_iovar_t iovar;
+ u8 smbuf[WLC_IOCTL_SMLEN];
+ int ret = BCME_OK;
dhdp = (dhd_pub_t *)(cfg->pub);
- if (dhdp && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ if (!dhdp) {
+ ret = BCME_NOTUP;
+ goto exit;
+ }
+
+ if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
WL_ERR(("Not Hostapd mode\n"));
- return BCME_NOTAP;
+ ret = BCME_NOTAP;
+ goto exit;
}
ndev = wl_get_ap_netdev(cfg, ifname);
if (ndev == NULL) {
WL_ERR(("No softAP interface named %s\n", ifname));
- return BCME_NOTAP;
- }
-
- bzero(&rs, sizeof(wl_rateset_args_t));
- error = wldev_iovar_getbuf(ndev, "rateset", NULL, 0,
- &rs, sizeof(wl_rateset_args_t), NULL);
- if (error < 0) {
- WL_ERR(("get rateset failed = %d\n", error));
- return error;
+ ret = BCME_NOTAP;
+ goto exit;
}
- if (rs.count < 1) {
- WL_ERR(("Failed to get rate count\n"));
- return BCME_ERROR;
- }
+ if (cfg->ap_rps_info.enable != enable) {
+ cfg->ap_rps_info.enable = enable;
+ if (enable) {
+ ret = _wl_update_ap_rps_params(ndev);
+ if (ret) {
+				WL_ERR(("Failed to update rpsnoa params\n"));
+ goto exit;
+ }
+ }
+ bzero(&iovar, sizeof(iovar));
+ bzero(smbuf, sizeof(smbuf));
- /* Host delivers target rate in the unit of 500kbps */
- /* To make it to 1mbps unit, atof should be implemented for 5.5mbps basic rate */
- for (i = 0; i < rs.count && i < WL_NUMRATES; i++)
- if (rs.rates[i] & WLC_RATE_FLAG)
- if ((rs.rates[i] & RATE_MASK) == val)
- break;
+ iovar.hdr.ver = RADIO_PWRSAVE_VERSION;
+ iovar.hdr.subcmd = WL_RPSNOA_CMD_ENABLE;
+ iovar.hdr.len = sizeof(iovar);
+ iovar.data->band = WLC_BAND_ALL;
+ iovar.data->value = (int16)enable;
- /* Valid rate has been delivered as an argument */
- if (i < rs.count && i < WL_NUMRATES) {
- error = wldev_iovar_setint(ndev, "force_bcn_rspec", val);
- if (error < 0) {
- WL_ERR(("set beacon rate failed = %d\n", error));
- return BCME_ERROR;
+ ret = wldev_iovar_setbuf(ndev, "rpsnoa", &iovar, sizeof(iovar),
+ smbuf, sizeof(smbuf), NULL);
+ if (ret) {
+ WL_ERR(("Failed to enable AP radio power save"));
+ goto exit;
}
- } else {
- WL_ERR(("Rate is invalid"));
- return BCME_BADARG;
+ cfg->ap_rps_info.enable = enable;
}
-
- return BCME_OK;
+exit:
+ return ret;
}
int
-wl_get_ap_basic_rate(struct net_device *dev, char* command, char *ifname, int total_len)
+wl_update_ap_rps_params(struct net_device *dev, ap_rps_info_t* rps, char *ifname)
{
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
dhd_pub_t *dhdp;
- wl_rateset_args_t rs;
- int error = BCME_ERROR;
- int i, bytes_written = 0;
struct net_device *ndev = NULL;
dhdp = (dhd_pub_t *)(cfg->pub);
+ if (!dhdp)
+ return BCME_NOTUP;
+
if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
WL_ERR(("Not Hostapd mode\n"));
return BCME_NOTAP;
return BCME_NOTAP;
}
- bzero(&rs, sizeof(wl_rateset_args_t));
- error = wldev_iovar_getbuf(ndev, "rateset", NULL, 0,
- &rs, sizeof(wl_rateset_args_t), NULL);
- if (error < 0) {
- WL_ERR(("get rateset failed = %d\n", error));
- return error;
- }
+ if (!rps)
+ return BCME_BADARG;
- if (rs.count < 1) {
- WL_ERR(("Failed to get rate count\n"));
- return BCME_ERROR;
- }
+ if (rps->pps < RADIO_PWRSAVE_PPS_MIN)
+ return BCME_BADARG;
- /* Delivers basic rate in the unit of 500kbps to host */
- for (i = 0; i < rs.count && i < WL_NUMRATES; i++)
- if (rs.rates[i] & WLC_RATE_FLAG)
- bytes_written += snprintf(command + bytes_written, total_len,
- "%d ", rs.rates[i] & RATE_MASK);
+ if (rps->level < RADIO_PWRSAVE_LEVEL_MIN ||
+ rps->level > RADIO_PWRSAVE_LEVEL_MAX)
+ return BCME_BADARG;
- /* Remove last space in the command buffer */
- if (bytes_written && (bytes_written < total_len)) {
- command[bytes_written - 1] = '\0';
- bytes_written--;
+ if (rps->quiet_time < RADIO_PWRSAVE_QUIETTIME_MIN)
+ return BCME_BADARG;
+
+ if (rps->sta_assoc_check > RADIO_PWRSAVE_ASSOCCHECK_MAX ||
+ rps->sta_assoc_check < RADIO_PWRSAVE_ASSOCCHECK_MIN)
+ return BCME_BADARG;
+
+ cfg->ap_rps_info.pps = rps->pps;
+ cfg->ap_rps_info.level = rps->level;
+ cfg->ap_rps_info.quiet_time = rps->quiet_time;
+ cfg->ap_rps_info.sta_assoc_check = rps->sta_assoc_check;
+
+ if (cfg->ap_rps_info.enable) {
+ if (_wl_update_ap_rps_params(ndev)) {
+ WL_ERR(("Failed to update rpsnoa params"));
+ return BCME_ERROR;
+ }
}
- return bytes_written;
+ return BCME_OK;
+}
+void
+wl_cfg80211_init_ap_rps(struct bcm_cfg80211 *cfg)
+{
+ cfg->ap_rps_info.enable = FALSE;
+ cfg->ap_rps_info.sta_assoc_check = RADIO_PWRSAVE_STAS_ASSOC_CHECK;
+ cfg->ap_rps_info.pps = RADIO_PWRSAVE_PPS;
+ cfg->ap_rps_info.quiet_time = RADIO_PWRSAVE_QUIET_TIME;
+ cfg->ap_rps_info.level = RADIO_PWRSAVE_LEVEL;
}
-#endif /* SUPPORT_AP_HIGHER_BEACONRATE */
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
-#ifdef SUPPORT_AP_RADIO_PWRSAVE
-#define MSEC_PER_MIN (60000L)
+int
+wl_cfg80211_iface_count(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct net_info *iter, *next;
+ int iface_count = 0;
-static int
-_wl_update_ap_rps_params(struct net_device *dev)
+ /* Return the count of network interfaces (skip netless p2p discovery
+ * interface)
+ */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ if (iter->ndev) {
+ iface_count++;
+ }
+ }
+ return iface_count;
+}
+
+#ifdef SUPPORT_SET_CAC
+static void
+wl_cfg80211_set_cac(struct bcm_cfg80211 *cfg, int enable)
+{
+ int ret = 0;
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+
+ WL_DBG(("cac enable %d\n", enable));
+ if (!dhd) {
+ WL_ERR(("dhd is NULL\n"));
+ return;
+ }
+ if ((ret = dhd_wl_ioctl_set_intiovar(dhd, "cac", enable,
+ WLC_SET_VAR, TRUE, 0)) < 0) {
+ WL_ERR(("Failed set CAC, ret=%d\n", ret));
+ } else {
+ WL_DBG(("CAC set successfully\n"));
+ }
+ return;
+}
+#endif /* SUPPORT_SET_CAC */
+
+#ifdef SUPPORT_RSSI_SUM_REPORT
+int
+wl_get_rssi_per_ant(struct net_device *dev, char *ifname, char *peer_mac, void *param)
{
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- rpsnoa_iovar_params_t iovar;
- u8 smbuf[WLC_IOCTL_SMLEN];
+ wl_rssi_ant_mimo_t *get_param = (wl_rssi_ant_mimo_t *)param;
+ rssi_ant_param_t *set_param = NULL;
+ struct net_device *ifdev = NULL;
+ char iobuf[WLC_IOCTL_SMLEN];
+ int err = BCME_OK;
+ int iftype = 0;
- if (!dev)
- return BCME_BADARG;
+ bzero(iobuf, WLC_IOCTL_SMLEN);
- memset(&iovar, 0, sizeof(iovar));
- memset(smbuf, 0, sizeof(smbuf));
+ /* Check the interface type */
+ ifdev = wl_get_netdev_by_name(cfg, ifname);
+ if (ifdev == NULL) {
+ WL_ERR(("Could not find net_device for ifname:%s\n", ifname));
+ err = BCME_BADARG;
+ goto fail;
+ }
- iovar.hdr.ver = RADIO_PWRSAVE_VERSION;
- iovar.hdr.subcmd = WL_RPSNOA_CMD_PARAMS;
- iovar.hdr.len = sizeof(iovar);
- iovar.param->band = WLC_BAND_ALL;
- iovar.param->level = cfg->ap_rps_info.level;
- iovar.param->stas_assoc_check = cfg->ap_rps_info.sta_assoc_check;
- iovar.param->pps = cfg->ap_rps_info.pps;
- iovar.param->quiet_time = cfg->ap_rps_info.quiet_time;
+ iftype = ifdev->ieee80211_ptr->iftype;
+ if (iftype == NL80211_IFTYPE_AP || iftype == NL80211_IFTYPE_P2P_GO) {
+ if (peer_mac) {
+ set_param = (rssi_ant_param_t *)MALLOCZ(cfg->osh, sizeof(rssi_ant_param_t));
+ err = wl_cfg80211_ether_atoe(peer_mac, &set_param->ea);
+ if (!err) {
+ WL_ERR(("Invalid Peer MAC format\n"));
+ err = BCME_BADARG;
+ goto fail;
+ }
+ } else {
+ WL_ERR(("Peer MAC is not provided for iftype %d\n", iftype));
+ err = BCME_BADARG;
+ goto fail;
+ }
+ }
- if (wldev_iovar_setbuf(dev, "rpsnoa", &iovar, sizeof(iovar),
- smbuf, sizeof(smbuf), NULL)) {
- WL_ERR(("Failed to set rpsnoa params"));
- return BCME_ERROR;
+ err = wldev_iovar_getbuf(ifdev, "phy_rssi_ant", peer_mac ?
+ (void *)&(set_param->ea) : NULL, peer_mac ? ETHER_ADDR_LEN : 0,
+ (void *)iobuf, sizeof(iobuf), NULL);
+ if (unlikely(err)) {
+ WL_ERR(("Failed to get rssi info, err=%d\n", err));
+ } else {
+ memcpy(get_param, iobuf, sizeof(wl_rssi_ant_mimo_t));
+ if (get_param->count == 0) {
+ WL_ERR(("Not supported on this chip\n"));
+ err = BCME_UNSUPPORTED;
+ }
+ }
+
+fail:
+ if (set_param) {
+ MFREE(cfg->osh, set_param, sizeof(rssi_ant_param_t));
}
- return BCME_OK;
+ return err;
}
int
-wl_get_ap_rps(struct net_device *dev, char* command, char *ifname, int total_len)
+wl_get_rssi_logging(struct net_device *dev, void *param)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- dhd_pub_t *dhdp;
- int error = BCME_ERROR;
- int bytes_written = 0;
- struct net_device *ndev = NULL;
- rpsnoa_iovar_status_t iovar;
- u8 smbuf[WLC_IOCTL_SMLEN];
- u32 chanspec = 0;
- u8 idx = 0;
- u16 state;
- u32 sleep;
- u32 time_since_enable;
-
- dhdp = (dhd_pub_t *)(cfg->pub);
+ rssilog_get_param_t *get_param = (rssilog_get_param_t *)param;
+ char iobuf[WLC_IOCTL_SMLEN];
+ int err = BCME_OK;
- if (!dhdp) {
- error = BCME_NOTUP;
- goto fail;
+ bzero(iobuf, WLC_IOCTL_SMLEN);
+ bzero(get_param, sizeof(*get_param));
+ err = wldev_iovar_getbuf(dev, "rssilog", NULL, 0, (void *)iobuf,
+ sizeof(iobuf), NULL);
+ if (err) {
+ WL_ERR(("Failed to get rssi logging info, err=%d\n", err));
+ } else {
+ memcpy(get_param, iobuf, sizeof(*get_param));
}
- if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
- WL_ERR(("Not Hostapd mode\n"));
- error = BCME_NOTAP;
- goto fail;
- }
+ return err;
+}
- ndev = wl_get_ap_netdev(cfg, ifname);
+int
+wl_set_rssi_logging(struct net_device *dev, void *param)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ rssilog_set_param_t *set_param = (rssilog_set_param_t *)param;
+ int err;
- if (ndev == NULL) {
- WL_ERR(("No softAP interface named %s\n", ifname));
- error = BCME_NOTAP;
- goto fail;
+ err = wldev_iovar_setbuf(dev, "rssilog", set_param,
+ sizeof(*set_param), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ &cfg->ioctl_buf_sync);
+ if (err) {
+ WL_ERR(("Failed to set rssi logging param, err=%d\n", err));
}
- memset(&iovar, 0, sizeof(iovar));
- memset(smbuf, 0, sizeof(smbuf));
+ return err;
+}
+#endif /* SUPPORT_RSSI_SUM_REPORT */
+/* Function to flush the FW preserve buffer content.
+* The buffer content is sent to the host in the form of events.
+*/
+void
+wl_flush_fw_log_buffer(struct net_device *dev, uint32 logset_mask)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ int i;
+ int err = 0;
+ u8 buf[WLC_IOCTL_SMLEN] = {0};
+ wl_el_set_params_t set_param;
- iovar.hdr.ver = RADIO_PWRSAVE_VERSION;
- iovar.hdr.subcmd = WL_RPSNOA_CMD_STATUS;
- iovar.hdr.len = sizeof(iovar);
- iovar.stats->band = WLC_BAND_ALL;
+ /* Set the size of data to retrieve */
+ memset(&set_param, 0, sizeof(set_param));
+ set_param.size = WLC_IOCTL_SMLEN;
- error = wldev_iovar_getbuf(ndev, "rpsnoa", &iovar, sizeof(iovar),
- smbuf, sizeof(smbuf), NULL);
- if (error < 0) {
- WL_ERR(("get ap radio pwrsave failed = %d\n", error));
- goto fail;
+ for (i = 0; i < dhd->event_log_max_sets; i++)
+ {
+ if ((0x01u << i) & logset_mask) {
+ set_param.set = i;
+ err = wldev_iovar_setbuf(dev, "event_log_get", &set_param,
+ sizeof(struct wl_el_set_params_s), buf, WLC_IOCTL_SMLEN,
+ NULL);
+ if (err) {
+ WL_DBG(("Failed to get fw preserve logs, err=%d\n", err));
+ }
+ }
}
+}
+#ifdef USE_WFA_CERT_CONF
+extern int g_frameburst;
+#endif /* USE_WFA_CERT_CONF */
- /* RSDB event doesn't seem to be handled correctly.
- * So check chanspec of AP directly from the firmware
- */
- error = wldev_iovar_getint(ndev, "chanspec", (s32 *)&chanspec);
- if (error < 0) {
- WL_ERR(("get chanspec from AP failed = %d\n", error));
- goto fail;
- }
+int
+wl_cfg80211_set_frameburst(struct bcm_cfg80211 *cfg, bool enable)
+{
+ int ret = BCME_OK;
+ int val = enable ? 1 : 0;
- chanspec = wl_chspec_driver_to_host(chanspec);
- if (CHSPEC_IS2G(chanspec))
- idx = 0;
- else if (CHSPEC_IS5G(chanspec))
- idx = 1;
- else {
- error = BCME_BADCHAN;
- goto fail;
+#ifdef USE_WFA_CERT_CONF
+ if (!g_frameburst) {
+ WL_DBG(("Skip setting frameburst\n"));
+ return 0;
}
+#endif /* USE_WFA_CERT_CONF */
- state = ((rpsnoa_iovar_status_t *)smbuf)->stats[idx].state;
- sleep = ((rpsnoa_iovar_status_t *)smbuf)->stats[idx].sleep_dur;
- time_since_enable = ((rpsnoa_iovar_status_t *)smbuf)->stats[idx].sleep_avail_dur;
+ WL_DBG(("Set frameburst %d\n", val));
+ ret = wldev_ioctl_set(bcmcfg_to_prmry_ndev(cfg), WLC_SET_FAKEFRAG, &val, sizeof(val));
+ if (ret < 0) {
+ WL_ERR(("Failed set frameburst, ret=%d\n", ret));
+ } else {
+ WL_INFORM_MEM(("frameburst is %s\n", enable ? "enabled" : "disabled"));
+ }
- /* Conver ms to minute, round down only */
- sleep = DIV_U64_BY_U32(sleep, MSEC_PER_MIN);
- time_since_enable = DIV_U64_BY_U32(time_since_enable, MSEC_PER_MIN);
+ return ret;
+}
- bytes_written += snprintf(command + bytes_written, total_len,
- "state=%d sleep=%d time_since_enable=%d", state, sleep, time_since_enable);
- error = bytes_written;
+s32
+wl_cfg80211_set_dbg_verbose(struct net_device *ndev, u32 level)
+{
+ /* configure verbose level for debugging */
+ if (level) {
+ /* Enable increased verbose */
+ wl_dbg_level |= WL_DBG_DBG;
+ } else {
+ /* Disable */
+ wl_dbg_level &= ~WL_DBG_DBG;
+ }
+ WL_INFORM(("debug verbose set to %d\n", level));
-fail:
- return error;
+ return BCME_OK;
}
-int
-wl_set_ap_rps(struct net_device *dev, bool enable, char *ifname)
+const u8 *
+wl_find_attribute(const u8 *buf, u16 len, u16 element_id)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- dhd_pub_t *dhdp;
- struct net_device *ndev = NULL;
- rpsnoa_iovar_t iovar;
- u8 smbuf[WLC_IOCTL_SMLEN];
- int ret = BCME_OK;
-
- dhdp = (dhd_pub_t *)(cfg->pub);
+ const u8 *attrib;
+ u16 attrib_id;
+ u16 attrib_len;
- if (!dhdp) {
- ret = BCME_NOTUP;
- goto exit;
+ if (!buf) {
+ WL_ERR(("buf null\n"));
+ return NULL;
}
- if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
- WL_ERR(("Not Hostapd mode\n"));
- ret = BCME_NOTAP;
- goto exit;
- }
+ attrib = buf;
+ while (len >= 4) {
+ /* attribute id */
+ attrib_id = *attrib++ << 8;
+ attrib_id |= *attrib++;
+ len -= 2;
- ndev = wl_get_ap_netdev(cfg, ifname);
+		/* 2-byte length, read MSB first (big-endian, per WPS TLV format) */
+ attrib_len = *attrib++ << 8;
+ attrib_len |= *attrib++;
- if (ndev == NULL) {
- WL_ERR(("No softAP interface named %s\n", ifname));
- ret = BCME_NOTAP;
- goto exit;
- }
+ len -= 2;
+ if (attrib_id == element_id) {
+ /* This will point to start of subelement attrib after
+ * attribute id & len
+ */
+ return attrib;
+ }
+ if (len > attrib_len) {
+ len -= attrib_len; /* for the remaining subelt fields */
+ WL_DBG(("Attribue:%4x attrib_len:%d rem_len:%d\n",
+ attrib_id, attrib_len, len));
- if (cfg->ap_rps_info.enable != enable) {
- cfg->ap_rps_info.enable = enable;
- if (enable) {
- ret = _wl_update_ap_rps_params(ndev);
- if (ret) {
- WL_ERR(("Filed to update rpsnoa params\n"));
- goto exit;
- }
+ /* Go to next subelement */
+ attrib += attrib_len;
+ } else {
+ WL_ERR(("Incorrect Attribue:%4x attrib_len:%d\n",
+ attrib_id, attrib_len));
+ return NULL;
}
- memset(&iovar, 0, sizeof(iovar));
- memset(smbuf, 0, sizeof(smbuf));
+ }
+ return NULL;
+}
- iovar.hdr.ver = RADIO_PWRSAVE_VERSION;
- iovar.hdr.subcmd = WL_RPSNOA_CMD_ENABLE;
- iovar.hdr.len = sizeof(iovar);
- iovar.data->band = WLC_BAND_ALL;
- iovar.data->value = (int16)enable;
+uint8 wl_cfg80211_get_bus_state(struct bcm_cfg80211 *cfg)
+{
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ WL_INFORM(("dhd->hang_was_sent = %d and busstate = %d\n",
+ dhd->hang_was_sent, dhd->busstate));
+ return ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent);
+}
- ret = wldev_iovar_setbuf(ndev, "rpsnoa", &iovar, sizeof(iovar),
- smbuf, sizeof(smbuf), NULL);
- if (ret) {
- WL_ERR(("Failed to enable AP radio power save"));
- goto exit;
+#ifdef WL_WPS_SYNC
+static void wl_wps_reauth_timeout(unsigned long data)
+{
+ struct net_device *ndev = (struct net_device *)data;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ s32 inst;
+ unsigned long flags;
+
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ inst = wl_get_wps_inst_match(cfg, ndev);
+ if (inst >= 0) {
+ WL_ERR(("[%s][WPS] Reauth Timeout Inst:%d! state:%d\n",
+ ndev->name, inst, cfg->wps_session[inst].state));
+ if (cfg->wps_session[inst].state == WPS_STATE_REAUTH_WAIT) {
+ /* Session should get deleted from success (linkup) or
+ * deauth case. Just in case, link reassoc failed, clear
+ * state here.
+ */
+ WL_ERR(("[%s][WPS] Reauth Timeout Inst:%d!\n",
+ ndev->name, inst));
+ cfg->wps_session[inst].state = WPS_STATE_IDLE;
+ cfg->wps_session[inst].in_use = false;
}
- cfg->ap_rps_info.enable = enable;
}
-exit:
- return ret;
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
}
-int
-wl_update_ap_rps_params(struct net_device *dev, ap_rps_info_t* rps, char *ifname)
+static void wl_init_wps_reauth_sm(struct bcm_cfg80211 *cfg)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- dhd_pub_t *dhdp;
- struct net_device *ndev = NULL;
-
- dhdp = (dhd_pub_t *)(cfg->pub);
-
- if (!dhdp)
- return BCME_NOTUP;
+	/* Only two instances are supported as of now: one for an
+	 * infra STA and the other for an infra STA/GC.
+	 */
+ int i = 0;
+ struct net_device *pdev = bcmcfg_to_prmry_ndev(cfg);
- if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
- WL_ERR(("Not Hostapd mode\n"));
- return BCME_NOTAP;
+ spin_lock_init(&cfg->wps_sync);
+ for (i = 0; i < WPS_MAX_SESSIONS; i++) {
+ /* Init scan_timeout timer */
+ init_timer_compat(&cfg->wps_session[i].timer, wl_wps_reauth_timeout, pdev);
+ cfg->wps_session[i].in_use = false;
+ cfg->wps_session[i].state = WPS_STATE_IDLE;
}
+}
- ndev = wl_get_ap_netdev(cfg, ifname);
+static void wl_deinit_wps_reauth_sm(struct bcm_cfg80211 *cfg)
+{
+ int i = 0;
- if (ndev == NULL) {
- WL_ERR(("No softAP interface named %s\n", ifname));
- return BCME_NOTAP;
+ for (i = 0; i < WPS_MAX_SESSIONS; i++) {
+ cfg->wps_session[i].in_use = false;
+ cfg->wps_session[i].state = WPS_STATE_IDLE;
+ if (timer_pending(&cfg->wps_session[i].timer)) {
+ del_timer_sync(&cfg->wps_session[i].timer);
+ }
}
- if (!rps)
- return BCME_BADARG;
-
- if (rps->pps < RADIO_PWRSAVE_PPS_MIN)
- return BCME_BADARG;
-
- if (rps->level < RADIO_PWRSAVE_LEVEL_MIN ||
- rps->level > RADIO_PWRSAVE_LEVEL_MAX)
- return BCME_BADARG;
-
- if (rps->quiet_time < RADIO_PWRSAVE_QUIETTIME_MIN)
- return BCME_BADARG;
-
- if (rps->sta_assoc_check > RADIO_PWRSAVE_ASSOCCHECK_MAX ||
- rps->sta_assoc_check < RADIO_PWRSAVE_ASSOCCHECK_MIN)
- return BCME_BADARG;
+}
- cfg->ap_rps_info.pps = rps->pps;
- cfg->ap_rps_info.level = rps->level;
- cfg->ap_rps_info.quiet_time = rps->quiet_time;
- cfg->ap_rps_info.sta_assoc_check = rps->sta_assoc_check;
+static s32
+wl_get_free_wps_inst(struct bcm_cfg80211 *cfg)
+{
+ int i;
- if (cfg->ap_rps_info.enable) {
- if (_wl_update_ap_rps_params(ndev)) {
- WL_ERR(("Failed to update rpsnoa params"));
- return BCME_ERROR;
+ for (i = 0; i < WPS_MAX_SESSIONS; i++) {
+ if (!cfg->wps_session[i].in_use) {
+ return i;
}
}
-
- return BCME_OK;
+ return BCME_ERROR;
}
-void
-wl_cfg80211_init_ap_rps(struct bcm_cfg80211 *cfg)
+static s32
+wl_get_wps_inst_match(struct bcm_cfg80211 *cfg, struct net_device *ndev)
{
- cfg->ap_rps_info.enable = FALSE;
- cfg->ap_rps_info.sta_assoc_check = RADIO_PWRSAVE_STAS_ASSOC_CHECK;
- cfg->ap_rps_info.pps = RADIO_PWRSAVE_PPS;
- cfg->ap_rps_info.quiet_time = RADIO_PWRSAVE_QUIET_TIME;
- cfg->ap_rps_info.level = RADIO_PWRSAVE_LEVEL;
+ int i;
+
+ for (i = 0; i < WPS_MAX_SESSIONS; i++) {
+ if ((cfg->wps_session[i].in_use) &&
+ (ndev == cfg->wps_session[i].ndev)) {
+ return i;
+ }
+ }
+
+ return BCME_ERROR;
}
-#endif /* SUPPORT_AP_RADIO_PWRSAVE */
-int
-wl_cfg80211_iface_count(struct net_device *dev)
+static s32
+wl_wps_session_add(struct net_device *ndev, u16 mode, u8 *mac_addr)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- struct net_info *iter, *next;
- int iface_count = 0;
+ s32 inst;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
- /* Return the count of network interfaces (skip netless p2p discovery
- * interface)
- */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- for_each_ndev(cfg, iter, next) {
- if (iter->ndev) {
- iface_count++;
- }
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ /* Fetch and initialize a wps instance */
+ inst = wl_get_free_wps_inst(cfg);
+ if (inst == BCME_ERROR) {
+ WL_ERR(("[WPS] No free insance\n"));
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ return BCME_ERROR;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- return iface_count;
+ cfg->wps_session[inst].in_use = true;
+ cfg->wps_session[inst].state = WPS_STATE_STARTED;
+ cfg->wps_session[inst].ndev = ndev;
+ cfg->wps_session[inst].mode = mode;
+ /* return check not required since both buffer lens are same */
+ (void)memcpy_s(cfg->wps_session[inst].peer_mac, ETH_ALEN, mac_addr, ETH_ALEN);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+
+ WL_INFORM_MEM(("[%s][WPS] session created. Peer: " MACDBG "\n",
+ ndev->name, MAC2STRDBG(mac_addr)));
+ return BCME_OK;
}
-#ifdef SUPPORT_SET_CAC
static void
-wl_cfg80211_set_cac(struct bcm_cfg80211 *cfg, int enable)
+wl_wps_session_del(struct net_device *ndev)
{
- int ret = 0;
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ s32 inst;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
- WL_DBG(("cac enable %d\n", enable));
- if (!dhd) {
- WL_ERR(("dhd is NULL\n"));
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+
+ /* Get current instance for the given ndev */
+ inst = wl_get_wps_inst_match(cfg, ndev);
+ if (inst == BCME_ERROR) {
+ WL_DBG(("[WPS] instance match NOT found\n"));
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
return;
}
- if ((ret = dhd_wl_ioctl_set_intiovar(dhd, "cac", enable,
- WLC_SET_VAR, TRUE, 0)) < 0) {
- WL_ERR(("Failed set CAC, ret=%d\n", ret));
- } else {
- WL_DBG(("CAC set successfully\n"));
+
+ cur_state = cfg->wps_session[inst].state;
+ if (cur_state != WPS_STATE_DONE) {
+ WL_DBG(("[WPS] wrong state:%d\n", cur_state));
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ return;
}
- return;
+
+ /* Mark this as unused */
+ cfg->wps_session[inst].in_use = false;
+ cfg->wps_session[inst].state = WPS_STATE_IDLE;
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+
+ /* Ensure this API is called from sleepable context. */
+ if (timer_pending(&cfg->wps_session[inst].timer)) {
+ del_timer_sync(&cfg->wps_session[inst].timer);
+ }
+
+ WL_INFORM_MEM(("[%s][WPS] session deleted\n", ndev->name));
}
-#endif /* SUPPORT_SET_CAC */
-#ifdef SUPPORT_RSSI_SUM_REPORT
-int
-wl_get_rssi_per_ant(struct net_device *dev, char *ifname, char *peer_mac, void *param)
+static void
+wl_wps_handle_ifdel(struct net_device *ndev)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- wl_rssi_ant_mimo_t *get_param = (wl_rssi_ant_mimo_t *)param;
- rssi_ant_param_t *set_param = NULL;
- struct net_device *ifdev = NULL;
- char iobuf[WLC_IOCTL_SMLEN];
- int err = BCME_OK;
- int iftype = 0;
-
- memset(iobuf, 0, WLC_IOCTL_SMLEN);
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
+ s32 inst;
- /* Check the interface type */
- ifdev = wl_get_netdev_by_name(cfg, ifname);
- if (ifdev == NULL) {
- WL_ERR(("Could not find net_device for ifname:%s\n", ifname));
- err = BCME_BADARG;
- goto fail;
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ inst = wl_get_wps_inst_match(cfg, ndev);
+ if (inst == BCME_ERROR) {
+ WL_DBG(("[WPS] instance match NOT found\n"));
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ return;
}
+ cur_state = cfg->wps_session[inst].state;
+ cfg->wps_session[inst].state = WPS_STATE_DONE;
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
- iftype = ifdev->ieee80211_ptr->iftype;
- if (iftype == NL80211_IFTYPE_AP || iftype == NL80211_IFTYPE_P2P_GO) {
- if (peer_mac) {
- set_param = (rssi_ant_param_t *)MALLOCZ(cfg->osh, sizeof(rssi_ant_param_t));
- err = wl_cfg80211_ether_atoe(peer_mac, &set_param->ea);
- if (!err) {
- WL_ERR(("Invalid Peer MAC format\n"));
- err = BCME_BADARG;
- goto fail;
- }
- } else {
- WL_ERR(("Peer MAC is not provided for iftype %d\n", iftype));
- err = BCME_BADARG;
- goto fail;
- }
+ WL_INFORM_MEM(("[%s][WPS] state:%x\n", ndev->name, cur_state));
+ if (cur_state > WPS_STATE_IDLE) {
+ wl_wps_session_del(ndev);
}
+}
- err = wldev_iovar_getbuf(ifdev, "phy_rssi_ant", peer_mac ?
- (void *)&(set_param->ea) : NULL, peer_mac ? ETHER_ADDR_LEN : 0,
- (void *)iobuf, sizeof(iobuf), NULL);
- if (unlikely(err)) {
- WL_ERR(("Failed to get rssi info, err=%d\n", err));
- } else {
- memcpy(get_param, iobuf, sizeof(wl_rssi_ant_mimo_t));
- if (get_param->count == 0) {
- WL_ERR(("Not supported on this chip\n"));
- err = BCME_UNSUPPORTED;
- }
- }
+static s32
+wl_wps_handle_sta_linkdown(struct net_device *ndev, u16 inst)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
+ bool wps_done = false;
-fail:
- if (set_param) {
- MFREE(cfg->osh, set_param, sizeof(rssi_ant_param_t));
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ cur_state = cfg->wps_session[inst].state;
+ if (cur_state == WPS_STATE_REAUTH_WAIT) {
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ wl_clr_drv_status(cfg, CONNECTED, ndev);
+ wl_clr_drv_status(cfg, DISCONNECTING, ndev);
+ WL_INFORM_MEM(("[%s][WPS] REAUTH link down\n", ndev->name));
+ /* Drop the link down event while we are waiting for reauth */
+ return BCME_UNSUPPORTED;
+ } else if (cur_state == WPS_STATE_STARTED) {
+ /* Link down before reaching EAP-FAIL. End WPS session */
+ cfg->wps_session[inst].state = WPS_STATE_DONE;
+ wps_done = true;
+ WL_INFORM_MEM(("[%s][WPS] link down after wps start\n", ndev->name));
+ } else {
+ WL_DBG(("[%s][WPS] link down in state:%d\n",
+ ndev->name, cur_state));
}
- return err;
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+
+ if (wps_done) {
+ wl_wps_session_del(ndev);
+ }
+ return BCME_OK;
}
-int
-wl_get_rssi_logging(struct net_device *dev, void *param)
+static s32
+wl_wps_handle_peersta_linkdown(struct net_device *ndev, u16 inst, const u8 *peer_mac)
{
- rssilog_get_param_t *get_param = (rssilog_get_param_t *)param;
- char iobuf[WLC_IOCTL_SMLEN];
- int err = BCME_OK;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
+ s32 ret = BCME_OK;
+ bool wps_done = false;
- memset(iobuf, 0, WLC_IOCTL_SMLEN);
- memset(get_param, 0, sizeof(*get_param));
- err = wldev_iovar_getbuf(dev, "rssilog", NULL, 0, (void *)iobuf,
- sizeof(iobuf), NULL);
- if (err) {
- WL_ERR(("Failed to get rssi logging info, err=%d\n", err));
- } else {
- memcpy(get_param, iobuf, sizeof(*get_param));
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ cur_state = cfg->wps_session[inst].state;
+
+ if (!peer_mac) {
+ WL_ERR(("Invalid arg\n"));
+ ret = BCME_ERROR;
+ goto exit;
}
- return err;
+	/* AP/GO can have multiple clients, so validate the peer_mac addr
+	 * and ensure states are updated only for the right peer.
+	 */
+ if (memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
+ /* Mac addr not matching. Ignore. */
+ WL_DBG(("[%s][WPS] No active WPS session"
+ "for the peer:" MACDBG "\n", ndev->name, MAC2STRDBG(peer_mac)));
+ ret = BCME_OK;
+ goto exit;
+ }
+ if (cur_state == WPS_STATE_REAUTH_WAIT) {
+ WL_INFORM_MEM(("[%s][WPS] REAUTH link down."
+ " Peer: " MACDBG "\n",
+ ndev->name, MAC2STRDBG(peer_mac)));
+#ifdef NOT_YET
+	/* Link down during REAUTH state is expected. However,
+	 * if this is sent up, the hostapd state machine issues a
+	 * deauth down and that may pre-empt the WPS reauth state
+	 * at the GC.
+	 */
+ WL_INFORM_MEM(("[%s][WPS] REAUTH link down. Ignore."
+ " for client:" MACDBG "\n",
+ ndev->name, MAC2STRDBG(peer_mac)));
+ ret = BCME_UNSUPPORTED;
+#endif // endif
+ } else if (cur_state == WPS_STATE_STARTED) {
+ /* Link down before reaching REAUTH_WAIT state. WPS
+ * session ended.
+ */
+ cfg->wps_session[inst].state = WPS_STATE_DONE;
+ WL_INFORM_MEM(("[%s][WPS] link down after wps start"
+ " client:" MACDBG "\n",
+ ndev->name, MAC2STRDBG(peer_mac)));
+ wps_done = true;
+ /* since we have freed lock above, return from here */
+ ret = BCME_OK;
+ } else {
+ WL_ERR(("[%s][WPS] Unsupported state:%d",
+ ndev->name, cur_state));
+ ret = BCME_ERROR;
+ }
+exit:
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ if (wps_done) {
+ wl_wps_session_del(ndev);
+ }
+ return ret;
}
-int
-wl_set_rssi_logging(struct net_device *dev, void *param)
+static s32
+wl_wps_handle_sta_linkup(struct net_device *ndev, u16 inst)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- rssilog_set_param_t *set_param = (rssilog_set_param_t *)param;
- int err;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
+ s32 ret = BCME_OK;
+ bool wps_done = false;
- err = wldev_iovar_setbuf(dev, "rssilog", set_param,
- sizeof(*set_param), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
- &cfg->ioctl_buf_sync);
- if (err) {
- WL_ERR(("Failed to set rssi logging param, err=%d\n", err));
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ cur_state = cfg->wps_session[inst].state;
+ if (cur_state == WPS_STATE_REAUTH_WAIT) {
+ /* WPS session succeeded. del session. */
+ cfg->wps_session[inst].state = WPS_STATE_DONE;
+ wps_done = true;
+ WL_INFORM_MEM(("[%s][WPS] WPS_REAUTH link up (WPS DONE)\n", ndev->name));
+ ret = BCME_OK;
+ } else {
+ WL_ERR(("[%s][WPS] unexpected link up in state:%d \n",
+ ndev->name, cur_state));
+ ret = BCME_ERROR;
}
-
- return err;
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ if (wps_done) {
+ wl_wps_session_del(ndev);
+ }
+ return ret;
}
-#endif /* SUPPORT_RSSI_SUM_REPORT */
-#ifdef DHD_LOG_DUMP
-/* Function to flush the FW preserve buffer content
-* The buffer content is sent to host in form of events.
-*/
-void
-wl_flush_fw_log_buffer(struct net_device *dev, uint32 logset_mask)
+static s32
+wl_wps_handle_peersta_linkup(struct net_device *ndev, u16 inst, const u8 *peer_mac)
{
- int i;
- int err = 0;
- u8 buf[WLC_IOCTL_SMLEN] = {0};
- wl_el_set_params_t set_param;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
+ s32 ret = BCME_OK;
- /* Set the size of data to retrieve */
- memset(&set_param, 0, sizeof(set_param));
- set_param.size = WLC_IOCTL_SMLEN;
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ cur_state = cfg->wps_session[inst].state;
- for (i = 0; i < WL_MAX_PRESERVE_BUFFER; i++)
- {
- if ((0x01u << i) & logset_mask) {
- set_param.set = i;
- err = wldev_iovar_setbuf(dev, "event_log_get", &set_param,
- sizeof(struct wl_el_set_params_s), buf, WLC_IOCTL_SMLEN,
- NULL);
- if (err) {
- WL_DBG(("Failed to get fw preserve logs, err=%d\n", err));
- }
- }
+ /* For AP case, check whether call came for right peer */
+ if (!peer_mac ||
+ memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
+ WL_ERR(("[WPS] macaddr mismatch\n"));
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ /* Mac addr not matching. Ignore. */
+ return BCME_ERROR;
}
-}
-#endif /* DHD_LOG_DUMP */
-s32
-wl_cfg80211_set_dbg_verbose(struct net_device *ndev, u32 level)
-{
- /* configure verbose level for debugging */
- if (level) {
- /* Enable increased verbose */
- wl_dbg_level |= WL_DBG_DBG;
+ if (cur_state == WPS_STATE_REAUTH_WAIT) {
+ WL_INFORM_MEM(("[%s][WPS] REAUTH link up\n", ndev->name));
+ ret = BCME_OK;
} else {
- /* Disable */
- wl_dbg_level &= ~WL_DBG_DBG;
+ WL_INFORM_MEM(("[%s][WPS] unexpected link up in state:%d \n",
+ ndev->name, cur_state));
+ ret = BCME_ERROR;
}
- WL_INFORM(("debug verbose set to %d\n", level));
- return BCME_OK;
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+
+ return ret;
}
-s32
-wl_cfg80211_check_for_nan_support(struct bcm_cfg80211 *cfg)
+static s32
+wl_wps_handle_authorize(struct net_device *ndev, u16 inst, const u8 *peer_mac)
{
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
- if (((p2p_is_on(cfg)) && (wl_get_p2p_status(cfg, SCANNING) ||
- wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1))) ||
- (dhd->op_mode & DHD_FLAG_HOSTAP_MODE))
- {
- WL_ERR(("p2p/softap is enabled, cannot support nan\n"));
- return FALSE;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
+ bool wps_done = false;
+ s32 ret = BCME_OK;
+
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ cur_state = cfg->wps_session[inst].state;
+
+ /* For AP case, check whether call came for right peer */
+ if (!peer_mac ||
+ memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
+ WL_ERR(("[WPS] macaddr mismatch\n"));
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ /* Mac addr not matching. Ignore. */
+ return BCME_ERROR;
}
- return TRUE;
-}
-#ifdef WL_IRQSET
-static void wl_irq_set_work_handler(struct work_struct * work)
-{
- struct bcm_cfg80211 *cfg = NULL;
- BCM_SET_CONTAINER_OF(cfg, work, struct bcm_cfg80211, irq_set_work.work);
+ if (cur_state == WPS_STATE_REAUTH_WAIT) {
+ /* WPS session succeeded. del session. */
+ cfg->wps_session[inst].state = WPS_STATE_DONE;
+ wps_done = true;
+ WL_INFORM_MEM(("[%s][WPS] Authorize done (WPS DONE)\n", ndev->name));
+ ret = BCME_OK;
+ } else {
+ WL_INFORM_MEM(("[%s][WPS] unexpected Authorize in state:%d \n",
+ ndev->name, cur_state));
+ ret = BCME_ERROR;
+ }
- if (cfg) {
- dhd_irq_set_affinity(cfg->pub);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ if (wps_done) {
+ wl_wps_session_del(ndev);
}
+ return ret;
}
-#endif /* WL_IRQSET */
-#ifdef WL_WPS_SYNC
-static void wl_wps_reauth_timeout(unsigned long data)
+static s32
+wl_wps_handle_reauth(struct net_device *ndev, u16 inst, const u8 *peer_mac)
{
- struct net_device *ndev = (struct net_device *)data;
struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- s32 inst;
unsigned long flags;
+ u16 cur_state;
+ u16 mode;
+ s32 ret = BCME_OK;
- spin_lock_irqsave(&cfg->wps_sync, flags);
- inst = wl_get_wps_inst_match(cfg, ndev);
- if (inst >= 0) {
- WL_ERR(("[%s][WPS] Reauth Timeout Inst:%d! state:%d\n",
- ndev->name, inst, cfg->wps_session[inst].state));
- if (cfg->wps_session[inst].state == WPS_STATE_REAUTH_WAIT) {
- /* Session should get deleted from success (linkup) or
- * deauth case. Just in case, link reassoc failed, clear
- * state here.
- */
- WL_ERR(("[%s][WPS] Reauth Timeout Inst:%d!\n",
- ndev->name, inst));
- cfg->wps_session[inst].state = WPS_STATE_IDLE;
- cfg->wps_session[inst].in_use = false;
- }
- }
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
-}
-
-static void wl_init_wps_reauth_sm(struct bcm_cfg80211 *cfg)
-{
- /* Only two instances are supported as of now. one for
- * infra STA and other for infra STA/GC.
- */
- int i = 0;
- struct net_device *pdev = bcmcfg_to_prmry_ndev(cfg);
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ cur_state = cfg->wps_session[inst].state;
+ mode = cfg->wps_session[inst].mode;
- spin_lock_init(&cfg->wps_sync);
- for (i = 0; i < WPS_MAX_SESSIONS; i++) {
- /* Init scan_timeout timer */
- init_timer(&cfg->wps_session[i].timer);
- cfg->wps_session[i].timer.data = (unsigned long) pdev;
- cfg->wps_session[i].timer.function = wl_wps_reauth_timeout;
- cfg->wps_session[i].in_use = false;
- cfg->wps_session[i].state = WPS_STATE_IDLE;
+ if (((mode == WL_MODE_BSS) && (cur_state == WPS_STATE_STARTED)) ||
+ ((mode == WL_MODE_AP) && (cur_state == WPS_STATE_M8_SENT))) {
+ /* Move to reauth wait */
+ cfg->wps_session[inst].state = WPS_STATE_REAUTH_WAIT;
+ /* Use ndev to find the wps instance which fired the timer */
+ timer_set_private(&cfg->wps_session[inst].timer, ndev);
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ mod_timer(&cfg->wps_session[inst].timer,
+ jiffies + msecs_to_jiffies(WL_WPS_REAUTH_TIMEOUT));
+ WL_INFORM_MEM(("[%s][WPS] STATE_REAUTH_WAIT mode:%d Peer: " MACDBG "\n",
+ ndev->name, mode, MAC2STRDBG(peer_mac)));
+ return BCME_OK;
+ } else {
+ /* 802.1x cases */
+ WL_DBG(("[%s][WPS] EAP-FAIL\n", ndev->name));
}
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ return ret;
}
-static void wl_deinit_wps_reauth_sm(struct bcm_cfg80211 *cfg)
+static s32
+wl_wps_handle_disconnect(struct net_device *ndev, u16 inst, const u8 *peer_mac)
{
- int i = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
+ s32 ret = BCME_OK;
- for (i = 0; i < WPS_MAX_SESSIONS; i++) {
- cfg->wps_session[i].in_use = false;
- cfg->wps_session[i].state = WPS_STATE_IDLE;
- if (timer_pending(&cfg->wps_session[i].timer)) {
- del_timer_sync(&cfg->wps_session[i].timer);
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ cur_state = cfg->wps_session[inst].state;
+ /* If Disconnect command comes from user space for STA/GC,
+ * respond with event without waiting for event from fw as
+ * it would be dropped by the WPS_SYNC code.
+ */
+ if (cur_state == WPS_STATE_REAUTH_WAIT) {
+ if (ETHER_ISBCAST(peer_mac)) {
+ WL_DBG(("[WPS] Bcast peer. Do nothing.\n"));
+ } else {
+ /* Notify link down */
+ CFG80211_DISCONNECTED(ndev,
+ WLAN_REASON_DEAUTH_LEAVING, NULL, 0,
+ true, GFP_ATOMIC);
}
+ } else {
+ WL_DBG(("[%s][WPS] Not valid state to report disconnected:%d",
+ ndev->name, cur_state));
+ ret = BCME_UNSUPPORTED;
}
-
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ return ret;
}
static s32
-wl_get_free_wps_inst(struct bcm_cfg80211 *cfg)
+wl_wps_handle_disconnect_client(struct net_device *ndev, u16 inst, const u8 *peer_mac)
{
- int i;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
+ s32 ret = BCME_OK;
+ bool wps_done = false;
- for (i = 0; i < WPS_MAX_SESSIONS; i++) {
- if (!cfg->wps_session[i].in_use) {
- return i;
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ cur_state = cfg->wps_session[inst].state;
+ /* For GO/AP, ignore disconnect client during reauth state */
+ if (cur_state == WPS_STATE_REAUTH_WAIT) {
+ if (ETHER_ISBCAST(peer_mac)) {
+ /* If there is broadcast deauth, then mark wps session as ended */
+ cfg->wps_session[inst].state = WPS_STATE_DONE;
+ wps_done = true;
+ WL_INFORM_MEM(("[%s][WPS] BCAST deauth. WPS stopped.\n", ndev->name));
+ ret = BCME_OK;
+ goto exit;
+ } else if (!(memcmp(cfg->wps_session[inst].peer_mac,
+ peer_mac, ETH_ALEN))) {
+ WL_ERR(("[%s][WPS] Drop disconnect client\n", ndev->name));
+ ret = BCME_UNSUPPORTED;
}
}
- return BCME_ERROR;
+
+exit:
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ if (wps_done) {
+ wl_wps_session_del(ndev);
+ }
+ return ret;
}
static s32
-wl_get_wps_inst_match(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+wl_wps_handle_connect_fail(struct net_device *ndev, u16 inst)
{
- int i;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ unsigned long flags;
+ u16 cur_state;
+ bool wps_done = false;
- for (i = 0; i < WPS_MAX_SESSIONS; i++) {
- if ((cfg->wps_session[i].in_use) &&
- (ndev == cfg->wps_session[i].ndev)) {
- return i;
- }
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ cur_state = cfg->wps_session[inst].state;
+ if (cur_state == WPS_STATE_REAUTH_WAIT) {
+ cfg->wps_session[inst].state = WPS_STATE_DONE;
+ wps_done = true;
+ WL_INFORM_MEM(("[%s][WPS] Connect fail. WPS stopped.\n",
+ ndev->name));
+ } else {
+ WL_ERR(("[%s][WPS] Connect fail. state:%d\n",
+ ndev->name, cur_state));
}
-
- return BCME_ERROR;
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ if (wps_done) {
+ wl_wps_session_del(ndev);
+ }
+ return BCME_OK;
}
static s32
-wl_wps_session_add(struct net_device *ndev, u16 mode, u8 *mac_addr)
+wl_wps_handle_m8_sent(struct net_device *ndev, u16 inst, const u8 *peer_mac)
{
- s32 inst;
struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
unsigned long flags;
+ u16 cur_state;
+ s32 ret = BCME_OK;
- spin_lock_irqsave(&cfg->wps_sync, flags);
- /* Fetch and initialize a wps instance */
- inst = wl_get_free_wps_inst(cfg);
- if (inst == BCME_ERROR) {
- WL_ERR(("[WPS] No free insance\n"));
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
- return BCME_ERROR;
- }
- cfg->wps_session[inst].in_use = true;
- cfg->wps_session[inst].state = WPS_STATE_STARTED;
- cfg->wps_session[inst].ndev = ndev;
- cfg->wps_session[inst].mode = mode;
- memcpy(cfg->wps_session[inst].peer_mac, mac_addr, ETH_ALEN);
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ cur_state = cfg->wps_session[inst].state;
- WL_INFORM_MEM(("[%s][WPS] session created. Peer: " MACDBG "\n",
- ndev->name, MAC2STRDBG(mac_addr)));
- return BCME_OK;
+ if (cur_state == WPS_STATE_STARTED) {
+ /* Move to M8 sent state */
+ cfg->wps_session[inst].state = WPS_STATE_M8_SENT;
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ return BCME_OK;
+ } else {
+ /* 802.1x cases */
+ WL_DBG(("[%s][WPS] Not valid state to send M8\n", ndev->name));
+ }
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ return ret;
}
-static void
-wl_wps_session_del(struct net_device *ndev)
+static s32
+wl_wps_session_update(struct net_device *ndev, u16 state, const u8 *peer_mac)
{
s32 inst;
+ u16 mode;
struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ s32 ret = BCME_ERROR;
unsigned long flags;
- u16 cur_state;
- spin_lock_irqsave(&cfg->wps_sync, flags);
+ WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ /* Get current instance for the given ndev */
+ inst = wl_get_wps_inst_match(cfg, ndev);
+ if (inst == BCME_ERROR) {
+ /* No active WPS session. Do Nothing. */
+ WL_DBG(("[%s][WPS] No matching instance.\n", ndev->name));
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ return BCME_NOTFOUND;
+ }
+ mode = cfg->wps_session[inst].mode;
+ WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+
+ WL_DBG(("[%s][WPS] state:%d mode:%d Peer: " MACDBG "\n",
+ ndev->name, state, mode, MAC2STRDBG(peer_mac)));
+
+ switch (state) {
+ case WPS_STATE_M8_RECVD:
+ {
+ /* Occasionally, due to race condition between ctrl
+ * and data path, deauth ind is recvd before EAP-FAIL.
+ * Ignore deauth ind before EAP-FAIL
+ * So move to REAUTH WAIT on receiving M8 on GC and
+ * ignore deauth ind before EAP-FAIL till 'x' timeout.
+ * Kickoff a timer to monitor reauth status.
+ */
+ if (mode == WL_MODE_BSS) {
+ ret = wl_wps_handle_reauth(ndev, inst, peer_mac);
+ } else {
+ /* Nothing to be done for AP/GO mode */
+ ret = BCME_OK;
+ }
+ break;
+ }
+ case WPS_STATE_M8_SENT:
+ {
+			/* Maintain the M8 sent state to verify
+ * EAP-FAIL sent is valid
+ */
+ if (mode == WL_MODE_AP) {
+ ret = wl_wps_handle_m8_sent(ndev, inst, peer_mac);
+ } else {
+ /* Nothing to be done for STA/GC mode */
+ ret = BCME_OK;
+ }
+ break;
+ }
+ case WPS_STATE_EAP_FAIL:
+ {
+ /* Move to REAUTH WAIT following EAP-FAIL TX on GO/AP.
+ * Kickoff a timer to monitor reauth status
+ */
+ if (mode == WL_MODE_AP) {
+ ret = wl_wps_handle_reauth(ndev, inst, peer_mac);
+ } else {
+ /* Nothing to be done for STA/GC mode */
+ ret = BCME_OK;
+ }
+ break;
+ }
+ case WPS_STATE_LINKDOWN:
+ {
+ if (mode == WL_MODE_BSS) {
+ ret = wl_wps_handle_sta_linkdown(ndev, inst);
+ } else if (mode == WL_MODE_AP) {
+ /* Take action only for matching peer mac */
+ if (!memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
+ ret = wl_wps_handle_peersta_linkdown(ndev, inst, peer_mac);
+ }
+ }
+ break;
+ }
+ case WPS_STATE_LINKUP:
+ {
+ if (mode == WL_MODE_BSS) {
+ wl_wps_handle_sta_linkup(ndev, inst);
+ } else if (mode == WL_MODE_AP) {
+ /* Take action only for matching peer mac */
+ if (!memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
+ wl_wps_handle_peersta_linkup(ndev, inst, peer_mac);
+ }
+ }
+ break;
+ }
+ case WPS_STATE_DISCONNECT_CLIENT:
+ {
+ /* Disconnect STA/GC command from user space */
+ if (mode == WL_MODE_AP) {
+ ret = wl_wps_handle_disconnect_client(ndev, inst, peer_mac);
+ } else {
+ WL_ERR(("[WPS] Unsupported mode %d\n", mode));
+ }
+ break;
+ }
+ case WPS_STATE_DISCONNECT:
+ {
+ /* Disconnect command on STA/GC interface */
+ if (mode == WL_MODE_BSS) {
+ ret = wl_wps_handle_disconnect(ndev, inst, peer_mac);
+ }
+ break;
+ }
+ case WPS_STATE_CONNECT_FAIL:
+ {
+ if (mode == WL_MODE_BSS) {
+ ret = wl_wps_handle_connect_fail(ndev, inst);
+ } else {
+ WL_ERR(("[WPS] Unsupported mode %d\n", mode));
+ }
+ break;
+ }
+ case WPS_STATE_AUTHORIZE:
+ {
+ if (mode == WL_MODE_AP) {
+ /* Take action only for matching peer mac */
+ if (!memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
+ wl_wps_handle_authorize(ndev, inst, peer_mac);
+ } else {
+ WL_INFORM_MEM(("[WPS] Authorize Request for wrong peer\n"));
+ }
+ }
+ break;
+ }
+
+ default:
+ WL_ERR(("[WPS] Unsupported state:%d mode:%d\n", state, mode));
+ ret = BCME_ERROR;
+ }
+
+ return ret;
+}
- /* Get current instance for the given ndev */
- inst = wl_get_wps_inst_match(cfg, ndev);
- if (inst == BCME_ERROR) {
- WL_DBG(("[WPS] instance match NOT found\n"));
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+#define EAP_EXP_ATTRIB_DATA_OFFSET 14
+void
+wl_handle_wps_states(struct net_device *ndev, u8 *pkt, u16 len, bool direction)
+{
+ eapol_header_t *eapol_hdr;
+ bool tx_packet = direction;
+ u16 eapol_type;
+ u16 mode;
+ u8 *peer_mac;
+
+ if (!ndev || !pkt) {
+ WL_ERR(("[WPS] Invalid arg\n"));
return;
}
- cur_state = cfg->wps_session[inst].state;
- if (cur_state != WPS_STATE_DONE) {
- WL_DBG(("[WPS] wrong state:%d\n", cur_state));
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ if (len < (ETHER_HDR_LEN + EAPOL_HDR_LEN)) {
+ WL_ERR(("[WPS] Invalid len\n"));
return;
}
- /* Mark this as unused */
- cfg->wps_session[inst].in_use = false;
- cfg->wps_session[inst].state = WPS_STATE_IDLE;
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ eapol_hdr = (eapol_header_t *)pkt;
+ eapol_type = eapol_hdr->type;
- /* Ensure this API is called from sleepable context. */
- if (timer_pending(&cfg->wps_session[inst].timer)) {
- del_timer_sync(&cfg->wps_session[inst].timer);
- }
+ peer_mac = tx_packet ? eapol_hdr->eth.ether_dhost :
+ eapol_hdr->eth.ether_shost;
+ /*
+ * The implementation assumes only one WPS session would be active
+ * per interface at a time. Even for hostap, the wps_pin session
+ * is limited to one enrollee/client at a time. A session is marked
+ * started on WSC_START and gets cleared from below contexts
+ * a) Deauth/link down before reaching EAP-FAIL state. (Fail case)
+ * b) Link up following EAP-FAIL. (success case)
+ * c) Link up timeout after EAP-FAIL. (Fail case)
+ */
- WL_INFORM_MEM(("[%s][WPS] session deleted\n", ndev->name));
-}
+ if (eapol_type == EAP_PACKET) {
+ wl_eap_header_t *eap;
-static void
-wl_wps_handle_ifdel(struct net_device *ndev)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
- u16 cur_state;
- s32 inst;
+ if (len > sizeof(*eap)) {
+ eap = (wl_eap_header_t *)(pkt + ETHER_HDR_LEN + EAPOL_HDR_LEN);
+ if (eap->type == EAP_EXPANDED_TYPE) {
+ wl_eap_exp_t *exp = (wl_eap_exp_t *)eap->data;
+ if (eap->length > EAP_EXP_HDR_MIN_LENGTH) {
+ /* opcode is at fixed offset */
+ u8 opcode = exp->opcode;
+ u16 eap_len = ntoh16(eap->length);
- spin_lock_irqsave(&cfg->wps_sync, flags);
- inst = wl_get_wps_inst_match(cfg, ndev);
- cur_state = cfg->wps_session[inst].state;
- cfg->wps_session[inst].state = WPS_STATE_DONE;
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ WL_DBG(("[%s][WPS] EAP EXPANDED packet. opcode:%x len:%d\n",
+ ndev->name, opcode, eap_len));
+ if (opcode == EAP_WSC_MSG) {
+ const u8 *msg;
+ const u8* parse_buf = exp->data;
+ /* Check if recvd pkt is fragmented */
+ if ((!tx_packet) &&
+ (exp->flags &
+ EAP_EXP_FLAGS_FRAGMENTED_DATA)) {
+ if ((eap_len - EAP_EXP_ATTRIB_DATA_OFFSET)
+ > 2) {
+ parse_buf +=
+ EAP_EXP_FRAGMENT_LEN_OFFSET;
+ eap_len -=
+ EAP_EXP_FRAGMENT_LEN_OFFSET;
+ WL_DBG(("Rcvd EAP"
+ " fragmented pkt\n"));
+ } else {
+ /* If recvd pkt is fragmented
+ * and does not have
+ * length field drop the packet.
+ */
+ return;
+ }
+ }
- WL_INFORM_MEM(("[%s][WPS] state:%x\n", ndev->name, cur_state));
- if (cur_state > WPS_STATE_IDLE) {
- wl_wps_session_del(ndev);
+ msg = wl_find_attribute(parse_buf,
+ (eap_len - EAP_EXP_ATTRIB_DATA_OFFSET),
+ EAP_ATTRIB_MSGTYPE);
+ if (unlikely(!msg)) {
+ WL_ERR(("[WPS] ATTRIB MSG not found!\n"));
+ } else if ((*msg == EAP_WSC_MSG_M8) &&
+ !tx_packet) {
+ WL_INFORM_MEM(("[%s][WPS] M8\n",
+ ndev->name));
+ wl_wps_session_update(ndev,
+ WPS_STATE_M8_RECVD, peer_mac);
+ } else if ((*msg == EAP_WSC_MSG_M8) &&
+ tx_packet) {
+ WL_INFORM_MEM(("[%s][WPS] M8 Sent\n",
+ ndev->name));
+ wl_wps_session_update(ndev,
+ WPS_STATE_M8_SENT, peer_mac);
+ } else {
+ WL_DBG(("[%s][WPS] EAP WSC MSG: 0x%X\n",
+ ndev->name, *msg));
+ }
+ } else if (opcode == EAP_WSC_START) {
+ /* WSC session started. WSC_START - Tx from GO/AP.
+ * Session will be deleted on successful link up or
+ * on failure (deauth context)
+ */
+ mode = tx_packet ? WL_MODE_AP : WL_MODE_BSS;
+ wl_wps_session_add(ndev, mode, peer_mac);
+ WL_INFORM_MEM(("[%s][WPS] WSC_START Mode:%d\n",
+ ndev->name, mode));
+ } else if (opcode == EAP_WSC_DONE) {
+ /* WSC session done. TX on STA/GC. RX on GO/AP
+ * On devices where config file save fails, it may
+ * return WPS_NAK with config_error:0. But the
+ * connection would still proceed. Hence don't let
+ * state machine depend on WSC DONE.
+ */
+ WL_INFORM_MEM(("[%s][WPS] WSC_DONE\n", ndev->name));
+ }
+ }
+ }
+
+ if (eap->code == EAP_CODE_FAILURE) {
+ /* EAP_FAIL */
+ WL_INFORM_MEM(("[%s][WPS] EAP_FAIL\n", ndev->name));
+ wl_wps_session_update(ndev,
+ WPS_STATE_EAP_FAIL, peer_mac);
+ }
+ }
}
}
+#endif /* WL_WPS_SYNC */
-static s32
-wl_wps_handle_sta_linkdown(struct net_device *ndev, u16 inst)
+s32
+wl_cfg80211_sup_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *event, void *data)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
- u16 cur_state;
- bool wps_done = false;
+ int err = BCME_OK;
+ u32 status = ntoh32(event->status);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ u32 reason = ntoh32(event->reason);
- spin_lock_irqsave(&cfg->wps_sync, flags);
- cur_state = cfg->wps_session[inst].state;
- if (cur_state == WPS_STATE_REAUTH_WAIT) {
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
- wl_clr_drv_status(cfg, CONNECTED, ndev);
- wl_clr_drv_status(cfg, DISCONNECTING, ndev);
- WL_INFORM_MEM(("[%s][WPS] REAUTH link down\n", ndev->name));
- /* Drop the link down event while we are waiting for reauth */
- return BCME_UNSUPPORTED;
- } else if (cur_state == WPS_STATE_STARTED) {
- /* Link down before reaching EAP-FAIL. End WPS session */
- cfg->wps_session[inst].state = WPS_STATE_DONE;
- wps_done = true;
- WL_INFORM_MEM(("[%s][WPS] link down after wps start\n", ndev->name));
- } else {
- WL_DBG(("[%s][WPS] link down in state:%d\n",
- ndev->name, cur_state));
+ if (!wl_get_drv_status(cfg, CFG80211_CONNECT, ndev)) {
+ /* Join attempt via non-cfg80211 interface.
+ * Don't send resultant events to cfg80211
+ * layer
+ */
+ WL_INFORM_MEM(("Event received in non-cfg80211"
+ " connect state. Ignore\n"));
+ return BCME_OK;
}
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ if ((status == WLC_SUP_KEYED || status == WLC_SUP_KEYXCHANGE_WAIT_G1) &&
+ reason == WLC_E_SUP_OTHER) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
+ /* NL80211_CMD_PORT_AUTHORIZED supported above >= 4.15 */
+ cfg80211_port_authorized(ndev, (u8 *)wl_read_prof(cfg, ndev, WL_PROF_BSSID),
+ GFP_KERNEL);
+ WL_INFORM_MEM(("4way HS finished. port authorized event sent\n"));
+#elif ((LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || \
+ defined(WL_VENDOR_EXT_SUPPORT))
+ err = wl_cfgvendor_send_async_event(bcmcfg_to_wiphy(cfg), ndev,
+ BRCM_VENDOR_EVENT_PORT_AUTHORIZED, NULL, 0);
+ WL_INFORM_MEM(("4way HS finished. port authorized event sent\n"));
+#else
+		/* not supported in kernel <= 3.14.0 */
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) */
+ } else if (status < WLC_SUP_KEYXCHANGE_WAIT_G1 && (reason != WLC_E_SUP_OTHER &&
+ reason != WLC_E_SUP_PTK_UPDATE)) {
+ /* if any failure seen while 4way HS, should send NL80211_CMD_DISCONNECT */
+ WL_ERR(("4way HS error. status:%d, reason:%d\n", status, reason));
+ CFG80211_DISCONNECTED(ndev, 0, NULL, 0, false, GFP_KERNEL);
+ }
- if (wps_done) {
- wl_wps_session_del(ndev);
+ return err;
+}
+
+#ifdef WL_BCNRECV
+static s32
+wl_bcnrecv_aborted_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 status = ntoh32(e->status);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ /* Abort fakeapscan, when Roam is in progress */
+ if (status == WLC_E_STATUS_RXBCN_ABORT) {
+ wl_android_bcnrecv_stop(ndev, WL_BCNRECV_ROAMABORT);
+ } else {
+ WL_ERR(("UNKNOWN STATUS. status:%d\n", status));
}
return BCME_OK;
}
+#endif /* WL_BCNRECV */
+#ifdef WL_MBO
static s32
-wl_wps_handle_peersta_linkdown(struct net_device *ndev, u16 inst, const u8 *peer_mac)
+wl_mbo_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
- u16 cur_state;
- s32 ret = BCME_OK;
- bool wps_done = false;
-
- spin_lock_irqsave(&cfg->wps_sync, flags);
- cur_state = cfg->wps_session[inst].state;
+ s32 err = 0;
+ wl_event_mbo_t *mbo_evt = (wl_event_mbo_t *)data;
+ wl_event_mbo_cell_nw_switch_t *cell_sw_evt = NULL;
+ wl_btm_event_type_data_t *evt_data = NULL;
- if (!peer_mac) {
- WL_ERR(("Invalid arg\n"));
- ret = BCME_ERROR;
- goto exit;
- }
+ WL_INFORM(("MBO: Evt %u\n", mbo_evt->type));
- /* AP/GO can have multiple clients. so validate peer_mac addr
- * and ensure states are updated only for right peer.
- */
- if (memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
- /* Mac addr not matching. Ignore. */
- WL_DBG(("[%s][WPS] No active WPS session"
- "for the peer:" MACDBG "\n", ndev->name, MAC2STRDBG(peer_mac)));
- ret = BCME_OK;
- goto exit;
- }
- if (cur_state == WPS_STATE_REAUTH_WAIT) {
- WL_INFORM_MEM(("[%s][WPS] REAUTH link down."
- " Peer: " MACDBG "\n",
- ndev->name, MAC2STRDBG(peer_mac)));
-#ifdef NOT_YET
- /* Link down during REAUTH state is expected. However,
- * if this is send up, hostapd statemachine issues a
- * deauth down and that may pre-empt WPS reauth state
- * at GC.
- */
- WL_INFORM_MEM(("[%s][WPS] REAUTH link down. Ignore."
- " for client:" MACDBG "\n",
- ndev->name, MAC2STRDBG(peer_mac)));
- ret = BCME_UNSUPPORTED;
-#endif // endif
- } else if (cur_state == WPS_STATE_STARTED) {
- /* Link down before reaching REAUTH_WAIT state. WPS
- * session ended.
- */
- cfg->wps_session[inst].state = WPS_STATE_DONE;
- WL_INFORM_MEM(("[%s][WPS] link down after wps start"
- " client:" MACDBG "\n",
- ndev->name, MAC2STRDBG(peer_mac)));
- wps_done = true;
- /* since we have freed lock above, return from here */
- ret = BCME_OK;
+ if (mbo_evt->type == WL_MBO_E_CELLULAR_NW_SWITCH) {
+ cell_sw_evt = (wl_event_mbo_cell_nw_switch_t *)mbo_evt->data;
+ BCM_REFERENCE(cell_sw_evt);
+ SUPP_EVENT(("CTRL-EVENT-CELLULAR-SWITCH", "reason %d cur_assoc_time_left %u "
+ "reassoc_delay %u\n", cell_sw_evt->reason,
+ cell_sw_evt->assoc_time_remain, cell_sw_evt->reassoc_delay));
+ } else if (mbo_evt->type == WL_MBO_E_BTM_RCVD) {
+ evt_data = (wl_btm_event_type_data_t *)mbo_evt->data;
+ if (evt_data->version != WL_BTM_EVENT_DATA_VER_1) {
+ WL_ERR(("version mismatch. rcvd %u expected %u\n",
+ evt_data->version, WL_BTM_EVENT_DATA_VER_1));
+ return -1;
+ }
+ SUPP_EVENT(("CTRL-EVENT-BRCM-BTM-REQ-RCVD", "reason=%u\n",
+ evt_data->transition_reason));
} else {
- WL_ERR(("[%s][WPS] Unsupported state:%d",
- ndev->name, cur_state));
- ret = BCME_ERROR;
- }
-exit:
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
- if (wps_done) {
- wl_wps_session_del(ndev);
+ WL_INFORM(("UNKNOWN EVENT. type:%u\n", mbo_evt->type));
}
- return ret;
+ return err;
}
+#endif /* WL_MBO */
+#ifdef WL_CAC_TS
static s32
-wl_wps_handle_sta_linkup(struct net_device *ndev, u16 inst)
+wl_cfg80211_cac_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
- u16 cur_state;
- s32 ret = BCME_OK;
- bool wps_done = false;
+ u32 event = ntoh32(e->event_type);
+ s32 status = ntoh32(e->status);
+ s32 reason = ntoh32(e->reason);
- spin_lock_irqsave(&cfg->wps_sync, flags);
- cur_state = cfg->wps_session[inst].state;
- if (cur_state == WPS_STATE_REAUTH_WAIT) {
- /* WPS session succeeded. del session. */
- cfg->wps_session[inst].state = WPS_STATE_DONE;
- wps_done = true;
- WL_INFORM_MEM(("[%s][WPS] WPS_REAUTH link up (WPS DONE)\n", ndev->name));
- ret = BCME_OK;
- } else {
- WL_ERR(("[%s][WPS] unexpected link up in state:%d \n",
- ndev->name, cur_state));
- ret = BCME_ERROR;
- }
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
- if (wps_done) {
- wl_wps_session_del(ndev);
+ BCM_REFERENCE(reason);
+
+ if (event == WLC_E_ADDTS_IND) {
+ /* The supp log format of adding ts_delay in success case needs to be maintained */
+ if (status == WLC_E_STATUS_SUCCESS) {
+ uint *ts_delay = (uint *)data;
+ BCM_REFERENCE(ts_delay);
+ SUPP_EVENT(("CTRL-EVENT-CAC-ADDTS", "status=%d reason=%d ts_delay=%u\n",
+ status, reason, *ts_delay));
+ } else {
+ SUPP_EVENT(("CTRL-EVENT-CAC-ADDTS", "status=%d reason=%d\n",
+ status, reason));
+ }
+ } else if (event == WLC_E_DELTS_IND) {
+ SUPP_EVENT(("CTRL-EVENT-CAC-DELTS", "status=%d reason=%d\n", status, reason));
}
- return ret;
+
+ return BCME_OK;
}
+#endif /* WL_CAC_TS */
+#if defined(WL_MBO) || defined(WL_OCE)
static s32
-wl_wps_handle_peersta_linkup(struct net_device *ndev, u16 inst, const u8 *peer_mac)
+wl_bssid_prune_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
- u16 cur_state;
- s32 ret = BCME_OK;
+ s32 err = 0;
+ uint reason = 0;
+ wl_bssid_pruned_evt_info_t *evt_info = (wl_bssid_pruned_evt_info_t *)data;
+
+ if (evt_info->version == WL_BSSID_PRUNE_EVT_VER_1) {
+ if (evt_info->reason == WLC_E_PRUNE_ASSOC_RETRY_DELAY) {
+ /* MBO assoc retry delay */
+ reason = WIFI_PRUNE_ASSOC_RETRY_DELAY;
+ SUPP_EVENT(("CTRL-EVENT-BRCM-BSSID-PRUNED", "ssid=%s bssid=" MACF
+ " reason=%u timeout_val=%u(ms)\n", evt_info->SSID,
+ ETHER_TO_MACF(evt_info->BSSID), reason, evt_info->time_remaining));
+ } else if (evt_info->reason == WLC_E_PRUNE_RSSI_ASSOC_REJ) {
+ /* OCE RSSI-based assoc rejection */
+ reason = WIFI_PRUNE_RSSI_ASSOC_REJ;
+ SUPP_EVENT(("CTRL-EVENT-BRCM-BSSID-PRUNED", "ssid=%s bssid=" MACF
+ " reason=%u timeout_val=%u(ms) rssi_threshold=%d(dBm)\n",
+ evt_info->SSID, ETHER_TO_MACF(evt_info->BSSID),
+ reason, evt_info->time_remaining, evt_info->rssi_threshold));
+ } else {
+			/* Reasons other than assoc retry delay / RSSI-based
+			 * assoc rejection are not valid in the current handler
+			 */
+ BCM_REFERENCE(reason);
+ WL_INFORM(("INVALID. reason:%u\n", evt_info->reason));
+ }
+ } else {
+ WL_INFORM(("version mismatch. rcvd %u expected %u\n", evt_info->version,
+ WL_BSSID_PRUNE_EVT_VER_1));
+ }
+ return err;
+}
+#endif /* WL_MBO || WL_OCE */
+#ifdef RTT_SUPPORT
+static s32
+wl_cfg80211_rtt_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ wl_event_msg_t event;
- spin_lock_irqsave(&cfg->wps_sync, flags);
- cur_state = cfg->wps_session[inst].state;
+ (void)memcpy_s(&event, sizeof(wl_event_msg_t),
+ e, sizeof(wl_event_msg_t));
+ return dhd_rtt_event_handler(dhdp, &event, data);
+}
+#endif /* RTT_SUPPORT */
- /* For AP case, check whether call came for right peer */
- if (!peer_mac ||
- memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
- WL_ERR(("[WPS] macaddr mismatch\n"));
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
- /* Mac addr not matching. Ignore. */
- return BCME_ERROR;
+void
+wl_print_verinfo(struct bcm_cfg80211 *cfg)
+{
+ char *ver_ptr;
+ uint32 alloc_len = MOD_PARAM_INFOLEN;
+
+ if (!cfg) {
+ WL_ERR(("cfg is NULL\n"));
+ return;
}
- if (cur_state == WPS_STATE_REAUTH_WAIT) {
- WL_INFORM_MEM(("[%s][WPS] REAUTH link up\n", ndev->name));
- ret = BCME_OK;
- } else {
- WL_INFORM_MEM(("[%s][WPS] unexpected link up in state:%d \n",
- ndev->name, cur_state));
- ret = BCME_ERROR;
+ ver_ptr = (char *)MALLOCZ(cfg->osh, alloc_len);
+ if (!ver_ptr) {
+ WL_ERR(("Failed to alloc ver_ptr\n"));
+ return;
}
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ if (!dhd_os_get_version(bcmcfg_to_prmry_ndev(cfg),
+ TRUE, &ver_ptr, alloc_len)) {
+ WL_ERR(("DHD Version: %s\n", ver_ptr));
+ }
- return ret;
+ if (!dhd_os_get_version(bcmcfg_to_prmry_ndev(cfg),
+ FALSE, &ver_ptr, alloc_len)) {
+ WL_ERR(("F/W Version: %s\n", ver_ptr));
+ }
+
+ MFREE(cfg->osh, ver_ptr, alloc_len);
}
+#if defined(WL_DISABLE_HE_SOFTAP) || defined(WL_DISABLE_HE_P2P)
+typedef struct {
+ uint16 id;
+ uint16 len;
+ uint32 val;
+} he_xtlv_v32;
-static s32
-wl_wps_handle_authorize(struct net_device *ndev, u16 inst, const u8 *peer_mac)
+ static bool
+wl_he_get_uint_cb(void *ctx, uint16 *id, uint16 *len)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
- u16 cur_state;
- bool wps_done = false;
- s32 ret = BCME_OK;
+ he_xtlv_v32 *v32 = ctx;
- spin_lock_irqsave(&cfg->wps_sync, flags);
- cur_state = cfg->wps_session[inst].state;
+ *id = v32->id;
+ *len = v32->len;
- /* For AP case, check whether call came for right peer */
- if (!peer_mac ||
- memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
- WL_ERR(("[WPS] macaddr mismatch\n"));
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
- /* Mac addr not matching. Ignore. */
- return BCME_ERROR;
+ return FALSE;
+}
+
+ static void
+wl_he_pack_uint_cb(void *ctx, uint16 id, uint16 len, uint8 *buf)
+{
+ he_xtlv_v32 *v32 = ctx;
+
+ BCM_REFERENCE(id);
+ BCM_REFERENCE(len);
+
+ v32->val = htod32(v32->val);
+
+ switch (v32->len) {
+ case sizeof(uint8):
+ *buf = (uint8)v32->val;
+ break;
+ case sizeof(uint16):
+ store16_ua(buf, (uint16)v32->val);
+ break;
+ case sizeof(uint32):
+ store32_ua(buf, v32->val);
+ break;
+ default:
+ /* ASSERT(0); */
+ break;
}
+}
- if (cur_state == WPS_STATE_REAUTH_WAIT) {
- /* WPS session succeeded. del session. */
- cfg->wps_session[inst].state = WPS_STATE_DONE;
- wps_done = true;
- WL_INFORM_MEM(("[%s][WPS] Authorize done (WPS DONE)\n", ndev->name));
- ret = BCME_OK;
+int wl_cfg80211_set_he_mode(struct net_device *dev, struct bcm_cfg80211 *cfg,
+ s32 bssidx, u32 interface_type, bool set)
+{
+ bcm_xtlv_t read_he_xtlv;
+ uint8 se_he_xtlv[32];
+ int se_he_xtlv_len = sizeof(se_he_xtlv);
+ he_xtlv_v32 v32;
+ u32 he_feature = 0;
+ s32 err = 0;
+ u32 he_interface = 0;
+
+ read_he_xtlv.id = WL_HE_CMD_FEATURES;
+ read_he_xtlv.len = 0;
+ err = wldev_iovar_getbuf_bsscfg(dev, "he", &read_he_xtlv, sizeof(read_he_xtlv),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, NULL);
+ if (err < 0) {
+ if (err == BCME_UNSUPPORTED) {
+ /* HE not supported. Do nothing. */
+ return BCME_OK;
+ }
+ WL_ERR(("HE get failed. error=%d\n", err));
} else {
- WL_INFORM_MEM(("[%s][WPS] unexpected Authorize in state:%d \n",
- ndev->name, cur_state));
- ret = BCME_ERROR;
+ he_feature = *(int*)cfg->ioctl_buf;
+ he_feature = dtoh32(he_feature);
}
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
- if (wps_done) {
- wl_wps_session_del(ndev);
+ v32.id = WL_HE_CMD_FEATURES;
+ v32.len = sizeof(s32);
+ if (interface_type == WL_IF_TYPE_P2P_DISC) {
+ he_interface = WL_HE_FEATURES_HE_P2P;
+ } else if (interface_type == WL_IF_TYPE_AP) {
+ he_interface = WL_HE_FEATURES_HE_AP;
+ } else {
+ WL_ERR(("HE request for Invalid interface type"));
+ err = BCME_BADARG;
+ return err;
}
- return ret;
+
+ if (set) {
+ v32.val = (he_feature | he_interface);
+ } else {
+ v32.val = (he_feature & ~he_interface);
+ }
+
+ err = bcm_pack_xtlv_buf((void *)&v32, se_he_xtlv, sizeof(se_he_xtlv),
+ BCM_XTLV_OPTION_ALIGN32, wl_he_get_uint_cb, wl_he_pack_uint_cb,
+ &se_he_xtlv_len);
+ if (err != BCME_OK) {
+ WL_ERR(("failed to pack he settvl=%d\n", err));
+ }
+
+ err = wldev_iovar_setbuf_bsscfg(dev, "he", &se_he_xtlv, sizeof(se_he_xtlv),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (err < 0) {
+ WL_ERR(("failed to set he features, error=%d\n", err));
+ }
+ WL_INFORM(("Set HE[%d] done\n", set));
+
+ return err;
}
+#endif /* WL_DISABLE_HE_SOFTAP || WL_DISABLE_HE_P2P */
+
+/* Get the concurrency mode */
+int wl_cfg80211_get_concurrency_mode(struct bcm_cfg80211 *cfg)
+{
+ struct net_info *iter, *next;
+ uint cmode = CONCURRENCY_MODE_NONE;
+ u32 connected_cnt = 0;
+ u32 pre_channel = 0, channel = 0;
+ u32 pre_band = 0;
+ u32 chanspec = 0;
+ u32 band = 0;
+ connected_cnt = wl_get_drv_status_all(cfg, CONNECTED);
+ if (connected_cnt <= 1) {
+ return cmode;
+ }
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ if (iter->ndev) {
+ if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
+ if (wldev_iovar_getint(iter->ndev, "chanspec",
+ (s32 *)&chanspec) == BCME_OK) {
+ channel = wf_chspec_ctlchan(
+ wl_chspec_driver_to_host(chanspec));
+ band = (channel <= CH_MAX_2G_CHANNEL) ?
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ }
+ if ((!pre_channel && channel)) {
+ pre_band = band;
+ pre_channel = channel;
+ } else if (pre_channel) {
+ if ((pre_band == band) && (pre_channel == channel)) {
+ cmode = CONCURRENCY_SCC_MODE;
+ goto exit;
+ } else if ((pre_band == band) && (pre_channel != channel)) {
+ cmode = CONCURRENCY_VSDB_MODE;
+ goto exit;
+ } else if (pre_band != band) {
+ cmode = CONCURRENCY_RSDB_MODE;
+ goto exit;
+ }
+ }
+ }
+ }
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif // endif
+exit:
+ return cmode;
+}
+#ifdef WL_CHAN_UTIL
static s32
-wl_wps_handle_reauth(struct net_device *ndev, u16 inst, const u8 *peer_mac)
+wl_cfg80211_bssload_report_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
- u16 cur_state;
- u16 mode;
- s32 ret = BCME_OK;
+ s32 err = BCME_OK;
+ struct sk_buff *skb = NULL;
+ s32 status = ntoh32(e->status);
+ u8 chan_use_percentage = 0;
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+ /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ uint len;
+ gfp_t kflags;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
- spin_lock_irqsave(&cfg->wps_sync, flags);
- cur_state = cfg->wps_session[inst].state;
- mode = cfg->wps_session[inst].mode;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ len = CU_ATTR_HDR_LEN + sizeof(u8);
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
- if (((mode == WL_MODE_BSS) && (cur_state == WPS_STATE_STARTED)) ||
- ((mode == WL_MODE_AP) && (cur_state == WPS_STATE_M8_SENT))) {
- /* Move to reauth wait */
- cfg->wps_session[inst].state = WPS_STATE_REAUTH_WAIT;
- /* Use ndev to find the wps instance which fired the timer */
- cfg->wps_session[inst].timer.data = (unsigned long) ndev;
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
- mod_timer(&cfg->wps_session[inst].timer,
- jiffies + msecs_to_jiffies(WL_WPS_REAUTH_TIMEOUT));
- WL_INFORM_MEM(("[%s][WPS] STATE_REAUTH_WAIT mode:%d Peer: " MACDBG "\n",
- ndev->name, mode, MAC2STRDBG(peer_mac)));
- return BCME_OK;
- } else {
- /* 802.1x cases */
- WL_DBG(("[%s][WPS] EAP-FAIL\n", ndev->name));
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+ skb = cfg80211_vendor_event_alloc(wiphy, ndev_to_wdev(ndev), len,
+ BRCM_VENDOR_EVENT_CU, kflags);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ skb = cfg80211_vendor_event_alloc(wiphy, len, BRCM_VENDOR_EVENT_CU, kflags);
+#else
+ /* No support exist */
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+ /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+ if (!skb) {
+ WL_ERR(("skb alloc failed"));
+ return -ENOMEM;
}
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
- return ret;
+
+ if ((status == WLC_E_STATUS_SUCCESS) && data) {
+ wl_bssload_t *bssload_report = (wl_bssload_t *)data;
+ chan_use_percentage = (bssload_report->chan_util * 100) / 255;
+ WL_DBG(("ChannelUtilization=%hhu\n", chan_use_percentage));
+ err = nla_put_u8(skb, CU_ATTR_PERCENTAGE, chan_use_percentage);
+ if (err < 0) {
+ WL_ERR(("Failed to put CU_ATTR_PERCENTAGE, err:%d\n", err));
+ }
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ cfg80211_vendor_event(skb, kflags);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
+
+ return err;
}
+#define WL_CHAN_UTIL_DEFAULT_INTERVAL 3000
+#define WL_CHAN_UTIL_THRESH_MIN 15
+#define WL_CHAN_UTIL_THRESH_INTERVAL 10
+#ifndef CUSTOM_CU_INTERVAL
+#define CUSTOM_CU_INTERVAL WL_CHAN_UTIL_DEFAULT_INTERVAL
+#endif /* CUSTOM_CU_INTERVAL */
+
static s32
-wl_wps_handle_disconnect(struct net_device *ndev, u16 inst, const u8 *peer_mac)
+wl_cfg80211_start_bssload_report(struct net_device *ndev)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
- u16 cur_state;
- s32 ret = BCME_OK;
+ s32 err = BCME_OK;
+ wl_bssload_cfg_t blcfg;
+ u8 i;
+ struct bcm_cfg80211 *cfg;
- spin_lock_irqsave(&cfg->wps_sync, flags);
- cur_state = cfg->wps_session[inst].state;
- /* If Disconnect command comes from user space for STA/GC,
- * respond with event without waiting for event from fw as
- * it would be dropped by the WPS_SYNC code.
- */
- if (cur_state == WPS_STATE_REAUTH_WAIT) {
- if (ETHER_ISBCAST(peer_mac)) {
- WL_DBG(("[WPS] Bcast peer. Do nothing.\n"));
- } else {
- /* Notify link down */
- CFG80211_DISCONNECTED(ndev,
- WLAN_REASON_DEAUTH_LEAVING, NULL, 0,
- true, GFP_ATOMIC);
- }
- } else {
- WL_DBG(("[%s][WPS] Not valid state to report disconnected:%d",
- ndev->name, cur_state));
- ret = BCME_UNSUPPORTED;
+ if (!ndev) {
+ return -ENODEV;
+ }
+
+ cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ return -ENODEV;
+ }
+
+ /* Typecasting to void as the buffer size is same as the memset size */
+ (void)memset_s(&blcfg, sizeof(wl_bssload_cfg_t), 0, sizeof(wl_bssload_cfg_t));
+	/* Set default report interval 3 sec and 8 threshold levels between 15 to 85% */
+ blcfg.rate_limit_msec = CUSTOM_CU_INTERVAL;
+ blcfg.num_util_levels = MAX_BSSLOAD_LEVELS;
+ for (i = 0; i < MAX_BSSLOAD_LEVELS; i++) {
+ blcfg.util_levels[i] = (((WL_CHAN_UTIL_THRESH_MIN +
+ (i * WL_CHAN_UTIL_THRESH_INTERVAL)) * 255) / 100);
}
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
- return ret;
+
+ err = wldev_iovar_setbuf(ndev, "bssload_report_event", &blcfg,
+ sizeof(wl_bssload_cfg_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("Set event_msgs error (%d)\n", err));
+ }
+
+ return err;
}
+#endif /* WL_CHAN_UTIL */
-static s32
-wl_wps_handle_disconnect_client(struct net_device *ndev, u16 inst, const u8 *peer_mac)
+s32
+wl_cfg80211_config_suspend_events(struct net_device *ndev, bool enable)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
- u16 cur_state;
- s32 ret = BCME_OK;
- bool wps_done = false;
+ s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+ s8 eventmask[WL_EVENTING_MASK_LEN];
+ s32 err = 0;
+ struct bcm_cfg80211 *cfg;
- spin_lock_irqsave(&cfg->wps_sync, flags);
- cur_state = cfg->wps_session[inst].state;
- /* For GO/AP, ignore disconnect client during reauth state */
- if (cur_state == WPS_STATE_REAUTH_WAIT) {
- if (ETHER_ISBCAST(peer_mac)) {
- /* If there is broadcast deauth, then mark wps session as ended */
- cfg->wps_session[inst].state = WPS_STATE_DONE;
- wps_done = true;
- WL_INFORM_MEM(("[%s][WPS] BCAST deauth. WPS stopped.\n", ndev->name));
- ret = BCME_OK;
- goto exit;
- } else if (!(memcmp(cfg->wps_session[inst].peer_mac,
- peer_mac, ETH_ALEN))) {
- WL_ERR(("[%s][WPS] Drop disconnect client\n", ndev->name));
- ret = BCME_UNSUPPORTED;
- }
+ if (!ndev) {
+ return -ENODEV;
}
-exit:
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
- if (wps_done) {
- wl_wps_session_del(ndev);
+ cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ return -ENODEV;
}
- return ret;
-}
-static s32
-wl_wps_handle_connect_fail(struct net_device *ndev, u16 inst)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
- u16 cur_state;
- bool wps_done = false;
+ mutex_lock(&cfg->event_sync);
+ err = wldev_iovar_getbuf(ndev, "event_msgs", NULL, 0, iovbuf, sizeof(iovbuf), NULL);
+ if (unlikely(err)) {
+ WL_ERR(("Get event_msgs error (%d)\n", err));
+ goto eventmsg_out;
+ }
- spin_lock_irqsave(&cfg->wps_sync, flags);
- cur_state = cfg->wps_session[inst].state;
- if (cur_state == WPS_STATE_REAUTH_WAIT) {
- cfg->wps_session[inst].state = WPS_STATE_DONE;
- wps_done = true;
- WL_INFORM_MEM(("[%s][WPS] Connect fail. WPS stopped.\n",
- ndev->name));
+ (void)memcpy_s(eventmask, WL_EVENTING_MASK_LEN, iovbuf, WL_EVENTING_MASK_LEN);
+ /* Add set/clear of event mask under feature specific flags */
+ if (enable) {
+ WL_DBG(("%s: Enabling events on resume\n", __FUNCTION__));
+#ifdef WL_CHAN_UTIL
+ setbit(eventmask, WLC_E_BSS_LOAD);
+#endif /* WL_CHAN_UTIL */
} else {
- WL_ERR(("[%s][WPS] Connect fail. state:%d\n",
- ndev->name, cur_state));
+ WL_DBG(("%s: Disabling events before suspend\n", __FUNCTION__));
+#ifdef WL_CHAN_UTIL
+ clrbit(eventmask, WLC_E_BSS_LOAD);
+#endif /* WL_CHAN_UTIL */
}
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
- if (wps_done) {
- wl_wps_session_del(ndev);
+
+ err = wldev_iovar_setbuf(ndev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
+ sizeof(iovbuf), NULL);
+ if (unlikely(err)) {
+ WL_ERR(("Set event_msgs error (%d)\n", err));
+ goto eventmsg_out;
}
- return BCME_OK;
+
+eventmsg_out:
+ mutex_unlock(&cfg->event_sync);
+ return err;
}
-static s32
-wl_wps_handle_m8_sent(struct net_device *ndev, u16 inst, const u8 *peer_mac)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+int
+wl_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_csa_settings *params)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
- u16 cur_state;
- s32 ret = BCME_OK;
+ s32 err = BCME_OK;
+ s32 chan = 0;
+ u32 band = 0;
+ u32 bw = WL_CHANSPEC_BW_20;
+ chanspec_t chspec = 0;
+ wl_chan_switch_t csa_arg;
+ struct cfg80211_chan_def *chandef = ¶ms->chandef;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
- spin_lock_irqsave(&cfg->wps_sync, flags);
- cur_state = cfg->wps_session[inst].state;
+ dev = ndev_to_wlc_ndev(dev, cfg);
+ chan = ieee80211_frequency_to_channel(chandef->chan->center_freq);
+ band = chandef->chan->band;
- if (cur_state == WPS_STATE_STARTED) {
- /* Move to M8 sent state */
- cfg->wps_session[inst].state = WPS_STATE_M8_SENT;
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
+ WL_ERR(("netdev_ifidx(%d), target channel(%d) target bandwidth(%d),"
+ " mode(%d), count(%d)\n", dev->ifindex, chan, chandef->width,
+ params->block_tx, params->count));
+
+ if (wl_get_mode_by_netdev(cfg, dev) != WL_MODE_AP) {
+ WL_ERR(("Channel Switch doesn't support on "
+ "the non-SoftAP mode\n"));
+ return -EINVAL;
+ }
+
+ /* Check if STA is trying to associate with an AP */
+ if (wl_get_drv_status(cfg, CONNECTING, primary_dev)) {
+ WL_ERR(("Connecting is in progress\n"));
+ return BCME_BUSY;
+ }
+
+ if (chan == cfg->ap_oper_channel) {
+ WL_ERR(("Channel %d is same as current operating channel,"
+ " so skip\n", chan));
return BCME_OK;
+ }
+
+ if (band == IEEE80211_BAND_5GHZ) {
+#ifdef APSTA_RESTRICTED_CHANNEL
+ if (chan != DEFAULT_5G_SOFTAP_CHANNEL) {
+ WL_ERR(("Invalid 5G Channel, chan=%d\n", chan));
+ return -EINVAL;
+ }
+#endif /* APSTA_RESTRICTED_CHANNEL */
+ err = wl_get_bandwidth_cap(primary_dev, band, &bw);
+ if (err < 0) {
+ WL_ERR(("Failed to get bandwidth information,"
+ " err=%d\n", err));
+ return err;
+ }
+ } else if (band == IEEE80211_BAND_2GHZ) {
+#ifdef APSTA_RESTRICTED_CHANNEL
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ u32 *sta_chan = (u32 *)wl_read_prof(cfg,
+ primary_dev, WL_PROF_CHAN);
+
+ /* In 2GHz STA/SoftAP concurrent mode, the operating channel
+	 * of STA and SoftAP should be configured to the same 2GHz
+ * channel. Otherwise, it is an invalid configuration.
+ */
+ if (DHD_OPMODE_STA_SOFTAP_CONCURR(dhdp) &&
+ wl_get_drv_status(cfg, CONNECTED, primary_dev) &&
+ sta_chan && (*sta_chan != chan)) {
+ WL_ERR(("Invalid 2G Channel in case of STA/SoftAP"
+ " concurrent mode, sta_chan=%d, chan=%d\n",
+ *sta_chan, chan));
+ return -EINVAL;
+ }
+#endif /* APSTA_RESTRICTED_CHANNEL */
+ bw = WL_CHANSPEC_BW_20;
} else {
- /* 802.1x cases */
- WL_DBG(("[%s][WPS] Not valid state to send M8\n", ndev->name));
+ WL_ERR(("invalid band (%d)\n", band));
+ return -EINVAL;
}
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
- return ret;
+
+ chspec = wf_channel2chspec(chan, bw);
+ if (!wf_chspec_valid(chspec)) {
+ WL_ERR(("Invalid chanspec 0x%x\n", chspec));
+ return -EINVAL;
+ }
+
+ /* Send CSA to associated STAs */
+ memset(&csa_arg, 0, sizeof(wl_chan_switch_t));
+ csa_arg.mode = params->block_tx;
+ csa_arg.count = params->count;
+ csa_arg.chspec = chspec;
+ csa_arg.frame_type = CSA_BROADCAST_ACTION_FRAME;
+ csa_arg.reg = 0;
+
+ err = wldev_iovar_setbuf(dev, "csa", &csa_arg, sizeof(wl_chan_switch_t),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (err < 0) {
+ WL_ERR(("Failed to switch channel, err=%d\n", err));
+ }
+
+ return err;
}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) */
-static s32
-wl_wps_session_update(struct net_device *ndev, u16 state, const u8 *peer_mac)
+#ifdef WL_WIPSEVT
+int
+wl_cfg80211_wips_event_ext(wl_wips_event_info_t *wips_event)
{
- s32 inst;
- u16 mode;
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- s32 ret = BCME_ERROR;
- unsigned long flags;
+ s32 err = BCME_OK;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ struct sk_buff *skb;
+ gfp_t kflags;
+ struct bcm_cfg80211 *cfg;
+ struct net_device *ndev;
+ struct wiphy *wiphy;
- spin_lock_irqsave(&cfg->wps_sync, flags);
- /* Get current instance for the given ndev */
- inst = wl_get_wps_inst_match(cfg, ndev);
- if (inst == BCME_ERROR) {
- /* No active WPS session. Do Nothing. */
- WL_DBG(("[%s][WPS] No matching instance.\n", ndev->name));
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
- return BCME_OK;
+ cfg = wl_cfg80211_get_bcmcfg();
+ if (!cfg || !cfg->wdev) {
+ WL_ERR(("WIPS evt invalid arg\n"));
+ return err;
}
- mode = cfg->wps_session[inst].mode;
- spin_unlock_irqrestore(&cfg->wps_sync, flags);
- WL_DBG(("[%s][WPS] state:%d mode:%d Peer: " MACDBG "\n",
- ndev->name, state, mode, MAC2STRDBG(peer_mac)));
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+ wiphy = bcmcfg_to_wiphy(cfg);
- switch (state) {
- case WPS_STATE_M8_RECVD:
- {
- /* Occasionally, due to race condition between ctrl
- * and data path, deauth ind is recvd before EAP-FAIL.
- * Ignore deauth ind before EAP-FAIL
- * So move to REAUTH WAIT on receiving M8 on GC and
- * ignore deauth ind before EAP-FAIL till 'x' timeout.
- * Kickoff a timer to monitor reauth status.
- */
- if (mode == WL_MODE_BSS) {
- ret = wl_wps_handle_reauth(ndev, inst, peer_mac);
- } else {
- /* Nothing to be done for AP/GO mode */
- ret = BCME_OK;
- }
- break;
- }
- case WPS_STATE_M8_SENT:
- {
- /* Mantain the M8 sent state to verify
- * EAP-FAIL sent is valid
- */
- if (mode == WL_MODE_AP) {
- ret = wl_wps_handle_m8_sent(ndev, inst, peer_mac);
- } else {
- /* Nothing to be done for STA/GC mode */
- ret = BCME_OK;
- }
- break;
- }
- case WPS_STATE_EAP_FAIL:
- {
- /* Move to REAUTH WAIT following EAP-FAIL TX on GO/AP.
- * Kickoff a timer to monitor reauth status
- */
- if (mode == WL_MODE_AP) {
- ret = wl_wps_handle_reauth(ndev, inst, peer_mac);
- } else {
- /* Nothing to be done for STA/GC mode */
- ret = BCME_OK;
- }
- break;
- }
- case WPS_STATE_LINKDOWN:
- {
- if (mode == WL_MODE_BSS) {
- ret = wl_wps_handle_sta_linkdown(ndev, inst);
- } else if (mode == WL_MODE_AP) {
- /* Take action only for matching peer mac */
- if (!memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
- ret = wl_wps_handle_peersta_linkdown(ndev, inst, peer_mac);
- }
- }
- break;
- }
- case WPS_STATE_LINKUP:
- {
- if (mode == WL_MODE_BSS) {
- wl_wps_handle_sta_linkup(ndev, inst);
- } else if (mode == WL_MODE_AP) {
- /* Take action only for matching peer mac */
- if (!memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
- wl_wps_handle_peersta_linkup(ndev, inst, peer_mac);
- }
- }
- break;
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ skb = CFG80211_VENDOR_EVENT_ALLOC(wiphy, ndev_to_wdev(ndev),
+ BRCM_VENDOR_WIPS_EVENT_BUF_LEN, BRCM_VENDOR_EVENT_WIPS, kflags);
+
+ if (!skb) {
+ WL_ERR(("skb alloc failed"));
+ return BCME_NOMEM;
+ }
+
+ err = nla_put_u16(skb, WIPS_ATTR_DEAUTH_CNT, wips_event->misdeauth);
+ if (unlikely(err)) {
+ WL_ERR(("nla_put_u16 WIPS_ATTR_DEAUTH_CNT failed\n"));
+ goto fail;
+ }
+ err = nla_put(skb, WIPS_ATTR_DEAUTH_BSSID, ETHER_ADDR_LEN, &wips_event->bssid);
+ if (unlikely(err)) {
+ WL_ERR(("nla_put WIPS_ATTR_DEAUTH_BSSID failed\n"));
+ goto fail;
+ }
+ err = nla_put_s16(skb, WIPS_ATTR_CURRENT_RSSI, wips_event->current_RSSI);
+ if (unlikely(err)) {
+ WL_ERR(("nla_put_u16 WIPS_ATTR_CURRENT_RSSI failed\n"));
+ goto fail;
+ }
+ err = nla_put_s16(skb, WIPS_ATTR_DEAUTH_RSSI, wips_event->deauth_RSSI);
+ if (unlikely(err)) {
+ WL_ERR(("nla_put_u16 WIPS_ATTR_DEAUTH_RSSI failed\n"));
+ goto fail;
+ }
+ cfg80211_vendor_event(skb, kflags);
+
+ return err;
+
+fail:
+ if (skb) {
+ nlmsg_free(skb);
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
+ return err;
+}
+
+int
+wl_cfg80211_wips_event(uint16 misdeauth, char* bssid)
+{
+ s32 err = BCME_OK;
+ wl_wips_event_info_t wips_event;
+
+ wips_event.misdeauth = misdeauth;
+ memcpy(&wips_event.bssid, bssid, ETHER_ADDR_LEN);
+ wips_event.current_RSSI = 0;
+ wips_event.deauth_RSSI = 0;
+
+ err = wl_cfg80211_wips_event_ext(&wips_event);
+ return err;
+}
+#endif /* WL_WIPSEVT */
+
+bool wl_cfg80211_check_in_progress(struct net_device *dev)
+{
+ /* TODO: Check for cfg status like scan in progress,
+ * four way handshake, etc before entering Deep Sleep.
+ */
+ return TRUE;
+}
+
+#ifdef SUPPORT_AP_SUSPEND
+void
+wl_set_ap_suspend_error_handler(struct net_device *ndev, bool suspend)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+ if (wl_get_drv_status(cfg, READY, ndev)) {
+		/* If dongle is down due to previous hang or other conditions, sending
+ * one more hang notification is not needed.
+ */
+ if (dhd_query_bus_erros(dhdp)) {
+ return;
}
- case WPS_STATE_DISCONNECT_CLIENT:
- {
- /* Disconnect STA/GC command from user space */
- if (mode == WL_MODE_AP) {
- ret = wl_wps_handle_disconnect_client(ndev, inst, peer_mac);
- } else {
- WL_ERR(("[WPS] Unsupported mode %d\n", mode));
- }
- break;
+ dhdp->iface_op_failed = TRUE;
+#if defined(DHD_FW_COREDUMP)
+ if (dhdp->memdump_enabled) {
+ dhdp->memdump_type = DUMP_TYPE_IFACE_OP_FAILURE;
+ dhd_bus_mem_dump(dhdp);
}
- case WPS_STATE_DISCONNECT:
- {
- /* Disconnect command on STA/GC interface */
- if (mode == WL_MODE_BSS) {
- ret = wl_wps_handle_disconnect(ndev, inst, peer_mac);
- }
- break;
+#endif /* DHD_FW_COREDUMP */
+ WL_ERR(("Notify hang event to upper layer \n"));
+ dhdp->hang_reason = suspend ?
+ HANG_REASON_BSS_DOWN_FAILURE : HANG_REASON_BSS_UP_FAILURE;
+ net_os_send_hang_message(ndev);
+ }
+}
+
+#define MAX_AP_RESUME_TIME 5000
+int
+wl_set_ap_suspend(struct net_device *dev, bool suspend, char *ifname)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhdp;
+ struct net_device *ndev = NULL;
+ int ret = BCME_OK;
+ bool is_bssup = FALSE;
+ int bssidx;
+ unsigned long start_j;
+ int time_to_sleep = MAX_AP_RESUME_TIME;
+
+ dhdp = (dhd_pub_t *)(cfg->pub);
+
+ if (!dhdp) {
+ return BCME_NOTUP;
+ }
+
+ if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ WL_ERR(("Not Hostapd mode\n"));
+ return BCME_NOTAP;
+ }
+
+ ndev = wl_get_ap_netdev(cfg, ifname);
+
+ if (ndev == NULL) {
+ WL_ERR(("No softAP interface named %s\n", ifname));
+ return BCME_NOTAP;
+ }
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", ndev->ieee80211_ptr));
+ return BCME_NOTFOUND;
+ }
+
+ is_bssup = wl_cfg80211_bss_isup(ndev, bssidx);
+ if (is_bssup && suspend) {
+ wl_clr_drv_status(cfg, AP_CREATED, ndev);
+ wl_clr_drv_status(cfg, CONNECTED, ndev);
+
+ if ((ret = wl_cfg80211_bss_up(cfg, ndev, bssidx, 0)) < 0) {
+ WL_ERR(("AP suspend error %d, suspend %d\n", ret, suspend));
+ ret = BCME_NOTDOWN;
+ goto exit;
}
- case WPS_STATE_CONNECT_FAIL:
- {
- if (mode == WL_MODE_BSS) {
- ret = wl_wps_handle_connect_fail(ndev, inst);
- } else {
- WL_ERR(("[WPS] Unsupported mode %d\n", mode));
- }
- break;
+ } else if (!is_bssup && !suspend) {
+ /* Abort scan before starting AP again */
+ wl_cfg80211_scan_abort(cfg);
+
+ if ((ret = wl_cfg80211_bss_up(cfg, ndev, bssidx, 1)) < 0) {
+ WL_ERR(("AP resume error %d, suspend %d\n", ret, suspend));
+ ret = BCME_NOTUP;
+ goto exit;
}
- case WPS_STATE_AUTHORIZE:
- {
- if (mode == WL_MODE_AP) {
- /* Take action only for matching peer mac */
- if (!memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
- wl_wps_handle_authorize(ndev, inst, peer_mac);
- } else {
- WL_INFORM_MEM(("[WPS] Authorize Request for wrong peer\n"));
+
+ while (TRUE) {
+ start_j = get_jiffies_64();
+ /* Wait for Linkup event to mark successful AP bring up */
+ ret = wait_event_interruptible_timeout(cfg->netif_change_event,
+ wl_get_drv_status(cfg, AP_CREATED, ndev),
+ msecs_to_jiffies(time_to_sleep));
+ if (ret == -ERESTARTSYS) {
+ WL_ERR(("waitqueue was interrupted by a signal\n"));
+ time_to_sleep -= jiffies_to_msecs(get_jiffies_64() - start_j);
+ if (time_to_sleep <= 0) {
+ WL_ERR(("time to sleep hits 0\n"));
+ ret = BCME_NOTUP;
+ goto exit;
}
+ } else if (ret == 0 || !wl_get_drv_status(cfg, AP_CREATED, ndev)) {
+ WL_ERR(("AP resume failed!\n"));
+ ret = BCME_NOTUP;
+ goto exit;
+ } else {
+ wl_set_drv_status(cfg, CONNECTED, ndev);
+ ret = BCME_OK;
+ break;
}
- break;
}
-
- default:
- WL_ERR(("[WPS] Unsupported state:%d mode:%d\n", state, mode));
- ret = BCME_ERROR;
+ } else {
+ /* bssup + resume or bssdown + suspend,
+ * So, returns OK
+ */
+ ret = BCME_OK;
}
+exit:
+ if (ret != BCME_OK)
+ wl_set_ap_suspend_error_handler(bcmcfg_to_prmry_ndev(cfg), suspend);
return ret;
}
+#endif /* SUPPORT_AP_SUSPEND */
-#define EAP_EXP_ATTRIB_DATA_OFFSET 14
-void
-wl_handle_wps_states(struct net_device *ndev, u8 *pkt, u16 len, bool direction)
+#ifdef SUPPORT_SOFTAP_ELNA_BYPASS
+int wl_set_softap_elna_bypass(struct net_device *dev, char *ifname, int enable)
{
- eapol_header_t *eapol_hdr;
- bool tx_packet = direction;
- u16 eapol_type;
- u16 mode;
- u8 *peer_mac;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct net_device *ifdev = NULL;
+ char iobuf[WLC_IOCTL_SMLEN];
+ int err = BCME_OK;
+ int iftype = 0;
- if (!ndev || !pkt) {
- WL_ERR(("[WPS] Invalid arg\n"));
- return;
+ memset(iobuf, 0, WLC_IOCTL_SMLEN);
+
+ /* Check the interface type */
+ ifdev = wl_get_netdev_by_name(cfg, ifname);
+ if (ifdev == NULL) {
+ WL_ERR(("%s: Could not find net_device for ifname:%s\n", __FUNCTION__, ifname));
+ err = BCME_BADARG;
+ goto fail;
}
- if (len < (ETHER_HDR_LEN + EAPOL_HDR_LEN)) {
- WL_ERR(("[WPS] Invalid len\n"));
- return;
+ iftype = ifdev->ieee80211_ptr->iftype;
+ if (iftype == NL80211_IFTYPE_AP) {
+ err = wldev_iovar_setint(ifdev, "softap_elnabypass", enable);
+ if (unlikely(err)) {
+ WL_ERR(("%s: Failed to set softap_elnabypass, err=%d\n",
+ __FUNCTION__, err));
+ }
+ } else {
+ WL_ERR(("%s: softap_elnabypass should control in SoftAP mode only\n",
+ __FUNCTION__));
+ err = BCME_BADARG;
}
+fail:
+ return err;
+}
+int wl_get_softap_elna_bypass(struct net_device *dev, char *ifname, void *param)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ int *enable = (int*)param;
+ struct net_device *ifdev = NULL;
+ char iobuf[WLC_IOCTL_SMLEN];
+ int err = BCME_OK;
+ int iftype = 0;
- eapol_hdr = (eapol_header_t *)pkt;
- eapol_type = eapol_hdr->type;
+ memset(iobuf, 0, WLC_IOCTL_SMLEN);
- peer_mac = tx_packet ? eapol_hdr->eth.ether_dhost :
- eapol_hdr->eth.ether_shost;
- /*
- * The implementation assumes only one WPS session would be active
- * per interface at a time. Even for hostap, the wps_pin session
- * is limited to one enrollee/client at a time. A session is marked
- * started on WSC_START and gets cleared from below contexts
- * a) Deauth/link down before reaching EAP-FAIL state. (Fail case)
- * b) Link up following EAP-FAIL. (success case)
- * c) Link up timeout after EAP-FAIL. (Fail case)
- */
+ /* Check the interface type */
+ ifdev = wl_get_netdev_by_name(cfg, ifname);
+ if (ifdev == NULL) {
+ WL_ERR(("%s: Could not find net_device for ifname:%s\n", __FUNCTION__, ifname));
+ err = BCME_BADARG;
+ goto fail;
+ }
- if (eapol_type == EAP_PACKET) {
- wl_eap_header_t *eap;
+ iftype = ifdev->ieee80211_ptr->iftype;
+ if (iftype == NL80211_IFTYPE_AP) {
+ err = wldev_iovar_getint(ifdev, "softap_elnabypass", enable);
+ if (unlikely(err)) {
+ WL_ERR(("%s: Failed to get softap_elnabypass, err=%d\n",
+ __FUNCTION__, err));
+ }
+ } else {
+ WL_ERR(("%s: softap_elnabypass should control in SoftAP mode only\n",
+ __FUNCTION__));
+ err = BCME_BADARG;
+ }
+fail:
+ return err;
- if (len > sizeof(*eap)) {
- eap = (wl_eap_header_t *)(pkt + ETHER_HDR_LEN + EAPOL_HDR_LEN);
- if (eap->type == EAP_EXPANDED_TYPE) {
- wl_eap_exp_t *exp = (wl_eap_exp_t *)eap->data;
- if (eap->length > EAP_EXP_HDR_MIN_LENGTH) {
- /* opcode is at fixed offset */
- u8 opcode = exp->opcode;
- u16 eap_len = ntoh16(eap->length);
+}
+#endif /* SUPPORT_SOFTAP_ELNA_BYPASS */
- WL_DBG(("[%s][WPS] EAP EXPANDED packet. opcode:%x len:%d\n",
- ndev->name, opcode, eap_len));
- if (opcode == EAP_WSC_MSG) {
- const u8 *msg;
- const u8* parse_buf = exp->data;
- /* Check if recvd pkt is fragmented */
- if ((!tx_packet) &&
- (exp->flags &
- EAP_EXP_FLAGS_FRAGMENTED_DATA)) {
- if ((eap_len - EAP_EXP_ATTRIB_DATA_OFFSET)
- > 2) {
- parse_buf +=
- EAP_EXP_FRAGMENT_LEN_OFFSET;
- eap_len -=
- EAP_EXP_FRAGMENT_LEN_OFFSET;
- WL_DBG(("Rcvd EAP"
- " fragmented pkt\n"));
- } else {
- /* If recvd pkt is fragmented
- * and does not have
- * length field drop the packet.
- */
- return;
- }
- }
+#ifdef SUPPORT_AP_BWCTRL
+#define OPER_MODE_ENABLE (1 << 8)
+static int op2bw[] = {20, 40, 80, 160};
- msg = wl_find_attribute(parse_buf,
- (eap_len - EAP_EXP_ATTRIB_DATA_OFFSET),
- EAP_ATTRIB_MSGTYPE);
- if (unlikely(!msg)) {
- WL_ERR(("[WPS] ATTRIB MSG not found!\n"));
- } else if ((*msg == EAP_WSC_MSG_M8) &&
- !tx_packet) {
- WL_INFORM_MEM(("[%s][WPS] M8\n",
- ndev->name));
- wl_wps_session_update(ndev,
- WPS_STATE_M8_RECVD, peer_mac);
- } else if ((*msg == EAP_WSC_MSG_M8) &&
- tx_packet) {
- WL_INFORM_MEM(("[%s][WPS] M8 Sent\n",
- ndev->name));
- wl_wps_session_update(ndev,
- WPS_STATE_M8_SENT, peer_mac);
- } else {
- WL_DBG(("[%s][WPS] EAP WSC MSG: 0x%X\n",
- ndev->name, *msg));
- }
- } else if (opcode == EAP_WSC_START) {
- /* WSC session started. WSC_START - Tx from GO/AP.
- * Session will be deleted on successful link up or
- * on failure (deauth context)
- */
- mode = tx_packet ? WL_MODE_AP : WL_MODE_BSS;
- wl_wps_session_add(ndev, mode, peer_mac);
- WL_INFORM_MEM(("[%s][WPS] WSC_START Mode:%d\n",
- ndev->name, mode));
- } else if (opcode == EAP_WSC_DONE) {
- /* WSC session done. TX on STA/GC. RX on GO/AP
- * On devices where config file save fails, it may
- * return WPS_NAK with config_error:0. But the
- * connection would still proceed. Hence don't let
- * state machine depend on WSC DONE.
- */
- WL_INFORM_MEM(("[%s][WPS] WSC_DONE\n", ndev->name));
- }
- }
- }
+static int
+wl_get_ap_he_mode(struct net_device *ndev, struct bcm_cfg80211 *cfg, bool *he)
+{
+ bcm_xtlv_t read_he_xtlv;
+ int ret = 0;
+ u8 he_enab = 0;
+ u32 he_feature = 0;
+ *he = FALSE;
- if (eap->code == EAP_CODE_FAILURE) {
- /* EAP_FAIL */
- WL_INFORM_MEM(("[%s][WPS] EAP_FAIL\n", ndev->name));
- wl_wps_session_update(ndev,
- WPS_STATE_EAP_FAIL, peer_mac);
- }
+ /* Check he enab first */
+ read_he_xtlv.id = WL_HE_CMD_ENAB;
+ read_he_xtlv.len = 0;
+
+ ret = wldev_iovar_getbuf(ndev, "he", &read_he_xtlv, sizeof(read_he_xtlv),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, NULL);
+ if (ret < 0) {
+ if (ret == BCME_UNSUPPORTED) {
+ /* HE not supported */
+ ret = BCME_OK;
+ } else {
+ WL_ERR(("HE ENAB get failed. ret=%d\n", ret));
}
+ goto exit;
+ } else {
+ he_enab = *(u8*)cfg->ioctl_buf;
+ }
+
+ if (!he_enab) {
+ goto exit;
+ }
+
+ /* Then check BIT3 of he features */
+ read_he_xtlv.id = WL_HE_CMD_FEATURES;
+ read_he_xtlv.len = 0;
+
+ ret = wldev_iovar_getbuf(ndev, "he", &read_he_xtlv, sizeof(read_he_xtlv),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, NULL);
+ if (ret < 0) {
+ WL_ERR(("HE FEATURE get failed. error=%d\n", ret));
+ goto exit;
+ } else {
+ he_feature = *(int*)cfg->ioctl_buf;
+ he_feature = dtoh32(he_feature);
+ }
+
+ if (he_feature & WL_HE_FEATURES_HE_AP) {
+ WL_DBG(("HE is enabled in AP\n"));
+ *he = TRUE;
}
+exit:
+ return ret;
}
-#endif /* WL_WPS_SYNC */
-const u8 *
-wl_find_attribute(const u8 *buf, u16 len, u16 element_id)
+static void
+wl_update_apchan_bwcap(struct bcm_cfg80211 *cfg, struct net_device *ndev, chanspec_t chanspec)
{
- const u8 *attrib;
- u16 attrib_id;
- u16 attrib_len;
+ struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+ struct wireless_dev *wdev = ndev_to_wdev(dev);
+ struct wiphy *wiphy = wdev->wiphy;
+ int ret = BCME_OK;
+ u32 bw_cap;
+ u32 ctl_chan;
+ chanspec_t chanbw = WL_CHANSPEC_BW_20;
- if (!buf) {
- WL_ERR(("buf null\n"));
- return NULL;
+ /* Update channel in profile */
+ ctl_chan = wf_chspec_ctlchan(chanspec);
+ wl_update_prof(cfg, ndev, NULL, &ctl_chan, WL_PROF_CHAN);
+
+ /* BW cap is only updated in 5GHz */
+ if (ctl_chan <= CH_MAX_2G_CHANNEL)
+ return;
+
+ /* Get WL BW CAP */
+ ret = wl_get_bandwidth_cap(bcmcfg_to_prmry_ndev(cfg),
+ IEEE80211_BAND_5GHZ, &bw_cap);
+ if (ret < 0) {
+ WL_ERR(("get bw_cap failed = %d\n", ret));
+ goto exit;
}
- attrib = buf;
- while (len >= 4) {
- /* attribute id */
- attrib_id = *attrib++ << 8;
- attrib_id |= *attrib++;
- len -= 2;
+ chanbw = CHSPEC_BW(channel_to_chanspec(wiphy,
+ ndev, wf_chspec_ctlchan(chanspec), bw_cap));
- /* 2-byte little endian */
- attrib_len = *attrib++ << 8;
- attrib_len |= *attrib++;
+exit:
+ cfg->bw_cap_5g = bw2cap[chanbw >> WL_CHANSPEC_BW_SHIFT];
+ WL_INFORM_MEM(("supported bw cap is:0x%x\n", cfg->bw_cap_5g));
- len -= 2;
- if (attrib_id == element_id) {
- /* This will point to start of subelement attrib after
- * attribute id & len
- */
- return attrib;
- }
- if (len > attrib_len) {
- len -= attrib_len; /* for the remaining subelt fields */
- WL_DBG(("Attribue:%4x attrib_len:%d rem_len:%d\n",
- attrib_id, attrib_len, len));
+}
- /* Go to next subelement */
- attrib += attrib_len;
- } else {
- WL_ERR(("Incorrect Attribue:%4x attrib_len:%d\n",
- attrib_id, attrib_len));
- return NULL;
- }
- }
- return NULL;
+int
+wl_rxchain_to_opmode_nss(int rxchain)
+{
+ /*
+ * Nss 1 -> 0, Nss 2 -> 1
+ * This is from operating mode field
+ * in 8.4.1.50 of 802.11ac-2013
+ */
+ /* TODO : Nss 3 ? */
+ if (rxchain == 3)
+ return (1 << 4);
+ else
+ return 0;
}
-static const u8 *
-wl_retrieve_wps_attribute(const u8 *buf, u16 element_id)
+int
+wl_update_opmode(struct net_device *ndev, u32 bw)
{
- const wl_wps_ie_t *ie = NULL;
- u16 len = 0;
- const u8 *attrib;
+ int ret = BCME_OK;
+ int oper_mode;
+ int rxchain;
- if (!buf) {
- WL_ERR(("WPS IE not present"));
- return 0;
+ ret = wldev_iovar_getint(ndev, "rxchain", (s32 *)&rxchain);
+ if (ret < 0) {
+ WL_ERR(("get rxchain failed = %d\n", ret));
+ goto exit;
}
- ie = (const wl_wps_ie_t*) buf;
- len = ie->len;
+ oper_mode = bw;
+ oper_mode |= wl_rxchain_to_opmode_nss(rxchain);
+ /* Enable flag */
+ oper_mode |= OPER_MODE_ENABLE;
- /* Point subel to the P2P IE's subelt field.
- * Subtract the preceding fields (id, len, OUI, oui_type) from the length.
- */
- attrib = ie->attrib;
- len -= 4; /* exclude OUI + OUI_TYPE */
+ ret = wldev_iovar_setint(ndev, "oper_mode", oper_mode);
+ if (ret < 0) {
+ WL_ERR(("set oper_mode failed = %d\n", ret));
+ goto exit;
+ }
- /* Search for attrib */
- return wl_find_attribute(attrib, len, element_id);
+exit:
+ return ret;
}
-#define WPS_ATTR_REQ_TYPE 0x103a
-#define WPS_REQ_TYPE_ENROLLEE 0x01
-bool
-wl_is_wps_enrollee_active(struct net_device *ndev, const u8 *ie_ptr, u16 len)
+int
+wl_set_ap_bw(struct net_device *dev, u32 bw, char *ifname)
{
- const u8 *ie;
- const u8 *attrib;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhdp;
+ struct net_device *ndev = NULL;
+ int ret = BCME_OK;
+ u32 *channel;
+ bool he;
- if ((ie = (const u8 *)wl_cfgp2p_find_wpsie(ie_ptr, len)) == NULL) {
- WL_DBG(("WPS IE not present. Do nothing.\n"));
- return false;
+ dhdp = (dhd_pub_t *)(cfg->pub);
+
+ if (!dhdp) {
+ return BCME_NOTUP;
}
- if ((attrib = wl_retrieve_wps_attribute(ie, WPS_ATTR_REQ_TYPE)) == NULL) {
- WL_DBG(("WPS_ATTR_REQ_TYPE not found!\n"));
- return false;
+ if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ WL_ERR(("Not Hostapd mode\n"));
+ return BCME_NOTAP;
}
- if (*attrib == WPS_REQ_TYPE_ENROLLEE) {
- WL_INFORM_MEM(("WPS Enrolle Active\n"));
- return true;
- } else {
- WL_DBG(("WPS_REQ_TYPE:%d\n", *attrib));
+ ndev = wl_get_ap_netdev(cfg, ifname);
+
+ if (ndev == NULL) {
+ WL_ERR(("No softAP interface named %s\n", ifname));
+ return BCME_NOTAP;
}
- return false;
-}
+ if (bw > DOT11_OPER_MODE_160MHZ) {
+ WL_ERR(("BW is too big %d\n", bw));
+ return BCME_BADARG;
+ }
-#ifdef USE_WFA_CERT_CONF
-extern int g_frameburst;
-#endif /* USE_WFA_CERT_CONF */
+ channel = (u32 *)wl_read_prof(cfg, ndev, WL_PROF_CHAN);
+ if (*channel <= CH_MAX_2G_CHANNEL) {
+ WL_ERR(("current channel is %d, not supported\n", *channel));
+ ret = BCME_BADCHAN;
+ goto exit;
+ }
+
+ if ((DHD_OPMODE_STA_SOFTAP_CONCURR(dhdp) &&
+ wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg))) ||
+ cfg->nan_enable) {
+ WL_ERR(("BW control in concurrent mode is not supported\n"));
+ return BCME_BUSY;
+ }
+
+ /* When SCAN is on going either in STA or in AP, return BUSY */
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ WL_ERR(("STA is SCANNING, not support BW control\n"));
+ return BCME_BUSY;
+ }
+
+ /* When SCANABORT is on going either in STA or in AP, return BUSY */
+ if (wl_get_drv_status_all(cfg, SCAN_ABORTING)) {
+ WL_ERR(("STA is SCAN_ABORTING, not support BW control\n"));
+ return BCME_BUSY;
+ }
+
+ /* When CONNECTION is on going in STA, return BUSY */
+ if (wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg))) {
+ WL_ERR(("STA is CONNECTING, not support BW control\n"));
+ return BCME_BUSY;
+ }
+
+ /* BW control in AX mode needs more verification */
+ ret = wl_get_ap_he_mode(ndev, cfg, &he);
+ if (ret == BCME_OK && he) {
+ WL_ERR(("BW control in HE mode is not supported\n"));
+ return BCME_UNSUPPORTED;
+ }
+ if (ret < 0) {
+ WL_ERR(("Check AX mode is failed\n"));
+ goto exit;
+ }
+
+ if ((!WL_BW_CAP_160MHZ(cfg->bw_cap_5g) && (bw == DOT11_OPER_MODE_160MHZ)) ||
+ (!WL_BW_CAP_80MHZ(cfg->bw_cap_5g) && (bw >= DOT11_OPER_MODE_80MHZ)) ||
+ (!WL_BW_CAP_40MHZ(cfg->bw_cap_5g) && (bw >= DOT11_OPER_MODE_40MHZ)) ||
+ (!WL_BW_CAP_20MHZ(cfg->bw_cap_5g) && (bw >= DOT11_OPER_MODE_20MHZ))) {
+ WL_ERR(("bw_cap %x does not support bw = %d\n", cfg->bw_cap_5g, bw));
+ ret = BCME_BADARG;
+ goto exit;
+ }
+
+ WL_DBG(("Updating AP BW to %d\n", op2bw[bw]));
+
+ ret = wl_update_opmode(ndev, bw);
+ if (ret < 0) {
+ WL_ERR(("opmode set failed = %d\n", ret));
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
int
-wl_cfg80211_set_frameburst(struct bcm_cfg80211 *cfg, bool enable)
+wl_get_ap_bw(struct net_device *dev, char* command, char *ifname, int total_len)
{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhdp;
+ struct net_device *ndev = NULL;
int ret = BCME_OK;
- int val = enable ? 1 : 0;
+ u32 chanspec = 0;
+ u32 bw = DOT11_OPER_MODE_20MHZ;
+ int bytes_written = 0;
-#ifdef USE_WFA_CERT_CONF
- if (!g_frameburst) {
- WL_DBG(("Skip setting frameburst\n"));
- return 0;
+ dhdp = (dhd_pub_t *)(cfg->pub);
+
+ if (!dhdp) {
+ return BCME_NOTUP;
}
-#endif /* USE_WFA_CERT_CONF */
- WL_DBG(("Set frameburst %d\n", val));
- ret = wldev_ioctl_set(bcmcfg_to_prmry_ndev(cfg), WLC_SET_FAKEFRAG, &val, sizeof(val));
+ if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ WL_ERR(("Not Hostapd mode\n"));
+ return BCME_NOTAP;
+ }
+
+ ndev = wl_get_ap_netdev(cfg, ifname);
+
+ if (ndev == NULL) {
+ WL_ERR(("No softAP interface named %s\n", ifname));
+ return BCME_NOTAP;
+ }
+
+ ret = wldev_iovar_getint(ndev, "chanspec", (s32 *)&chanspec);
if (ret < 0) {
- WL_ERR(("Failed set frameburst, ret=%d\n", ret));
+ WL_ERR(("get chanspec from AP failed = %d\n", ret));
+ goto exit;
+ }
+
+ chanspec = wl_chspec_driver_to_host(chanspec);
+
+ if (CHSPEC_IS20(chanspec)) {
+ bw = DOT11_OPER_MODE_20MHZ;
+ } else if (CHSPEC_IS40(chanspec)) {
+ bw = DOT11_OPER_MODE_40MHZ;
+ } else if (CHSPEC_IS80(chanspec)) {
+ bw = DOT11_OPER_MODE_80MHZ;
+ } else if (CHSPEC_IS_BW_160_WIDE(chanspec)) {
+ bw = DOT11_OPER_MODE_160MHZ;
} else {
- WL_INFORM_MEM(("frameburst is %s\n", enable ? "enabled" : "disabled"));
+ WL_ERR(("chanspec error %x\n", chanspec));
+ ret = BCME_BADCHAN;
+ goto exit;
}
+ bytes_written += snprintf(command + bytes_written, total_len,
+ "bw=%d", bw);
+ ret = bytes_written;
+exit:
return ret;
}
-#ifdef WL_MBO
-static s32
-wl_mbo_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data)
+static void
+wl_restore_ap_bw(struct bcm_cfg80211 *cfg)
{
- s32 err = 0;
- wl_event_mbo_t *mbo_evt = (wl_event_mbo_t *)data;
- wl_event_mbo_cell_nw_switch_t *cell_sw_evt = NULL;
+ int ret = BCME_OK;
+ u32 bw;
+ bool he = FALSE;
+ struct net_info *iter, *next;
+ struct net_device *ndev = NULL;
+ u32 *channel;
- WL_INFORM(("MBO: Evt %u\n", mbo_evt->type));
+ if (!cfg) {
+ return;
+ }
- if (mbo_evt->version != WL_MBO_EVT_VER) {
- WL_ERR(("%s:version mismatch. rcvd %u expected %u\n",
- __FUNCTION__, mbo_evt->version, WL_MBO_EVT_VER));
- return -1;
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ for_each_ndev(cfg, iter, next) {
+ GCC_DIAGNOSTIC_POP();
+ if (iter->ndev) {
+ if (iter->ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
+ channel = (u32 *)wl_read_prof(cfg, iter->ndev, WL_PROF_CHAN);
+ if (*channel > CH_MAX_2G_CHANNEL) {
+ ndev = iter->ndev;
+ break;
+ }
+ }
+ }
}
- if (mbo_evt->type == WL_MBO_E_CELLULAR_NW_SWITCH) {
- cell_sw_evt = (wl_event_mbo_cell_nw_switch_t *)mbo_evt->data;
- BCM_REFERENCE(cell_sw_evt);
- SUPP_EVENT(("CTRL-EVENT-CELLULAR-SWITCH", "reason %d cur_assoc_time_left %u "
- "reassoc_delay %u\n", cell_sw_evt->reason,
- cell_sw_evt->assoc_time_remain, cell_sw_evt->reassoc_delay));
+
+ if (!ndev) {
+ return;
+ }
+
+ /* BW control in AX mode not allowed */
+ ret = wl_get_ap_he_mode(bcmcfg_to_prmry_ndev(cfg), cfg, &he);
+ if (ret == BCME_OK && he) {
+ return;
+ }
+ if (ret < 0) {
+ WL_ERR(("Check AX mode is failed\n"));
+ return;
+ }
+
+ if (WL_BW_CAP_160MHZ(cfg->bw_cap_5g)) {
+ bw = DOT11_OPER_MODE_160MHZ;
+ } else if (WL_BW_CAP_80MHZ(cfg->bw_cap_5g)) {
+ bw = DOT11_OPER_MODE_80MHZ;
+ } else if (WL_BW_CAP_40MHZ(cfg->bw_cap_5g)) {
+ bw = DOT11_OPER_MODE_40MHZ;
+ } else {
+ return;
+ }
+
+ WL_DBG(("Restoring AP BW to %d\n", op2bw[bw]));
+
+ ret = wl_update_opmode(ndev, bw);
+ if (ret < 0) {
+ WL_ERR(("bw restore failed = %d\n", ret));
+ return;
}
- return err;
}
-#endif /* WL_MBO */
+#endif /* SUPPORT_AP_BWCTRL */
s32
wl_cfg80211_autochannel(struct net_device *dev, char* command, int total_len)
} else if (cfg->autochannel == 2) {
bytes_written = snprintf(command, total_len, "2g=%d 5g=%d",
cfg->best_2g_ch, cfg->best_5g_ch);
- ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
+ WL_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
ret = bytes_written;
}
return ret;
}
-static int
+int
wl_cfg80211_check_in4way(struct bcm_cfg80211 *cfg,
struct net_device *dev, uint action, enum wl_ext_status status, void *context)
{
dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
struct wl_security *sec;
s32 bssidx = -1;
- int ret = 0, cur_eapol_status;
+ int ret = 0, cur_eapol_status, ifidx;
int max_wait_time, max_wait_cnt;
+ int suppressed = 0;
mutex_lock(&cfg->in4way_sync);
- WL_DBG(("status=%d, action=0x%x\n", status, action));
+ action = action & dhdp->conf->in4way;
+ WL_DBG(("status=%d, action=0x%x, in4way=0x%x\n", status, action, dhdp->conf->in4way));
cur_eapol_status = dhdp->conf->eapol_status;
switch (status) {
case WL_EXT_STATUS_SCAN:
+ wldev_ioctl(dev, WLC_GET_SCANSUPPRESS, &suppressed, sizeof(int), false);
+ if (suppressed) {
+ WL_ERR(("scan suppressed\n"));
+ ret = -EBUSY;
+ break;
+ }
if (action & NO_SCAN_IN4WAY) {
if (cfg->handshaking > 0 && cfg->handshaking <= 3) {
WL_ERR(("return -EBUSY cnt %d\n", cfg->handshaking));
}
break;
case WL_EXT_STATUS_DISCONNECTING:
- if (cur_eapol_status >= EAPOL_STATUS_WPA_START &&
- cur_eapol_status < EAPOL_STATUS_WPA_END) {
+ if (cur_eapol_status >= EAPOL_STATUS_4WAY_START &&
+ cur_eapol_status < EAPOL_STATUS_4WAY_DONE) {
WL_ERR(("WPA failed at %d\n", cur_eapol_status));
dhdp->conf->eapol_status = EAPOL_STATUS_NONE;
- } else if (cur_eapol_status >= EAPOL_STATUS_WPS_WSC_START &&
- cur_eapol_status < EAPOL_STATUS_WPS_DONE) {
+ } else if (cur_eapol_status >= EAPOL_STATUS_WSC_START &&
+ cur_eapol_status < EAPOL_STATUS_WSC_DONE) {
WL_ERR(("WPS failed at %d\n", cur_eapol_status));
dhdp->conf->eapol_status = EAPOL_STATUS_NONE;
}
if (action & (NO_SCAN_IN4WAY|NO_BTC_IN4WAY)) {
if (cfg->handshaking) {
- if (action & NO_BTC_IN4WAY) {
- WL_TRACE(("status=%d, enable btc_mode\n", status));
- wldev_iovar_setint(dev, "btc_mode", 1);
+ if ((action & NO_BTC_IN4WAY) && cfg->btc_mode) {
+ WL_TRACE(("status=%d, restore btc_mode %d\n",
+ status, cfg->btc_mode));
+ wldev_iovar_setint(dev, "btc_mode", cfg->btc_mode);
}
cfg->handshaking = 0;
}
sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
if ((sec->wpa_versions & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2)) &&
bssidx == 0) {
- dhdp->conf->eapol_status = EAPOL_STATUS_WPA_START;
+ dhdp->conf->eapol_status = EAPOL_STATUS_4WAY_START;
cfg->handshaking = 1;
if (action & NO_BTC_IN4WAY) {
- WL_TRACE(("status=%d, disable btc_mode\n", status));
- wldev_iovar_setint(dev, "btc_mode", 0);
+ ret = wldev_iovar_getint(dev, "btc_mode", &cfg->btc_mode);
+ if (!ret && cfg->btc_mode) {
+ WL_TRACE(("status=%d, disable current btc_mode %d\n",
+ status, cfg->btc_mode));
+ wldev_iovar_setint(dev, "btc_mode", 0);
+ }
}
}
}
}
break;
case WL_EXT_STATUS_CONNECTED:
- if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION) {
- dhd_conf_set_wme(cfg->pub, 0);
- dhd_conf_set_intiovar(cfg->pub, WLC_SET_VAR, "phy_oclscdenable",
- cfg->pub->conf->phy_oclscdenable, 0, FALSE);
+ ifidx = dhd_net2idx(dhdp->info, dev);
+ if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION && ifidx >= 0) {
+ dhd_conf_set_wme(cfg->pub, ifidx, 0);
}
else if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_CLIENT) {
dhd_conf_set_mchan_bw(cfg->pub, WL_P2P_IF_CLIENT, -1);
}
break;
case WL_EXT_STATUS_DISCONNECTED:
- if (cur_eapol_status >= EAPOL_STATUS_WPA_START &&
- cur_eapol_status < EAPOL_STATUS_WPA_END) {
+ if (cur_eapol_status >= EAPOL_STATUS_4WAY_START &&
+ cur_eapol_status < EAPOL_STATUS_4WAY_DONE) {
WL_ERR(("WPA failed at %d\n", cur_eapol_status));
dhdp->conf->eapol_status = EAPOL_STATUS_NONE;
- } else if (cur_eapol_status >= EAPOL_STATUS_WPS_WSC_START &&
- cur_eapol_status < EAPOL_STATUS_WPS_DONE) {
+ } else if (cur_eapol_status >= EAPOL_STATUS_WSC_START &&
+ cur_eapol_status < EAPOL_STATUS_WSC_DONE) {
WL_ERR(("WPS failed at %d\n", cur_eapol_status));
dhdp->conf->eapol_status = EAPOL_STATUS_NONE;
}
if (action & (NO_SCAN_IN4WAY|NO_BTC_IN4WAY)) {
if (cfg->handshaking) {
- if (action & NO_BTC_IN4WAY) {
- WL_TRACE(("status=%d, enable btc_mode\n", status));
- wldev_iovar_setint(dev, "btc_mode", 1);
+ if ((action & NO_BTC_IN4WAY) && cfg->btc_mode) {
+ WL_TRACE(("status=%d, restore btc_mode %d\n",
+ status, cfg->btc_mode));
+ wldev_iovar_setint(dev, "btc_mode", cfg->btc_mode);
}
cfg->handshaking = 0;
}
}
break;
case WL_EXT_STATUS_ADD_KEY:
- dhdp->conf->eapol_status = EAPOL_STATUS_WPA_END;
+ dhdp->conf->eapol_status = EAPOL_STATUS_4WAY_DONE;
if (action & (NO_SCAN_IN4WAY|NO_BTC_IN4WAY)) {
if (cfg->handshaking) {
- if (action & NO_BTC_IN4WAY) {
- WL_TRACE(("status=%d, enable btc_mode\n", status));
- wldev_iovar_setint(dev, "btc_mode", 1);
+ if ((action & NO_BTC_IN4WAY) && cfg->btc_mode) {
+ WL_TRACE(("status=%d, restore btc_mode %d\n",
+ status, cfg->btc_mode));
+ wldev_iovar_setint(dev, "btc_mode", cfg->btc_mode);
}
cfg->handshaking = 0;
}
}
break;
case WL_EXT_STATUS_AP_ENABLED:
- if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
- dhd_conf_set_wme(cfg->pub, 1);
+ ifidx = dhd_net2idx(dhdp->info, dev);
+ if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP && ifidx >= 0) {
+ dhd_conf_set_wme(cfg->pub, ifidx, 1);
}
else if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
dhd_conf_set_mchan_bw(cfg->pub, WL_P2P_IF_GO, -1);
(dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO)) {
u8* mac_addr = context;
		if (mac_addr && memcmp(&ether_bcast, mac_addr, ETHER_ADDR_LEN) &&
- cur_eapol_status == EAPOL_STATUS_WPS_DONE) {
+ cur_eapol_status == EAPOL_STATUS_WSC_DONE) {
u32 timeout;
- max_wait_time = dhdp->conf->max_wait_gc_time;
+ max_wait_time = 300;
WL_TRACE(("status=%d, wps_done=%d, waiting %dms ...\n",
status, cfg->wps_done, max_wait_time));
mutex_unlock(&cfg->in4way_sync);
case WL_EXT_STATUS_STA_DISCONNECTED:
if ((action & DONT_DELETE_GC_AFTER_WPS) &&
(dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) &&
- cur_eapol_status == EAPOL_STATUS_WPS_DONE) {
+ cur_eapol_status == EAPOL_STATUS_WSC_DONE) {
WL_TRACE(("status=%d, wps_done=%d => 0\n", status, cfg->wps_done));
cfg->wps_done = FALSE;
}
case WL_EXT_STATUS_STA_CONNECTED:
if ((action & DONT_DELETE_GC_AFTER_WPS) &&
(dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) &&
- cur_eapol_status == EAPOL_STATUS_WPS_DONE) {
+ cur_eapol_status == EAPOL_STATUS_WSC_DONE) {
WL_TRACE(("status=%d, wps_done=%d => 1\n", status, cfg->wps_done));
cfg->wps_done = TRUE;
wake_up_interruptible(&cfg->wps_done_event);
/*
* Linux cfg80211 driver
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfg80211.h 771488 2018-07-10 09:10:11Z $
+ * $Id: wl_cfg80211.h 825255 2019-06-13 12:26:42Z $
*/
/**
#include <net/cfg80211.h>
#include <linux/rfkill.h>
#include <osl.h>
-
#include <dngl_stats.h>
#include <dhd.h>
+
+#define WL_CFG_DRV_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define WL_CFG_DRV_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define WL_CFG_WPS_SYNC_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define WL_CFG_WPS_SYNC_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define WL_CFG_NET_LIST_SYNC_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define WL_CFG_NET_LIST_SYNC_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define WL_CFG_EQ_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define WL_CFG_EQ_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define WL_CFG_BAM_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define WL_CFG_BAM_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
+#define WL_CFG_VNDR_OUI_SYNC_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
+#define WL_CFG_VNDR_OUI_SYNC_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
+
#include <wl_cfgp2p.h>
#include <wl_android.h>
#ifdef WL_NAN
struct wl_security;
struct wl_ibss;
-#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) || defined(WL_FILS_BACKPORT))
-#define WL_FILS
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) && !defined(WL_SAE))
+#define WL_SAE
+#endif // endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) && !defined(WL_SCAN_TYPE))
+#define WL_SCAN_TYPE
+#endif /* WL_SCAN_TYPE */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)) && !defined(WL_FILS_ROAM_OFFLD)
+#define WL_FILS_ROAM_OFFLD
+#endif // endif
+
+#ifdef WL_SAE
+#define IS_AKM_SAE(akm) (akm == WLAN_AKM_SUITE_SAE)
+#else
+#define IS_AKM_SAE(akm) FALSE
+#endif // endif
+#ifdef WL_OWE
+#define IS_AKM_OWE(akm) (akm == WLAN_AKM_SUITE_OWE)
+#else
+#define IS_AKM_OWE(akm) FALSE
#endif // endif
#define htod32(i) (i)
#define WL_DBG_INFO (1 << 1)
#define WL_DBG_ERR (1 << 0)
+#ifndef WAIT_FOR_DISCONNECT_MAX
+#define WAIT_FOR_DISCONNECT_MAX 10
+#endif /* WAIT_FOR_DISCONNECT_MAX */
+#define WAIT_FOR_DISCONNECT_STATE_SYNC 10
+
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
/* Newer kernels use defines from nl80211.h */
#define IEEE80211_BAND_2GHZ NL80211_BAND_2GHZ
#define IEEE80211_BAND_5GHZ NL80211_BAND_5GHZ
+#define IEEE80211_BAND_60GHZ NL80211_BAND_60GHZ
#define IEEE80211_NUM_BANDS NUM_NL80211_BANDS
#endif /* LINUX_VER >= 4.7 */
int binary_len, const char *fmt, ...);
extern char *dhd_log_dump_get_timestamp(void);
#ifndef _DHD_LOG_DUMP_DEFINITIONS_
-#define DLD_BUF_TYPE_GENERAL 0
-#define DLD_BUF_TYPE_SPECIAL 1
#define DHD_LOG_DUMP_WRITE(fmt, ...) \
dhd_log_dump_write(DLD_BUF_TYPE_GENERAL, NULL, 0, fmt, ##__VA_ARGS__)
#define DHD_LOG_DUMP_WRITE_EX(fmt, ...) \
#endif /* !_DHD_LOG_DUMP_DEFINITIONS_ */
#endif /* DHD_LOG_DUMP */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)) || (defined(CONFIG_ARCH_MSM) && \
+ defined(CFG80211_DISCONNECTED_V2))
+#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \
+ cfg80211_disconnected(dev, reason, ie, len, loc_gen, gfp);
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0))
+#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \
+ BCM_REFERENCE(loc_gen); \
+ cfg80211_disconnected(dev, reason, ie, len, gfp);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)) */
+
/* 0 invalidates all debug messages. default is 1 */
#define WL_DBG_LEVEL 0xFF
-#define CFG80211_INFO_TEXT "CFG80211-INFO) "
-#define CFG80211_ERROR_TEXT "CFG80211-ERROR) "
+#define CFG80211_INFO_TEXT "[dhd] CFG80211-INFO) "
+#define CFG80211_ERROR_TEXT "[dhd] CFG80211-ERROR) "
#if defined(DHD_DEBUG)
#ifdef DHD_LOG_DUMP
-#define WL_ERR(args) \
+#define WL_ERR_MSG(x, args...) \
do { \
if (wl_dbg_level & WL_DBG_ERR) { \
- printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__); \
- printk args; \
+ printk(KERN_INFO CFG80211_ERROR_TEXT "%s : " x, __func__, ## args); \
DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
- DHD_LOG_DUMP_WRITE args; \
+ DHD_LOG_DUMP_WRITE(x, ## args); \
} \
} while (0)
-#define WL_ERR_KERN(args) \
+#define WL_ERR(x) WL_ERR_MSG x
+#define WL_ERR_KERN_MSG(x, args...) \
do { \
if (wl_dbg_level & WL_DBG_ERR) { \
- printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__); \
- printk args; \
+ printk(KERN_INFO CFG80211_ERROR_TEXT "%s : " x, __func__, ## args); \
} \
} while (0)
-#define WL_ERR_MEM(args) \
+#define WL_ERR_KERN(x) WL_ERR_KERN_MSG x
+#define WL_ERR_MEM_MSG(x, args...) \
do { \
if (wl_dbg_level & WL_DBG_ERR) { \
DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
- DHD_LOG_DUMP_WRITE args; \
+ DHD_LOG_DUMP_WRITE(x, ## args); \
} \
} while (0)
-#define WL_INFORM_MEM(args) \
+#define WL_ERR_MEM(x) WL_ERR_MEM_MSG x
+#define WL_INFORM_MEM_MSG(x, args...) \
do { \
if (wl_dbg_level & WL_DBG_INFO) { \
- printk(KERN_INFO CFG80211_INFO_TEXT "%s : ", __func__); \
- printk args; \
+ printk(KERN_INFO CFG80211_INFO_TEXT "%s : " x, __func__, ## args); \
DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
- DHD_LOG_DUMP_WRITE args; \
+ DHD_LOG_DUMP_WRITE(x, ## args); \
} \
} while (0)
+#define WL_INFORM_MEM(x) WL_INFORM_MEM_MSG x
#define WL_ERR_EX(args) \
do { \
if (wl_dbg_level & WL_DBG_ERR) { \
- printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__); \
- printk args; \
+ printk(KERN_INFO CFG80211_ERROR_TEXT "%s : " x, __func__, ## args); \
DHD_LOG_DUMP_WRITE_EX("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
DHD_LOG_DUMP_WRITE_EX args; \
} \
DHD_LOG_DUMP_WRITE args; \
} while (0)
#else
-#define WL_ERR(args) \
+#define WL_ERR_MSG(x, args...) \
do { \
if (wl_dbg_level & WL_DBG_ERR) { \
- printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__); \
- printk args; \
- } \
+ printk(KERN_INFO CFG80211_ERROR_TEXT "%s : " x, __func__, ## args); \
+ } \
} while (0)
+#define WL_ERR(x) WL_ERR_MSG x
#define WL_ERR_KERN(args) WL_ERR(args)
#define WL_ERR_MEM(args) WL_ERR(args)
#define WL_INFORM_MEM(args) WL_INFORM(args)
#define WL_MEM(args) WL_DBG(args)
#endif /* DHD_LOG_DUMP */
#else /* defined(DHD_DEBUG) */
-#define WL_ERR(args) \
+#define WL_ERR_MSG(x, args...) \
do { \
if ((wl_dbg_level & WL_DBG_ERR) && net_ratelimit()) { \
- printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__); \
- printk args; \
- } \
+ printk(KERN_INFO CFG80211_ERROR_TEXT "%s : " x, __func__, ## args); \
+ } \
} while (0)
+#define WL_ERR(x) WL_ERR_MSG x
#define WL_ERR_KERN(args) WL_ERR(args)
#define WL_ERR_MEM(args) WL_ERR(args)
#define WL_INFORM_MEM(args) WL_INFORM(args)
#undef WL_INFORM
#endif // endif
-#define WL_INFORM(args) \
+#define WL_INFORM_MSG(x, args...) \
do { \
if (wl_dbg_level & WL_DBG_INFO) { \
- printk(KERN_INFO "CFG80211-INFO) %s : ", __func__); \
- printk args; \
- } \
-} while (0)
-
-#define WL_MSG(arg1, args...) \
-do { \
- if (wl_dbg_level & WL_DBG_ERR) { \
- printk(KERN_INFO "[%s] %s : ", arg1, __func__); \
- printk(args); \
+ printk(KERN_INFO "[dhd] CFG80211-INFO) %s : " x, __func__, ## args); \
} \
} while (0)
+#define WL_INFORM(x) WL_INFORM_MSG x
#ifdef WL_SCAN
#undef WL_SCAN
#endif // endif
-#define WL_SCAN(args) \
+#define WL_SCAN_MSG(x, args...) \
do { \
if (wl_dbg_level & WL_DBG_SCAN) { \
- printk(KERN_INFO "CFG80211-SCAN) %s :", __func__); \
- printk args; \
+ printk(KERN_INFO "[dhd] CFG80211-SCAN) %s :" x, __func__, ## args); \
} \
} while (0)
+#define WL_SCAN(x) WL_SCAN_MSG x
#ifdef WL_TRACE
#undef WL_TRACE
#endif // endif
-#define WL_TRACE(args) \
+#define WL_TRACE_MSG(x, args...) \
do { \
if (wl_dbg_level & WL_DBG_TRACE) { \
- printk(KERN_INFO "CFG80211-TRACE) %s :", __func__); \
- printk args; \
+ printk(KERN_INFO "[dhd] CFG80211-TRACE) %s :" x, __func__, ## args); \
} \
} while (0)
+#define WL_TRACE(x) WL_TRACE_MSG x
#ifdef WL_TRACE_HW4
#undef WL_TRACE_HW4
#endif // endif
#define WL_TRACE_HW4 WL_TRACE
#if (WL_DBG_LEVEL > 0)
-#define WL_DBG(args) \
+#define WL_DBG_MSG(x, args...) \
do { \
if (wl_dbg_level & WL_DBG_DBG) { \
- printk(KERN_INFO "CFG80211-DEBUG) %s :", __func__); \
- printk args; \
+ printk(KERN_INFO "[dhd] CFG80211-DEBUG) %s :" x, __func__, ## args); \
} \
} while (0)
+#define WL_DBG(x) WL_DBG_MSG x
#else /* !(WL_DBG_LEVEL > 0) */
#define WL_DBG(args)
#endif /* (WL_DBG_LEVEL > 0) */
#define WL_MED_DWELL_TIME 400
#define WL_MIN_DWELL_TIME 100
#define WL_LONG_DWELL_TIME 1000
-#define IFACE_MAX_CNT 4
+#define IFACE_MAX_CNT 5
#define WL_SCAN_CONNECT_DWELL_TIME_MS 200
#define WL_SCAN_JOIN_PROBE_INTERVAL_MS 20
#define WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 320
/* Cipher suites */
#ifndef WLAN_CIPHER_SUITE_PMK
-#define WLAN_CIPHER_SUITE_PMK 0x00904C00
+#define WLAN_CIPHER_SUITE_PMK 0x00904C00
#endif /* WLAN_CIPHER_SUITE_PMK */
#ifndef WLAN_AKM_SUITE_FT_8021X
-#define WLAN_AKM_SUITE_FT_8021X 0x000FAC03
+#define WLAN_AKM_SUITE_FT_8021X 0x000FAC03
#endif /* WLAN_AKM_SUITE_FT_8021X */
#ifndef WLAN_AKM_SUITE_FT_PSK
-#define WLAN_AKM_SUITE_FT_PSK 0x000FAC04
+#define WLAN_AKM_SUITE_FT_PSK 0x000FAC04
#endif /* WLAN_AKM_SUITE_FT_PSK */
+#ifndef WLAN_AKM_SUITE_8021X_SUITE_B
+#define WLAN_AKM_SUITE_8021X_SUITE_B 0x000FAC0B
+#define WLAN_AKM_SUITE_8021X_SUITE_B_192 0x000FAC0C
+#endif /* WLAN_AKM_SUITE_8021X_SUITE_B */
+
+/* TODO: even in upstream linux(v5.0), FT-1X-SHA384 isn't defined and supported yet.
+ * need to revisit here to sync correct name later.
+ */
+#define WLAN_AKM_SUITE_FT_8021X_SHA384 0x000FAC0D
+
+#define WL_AKM_SUITE_SHA256_1X 0x000FAC05
+#define WL_AKM_SUITE_SHA256_PSK 0x000FAC06
+
#ifndef WLAN_AKM_SUITE_FILS_SHA256
-#define WLAN_AKM_SUITE_FILS_SHA256 0x000FAC0E
-#define WLAN_AKM_SUITE_FILS_SHA384 0x000FAC0F
-#define WLAN_AKM_SUITE_FT_FILS_SHA256 0x000FAC10
-#define WLAN_AKM_SUITE_FT_FILS_SHA384 0x000FAC11
+#define WLAN_AKM_SUITE_FILS_SHA256 0x000FAC0E
+#define WLAN_AKM_SUITE_FILS_SHA384 0x000FAC0F
+#define WLAN_AKM_SUITE_FT_FILS_SHA256 0x000FAC10
+#define WLAN_AKM_SUITE_FT_FILS_SHA384 0x000FAC11
#endif /* WLAN_AKM_SUITE_FILS_SHA256 */
+#define MIN_VENDOR_EXTN_IE_LEN 2
+#ifdef WL_OWE
+#ifndef WLAN_AKM_SUITE_OWE
+#define WLAN_AKM_SUITE_OWE 0X000FAC12
+#endif /* WPA_KEY_MGMT_OWE */
+#endif /* WL_OWE */
+
/*
* BRCM local.
* Use a high number that's unlikely to clash with linux upstream for a while until we can
#define FILS_INDICATION_IE_TAG_FIXED_LEN 2
#endif // endif
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
+GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); \
+(entry) = list_first_entry((ptr), type, member); \
+GCC_DIAGNOSTIC_POP(); \
+
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); \
+entry = container_of((ptr), type, member); \
+GCC_DIAGNOSTIC_POP(); \
+
+#else
+#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
+(entry) = list_first_entry((ptr), type, member); \
+
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+entry = container_of((ptr), type, member); \
+
+#endif /* STRICT_GCC_WARNINGS */
+
/* driver status */
enum wl_status {
WL_STATUS_READY = 0,
};
typedef enum wl_iftype {
- WL_IF_TYPE_STA = 0,
- WL_IF_TYPE_AP = 1,
- WL_IF_TYPE_AWDL = 2,
- WL_IF_TYPE_NAN_NMI = 3,
- WL_IF_TYPE_NAN = 4,
- WL_IF_TYPE_P2P_GO = 5,
- WL_IF_TYPE_P2P_GC = 6,
- WL_IF_TYPE_P2P_DISC = 7,
- WL_IF_TYPE_IBSS = 8,
- WL_IF_TYPE_MONITOR = 9,
- WL_IF_TYPE_AIBSS = 10,
- WL_IF_TYPE_MAX
+ WL_IF_TYPE_STA = 0,
+ WL_IF_TYPE_AP = 1,
+ WL_IF_TYPE_NAN_NMI = 3,
+ WL_IF_TYPE_NAN = 4,
+ WL_IF_TYPE_P2P_GO = 5,
+ WL_IF_TYPE_P2P_GC = 6,
+ WL_IF_TYPE_P2P_DISC = 7,
+ WL_IF_TYPE_IBSS = 8,
+ WL_IF_TYPE_MONITOR = 9,
+ WL_IF_TYPE_AIBSS = 10,
+ WL_IF_TYPE_MAX
} wl_iftype_t;
typedef enum wl_interface_state {
/* wi-fi mode */
enum wl_mode {
- WL_MODE_BSS,
- WL_MODE_IBSS,
- WL_MODE_AP,
- WL_MODE_AWDL,
- WL_MODE_NAN
+ WL_MODE_BSS = 0,
+ WL_MODE_IBSS = 1,
+ WL_MODE_AP = 2,
+ WL_MODE_NAN = 4,
+ WL_MODE_MAX
};
/* driver profile list */
WL_PROF_BSSID,
WL_PROF_ACT,
WL_PROF_BEACONINT,
- WL_PROF_DTIMPERIOD
+ WL_PROF_DTIMPERIOD,
+ WL_PROF_LATEST_BSSID
};
/* donlge escan state */
TDLS_STATE_TEARDOWN,
TDLS_STATE_IF_CREATE,
TDLS_STATE_IF_DELETE,
- TDLS_STATE_NDI_CREATE
+ TDLS_STATE_NMI_CREATE
};
/* beacon / probe_response */
u8 assoc_req_ie[VNDR_IES_BUF_LEN];
u8 assoc_res_ie[VNDR_IES_BUF_LEN];
u8 beacon_ie[VNDR_IES_MAX_BUF_LEN];
+ u8 disassoc_ie[VNDR_IES_BUF_LEN];
u32 probe_req_ie_len;
u32 probe_res_ie_len;
u32 assoc_req_ie_len;
u32 assoc_res_ie_len;
u32 beacon_ie_len;
+ u32 disassoc_ie_len;
} wl_bss_vndr_ies_t;
typedef struct wl_cfgbss {
u16 beacon_interval;
u8 dtim_period;
bool active;
+ u8 latest_bssid[ETHER_ADDR_LEN];
};
struct wl_wps_ie {
} wl_bcnrecv_state_t;
typedef enum wl_bcnrecv_reason {
- WL_BCNRECV_USER_TRIGGER = 0,
+ WL_BCNRECV_INVALID = 0,
+ WL_BCNRECV_USER_TRIGGER,
WL_BCNRECV_SUSPEND,
WL_BCNRECV_SCANBUSY,
WL_BCNRECV_CONCURRENCY,
- WL_BCNRECV_LISTENBUSY
+ WL_BCNRECV_LISTENBUSY,
+ WL_BCNRECV_ROAMABORT,
+ WL_BCNRECV_HANG
} wl_bcnrecv_reason_t;
+
+typedef enum wl_bcnrecv_status {
+ WL_BCNRECV_STARTED = 0,
+ WL_BCNRECV_STOPPED,
+ WL_BCNRECV_ABORTED,
+ WL_BCNRECV_SUSPENDED,
+ WL_BCNRECV_MAX
+} wl_bcnrecv_status_t;
+
+typedef enum wl_bcnrecv_attr_type {
+ BCNRECV_ATTR_STATUS = 1,
+ BCNRECV_ATTR_REASON,
+ BCNRECV_ATTR_BCNINFO
+} wl_bcnrecv_attr_type_t;
#endif /* WL_BCNRECV */
+#ifdef WL_CHAN_UTIL
+#define CU_ATTR_PERCENTAGE 1
+#define CU_ATTR_HDR_LEN 30
+#endif /* WL_CHAN_UTIL */
/* association inform */
#define MAX_REQ_LINE 1024u
u32 resp_len;
};
-#define WL_MIN_PMKID_LIST_V2_FW_MAJOR 12
+#define MIN_PMKID_LIST_V3_FW_MAJOR 13
+#define MIN_PMKID_LIST_V3_FW_MINOR 0
+
+#define MIN_PMKID_LIST_V2_FW_MAJOR 12
#define MIN_PMKID_LIST_V2_FW_MINOR 0
+
+#define MIN_ESCAN_PARAM_V2_FW_MAJOR 14
+#define MIN_ESCAN_PARAM_V2_FW_MINOR 0
+
/* wpa2 pmk list */
struct wl_pmk_list {
- pmkid_list_v2_t pmkids;
- pmkid_v2_t foo[MAXPMKID - 1];
+ pmkid_list_v3_t pmkids;
+ pmkid_v3_t foo[MAXPMKID - 1];
};
+#define KEY_PERM_PMK 0xFFFFFFFF
+
#ifdef DHD_MAX_IFS
#define WL_MAX_IFS DHD_MAX_IFS
#else
#define WL_MAX_IFS 16
#endif // endif
+#define MAC_RAND_BYTES 3
#define ESCAN_BUF_SIZE (64 * 1024)
struct escan_info {
#ifdef WL11U
/* Max length of Interworking element */
-#define IW_IES_MAX_BUF_LEN 9
+#define IW_IES_MAX_BUF_LEN 8
#endif // endif
#ifdef WLFBT
#define FBT_KEYLEN 32
#ifdef SUPPORT_AP_RADIO_PWRSAVE
typedef struct ap_rps_info {
bool enable;
- bool sta_assoc_check;
+ int sta_assoc_check;
int pps;
int quiet_time;
int level;
} wl_rssi_ant_mimo_t;
#endif /* SUPPORT_RSSI_SUM_REPORT */
-#ifdef DHD_LB_IRQSET
-#if defined(CONFIG_ARCH_MSM8998) || defined(CONFIG_ARCH_SDM845)
-#define WL_IRQSET
-#endif /* CONFIG_ARCH_MSM8998 | CONFIG_ARCH_SDM845) */
-#endif /* DHD_LB_IRQSET */
+/* MBO-OCE prune event reason codes */
+#if defined(WL_MBO) || defined(WL_OCE)
+typedef enum wl_prune_evt_reason {
+ WIFI_PRUNE_UNSPECIFIED = 0, /* Unspecified event reason code */
+ WIFI_PRUNE_ASSOC_RETRY_DELAY = 1, /* MBO assoc retry delay */
+ WIFI_PRUNE_RSSI_ASSOC_REJ = 2 /* OCE RSSI-based assoc rejection */
+} wl_prune_evt_reason_t;
+#endif /* WL_MBO || WL_OCE */
#ifdef WL_MBO
typedef struct wl_event_mbo wl_event_mbo_t;
typedef struct wl_event_mbo_cell_nw_switch wl_event_mbo_cell_nw_switch_t;
+typedef struct wl_btm_event_type_data wl_btm_event_type_data_t;
#endif /* WL_MBO */
+#if defined(WL_MBO) || defined(WL_OCE)
+typedef struct wl_bssid_prune_evt_info wl_bssid_pruned_evt_info_t;
+#endif /* WL_MBO || WL_OCE */
+
#ifdef WL_NAN
-#define NAN_MAX_NDI 2
+#define NAN_MAX_NDI 1u
typedef struct wl_ndi_data
{
u8 ifname[IFNAMSIZ];
u8 in_use;
u8 created;
+ struct net_device *nan_ndev;
} wl_ndi_data_t;
+
typedef struct wl_nancfg
{
+ wl_nan_ver_t version;
wl_ndi_data_t ndi[NAN_MAX_NDI];
struct mutex nan_sync;
uint8 svc_inst_id_mask[NAN_SVC_INST_SIZE];
wait_queue_head_t nan_event_wait;
nan_stop_reason_code_t disable_reason;
bool mac_rand;
- nan_range_types_t range_type;
+ int range_type;
+ uint8 max_ndp_count; /* Max no. of NDPs */
+ nan_ndp_peer_t *nan_ndp_peer_info;
+ nan_data_path_id ndp_id[NAN_MAX_NDP_PEER];
} wl_nancfg_t;
+
+#ifdef WL_NANP2P
+#define WL_CFG_P2P_DISC_BIT 0x1u
+#define WL_CFG_NAN_DISC_BIT 0x2u
+#define WL_NANP2P_CONC_SUPPORT (WL_CFG_P2P_DISC_BIT | WL_CFG_NAN_DISC_BIT)
+#endif /* WL_NAN2P */
#endif /* WL_NAN */
+#ifdef WL_IFACE_MGMT
+#define WL_IFACE_NOT_PRESENT -1
+
+typedef enum iface_conc_policy {
+ WL_IF_POLICY_DEFAULT = 0,
+ WL_IF_POLICY_FCFS = 1,
+ WL_IF_POLICY_LP = 2,
+ WL_IF_POLICY_ROLE_PRIORITY = 3,
+ WL_IF_POLICY_CUSTOM = 4,
+ WL_IF_POLICY_INVALID
+} iface_conc_policy_t;
+
+typedef struct iface_mgmt_data {
+ uint8 policy;
+ uint8 priority[WL_IF_TYPE_MAX];
+} iface_mgmt_data_t;
+#endif /* WL_IFACE_MGMT */
+
#ifdef WL_WPS_SYNC
#define EAP_PACKET 0
#define EAP_EXPANDED_TYPE 254
#define WPS_MAX_SESSIONS 2
typedef struct wl_wps_session {
bool in_use;
- struct timer_list timer;
+ timer_list_compat_t timer;
struct net_device *ndev;
wl_wps_state_t state;
u16 mode;
#endif /* WL_WPS_SYNC */
#ifndef WL_STATIC_IFNAME_PREFIX
-#define WL_STATIC_IFNAME_PREFIX "wlan1"
+#define WL_STATIC_IFNAME_PREFIX "wlan%d"
#endif /* WL_STATIC_IFNAME */
+typedef struct buf_data {
+ u32 ver; /* version of struct */
+ u32 len; /* Total len */
+ /* size of each buffer in case of split buffers (0 - single buffer). */
+ u32 buf_threshold;
+ const void *data_buf[1]; /* array of user space buffer pointers. */
+} buf_data_t;
+
/* private data of cfg80211 interface */
struct bcm_cfg80211 {
struct wireless_dev *wdev; /* representing cfg cfg80211 device */
struct completion wait_next_af;
struct mutex usr_sync; /* maily for up/down synchronization */
struct mutex if_sync; /* maily for iface op synchronization */
- struct mutex scan_complete; /* serialize scan_complete call */
+ struct mutex scan_sync; /* scan sync from different scan contexts */
struct wl_scan_results *bss_list;
struct wl_scan_results *scan_results;
struct p2p_info *p2p;
bool p2p_supported;
void *btcoex_info;
- struct timer_list scan_timeout; /* Timer for catch scan event timeout */
+ timer_list_compat_t scan_timeout; /* Timer for catch scan event timeout */
#ifdef WL_CFG80211_GON_COLLISION
u8 block_gon_req_tx_count;
u8 block_gon_req_rx_count;
u8 curr_band;
#endif /* WL_HOST_BAND_MGMT */
bool scan_suppressed;
- struct timer_list scan_supp_timer;
+ timer_list_compat_t scan_supp_timer;
struct work_struct wlan_work;
struct mutex event_sync; /* maily for up/down synchronization */
bool disable_roam_event;
struct delayed_work pm_enable_work;
-#ifdef WL_IRQSET
- struct delayed_work irq_set_work;
-#endif /* WL_IRQSET */
struct workqueue_struct *event_workq; /* workqueue for event */
struct work_struct event_work; /* work item for event */
struct mutex pm_sync; /* mainly for pm work synchronization */
wait_queue_head_t ndp_if_change_event;
uint8 support_5g;
u8 nan_nmi_mac[ETH_ALEN];
- u8 nan_dp_mask;
+ u8 nan_dp_count;
wl_nancfg_t nancfg;
+ struct delayed_work nan_disable;
#ifdef WL_NAN_DISC_CACHE
int nan_disc_count;
nan_disc_result_cache *nan_disc_cache;
nan_svc_info_t svc_info[NAN_MAX_SVC_INST];
nan_ranging_inst_t nan_ranging_info[NAN_MAX_RANGING_INST];
#endif /* WL_NAN_DISC_CACHE */
+#ifdef WL_NANP2P
+ uint8 conc_disc;
+ bool nan_p2p_supported;
+#endif /* WL_NANP2P */
#endif /* WL_NAN */
-#ifdef WL_CFG80211_P2P_DEV_IF
- bool down_disc_if;
-#endif /* WL_CFG80211_P2P_DEV_IF */
+#ifdef WL_IFACE_MGMT
+ iface_mgmt_data_t iface_data;
+#endif /* WL_IFACE_MGMT */
#ifdef P2PLISTEN_AP_SAMECHN
bool p2p_resp_apchn_status;
#endif /* P2PLISTEN_AP_SAMECHN */
bool rcc_enabled; /* flag for Roam channel cache feature */
u16 ap_oper_channel;
#ifdef DHD_LOSSLESS_ROAMING
- struct timer_list roam_timeout; /* Timer for catch roam timeout */
+ timer_list_compat_t roam_timeout; /* Timer for catch roam timeout */
#endif // endif
#ifndef DUAL_ESCAN_RESULT_BUFFER
uint16 escan_sync_id_cntr;
ap_rps_info_t ap_rps_info;
#endif /* SUPPORT_AP_RADIO_PWRSAVE */
u16 vif_macaddr_mask;
+ osl_t *osh;
struct list_head vndr_oui_list;
spinlock_t vndr_oui_sync; /* to protect vndr_oui_list */
- osl_t *osh;
bool rssi_sum_report;
int rssi; /* previous RSSI (backup) of get_station */
uint64 scan_enq_time;
+ uint64 scan_deq_time;
+ uint64 scan_hdlr_cmplt_time;
+ uint64 scan_cmplt_time;
uint64 wl_evt_deq_time;
uint64 wl_evt_hdlr_entry_time;
uint64 wl_evt_hdlr_exit_time;
spinlock_t wps_sync; /* to protect wps states (and others if needed) */
#endif /* WL_WPS_SYNC */
struct wl_fils_info fils_info;
-
+ uint8 scanmac_enabled;
#ifdef WL_BCNRECV
/* structure used for fake ap detection info */
- struct mutex bcn_sync; /* mainly for bcn resume/suspend synchronization */
+ struct mutex bcn_sync; /* mainly for bcn resume/suspend synchronization */
wl_bcnrecv_info_t bcnrecv_info;
#endif /* WL_BCNRECV */
struct net_device *static_ndev;
uint8 static_ndev_state;
- uint8 scanmac_enabled;
+ bool hal_started;
+ wl_wlc_version_t wlc_ver;
+ bool scan_params_v2;
+#ifdef SUPPORT_AP_BWCTRL
+ u32 bw_cap_5g;
+#endif /* SUPPORT_AP_BWCTRL */
#if defined(RSSIAVG)
wl_rssi_cache_ctrl_t g_rssi_cache_ctrl;
wl_rssi_cache_ctrl_t g_connected_rssi_cache_ctrl;
int best_2g_ch;
int best_5g_ch;
uint handshaking;
+ int btc_mode;
bool wps_done;
wait_queue_head_t wps_done_event;
struct mutex in4way_sync;
ulong disconnected_jiffies;
};
-
#define WL_STATIC_IFIDX (DHD_MAX_IFS + DHD_MAX_STATIC_IFS - 1)
enum static_ndev_states {
NDEV_STATE_NONE,
((cfg && cfg->static_ndev && \
(cfg->static_ndev_state & NDEV_STATE_FW_IF_CREATED)) ? true : false)
#define IS_CFG80211_STATIC_IF_NAME(cfg, name) \
- ((cfg && !strcmp(cfg->static_ndev->name, name)))
+ (cfg && cfg->static_ndev && \
+ !strncmp(cfg->static_ndev->name, name, strlen(name)))
#ifdef WL_SAE
typedef struct wl_sae_key_info {
- uint8 bssid[ETHER_ADDR_LEN];
+ uint8 peer_mac[ETHER_ADDR_LEN];
uint16 pmk_len;
uint16 pmkid_len;
const uint8 *pmk;
} wl_sae_key_info_t;
#endif /* WL_SAE */
-s32 wl_iftype_to_mode(wl_iftype_t iftype);
+typedef enum wl_concurrency_mode {
+ CONCURRENCY_MODE_NONE = 0,
+ CONCURRENCY_SCC_MODE,
+ CONCURRENCY_VSDB_MODE,
+ CONCURRENCY_RSDB_MODE
+} wl_concurrency_mode_t;
+
+typedef struct wl_wips_event_info {
+ uint32 timestamp;
+ struct ether_addr bssid;
+ uint16 misdeauth;
+ int16 current_RSSI;
+ int16 deauth_RSSI;
+} wl_wips_event_info_t;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-#define GCC_DIAGNOSTIC_PUSH() \
-_Pragma("GCC diagnostic push") \
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#define GCC_DIAGNOSTIC_POP() \
-_Pragma("GCC diagnostic pop")
-#else
-#define GCC_DIAGNOSTIC_PUSH()
-#define GCC_DIAGNOSTIC_POP()
-#endif /* STRICT_GCC_WARNINGS */
+s32 wl_iftype_to_mode(wl_iftype_t iftype);
#define BCM_LIST_FOR_EACH_ENTRY_SAFE(pos, next, head, member) \
list_for_each_entry_safe((pos), (next), (head), member)
struct net_info *_net_info, *next;
unsigned long int flags;
int idx = 0;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next,
&cfg->net_list, list) {
- WL_INFORM_MEM(("%s: net_list[%d] bssidx: %d, "
- "ndev: %p, wdev: %p \n", __FUNCTION__,
- idx++, _net_info->bssidx,
- OSL_OBFUSCATE_BUF(_net_info->ndev),
- OSL_OBFUSCATE_BUF(_net_info->wdev)));
+ GCC_DIAGNOSTIC_POP();
+ WL_INFORM_MEM(("wl_probe_wdev_all: net_list[%d] bssidx: %d\n",
+ idx++, _net_info->bssidx));
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
return;
}
struct net_info *_net_info, *next, *info = NULL;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if ((bssidx >= 0) && (_net_info->bssidx == bssidx) &&
(_net_info->ifidx == ifidx)) {
info = _net_info;
break;
}
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
return info;
}
#ifdef DHD_IFDEBUG
WL_INFORM_MEM(("dealloc_netinfo enter wdev=%p \n", OSL_OBFUSCATE_BUF(wdev)));
#endif // endif
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (wdev && (_net_info->wdev == wdev)) {
wl_cfgbss_t *bss = &_net_info->bss;
MFREE(cfg->osh, _net_info, sizeof(struct net_info));
}
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
#ifdef DHD_IFDEBUG
WL_INFORM_MEM(("dealloc_netinfo exit iface_cnt=%d \n", cfg->iface_cnt));
#endif // endif
_net_info->roam_off = WL_INVALID;
_net_info->bssidx = bssidx;
_net_info->ifidx = ifidx;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
cfg->iface_cnt++;
list_add(&_net_info->list, &cfg->net_list);
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
}
#ifdef DHD_IFDEBUG
WL_DBG(("alloc_netinfo exit iface_cnt=%d \n", cfg->iface_cnt));
struct net_info *_net_info, *next;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
wl_cfgbss_t *bss = &_net_info->bss;
+ GCC_DIAGNOSTIC_POP();
if (bss->wpa_ie) {
MFREE(cfg->osh, bss->wpa_ie, bss->wpa_ie[1]
MFREE(cfg->osh, _net_info, sizeof(struct net_info));
}
cfg->iface_cnt = 0;
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
}
static inline u32
wl_get_status_all(struct bcm_cfg80211 *cfg, s32 status)
u32 cnt = 0;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (_net_info->ndev &&
test_bit(status, &_net_info->sme_state))
cnt++;
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
return cnt;
}
static inline void
struct net_info *_net_info, *next;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
switch (op) {
case 1:
break; /* set all status is not allowed */
* Release the spinlock before calling notifier. Else there
* will be nested calls
*/
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
clear_bit(status, &_net_info->sme_state);
if (cfg->state_notifier &&
test_bit(status, &(cfg->interrested_state)))
break; /* unknown operation */
}
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
}
static inline void
wl_set_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status,
struct net_info *_net_info, *next;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ if (status >= BITS_PER_LONG) {
+ /* max value for shift operation is
+ * (BITS_PER_LONG -1) for unsigned long.
+ * if status crosses BIT_PER_LONG, the variable
+ * sme_state should be correspondingly updated.
+ */
+ ASSERT(0);
+ return;
+ }
+
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
if (ndev && (_net_info->ndev == ndev)) {
+ GCC_DIAGNOSTIC_POP();
switch (op) {
case 1:
/*
* Release the spinlock before calling notifier. Else there
* will be nested calls
*/
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
set_bit(status, &_net_info->sme_state);
if (cfg->state_notifier &&
test_bit(status, &(cfg->interrested_state)))
* Release the spinlock before calling notifier. Else there
* will be nested calls
*/
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
clear_bit(status, &_net_info->sme_state);
if (cfg->state_notifier &&
test_bit(status, &(cfg->interrested_state)))
}
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
}
wl_cfgbss_t *bss = NULL;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (wdev && (_net_info->wdev == wdev)) {
bss = &_net_info->bss;
break;
}
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
return bss;
}
u32 stat = 0;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (ndev && (_net_info->ndev == ndev)) {
stat = test_bit(status, &_net_info->sme_state);
break;
}
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
return stat;
}
s32 mode = -1;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (_net_info->ndev && (_net_info->ndev == ndev)) {
mode = wl_iftype_to_mode(_net_info->iftype);
break;
}
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
return mode;
}
s32 bssidx = -1;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (_net_info->wdev && (_net_info->wdev == wdev)) {
bssidx = _net_info->bssidx;
break;
}
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
return bssidx;
}
if (bssidx < 0)
return NULL;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if ((_net_info->bssidx == bssidx) && (_net_info->ifidx == ifidx)) {
wdev = _net_info->wdev;
break;
}
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
return wdev;
}
struct wl_profile *prof = NULL;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (ndev && (_net_info->ndev == ndev)) {
prof = &_net_info->profile;
break;
}
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
return prof;
}
static inline struct net_info *
struct net_info *_net_info, *next, *info = NULL;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (ndev && (_net_info->ndev == ndev)) {
info = _net_info;
break;
}
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
return info;
}
struct net_info *_net_info, *next, *info = NULL;
unsigned long int flags;
- spin_lock_irqsave(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH();
+ WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ GCC_DIAGNOSTIC_POP();
if (wdev && (_net_info->wdev == wdev)) {
info = _net_info;
break;
}
}
- GCC_DIAGNOSTIC_POP();
- spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
return info;
}
+static inline char *
+wl_iftype_to_str(int wl_iftype)
+{
+ switch (wl_iftype) {
+ case (WL_IF_TYPE_STA):
+ return "WL_IF_TYPE_STA";
+ case (WL_IF_TYPE_AP):
+ return "WL_IF_TYPE_AP";
+ case (WL_IF_TYPE_NAN_NMI):
+ return "WL_IF_TYPE_NAN_NMI";
+ case (WL_IF_TYPE_NAN):
+ return "WL_IF_TYPE_NAN";
+ case (WL_IF_TYPE_P2P_GO):
+ return "WL_IF_TYPE_P2P_GO";
+ case (WL_IF_TYPE_P2P_GC):
+ return "WL_IF_TYPE_P2P_GC";
+ case (WL_IF_TYPE_P2P_DISC):
+ return "WL_IF_TYPE_P2P_DISC";
+ case (WL_IF_TYPE_IBSS):
+ return "WL_IF_TYPE_IBSS";
+ case (WL_IF_TYPE_MONITOR):
+ return "WL_IF_TYPE_MONITOR";
+ case (WL_IF_TYPE_AIBSS):
+ return "WL_IF_TYPE_AIBSS";
+ default:
+ return "WL_IF_TYPE_UNKNOWN";
+ }
+}
+
+#define is_discovery_iface(iface) (((iface == WL_IF_TYPE_P2P_DISC) || \
+ (iface == WL_IF_TYPE_NAN_NMI)) ? 1 : 0)
#define is_p2p_group_iface(wdev) (((wdev->iftype == NL80211_IFTYPE_P2P_GO) || \
(wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) ? 1 : 0)
#define bcmcfg_to_wiphy(cfg) (cfg->wdev->wiphy)
#define ndev_to_wdev(ndev) (ndev->ieee80211_ptr)
#define wdev_to_ndev(wdev) (wdev->netdev)
+#ifdef WL_BLOCK_P2P_SCAN_ON_STA
#define IS_P2P_IFACE(wdev) (wdev && \
((wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) || \
(wdev->iftype == NL80211_IFTYPE_P2P_GO) || \
(wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)))
+#endif /* WL_BLOCK_P2P_SCAN_ON_STA */
+
+#define IS_STA_IFACE(wdev) (wdev && \
+ (wdev->iftype == NL80211_IFTYPE_STATION))
+
+#define IS_AP_IFACE(wdev) (wdev && \
+ (wdev->iftype == NL80211_IFTYPE_AP))
#if defined(WL_ENABLE_P2P_IF)
#define ndev_to_wlc_ndev(ndev, cfg) ((ndev == cfg->p2p_net) ? \
#define IS_AKM_SUITE_FT(sec) ({BCM_REFERENCE(sec); FALSE;})
#endif /* WLFBT */
-#ifdef BCMCCX
-#define IS_AKM_SUITE_CCKM(sec) (sec->wpa_auth == WLAN_AKM_SUITE_CCKM)
-#else
#define IS_AKM_SUITE_CCKM(sec) ({BCM_REFERENCE(sec); FALSE;})
-#endif /* BCMCCX */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
#define STA_INFO_BIT(info) (1ul << NL80211_STA_ ## info)
extern void wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t *e,
void *data);
+extern s32 wl_cfg80211_handle_critical_events(struct bcm_cfg80211 *cfg,
+ const wl_event_msg_t * e);
+
void wl_cfg80211_set_parent_dev(void *dev);
struct device *wl_cfg80211_get_parent_dev(void);
struct bcm_cfg80211 *wl_cfg80211_get_bcmcfg(void);
#endif /* WL_STATIC_IF */
extern s32 wl_cfg80211_up(struct net_device *net);
extern s32 wl_cfg80211_down(struct net_device *net);
+extern void wl_cfg80211_sta_ifdown(struct net_device *net);
extern s32 wl_cfg80211_notify_ifadd(struct net_device * dev, int ifidx, char *name, uint8 *mac,
uint8 bssidx, uint8 role);
extern s32 wl_cfg80211_notify_ifdel(struct net_device * dev, int ifidx, char *name, uint8 *mac,
extern int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg,
int ifidx, struct net_device* ndev, bool rtnl_lock_reqd);
extern void wl_cfg80211_cleanup_if(struct net_device *dev);
-extern int wl_cfg80211_scan_stop(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev);
-extern void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
extern bool wl_cfg80211_is_concurrent_mode(struct net_device * dev);
+extern void wl_cfg80211_disassoc(struct net_device *ndev, uint32 reason);
+extern void wl_cfg80211_del_all_sta(struct net_device *ndev, uint32 reason);
extern void* wl_cfg80211_get_dhdp(struct net_device * dev);
extern bool wl_cfg80211_is_p2p_active(struct net_device * dev);
extern bool wl_cfg80211_is_roam_offload(struct net_device * dev);
extern s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len);
extern s32 wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len);
extern s32 wl_cfg80211_increase_p2p_bw(struct net_device *net, char* buf, int len);
+extern bool wl_cfg80211_check_vif_in_use(struct net_device *ndev);
#ifdef P2PLISTEN_AP_SAMECHN
extern s32 wl_cfg80211_set_p2p_resp_ap_chn(struct net_device *net, s32 enable);
#endif /* P2PLISTEN_AP_SAMECHN */
struct bcm_cfg80211 *cfg, wl_eventmsg_buf_t *ev);
extern void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac);
extern void wl_cfg80211_update_power_mode(struct net_device *dev);
-extern void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command);
extern void wl_terminate_event_handler(struct net_device *dev);
extern struct bcm_cfg80211 *wl_get_cfg(struct net_device *ndev);
extern s32 wl_cfg80211_set_if_band(struct net_device *ndev, int band);
extern s32 wl_cfg80211_set_country_code(struct net_device *dev, char *country_code,
bool notify, bool user_enforced, int revinfo);
+extern bool wl_cfg80211_is_hal_started(struct bcm_cfg80211 *cfg);
+#ifdef WL_WIPSEVT
+extern int wl_cfg80211_wips_event(uint16 misdeauth, char* bssid);
+extern int wl_cfg80211_wips_event_ext(wl_wips_event_info_t *wips_event);
+#endif /* WL_WIPSEVT */
#define SCAN_BUF_CNT 2
#define SCAN_BUF_NEXT 1
extern void wl_cfg80211_block_arp(struct net_device *dev, int enable);
#endif /* PKT_FILTER_SUPPORT && APSTA_BLOCK_ARP_DURING_DHCP */
+#ifdef WLTDLS
+extern s32 wl_cfg80211_tdls_config(struct bcm_cfg80211 *cfg,
+ enum wl_tdls_config state, bool tdls_mode);
+extern s32 wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* WLTDLS */
+
#ifdef WL_NAN
extern int wl_cfgvendor_send_nan_event(struct wiphy * wiphy,
struct net_device *dev, int event_id,
struct net_device *dev, wl_nan_ev_rng_rpt_ind_t *range_res,
uint32 status);
#endif /* RTT_SUPPORT */
+#ifdef WL_NANP2P
+extern int wl_cfg80211_set_iface_conc_disc(struct net_device *ndev,
+ uint8 arg_val);
+extern uint8 wl_cfg80211_get_iface_conc_disc(struct net_device *ndev);
+#endif /* WL_NANP2P */
#endif /* WL_NAN */
#ifdef WL_CFG80211_P2P_DEV_IF
extern s32 wl_cfg80211_p2plo_deinit(struct bcm_cfg80211 *cfg);
#endif /* P2P_LISTEN_OFFLOADING */
+/* Function to flush the FW log buffer content */
+extern void wl_flush_fw_log_buffer(struct net_device *dev, uint32 logset_mask);
+
#define RETURN_EIO_IF_NOT_UP(wlpriv) \
do { \
struct net_device *checkSysUpNDev = bcmcfg_to_prmry_ndev(wlpriv); \
int wl_cfg80211_iface_count(struct net_device *dev);
struct net_device* wl_get_ap_netdev(struct bcm_cfg80211 *cfg, char *ifname);
void wl_cfg80211_cleanup_virtual_ifaces(struct bcm_cfg80211 *cfg, bool rtnl_lock_reqd);
+#ifdef WL_IFACE_MGMT
+extern int wl_cfg80211_set_iface_policy(struct net_device *ndev, char *arg, int len);
+extern uint8 wl_cfg80211_get_iface_policy(struct net_device *ndev);
+extern s32 wl_cfg80211_handle_if_role_conflict(struct bcm_cfg80211 *cfg, wl_iftype_t new_wl_iftype);
+s32 wl_cfg80211_data_if_mgmt(struct bcm_cfg80211 *cfg, wl_iftype_t new_wl_iftype);
+s32 wl_cfg80211_disc_if_mgmt(struct bcm_cfg80211 *cfg, wl_iftype_t new_wl_iftype,
+ bool *disable_nan, bool *disable_p2p);
+s32 wl_cfg80211_handle_discovery_config(struct bcm_cfg80211 *cfg, wl_iftype_t new_wl_iftype);
+wl_iftype_t wl_cfg80211_get_sec_iface(struct bcm_cfg80211 *cfg);
+bool wl_cfg80211_is_associated_discovery(struct bcm_cfg80211 *cfg, wl_iftype_t new_wl_iftype);
+#endif /* WL_IFACE_MGMT */
struct wireless_dev * wl_cfg80211_add_if(struct bcm_cfg80211 *cfg, struct net_device *primary_ndev,
wl_iftype_t wl_iftype, const char *name, u8 *mac);
extern s32 wl_cfg80211_del_if(struct bcm_cfg80211 *cfg, struct net_device *primary_ndev,
struct wireless_dev *wdev, char *name);
+s32 _wl_cfg80211_del_if(struct bcm_cfg80211 *cfg, struct net_device *primary_ndev,
+ struct wireless_dev *wdev, char *ifname);
+s32 wl_cfg80211_delete_iface(struct bcm_cfg80211 *cfg, wl_iftype_t sec_data_if_type);
+
#ifdef WL_STATIC_IF
extern struct net_device *wl_cfg80211_register_static_if(struct bcm_cfg80211 *cfg,
u16 iftype, char *ifname);
extern s32 wl_release_vif_macaddr(struct bcm_cfg80211 *cfg, u8 *mac_addr, u16 wl_iftype);
extern int wl_cfg80211_ifstats_counters(struct net_device *dev, wl_if_stats_t *if_stats);
extern s32 wl_cfg80211_set_dbg_verbose(struct net_device *ndev, u32 level);
-extern s32 wl_cfg80211_check_for_nan_support(struct bcm_cfg80211 *cfg);
extern int wl_cfg80211_deinit_p2p_discovery(struct bcm_cfg80211 * cfg);
extern int wl_cfg80211_set_frameburst(struct bcm_cfg80211 *cfg, bool enable);
extern int wl_cfg80211_determine_p2p_rsdb_mode(struct bcm_cfg80211 *cfg);
+extern uint8 wl_cfg80211_get_bus_state(struct bcm_cfg80211 *cfg);
#ifdef WL_WPS_SYNC
void wl_handle_wps_states(struct net_device *ndev, u8 *dump_data, u16 len, bool direction);
#endif /* WL_WPS_SYNC */
-
-/* Function to flush the FW log buffer content */
-#ifdef DHD_LOG_DUMP
-extern void wl_flush_fw_log_buffer(struct net_device *dev, uint32 logset_mask);
-#else
-#define wl_flush_fw_log_buffer(x, y)
-#endif // endif
+extern int wl_features_set(u8 *array, uint8 len, u32 ftidx);
+extern void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item);
+extern s32 wl_cfg80211_sup_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *event, void *data);
+extern s32 wl_inform_bss(struct bcm_cfg80211 *cfg);
+extern void wl_cfg80211_cancel_scan(struct bcm_cfg80211 *cfg);
+extern s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, bool aborted, bool fw_abort);
+extern s32 cfg80211_to_wl_iftype(uint16 type, uint16 *role, uint16 *mode);
+extern s32 wl_cfg80211_net_attach(struct net_device *primary_ndev);
+extern void wl_print_verinfo(struct bcm_cfg80211 *cfg);
+extern const u8 *wl_find_attribute(const u8 *buf, u16 len, u16 element_id);
+extern int wl_cfg80211_get_concurrency_mode(struct bcm_cfg80211 *cfg);
+#if defined(WL_DISABLE_HE_SOFTAP) || defined(WL_DISABLE_HE_P2P)
+int wl_cfg80211_set_he_mode(struct net_device *dev, struct bcm_cfg80211 *cfg,
+ s32 bssidx, u32 interface_type, bool set);
+#endif /* WL_DISABLE_HE_SOFTAP || WL_DISABLE_HE_P2P */
+extern s32 wl_cfg80211_config_suspend_events(struct net_device *ndev, bool enable);
+#ifdef SUPPORT_AP_SUSPEND
+extern int wl_set_ap_suspend(struct net_device *dev, bool enable, char *ifname);
+#endif /* SUPPORT_AP_SUSPEND */
+#ifdef SUPPORT_SOFTAP_ELNA_BYPASS
+int wl_set_softap_elna_bypass(struct net_device *dev, char *ifname, int enable);
+int wl_get_softap_elna_bypass(struct net_device *dev, char *ifname, void *param);
+#endif /* SUPPORT_SOFTAP_ELNA_BYPASS */
+#ifdef SUPPORT_AP_BWCTRL
+extern int wl_set_ap_bw(struct net_device *dev, u32 bw, char *ifname);
+extern int wl_get_ap_bw(struct net_device *dev, char* command, char *ifname, int total_len);
+#endif /* SUPPORT_AP_BWCTRL */
+bool wl_cfg80211_check_in_progress(struct net_device *dev);
s32 wl_cfg80211_autochannel(struct net_device *dev, char* command, int total_len);
+int wl_cfg80211_check_in4way(struct bcm_cfg80211 *cfg,
+ struct net_device *dev, uint action, enum wl_ext_status status, void *context);
#endif /* _wl_cfg80211_h_ */
/*
* Linux cfg80211 driver - Dongle Host Driver (DHD) related
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfg_btcoex.c 740404 2018-01-11 15:00:13Z $
+ * $Id: wl_cfg_btcoex.c 814554 2019-04-11 23:06:22Z $
*/
#include <net/rtnetlink.h>
#endif // endif
struct btcoex_info {
- struct timer_list timer;
+ timer_list_compat_t timer;
u32 timer_ms;
u32 timer_on;
u32 ts_dhcp_start; /* ms ts ecord time stats */
/* T2 turn off SCO/SCO supperesion is (timeout) */
#define BT_DHCP_FLAG_FORCE_TIME 5500
+#define BTCOEXMODE "BTCOEXMODE"
+#define POWERMODE "POWERMODE"
+
enum wl_cfg80211_btcoex_status {
BT_DHCP_IDLE,
BT_DHCP_START,
} var;
int error;
- memset(&var, 0, sizeof(var));
+ bzero(&var, sizeof(var));
error = bcm_mkiovar(name, (char *)(®), sizeof(reg), (char *)(&var), sizeof(var.buf));
if (error == 0) {
return BCME_BUFTOOSHORT;
{
char reg_addr[8];
- memset(reg_addr, 0, sizeof(reg_addr));
+ bzero(reg_addr, sizeof(reg_addr));
memcpy((char *)®_addr[0], (char *)addr, 4);
memcpy((char *)®_addr[4], (char *)val, 4);
#endif // endif
}
-static void wl_cfg80211_bt_timerfunc(
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- struct timer_list *t
-#else
- ulong data
-#endif
-)
+static void wl_cfg80211_bt_timerfunc(ulong data)
{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- struct btcoex_info *bt_local = from_timer(bt_local, t, timer);
-#else
struct btcoex_info *bt_local = (struct btcoex_info *)data;
-#endif
WL_TRACE(("Enter\n"));
bt_local->timer_on = 0;
schedule_work(&bt_local->work);
{
struct btcoex_info *btcx_inf;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
btcx_inf = container_of(work, struct btcoex_info, work);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
if (btcx_inf->timer_on) {
btcx_inf->timer_on = 0;
btco_inf->ts_dhcp_ok = 0;
/* Set up timer for BT */
btco_inf->timer_ms = 10;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- timer_setup(&btco_inf->timer, wl_cfg80211_bt_timerfunc, 0);
-#else
- init_timer(&btco_inf->timer);
- btco_inf->timer.data = (ulong)btco_inf;
- btco_inf->timer.function = wl_cfg80211_bt_timerfunc;
-#endif
+ init_timer_compat(&btco_inf->timer, wl_cfg80211_bt_timerfunc, btco_inf);
btco_inf->dev = ndev;
struct btcoex_info *btco_inf = btcoex_info_loc;
char powermode_val = 0;
+ uint8 cmd_len = 0;
char buf_reg66va_dhcp_on[8] = { 66, 00, 00, 00, 0x10, 0x27, 0x00, 0x00 };
char buf_reg41va_dhcp_on[8] = { 41, 00, 00, 00, 0x33, 0x00, 0x00, 0x00 };
char buf_reg68va_dhcp_on[8] = { 68, 00, 00, 00, 0x90, 0x01, 0x00, 0x00 };
char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
/* Figure out powermode 1 or o command */
- strncpy((char *)&powermode_val, command + strlen("BTCOEXMODE") +1, 1);
+ cmd_len = sizeof(BTCOEXMODE);
+ powermode_val = command[cmd_len];
- if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) {
+ if (powermode_val == '1') {
WL_TRACE_HW4(("DHCP session starts\n"));
#ifdef PKT_FILTER_SUPPORT
btco_inf->bt_state = BT_DHCP_START;
btco_inf->timer_on = 1;
- mod_timer(&btco_inf->timer, btco_inf->timer.expires);
+ mod_timer(&btco_inf->timer,
+ timer_expires(&btco_inf->timer));
WL_TRACE(("enable BT DHCP Timer\n"));
}
}
WL_ERR(("was called w/o DHCP OFF. Continue\n"));
}
}
- else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) {
+ else if (powermode_val == '2') {
#ifdef PKT_FILTER_SUPPORT
dhd->dhcp_in_progress = 0;
WL_ERR(("Unkwown yet power setting, ignored\n"));
}
- snprintf(command, 3, "OK");
-
- return (strlen("OK"));
+ return (snprintf(command, sizeof("OK"), "OK") + 1);
}
/*
* Neighbor Awareness Networking
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
+ *
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfgnan.c 769459 2018-06-26 11:37:53Z $
+ * $Id: wl_cfgnan.c 825970 2019-06-18 05:28:31Z $
*/
#ifdef WL_NAN
#include <bcmwifi_channels.h>
#include <nan.h>
#include <bcmiov.h>
+#include <net/rtnetlink.h>
#include <wl_cfg80211.h>
+#include <wl_cfgscan.h>
#include <wl_android.h>
#include <wl_cfgnan.h>
#include <dngl_stats.h>
#include <dhd.h>
+#ifdef RTT_SUPPORT
+#include <dhd_rtt.h>
+#endif /* RTT_SUPPORT */
#include <wl_cfgvendor.h>
#include <bcmbloom.h>
#include <wl_cfgp2p.h>
+#ifdef RTT_SUPPORT
+#include <dhd_rtt.h>
+#endif /* RTT_SUPPORT */
+#include <bcmstdlib_s.h>
#define NAN_RANGE_REQ_EVNT 1
#define NAN_RAND_MAC_RETRIES 10
#define NAN_SCAN_DWELL_TIME_DELTA_MS 10
#ifdef WL_NAN_DISC_CACHE
-static int wl_cfgnan_cache_disc_result(struct bcm_cfg80211 *cfg, void * data);
+/* Disc Cache Parameters update Flags */
+#define NAN_DISC_CACHE_PARAM_SDE_CONTROL 0x0001
+
+static int wl_cfgnan_cache_disc_result(struct bcm_cfg80211 *cfg, void * data,
+ u16 *disc_cache_update_flags);
static int wl_cfgnan_remove_disc_result(struct bcm_cfg80211 * cfg, uint8 local_subid);
static nan_disc_result_cache * wl_cfgnan_get_disc_result(struct bcm_cfg80211 *cfg,
uint8 remote_pubid, struct ether_addr *peer);
#endif /* WL_NAN_DISC_CACHE */
-static void wl_cfgnan_update_dp_mask(struct bcm_cfg80211 *cfg, bool enable, u8 nan_dp_id);
-
+static int wl_cfgnan_clear_disc_cache(struct bcm_cfg80211 *cfg, wl_nan_instance_id_t sub_id);
static int wl_cfgnan_set_if_addr(struct bcm_cfg80211 *cfg);
static int wl_cfgnan_get_capability(struct net_device *ndev,
struct bcm_cfg80211 *cfg, nan_hal_capabilities_t *capabilities);
+static int32 wl_cfgnan_notify_disc_with_ranging(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t *rng_inst, nan_event_data_t *nan_event_data, uint32 distance);
+
+static void wl_cfgnan_disc_result_on_geofence_cancel(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t *rng_inst);
+
+static void wl_cfgnan_clear_nan_event_data(struct bcm_cfg80211 *cfg,
+ nan_event_data_t *nan_event_data);
+
+void wl_cfgnan_data_remove_peer(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer_addr);
+
+static void wl_cfgnan_send_stop_event(struct bcm_cfg80211 *cfg);
+
+static void wl_cfgnan_terminate_ranging_session(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t *ranging_inst);
+
+#ifdef RTT_SUPPORT
+static s32 wl_cfgnan_clear_peer_ranging(struct bcm_cfg80211 * cfg,
+ struct ether_addr * peer, int reason);
+#endif /* RTT_SUPPORT */
+
static const char *nan_role_to_str(u8 role)
{
switch (role) {
struct bcm_cfg80211 *cfg, bcm_iov_batch_buf_t *nan_buf,
uint16 nan_buf_size, uint32 *status, uint8 *resp_buf,
uint16 resp_buf_len);
-static s32
-wl_cfgnan_send_stop_event(nan_event_data_t *nan_event_data, struct bcm_cfg80211 *cfg);
int
wl_cfgnan_generate_inst_id(struct bcm_cfg80211 *cfg, uint8 *p_inst_id)
{
/* attribute length */
WL_TRACE(("> attr len: 0x%x\n", nan_svc_desc_ext_attr->len));
-
- tlv_data->sde_control_flag = nan_svc_desc_ext_attr->control;
+ if (nan_svc_desc_ext_attr->instance_id == tlv_data->pub_id) {
+ tlv_data->sde_control_flag = nan_svc_desc_ext_attr->control;
+ }
offset = sizeof(*nan_svc_desc_ext_attr);
if (offset > len) {
WL_ERR(("Invalid event buffer len\n"));
tlv_data->sde_svc_info.dlen > NAN_MAX_SERVICE_SPECIFIC_INFO_LEN) {
/* must be able to handle null msg which is not error */
tlv_data->sde_svc_info.dlen = 0;
- WL_ERR(("data length is invalid\n"));
+ WL_ERR(("sde data length is invalid\n"));
ret = BCME_BADLEN;
goto fail;
}
}
p_attr += offset;
len -= offset;
- memcpy(tlv_data->sde_svc_info.data, p_attr, tlv_data->sde_svc_info.dlen);
+ ret = memcpy_s(tlv_data->sde_svc_info.data, tlv_data->sde_svc_info.dlen,
+ p_attr, tlv_data->sde_svc_info.dlen);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy sde_svc_info\n"));
+ goto fail;
+ }
} else {
/* must be able to handle null msg which is not error */
tlv_data->sde_svc_info.dlen = 0;
WL_TRACE(("> attr len: 0x%x\n", nan_svc_desc_attr->len));
/* service ID */
- memcpy(tlv_data->svc_name, nan_svc_desc_attr->svc_hash, NAN_SVC_HASH_LEN);
+ ret = memcpy_s(tlv_data->svc_name, sizeof(tlv_data->svc_name),
+ nan_svc_desc_attr->svc_hash, NAN_SVC_HASH_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc_hash_name:\n"));
+ return ret;
+ }
WL_TRACE(("> svc_hash_name: " MACDBG "\n", MAC2STRDBG(tlv_data->svc_name)));
/* local instance ID */
WL_TRACE(("> svc_control: binding bitmap present\n"));
/* Copy binding bitmap */
- memcpy(&bitmap, p_attr, NAN_BINDING_BITMAP_LEN);
+ ret = memcpy_s(&bitmap, sizeof(bitmap),
+ p_attr, NAN_BINDING_BITMAP_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy bit map\n"));
+ return ret;
+ }
WL_TRACE(("> sc binding bitmap: 0x%04x\n", bitmap));
if (NAN_BINDING_BITMAP_LEN > len) {
ret = -ENOMEM;
goto fail;
}
- memcpy(tlv_data->tx_match_filter.data, p_attr,
- tlv_data->tx_match_filter.dlen);
-
+ ret = memcpy_s(tlv_data->tx_match_filter.data, tlv_data->tx_match_filter.dlen,
+ p_attr, tlv_data->tx_match_filter.dlen);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy tx match filter data\n"));
+ goto fail;
+ }
/* advance read pointer */
offset = tlv_data->tx_match_filter.dlen;
if (offset > len) {
goto fail;
}
- memcpy(tlv_data->rx_match_filter.data, p_attr,
- tlv_data->rx_match_filter.dlen);
+ ret = memcpy_s(tlv_data->rx_match_filter.data, tlv_data->rx_match_filter.dlen,
+ p_attr, tlv_data->rx_match_filter.dlen);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy rx match filter data\n"));
+ goto fail;
+ }
/* advance read pointer */
offset = tlv_data->rx_match_filter.dlen;
tlv_data->svc_info.dlen > NAN_MAX_SERVICE_SPECIFIC_INFO_LEN) {
/* must be able to handle null msg which is not error */
tlv_data->svc_info.dlen = 0;
- WL_ERR(("data length is invalid\n"));
+ WL_ERR(("sde data length is invalid\n"));
ret = BCME_BADLEN;
goto fail;
}
ret = BCME_NOMEM;
goto fail;
}
- memcpy(tlv_data->svc_info.data, p_attr, tlv_data->svc_info.dlen);
+ ret = memcpy_s(tlv_data->svc_info.data, tlv_data->svc_info.dlen,
+ p_attr, tlv_data->svc_info.dlen);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc info\n"));
+ goto fail;
+ }
/* advance read pointer */
offset = tlv_data->svc_info.dlen;
tlv_data->pub_id = (wl_nan_instance_id_t)ev_disc->pub_id;
tlv_data->sub_id = (wl_nan_instance_id_t)ev_disc->sub_id;
tlv_data->publish_rssi = ev_disc->publish_rssi;
- memcpy(&tlv_data->remote_nmi, &ev_disc->pub_mac, ETHER_ADDR_LEN);
+ ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN,
+ &ev_disc->pub_mac, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy remote nmi\n"));
+ goto fail;
+ }
WL_TRACE(("publish id: %d\n", ev_disc->pub_id));
WL_TRACE(("subscribe d: %d\n", ev_disc->sub_id));
if ((uint8)*p_attr == NAN_ATTR_SVC_DESCRIPTOR) {
WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
+ if (unlikely(ret)) {
+ WL_ERR(("wl_cfgnan_parse_sda_data failed,"
+ "error = %d \n", ret));
+ goto fail;
+ }
}
if ((uint8)*p_attr == NAN_ATTR_SVC_DESC_EXTENSION) {
WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
ret = wl_cfgnan_parse_sdea_data(osh, p_attr, len, tlv_data);
+ if (unlikely(ret)) {
+ WL_ERR(("wl_cfgnan_parse_sdea_data failed,"
+ "error = %d \n", ret));
+ goto fail;
+ }
}
offset = (sizeof(*p_attr) +
sizeof(ev_disc->attr_list_len) +
tlv_data->local_inst_id = (wl_nan_instance_id_t)ev_fup->local_id;
tlv_data->requestor_id = (wl_nan_instance_id_t)ev_fup->remote_id;
tlv_data->fup_rssi = ev_fup->fup_rssi;
- memcpy(&tlv_data->remote_nmi, &ev_fup->remote_addr, ETHER_ADDR_LEN);
+ ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN,
+ &ev_fup->remote_addr, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy remote nmi\n"));
+ goto fail;
+ }
WL_TRACE(("local id: %d\n", ev_fup->local_id));
WL_TRACE(("remote id: %d\n", ev_fup->remote_id));
tlv_data->pub_id = (wl_nan_instance_id_t)ev_replied->pub_id;
tlv_data->sub_id = (wl_nan_instance_id_t)ev_replied->sub_id;
tlv_data->sub_rssi = ev_replied->sub_rssi;
- memcpy(&tlv_data->remote_nmi, &ev_replied->sub_mac, ETHER_ADDR_LEN);
+ ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN,
+ &ev_replied->sub_mac, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy remote nmi\n"));
+ goto fail;
+ }
WL_TRACE(("publish id: %d\n", ev_replied->pub_id));
WL_TRACE(("subscribe d: %d\n", ev_replied->sub_id));
p_attr += offset;
len -= offset;
ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
+ if (unlikely(ret)) {
+ WL_ERR(("wl_cfgnan_parse_sdea_data failed,"
+ "error = %d \n", ret));
+ }
}
fail:
goto fail;
}
tlv_data->svc_info.dlen = len;
- memcpy(tlv_data->svc_info.data, data, tlv_data->svc_info.dlen);
+ ret = memcpy_s(tlv_data->svc_info.data, tlv_data->svc_info.dlen,
+ data, tlv_data->svc_info.dlen);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc info data\n"));
+ goto fail;
+ }
break;
}
default:
s32 ret = BCME_OK;
uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
uint16 subcmd_len;
- uint32 event_mask = 0;
uint32 status;
bcm_iov_batch_subcmd_t *sub_cmd = NULL;
bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
+ uint8 event_mask[WL_NAN_EVMASK_EXTN_LEN];
+ wl_nan_evmask_extn_t *evmask;
+ uint16 evmask_cmd_len;
uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
NAN_DBG_ENTER();
+ /* same src and dest len here */
+ (void)memset_s(event_mask, WL_NAN_EVMASK_EXTN_VER, 0, WL_NAN_EVMASK_EXTN_VER);
+ evmask_cmd_len = OFFSETOF(wl_nan_evmask_extn_t, evmask) +
+ WL_NAN_EVMASK_EXTN_LEN;
ret = wl_add_remove_eventmsg(ndev, WLC_E_NAN, true);
if (unlikely(ret)) {
WL_ERR((" nan event enable failed, error = %d \n", ret));
sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);
ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
- sizeof(event_mask), &subcmd_len);
+ evmask_cmd_len, &subcmd_len);
if (unlikely(ret)) {
WL_ERR(("nan_sub_cmd check failed\n"));
goto fail;
}
sub_cmd->id = htod16(WL_NAN_CMD_CFG_EVENT_MASK);
- sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(event_mask);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + evmask_cmd_len;
sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ evmask = (wl_nan_evmask_extn_t *)sub_cmd->data;
+ evmask->ver = WL_NAN_EVMASK_EXTN_VER;
+ evmask->len = WL_NAN_EVMASK_EXTN_LEN;
nan_buf_size -= subcmd_len;
nan_buf->count = 1;
if (disable_events) {
WL_DBG(("Disabling all nan events..except stop event\n"));
- event_mask = NAN_EVENT_BIT(WL_NAN_EVENT_STOP);
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_STOP));
} else {
/*
* Android framework event mask configuration.
*/
- if (event_ind_flag) {
- nan_buf->is_set = false;
- memset(resp_buf, 0, sizeof(resp_buf));
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
- (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret) || unlikely(status)) {
- WL_ERR(("get nan event mask failed ret %d status %d \n",
- ret, status));
- goto fail;
- }
- sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
+ nan_buf->is_set = false;
+ memset(resp_buf, 0, sizeof(resp_buf));
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("get nan event mask failed ret %d status %d \n",
+ ret, status));
+ goto fail;
+ }
+ sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
+ evmask = (wl_nan_evmask_extn_t *)sub_cmd_resp->data;
- /* check the response buff */
- event_mask = (*(uint8*)&sub_cmd_resp->data[0]);
+ /* check the response buff */
+ /* same src and dest len here */
+ (void)memcpy_s(&event_mask, WL_NAN_EVMASK_EXTN_LEN,
+ (uint8*)&evmask->evmask, WL_NAN_EVMASK_EXTN_LEN);
+ if (event_ind_flag) {
if (CHECK_BIT(event_ind_flag, WL_NAN_EVENT_DIC_MAC_ADDR_BIT)) {
WL_DBG(("Need to add disc mac addr change event\n"));
}
/* BIT2 - Disable nan cluster join indication (OTA). */
if (CHECK_BIT(event_ind_flag, WL_NAN_EVENT_JOIN_EVENT)) {
- event_mask &= ~NAN_EVENT_BIT(WL_NAN_EVENT_MERGE);
+ clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_MERGE));
}
- } else {
- /* enable only selected nan events to avoid unnecessary host wake up */
- event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_START);
- event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_MERGE);
}
- event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_DISCOVERY_RESULT);
- event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_RECEIVE);
- event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_TERMINATED);
- event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_STOP);
- event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_TXS);
- event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_PEER_DATAPATH_IND);
- event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_DATAPATH_ESTB);
- event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_DATAPATH_END);
- event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_RNG_RPT_IND);
- event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_RNG_REQ_IND);
- event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_RNG_TERM_IND);
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DISCOVERY_RESULT));
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RECEIVE));
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_TERMINATED));
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_STOP));
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_TXS));
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_PEER_DATAPATH_IND));
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DATAPATH_ESTB));
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DATAPATH_END));
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_REQ_IND));
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_TERM_IND));
+ setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DISC_CACHE_TIMEOUT));
+ /* Disable below events by default */
+ clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_PEER_SCHED_UPD_NOTIF));
+ clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_RPT_IND));
+ clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DW_END));
}
nan_buf->is_set = true;
- memcpy(sub_cmd->data, &event_mask, sizeof(event_mask));
+ evmask = (wl_nan_evmask_extn_t *)sub_cmd->data;
+ /* same src and dest len here */
+ (void)memcpy_s((uint8*)&evmask->evmask, WL_NAN_EVMASK_EXTN_LEN,
+ &event_mask, WL_NAN_EVMASK_EXTN_LEN);
+
nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_buf_size);
ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
/* populate wl_avail_type */
avail->flags = avail_type;
if (avail_type == WL_AVAIL_RANGING) {
- memcpy(&avail->addr, &cmd_data->peer_nmi, ETHER_ADDR_LEN);
+ ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN,
+ &cmd_data->peer_nmi, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy peer nmi\n"));
+ goto fail;
+ }
}
sub_cmd->len = sizeof(sub_cmd->u.options) + subcmd_len;
nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_iov_data->nan_iov_len);
WL_TRACE(("Read wl nan avail status\n"));
- memset(resp_buf, 0, sizeof(resp_buf));
+
+ memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
if (unlikely(ret)) {
/* account for partially filled most significant byte */
entry->bitmap_len = ((WL_NAN_EVENT_CLEAR_BIT) + NBBY - 1) / NBBY;
if (avail_type == WL_AVAIL_NDC) {
- memcpy(&avail->addr, ndc_id, ETHER_ADDR_LEN);
+ ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN,
+ ndc_id, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy ndc id\n"));
+ goto fail;
+ }
} else if (avail_type == WL_AVAIL_RANGING) {
- memcpy(&avail->addr, &cmd_data->peer_nmi, ETHER_ADDR_LEN);
+ ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN,
+ &cmd_data->peer_nmi, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy peer nmi\n"));
+ goto fail;
+ }
}
/* account for partially filled most significant byte */
nan_iov_end = nan_iov_data->nan_iov_len;
nan_buf_size = (nan_iov_start - nan_iov_end);
- memset(resp_buf, 0, sizeof(resp_buf));
+ memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
if (unlikely(ret) || unlikely(*status)) {
} else {
cfg_ctrl &= ~flag;
}
- memcpy(sub_cmd->data, &cfg_ctrl, sizeof(cfg_ctrl));
+ ret = memcpy_s(sub_cmd->data, sizeof(cfg_ctrl),
+ &cfg_ctrl, sizeof(cfg_ctrl));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy cfg ctrl\n"));
+ goto fail;
+ }
+
nan_buf->is_set = true;
ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
}
/* first 4 bytes consists status */
- memcpy(&status, data, sizeof(uint32));
+ if (memcpy_s(&status, sizeof(status),
+ data, sizeof(uint32)) != BCME_OK) {
+ WL_ERR(("Failed to copy status\n"));
+ goto exit;
+ }
+
status = dtoh32(status);
/* If status is non zero */
sub_cmd->id = htod16(WL_NAN_CMD_CFG_IF_ADDR);
sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*if_addr);
sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- memcpy(sub_cmd->data, (uint8 *)if_addr,
- sizeof(*if_addr));
+ ret = memcpy_s(sub_cmd->data, sizeof(*if_addr),
+ (uint8 *)if_addr, sizeof(*if_addr));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy if addr\n"));
+ goto fail;
+ }
*nan_buf_size -= subcmd_len;
} else {
return ret;
}
+/* wl_cfgnan_get_ver() - fetch the NAN firmware interface version.
+ *
+ * Builds a single-subcommand batched iovar (WL_NAN_CMD_GLB_NAN_VER),
+ * issues it as a GET via wl_cfgnan_execute_ioctl(), and caches the
+ * returned wl_nan_ver_t value in cfg->nancfg.version.
+ *
+ * Returns BCME_OK on success, BCME_NOMEM if the iovar buffer cannot be
+ * allocated, or the error/status propagated from the ioctl path.
+ */
+static int
+wl_cfgnan_get_ver(struct net_device *ndev, struct bcm_cfg80211 *cfg)
+{
+	bcm_iov_batch_buf_t *nan_buf = NULL;
+	s32 ret = BCME_OK;
+	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+	wl_nan_ver_t *nan_ver = NULL;
+	uint16 subcmd_len;
+	uint32 status;
+	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+	bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
+	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+
+	NAN_DBG_ENTER();
+	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
+	if (!nan_buf) {
+		WL_ERR(("%s: memory allocation failed\n", __func__));
+		ret = BCME_NOMEM;
+		goto fail;
+	}
+
+	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+	nan_buf->count = 0;
+	/* account for the batch header before placing the first subcommand */
+	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+	sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);
+
+	/* verify the remaining buffer can hold the subcommand + payload */
+	ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
+		sizeof(*nan_ver), &subcmd_len);
+	if (unlikely(ret)) {
+		WL_ERR(("nan_sub_cmd check failed\n"));
+		goto fail;
+	}
+
+	nan_ver = (wl_nan_ver_t *)sub_cmd->data;
+	sub_cmd->id = htod16(WL_NAN_CMD_GLB_NAN_VER);
+	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*nan_ver);
+	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+	nan_buf_size -= subcmd_len;
+	nan_buf->count = 1;
+
+	/* GET request: firmware writes the version into resp_buf */
+	nan_buf->is_set = false;
+	bzero(resp_buf, sizeof(resp_buf));
+	/* convert remaining free space back into the consumed length */
+	nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;
+
+	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
+		(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+	if (unlikely(ret) || unlikely(status)) {
+		WL_ERR(("get nan ver failed ret %d status %d \n",
+			ret, status));
+		goto fail;
+	}
+
+	sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
+	nan_ver = ((wl_nan_ver_t *)&sub_cmd_resp->data[0]);
+	/* NOTE(review): nan_ver points into resp_buf and can never be NULL
+	 * here; this check is defensive only and the BCME_NOTFOUND branch
+	 * is effectively unreachable.
+	 */
+	if (!nan_ver) {
+		ret = BCME_NOTFOUND;
+		WL_ERR(("nan_ver not found: err = %d\n", ret));
+		goto fail;
+	}
+	cfg->nancfg.version = *nan_ver;
+	WL_INFORM_MEM(("Nan Version is %d\n", cfg->nancfg.version));
+
+fail:
+	if (nan_buf) {
+		/* free with the original allocation size, not the mutated
+		 * nan_buf_size local
+		 */
+		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
+	}
+	NAN_DBG_EXIT();
+	return ret;
+
+}
+
static int
wl_cfgnan_set_if_addr(struct bcm_cfg80211 *cfg)
{
if (wl_get_vif_macaddr(cfg, WL_IF_TYPE_NAN_NMI,
if_addr.octet) != BCME_OK) {
ret = -EINVAL;
+ WL_ERR(("Failed to get mac addr for NMI\n"));
goto fail;
}
}
nan_buf->count++;
nan_buf->is_set = true;
nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;
- memset(resp_buf, 0, sizeof(resp_buf));
+ memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg,
nan_buf, nan_buf_size, &status,
(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
ret, status));
goto fail;
}
- memcpy(cfg->nan_nmi_mac, if_addr.octet, ETH_ALEN);
+ ret = memcpy_s(cfg->nan_nmi_mac, ETH_ALEN,
+ if_addr.octet, ETH_ALEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy nmi addr\n"));
+ goto fail;
+ }
return ret;
fail:
if (!rand_mac) {
sub_cmd->id = htod16(WL_NAN_CMD_CFG_NAN_INIT);
sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(uint8);
sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- memcpy(sub_cmd->data, (uint8*)&val, sizeof(uint8));
+ ret = memcpy_s(sub_cmd->data, sizeof(uint8),
+ (uint8*)&val, sizeof(uint8));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy init value\n"));
+ goto fail;
+ }
*nan_buf_size -= subcmd_len;
} else {
sub_cmd->id = htod16(WL_NAN_CMD_CFG_NAN_ENAB);
sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(uint8);
sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- memcpy(sub_cmd->data, (uint8*)&val, sizeof(uint8));
+ ret = memcpy_s(sub_cmd->data, sizeof(uint8),
+ (uint8*)&val, sizeof(uint8));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy enab value\n"));
+ return ret;
+ }
nan_iov_data->nan_iov_len -= subcmd_len;
nan_iov_data->nan_iov_buf += subcmd_len;
chanspec_arg = CH20MHZ_CHSPEC(chan);
chanspec_arg = wl_chspec_host_to_driver(chanspec_arg);
- memset(ioctl_buf, 0, WLC_IOCTL_SMLEN);
- ret = wldev_iovar_getbuf(ndev, "per_chan_info", (void *)&chanspec_arg, sizeof(chanspec_arg),
+ memset_s(ioctl_buf, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
+ ret = wldev_iovar_getbuf(ndev, "per_chan_info",
+ (void *)&chanspec_arg, sizeof(chanspec_arg),
ioctl_buf, WLC_IOCTL_SMLEN, NULL);
if (ret != BCME_OK) {
WL_ERR(("Chaninfo for channel = %d, error %d\n", chan, ret));
nan_iov_end = nan_iov_data->nan_iov_len;
nan_buf_size = (nan_iov_start - nan_iov_end);
- memset(resp_buf, 0, sizeof(resp_buf));
+ memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
if (unlikely(ret) || unlikely(status)) {
sub_cmd->id = htod16(WL_NAN_CMD_CFG_CID);
sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(cmd_data->clus_id);
sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- memcpy(sub_cmd->data, (uint8 *)&cmd_data->clus_id,
+ ret = memcpy_s(sub_cmd->data, sizeof(cmd_data->clus_id),
+ (uint8 *)&cmd_data->clus_id,
sizeof(cmd_data->clus_id));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy clus id\n"));
+ return ret;
+ }
nan_iov_data->nan_iov_len -= subcmd_len;
nan_iov_data->nan_iov_buf += subcmd_len;
sub_cmd->id = htod16(WL_NAN_CMD_CFG_OUI);
sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(cmd_data->nan_oui);
sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- memcpy(sub_cmd->data, (uint32 *)&cmd_data->nan_oui,
+ ret = memcpy_s(sub_cmd->data, sizeof(cmd_data->nan_oui),
+ (uint32 *)&cmd_data->nan_oui,
sizeof(cmd_data->nan_oui));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy nan oui\n"));
+ return ret;
+ }
nan_iov_data->nan_iov_len -= subcmd_len;
nan_iov_data->nan_iov_buf += subcmd_len;
} else {
/* Set 5G awake dw value to fw default value 1 */
awake_dws->dw_interval_5g = NAN_SYNC_DEF_AWAKE_DW;
+ ret = wl_cfgnan_config_control_flag(ndev, cfg,
+ WL_NAN_CTRL_DISC_BEACON_TX_5G |
+ WL_NAN_CTRL_SYNC_BEACON_TX_5G,
+ &(cmd_data->status), TRUE);
+ if (unlikely(ret) || unlikely(cmd_data->status)) {
+ WL_ERR((" nan control set config handler, ret = %d"
+ " status = %d \n", ret, cmd_data->status));
+ goto fail;
+ }
}
}
uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
int i;
s32 timeout = 0;
- bool mutex_locked = false;
+ nan_hal_capabilities_t capabilities;
NAN_DBG_ENTER();
+
+ /* Protect discovery creation. Ensure proper mutex precedence.
+ * If if_sync & nan_mutex comes together in same context, nan_mutex
+ * should follow if_sync.
+ */
+ mutex_lock(&cfg->if_sync);
NAN_MUTEX_LOCK();
- mutex_locked = true;
- if (!wl_cfg80211_check_for_nan_support(cfg)) {
- ret = BCME_UNSUPPORTED;
+ if (!dhdp->up) {
+ WL_ERR(("bus is already down, hence blocking nan start\n"));
+ ret = BCME_ERROR;
+ NAN_MUTEX_UNLOCK();
+ mutex_unlock(&cfg->if_sync);
goto fail;
}
- /* disable P2P */
- ret = wl_cfg80211_deinit_p2p_discovery(cfg);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to disable p2p_disc during nan_enab"));
+#ifdef WL_IFACE_MGMT
+ if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN_NMI)) != BCME_OK) {
+ WL_ERR(("Conflicting iface is present, cant support nan\n"));
+ NAN_MUTEX_UNLOCK();
+ mutex_unlock(&cfg->if_sync);
+ goto fail;
}
- WL_ERR(("Initializing NAN\n"));
+#endif /* WL_IFACE_MGMT */
+
+ WL_INFORM_MEM(("Initializing NAN\n"));
ret = wl_cfgnan_init(cfg);
if (ret != BCME_OK) {
WL_ERR(("failed to initialize NAN[%d]\n", ret));
+ NAN_MUTEX_UNLOCK();
+ mutex_unlock(&cfg->if_sync);
+ goto fail;
+ }
+
+ ret = wl_cfgnan_get_ver(ndev, cfg);
+ if (ret != BCME_OK) {
+ WL_ERR(("failed to Nan IOV version[%d]\n", ret));
+ NAN_MUTEX_UNLOCK();
+ mutex_unlock(&cfg->if_sync);
goto fail;
}
ret = wl_cfgnan_set_if_addr(cfg);
if (ret != BCME_OK) {
WL_ERR(("Failed to set nmi address \n"));
+ NAN_MUTEX_UNLOCK();
+ mutex_unlock(&cfg->if_sync);
goto fail;
}
+ cfg->nancfg.nan_event_recvd = false;
+ NAN_MUTEX_UNLOCK();
+ mutex_unlock(&cfg->if_sync);
for (i = 0; i < NAN_MAX_NDI; i++) {
/* Create NDI using the information provided by user space */
/* enable events */
ret = wl_cfgnan_config_eventmask(ndev, cfg, cmd_data->disc_ind_cfg, false);
if (unlikely(ret)) {
+ WL_ERR(("Failed to config disc ind flag in event_mask, ret = %d\n", ret));
goto fail;
}
nan_buf_size -= nan_iov_data->nan_iov_len;
memset(resp_buf, 0, sizeof(resp_buf));
- mutex_locked = false;
/* Reset conditon variable */
- cfg->nancfg.nan_event_recvd = false;
- /* Releasing lock to allow event processing */
- NAN_MUTEX_UNLOCK();
ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
&(cmd_data->status), (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
if (unlikely(ret) || unlikely(cmd_data->status)) {
ret, cmd_data->status));
goto fail;
}
+
timeout = wait_event_timeout(cfg->nancfg.nan_event_wait,
cfg->nancfg.nan_event_recvd, msecs_to_jiffies(NAN_START_STOP_TIMEOUT));
if (!timeout) {
ret, cmd_data->status));
goto fail;
}
+
+ /* By default set NAN proprietary rates */
+ ret = wl_cfgnan_config_control_flag(ndev, cfg, WL_NAN_CTRL_PROP_RATE,
+ &(cmd_data->status), true);
+ if (unlikely(ret) || unlikely(cmd_data->status)) {
+ WL_ERR((" nan proprietary rate set failed, ret = %d status = %d \n",
+ ret, cmd_data->status));
+ goto fail;
+ }
+
+ /* malloc for ndp peer list */
+ if ((ret = wl_cfgnan_get_capablities_handler(ndev, cfg, &capabilities))
+ == BCME_OK) {
+ cfg->nancfg.max_ndp_count = capabilities.max_ndp_sessions;
+ cfg->nancfg.nan_ndp_peer_info = MALLOCZ(cfg->osh,
+ cfg->nancfg.max_ndp_count * sizeof(nan_ndp_peer_t));
+ if (!cfg->nancfg.nan_ndp_peer_info) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ } else {
+ WL_ERR(("wl_cfgnan_get_capablities_handler failed, ret = %d\n", ret));
+ goto fail;
+ }
+
+#ifdef RTT_SUPPORT
+ /* Initialize geofence cfg */
+ dhd_rtt_initialize_geofence_cfg(cfg->pub);
+#endif /* RTT_SUPPORT */
+
+ cfg->nan_enable = true;
WL_INFORM_MEM(("[NAN] Enable successfull \n"));
+ /* disable TDLS on NAN NMI IF create */
+ wl_cfg80211_tdls_config(cfg, TDLS_STATE_NMI_CREATE, false);
+
fail:
/* reset conditon variable */
cfg->nancfg.nan_event_recvd = false;
if (unlikely(ret) || unlikely(cmd_data->status)) {
- for (i = 0; i < NAN_MAX_NDI; i++) {
- if (cfg->nancfg.ndi[i].in_use && cfg->nancfg.ndi[i].created) {
- WL_INFORM_MEM(("Deleting NAN NDI IDX:%d\n", i));
- ret = wl_cfgnan_data_path_iface_create_delete_handler(ndev, cfg,
- (char*)cfg->nancfg.ndi[i].ifname,
- NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE, dhdp->up);
- if (ret) {
- WL_ERR(("failed to delete ndp iface [%d]\n", ret));
- }
- }
+ cfg->nan_enable = false;
+ mutex_lock(&cfg->if_sync);
+ ret = wl_cfg80211_delete_iface(cfg, WL_IF_TYPE_NAN);
+ if (ret != BCME_OK) {
+ WL_ERR(("failed to delete NDI[%d]\n", ret));
}
+ mutex_unlock(&cfg->if_sync);
}
if (nan_buf) {
MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
}
- if (mutex_locked)
- NAN_MUTEX_UNLOCK();
-
NAN_DBG_EXIT();
return ret;
}
int
-wl_cfgnan_disable(struct bcm_cfg80211 *cfg, nan_stop_reason_code_t reason)
+wl_cfgnan_disable(struct bcm_cfg80211 *cfg)
{
s32 ret = BCME_OK;
dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- int i = 0;
NAN_DBG_ENTER();
- if (cfg->nan_enable) {
+ if ((cfg->nan_init_state == TRUE) &&
+ (cfg->nan_enable == TRUE)) {
struct net_device *ndev;
ndev = bcmcfg_to_prmry_ndev(cfg);
- cfg->nancfg.disable_reason = reason;
- ret = wl_cfgnan_stop_handler(ndev, cfg, false);
+
+ /* We have to remove NDIs so that P2P/Softap can work */
+ ret = wl_cfg80211_delete_iface(cfg, WL_IF_TYPE_NAN);
if (ret != BCME_OK) {
- WL_ERR(("failed to stop nan, error[%d]\n", ret));
+ WL_ERR(("failed to delete NDI[%d]\n", ret));
}
- /* We have to remove NDIs so that P2P/Softap can work */
- for (i = 0; i < NAN_MAX_NDI; i++) {
- if (cfg->nancfg.ndi[i].in_use && cfg->nancfg.ndi[i].created) {
- WL_INFORM_MEM(("Deleting NAN NDI IDX:%d\n", i));
- ret = wl_cfgnan_data_path_iface_create_delete_handler(ndev, cfg,
- (char*)cfg->nancfg.ndi[i].ifname,
- NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE, dhdp->up);
- if (ret) {
- WL_ERR(("failed to delete ndp iface [%d]\n", ret));
- }
- cfg->nancfg.ndi[i].created = false;
- }
+
+ WL_INFORM_MEM(("Nan Disable Req, reason = %d\n", cfg->nancfg.disable_reason));
+ ret = wl_cfgnan_stop_handler(ndev, cfg);
+ if (ret == -ENODEV) {
+ WL_ERR(("Bus is down, no need to proceed\n"));
+ } else if (ret != BCME_OK) {
+ WL_ERR(("failed to stop nan, error[%d]\n", ret));
}
ret = wl_cfgnan_deinit(cfg, dhdp->up);
if (ret != BCME_OK) {
WL_ERR(("failed to de-initialize NAN[%d]\n", ret));
+ if (!dhd_query_bus_erros(dhdp)) {
+ ASSERT(0);
+ }
}
+ wl_cfgnan_disable_cleanup(cfg);
}
NAN_DBG_EXIT();
return ret;
}
-static s32
-wl_cfgnan_send_stop_event(nan_event_data_t *nan_event_data, struct bcm_cfg80211 *cfg)
+static void
+wl_cfgnan_send_stop_event(struct bcm_cfg80211 *cfg)
{
s32 ret = BCME_OK;
+ nan_event_data_t *nan_event_data = NULL;
+
NAN_DBG_ENTER();
- memset(nan_event_data, 0, NAN_IOCTL_BUF_SIZE);
- nan_event_data->status = NAN_STATUS_SUCCESS;
- memcpy(nan_event_data->nan_reason, "NAN_STATUS_SUCCESS",
- strlen("NAN_STATUS_SUCCESS"));
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
- ret = wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
- GOOGLE_NAN_EVENT_DISABLED, nan_event_data);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to send event to nan hal, (%d)\n",
- GOOGLE_NAN_EVENT_DISABLED));
+
+ if (cfg->nancfg.disable_reason == NAN_USER_INITIATED) {
+		/* do not send the event to host if the disable request came from host */
+ goto exit;
+ }
+ nan_event_data = MALLOCZ(cfg->osh, sizeof(nan_event_data_t));
+ if (!nan_event_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ bzero(nan_event_data, sizeof(nan_event_data_t));
+
+	if (cfg->nancfg.disable_reason == NAN_CONCURRENCY_CONFLICT) {
+		nan_event_data->status = NAN_STATUS_UNSUPPORTED_CONCURRENCY_NAN_DISABLED;
+	} else {
+		nan_event_data->status = NAN_STATUS_SUCCESS;
+	}
+
+	/* NOTE(review): reason string is always "NAN_STATUS_SUCCESS" even for the
+	 * concurrency-conflict status above — confirm whether HAL consumes it. */
+	ret = memcpy_s(nan_event_data->nan_reason, NAN_ERROR_STR_LEN,
+		"NAN_STATUS_SUCCESS", strlen("NAN_STATUS_SUCCESS"));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy nan reason string, ret = %d\n", ret));
+ goto exit;
+ }
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
+ ret = wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
+ GOOGLE_NAN_EVENT_DISABLED, nan_event_data);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to send event to nan hal, (%d)\n",
+ GOOGLE_NAN_EVENT_DISABLED));
+ }
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
+exit:
+ if (nan_event_data) {
+ MFREE(cfg->osh, nan_event_data, sizeof(nan_event_data_t));
}
-#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
- WL_INFORM(("Sending disabled event if Bus is down\n"));
- /* Resetting instance ID mask */
- cfg->nancfg.inst_id_start = 0;
- memset(cfg->nancfg.svc_inst_id_mask, 0, sizeof(cfg->nancfg.svc_inst_id_mask));
- memset(cfg->svc_info, 0, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t));
- cfg->nan_enable = false;
NAN_DBG_EXIT();
- return ret;
+ return;
+}
+
+void wl_cfgnan_disable_cleanup(struct bcm_cfg80211 *cfg)
+{
+ int i = 0;
+#ifdef RTT_SUPPORT
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhdp);
+ rtt_target_info_t *target_info = NULL;
+
+ /* Delete the geofence rtt target list */
+ dhd_rtt_delete_geofence_target_list(dhdp);
+ /* Cancel pending retry timer if any */
+ if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
+ cancel_delayed_work_sync(&rtt_status->rtt_retry_timer);
+ }
+ /* Remove if any pending proxd timeout for nan-rtt */
+ target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
+ if (target_info && target_info->peer == RTT_PEER_NAN) {
+ /* Cancel pending proxd timeout work if any */
+ if (delayed_work_pending(&rtt_status->proxd_timeout)) {
+ cancel_delayed_work_sync(&rtt_status->proxd_timeout);
+ }
+ }
+ /* Delete if any directed nan rtt session */
+ dhd_rtt_delete_nan_session(dhdp);
+#endif /* RTT_SUPPORT */
+ /* Clear the NDP ID array and dp count */
+ for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
+ cfg->nancfg.ndp_id[i] = 0;
+ }
+ cfg->nan_dp_count = 0;
+ if (cfg->nancfg.nan_ndp_peer_info) {
+ MFREE(cfg->osh, cfg->nancfg.nan_ndp_peer_info,
+ cfg->nancfg.max_ndp_count * sizeof(nan_ndp_peer_t));
+ cfg->nancfg.nan_ndp_peer_info = NULL;
+ }
+ return;
+}
+
+/*
+ * Deferred nan disable work,
+ * scheduled with 3sec delay in order to remove any active nan dps
+ */
+void
+wl_cfgnan_delayed_disable(struct work_struct *work)
+{
+ struct bcm_cfg80211 *cfg = NULL;
+
+ BCM_SET_CONTAINER_OF(cfg, work, struct bcm_cfg80211, nan_disable.work);
+
+ rtnl_lock();
+ wl_cfgnan_disable(cfg);
+ rtnl_unlock();
}
int
wl_cfgnan_stop_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, bool disable_events)
+ struct bcm_cfg80211 *cfg)
{
bcm_iov_batch_buf_t *nan_buf = NULL;
- s32 ret = BCME_ERROR;
+ s32 ret = BCME_OK;
uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
wl_nan_iov_t *nan_iov_data = NULL;
uint32 status;
uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
- uint8 buf[NAN_IOCTL_BUF_SIZE];
- nan_event_data_t *nan_event_data = (nan_event_data_t*)buf;
- s32 timeout;
- bool mutex_locked = false;
NAN_DBG_ENTER();
NAN_MUTEX_LOCK();
- mutex_locked = true;
if (!cfg->nan_enable) {
WL_INFORM(("Nan is not enabled\n"));
* Framework doing cleanup(iface remove) on disable command,
* so avoiding event to prevent iface delete calls again
*/
- if (disable_events) {
- WL_INFORM_MEM(("[NAN] Disabling Nan events\n"));
- wl_cfgnan_config_eventmask(ndev, cfg, 0, true);
- }
+ WL_INFORM_MEM(("[NAN] Disabling Nan events\n"));
+ wl_cfgnan_config_eventmask(ndev, cfg, 0, true);
+
nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
if (!nan_buf) {
WL_ERR(("%s: memory allocation failed\n", __func__));
nan_buf->count++;
nan_buf->is_set = true;
nan_buf_size -= nan_iov_data->nan_iov_len;
- memset(resp_buf, 0, sizeof(resp_buf));
- mutex_locked = false;
- /* reset conditon variable */
- cfg->nancfg.nan_event_recvd = false;
- /* Releasing lock to allow event processing */
- NAN_MUTEX_UNLOCK();
+ memset_s(resp_buf, sizeof(resp_buf),
+ 0, sizeof(resp_buf));
ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
if (unlikely(ret) || unlikely(status)) {
WL_ERR(("nan disable failed ret = %d status = %d\n", ret, status));
goto fail;
}
- cfg->nan_enable = false;
- timeout = wait_event_timeout(cfg->nancfg.nan_event_wait,
- cfg->nancfg.nan_event_recvd,
- msecs_to_jiffies(NAN_START_STOP_TIMEOUT));
- if (!timeout) {
- WL_ERR(("Timed out while Waiting for"
- " WL_NAN_EVENT_STOP event !!!\n"));
- ret = BCME_ERROR;
- goto fail;
- }
- WL_INFORM_MEM(("[NAN] Disable done\n"));
- } else {
- /* Sending up NAN disabled event, to clear the nan state in framework */
- ret = wl_cfgnan_send_stop_event(nan_event_data, cfg);
+ /* Enable back TDLS if connected interface is <= 1 */
+ wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_DELETE, false);
}
+
+ wl_cfgnan_send_stop_event(cfg);
+
fail:
- /* reset conditon variable */
- cfg->nancfg.nan_event_recvd = false;
+ /* Resetting instance ID mask */
+ cfg->nancfg.inst_id_start = 0;
+ memset(cfg->nancfg.svc_inst_id_mask, 0, sizeof(cfg->nancfg.svc_inst_id_mask));
+ memset(cfg->svc_info, 0, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t));
+ cfg->nan_enable = false;
+
if (nan_buf) {
MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
}
MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
}
- if (mutex_locked)
- NAN_MUTEX_UNLOCK();
+ NAN_MUTEX_UNLOCK();
NAN_DBG_EXIT();
return ret;
}
ret = wl_cfgnan_config_eventmask(ndev, cfg,
cmd_data->disc_ind_cfg, false);
if (unlikely(ret)) {
+ WL_ERR(("Failed to config disc ind flag in event_mask, ret = %d\n",
+ ret));
goto fail;
}
}
nan_buf_size -= nan_iov_data->nan_iov_len;
if (nan_buf->count) {
- memset(resp_buf, 0, sizeof(resp_buf));
+ memset_s(resp_buf, sizeof(resp_buf),
+ 0, sizeof(resp_buf));
ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
&(cmd_data->status),
(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
return NULL;
}
-static
nan_ranging_inst_t *
wl_cfgnan_check_for_ranging(struct bcm_cfg80211 *cfg, struct ether_addr *peer)
{
return NULL;
}
+nan_ranging_inst_t *
+wl_cfgnan_get_rng_inst_by_id(struct bcm_cfg80211 *cfg, uint8 rng_id)
+{
+ uint8 i;
+ if (rng_id) {
+ for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
+ if (cfg->nan_ranging_info[i].range_id == rng_id)
+ {
+ return &(cfg->nan_ranging_info[i]);
+ }
+ }
+ }
+ WL_ERR(("Couldn't find the ranging instance for rng_id %d\n", rng_id));
+ return NULL;
+}
+
+/*
+ * Find ranging inst for given peer,
+ * On not found, create one
+ * with given range role
+ */
nan_ranging_inst_t *
wl_cfgnan_get_ranging_inst(struct bcm_cfg80211 *cfg, struct ether_addr *peer,
- uint8 svc_id, bool create)
+ nan_range_role_t range_role)
{
nan_ranging_inst_t *ranging_inst = NULL;
uint8 i;
+ if (!peer) {
+ WL_ERR(("Peer address is NULL"));
+ goto done;
+ }
+
ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer);
if (ranging_inst) {
goto done;
}
- if (create) {
- WL_TRACE(("Creating Ranging instance \n"));
- for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
- if (cfg->nan_ranging_info[i].range_id == 0)
- break;
- }
- if (i == NAN_MAX_RANGING_INST) {
- WL_DBG(("No buffer available for the ranging instance"));
- goto done;
+ WL_TRACE(("Creating Ranging instance \n"));
+
+ for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
+ if (cfg->nan_ranging_info[i].in_use == FALSE) {
+ break;
}
- ranging_inst = &cfg->nan_ranging_info[i];
- memcpy(&ranging_inst->peer_addr, peer, ETHER_ADDR_LEN);
- ranging_inst->range_status = NAN_RANGING_REQUIRED;
- ranging_inst->svc_inst_id = svc_id;
}
+ if (i == NAN_MAX_RANGING_INST) {
+ WL_ERR(("No buffer available for the ranging instance"));
+ goto done;
+ }
+ ranging_inst = &cfg->nan_ranging_info[i];
+ memcpy(&ranging_inst->peer_addr, peer, ETHER_ADDR_LEN);
+ ranging_inst->range_status = NAN_RANGING_REQUIRED;
+ ranging_inst->prev_distance_mm = INVALID_DISTANCE;
+ ranging_inst->range_role = range_role;
+ ranging_inst->in_use = TRUE;
+
done:
return ranging_inst;
}
return res;
}
-#ifdef WL_NAN_DISC_CACHE
-static int
+int
wl_cfgnan_cancel_ranging(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, uint8 range_id, uint32 *status)
+ struct bcm_cfg80211 *cfg, uint8 range_id, uint8 flags, uint32 *status)
{
bcm_iov_batch_buf_t *nan_buf = NULL;
s32 ret = BCME_OK;
bcm_iov_batch_subcmd_t *sub_cmd = NULL;
wl_nan_iov_t *nan_iov_data = NULL;
uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+ wl_nan_range_cancel_ext_t rng_cncl;
+ uint8 size_of_iov;
NAN_DBG_ENTER();
+ if (cfg->nancfg.version >= NAN_RANGE_EXT_CANCEL_SUPPORT_VER) {
+ size_of_iov = sizeof(rng_cncl);
+ } else {
+ size_of_iov = sizeof(range_id);
+ }
+
+ memset_s(&rng_cncl, sizeof(rng_cncl), 0, sizeof(rng_cncl));
+ rng_cncl.range_id = range_id;
+ rng_cncl.flags = flags;
+
nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
if (!nan_buf) {
WL_ERR(("%s: memory allocation failed\n", __func__));
sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
- sizeof(range_id), &subcmd_len);
+ size_of_iov, &subcmd_len);
if (unlikely(ret)) {
WL_ERR(("nan_sub_cmd check failed\n"));
goto fail;
}
sub_cmd->id = htod16(WL_NAN_CMD_RANGE_CANCEL);
- sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(range_id);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + size_of_iov;
sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
/* Reduce the iov_len size by subcmd_len */
nan_iov_end = nan_iov_data->nan_iov_len;
nan_buf_size = (nan_iov_start - nan_iov_end);
- memcpy(sub_cmd->data, &range_id, sizeof(range_id));
+ if (size_of_iov >= sizeof(rng_cncl)) {
+ (void)memcpy_s(sub_cmd->data, nan_iov_data->nan_iov_len,
+ &rng_cncl, size_of_iov);
+ } else {
+ (void)memcpy_s(sub_cmd->data, nan_iov_data->nan_iov_len,
+ &range_id, size_of_iov);
+ }
nan_buf->is_set = true;
nan_buf->count++;
- memset(resp_buf, 0, sizeof(resp_buf));
+ memset_s(resp_buf, sizeof(resp_buf),
+ 0, sizeof(resp_buf));
ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
if (unlikely(ret) || unlikely(*status)) {
- WL_ERR(("Range cancel failed ret %d status %d \n", ret, *status));
+ WL_ERR(("Range ID %d cancel failed ret %d status %d \n", range_id, ret, *status));
goto fail;
}
- WL_INFORM(("Range cancel with Range ID [%d] successfull\n", range_id));
+	WL_MEM(("Range cancel with Range ID [%d] successful\n", range_id));
fail:
if (nan_buf) {
MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
return ret;
}
+#ifdef WL_NAN_DISC_CACHE
static int
wl_cfgnan_cache_svc_info(struct bcm_cfg80211 *cfg,
- nan_discover_cmd_data_t *cmd_data, uint16 cmd_id)
+ nan_discover_cmd_data_t *cmd_data, uint16 cmd_id, bool update)
{
int ret = BCME_OK;
int i;
nan_svc_info_t *svc_info;
+ uint8 svc_id = (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) ? cmd_data->sub_id :
+ cmd_data->pub_id;
for (i = 0; i < NAN_MAX_SVC_INST; i++) {
+ if (update) {
+ if (cfg->svc_info[i].svc_id == svc_id) {
+ svc_info = &cfg->svc_info[i];
+ break;
+ } else {
+ continue;
+ }
+ }
if (!cfg->svc_info[i].svc_id) {
svc_info = &cfg->svc_info[i];
break;
goto fail;
}
if (cmd_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) {
- WL_TRACE(("%s:updating ranging info", __FUNCTION__));
+ WL_TRACE(("%s: updating ranging info, enabling", __FUNCTION__));
svc_info->status = 1;
svc_info->ranging_interval = cmd_data->ranging_intvl_msec;
svc_info->ranging_ind = cmd_data->ranging_indication;
svc_info->ingress_limit = cmd_data->ingress_limit;
svc_info->egress_limit = cmd_data->egress_limit;
svc_info->ranging_required = 1;
+ } else {
+ WL_TRACE(("%s: updating ranging info, disabling", __FUNCTION__));
+ svc_info->status = 0;
+ svc_info->ranging_interval = 0;
+ svc_info->ranging_ind = 0;
+ svc_info->ingress_limit = 0;
+ svc_info->egress_limit = 0;
+ svc_info->ranging_required = 0;
}
+
+ /* Reset Range status flags on svc creation/update */
+ svc_info->svc_range_status = 0;
+ svc_info->flags = cmd_data->flags;
+
if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
svc_info->svc_id = cmd_data->sub_id;
if ((cmd_data->flags & WL_NAN_SUB_ACTIVE) &&
(cmd_data->tx_match.dlen)) {
- memcpy(svc_info->tx_match_filter,
+ ret = memcpy_s(svc_info->tx_match_filter, sizeof(svc_info->tx_match_filter),
cmd_data->tx_match.data, cmd_data->tx_match.dlen);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy tx match filter data\n"));
+ goto fail;
+ }
svc_info->tx_match_filter_len = cmd_data->tx_match.dlen;
}
} else {
svc_info->svc_id = cmd_data->pub_id;
}
- memcpy(svc_info->svc_hash, cmd_data->svc_hash.data, WL_NAN_SVC_HASH_LEN);
+ ret = memcpy_s(svc_info->svc_hash, sizeof(svc_info->svc_hash),
+ cmd_data->svc_hash.data, WL_NAN_SVC_HASH_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc hash\n"));
+ }
fail:
return ret;
}
-/* terminate all ranging sessions associated with a svc */
-static int
-wl_cfgnan_terminate_ranging_sessions(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, uint8 svc_id)
+static bool
+wl_cfgnan_clear_svc_from_ranging_inst(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t *ranging_inst, nan_svc_info_t *svc)
{
- /* cancel all related ranging instances */
- uint8 i;
- int ret = BCME_OK;
- uint32 status;
- nan_ranging_inst_t *ranging_inst;
- nan_svc_info_t *svc;
- for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
- ranging_inst = &cfg->nan_ranging_info[i];
- if (ranging_inst->range_id && ranging_inst->svc_inst_id == svc_id) {
- ret = wl_cfgnan_cancel_ranging(ndev, cfg, ranging_inst->range_id,
- &status);
- if (unlikely(ret) || unlikely(status)) {
- WL_ERR(("%s:nan range cancel failed ret = %d status = %d\n",
- __FUNCTION__, ret, status));
+ int i = 0;
+ bool cleared = FALSE;
+
+ if (svc && ranging_inst->in_use) {
+ for (i = 0; i < MAX_SUBSCRIBES; i++) {
+ if (svc == ranging_inst->svc_idx[i]) {
+ ranging_inst->num_svc_ctx--;
+ ranging_inst->svc_idx[i] = NULL;
+ cleared = TRUE;
+ /*
+ * This list is maintained dupes free,
+ * hence can break
+ */
+ break;
}
- memset(ranging_inst, 0, sizeof(nan_ranging_inst_t));
- WL_DBG(("Range cancelled \n"));
}
}
+ return cleared;
+}
- /* clear command ranging info */
- svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);
- if (svc) {
- WL_DBG(("clearing cached svc info for svc id %d\n", svc_id));
- memset(svc, 0, sizeof(*svc));
+static int
+wl_cfgnan_clear_svc_from_all_ranging_inst(struct bcm_cfg80211 *cfg, uint8 svc_id)
+{
+ nan_ranging_inst_t *ranging_inst;
+ int i = 0;
+ int ret = BCME_OK;
+
+ nan_svc_info_t *svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);
+ if (!svc) {
+ WL_ERR(("\n svc not found \n"));
+ ret = BCME_NOTFOUND;
+ goto done;
+ }
+ for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
+ ranging_inst = &(cfg->nan_ranging_info[i]);
+ wl_cfgnan_clear_svc_from_ranging_inst(cfg, ranging_inst, svc);
}
+
+done:
return ret;
}
static int
-wl_cfgnan_check_disc_res_for_ranging(struct bcm_cfg80211 *cfg,
- nan_event_data_t* nan_event_data)
+wl_cfgnan_ranging_clear_publish(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer, uint8 svc_id)
{
- nan_svc_info_t *svc;
+ nan_ranging_inst_t *ranging_inst = NULL;
+ nan_svc_info_t *svc = NULL;
+ bool cleared = FALSE;
int ret = BCME_OK;
- svc = wl_cfgnan_get_svc_inst(cfg, nan_event_data->sub_id, 0);
+ ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer);
+ if (!ranging_inst || !ranging_inst->in_use) {
+ goto done;
+ }
- if (svc && svc->ranging_required) {
- nan_ranging_inst_t *ranging_inst;
- ranging_inst = wl_cfgnan_get_ranging_inst(cfg,
- &nan_event_data->remote_nmi, nan_event_data->sub_id, TRUE);
- if (ranging_inst->range_status !=
- NAN_RANGING_IN_PROGRESS) {
- WL_DBG(("Trigger range request\n"));
- ret = wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg),
- cfg, ranging_inst, svc, NAN_RANGE_REQ_CMD);
- if (unlikely(ret)) {
- WL_ERR(("Failed to trigger ranging, ret = (%d)\n", ret));
- memset(ranging_inst, 0, sizeof(*ranging_inst));
- goto exit;
- }
- cfg->nancfg.range_type = GEOFENCE_NAN_RTT;
- }
- /* Disc event will be given on receving range_rpt event */
- WL_TRACE(("Disc event will given when Range RPT event is recvd"));
- } else {
- ret = BCME_UNSUPPORTED;
+ WL_INFORM_MEM(("Check clear Ranging for pub update, sub id = %d,"
+ " range_id = %d, peer addr = " MACDBG " \n", svc_id,
+ ranging_inst->range_id, MAC2STRDBG(peer)));
+ svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);
+ if (!svc) {
+ WL_ERR(("\n svc not found, svc_id = %d\n", svc_id));
+ ret = BCME_NOTFOUND;
+ goto done;
}
-exit:
+
+ cleared = wl_cfgnan_clear_svc_from_ranging_inst(cfg, ranging_inst, svc);
+ if (!cleared) {
+ /* Only if this svc was cleared, any update needed */
+ ret = BCME_NOTFOUND;
+ goto done;
+ }
+
+ wl_cfgnan_terminate_ranging_session(cfg, ranging_inst);
+
+done:
return ret;
}
-/* ranging reqeust event handler */
-static int
-wl_cfgnan_handle_ranging_ind(struct bcm_cfg80211 *cfg,
- wl_nan_ev_rng_req_ind_t *rng_ind)
+#ifdef RTT_SUPPORT
+/* API to terminate/clear all directed nan-rtt sessions.
+* Can be called from framework RTT stop context
+*/
+int
+wl_cfgnan_terminate_directed_rtt_sessions(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg)
{
- int ret = BCME_OK;
- nan_svc_info_t *svc = NULL;
- nan_ranging_inst_t *ranging_inst = NULL;
- uint8 i;
+ nan_ranging_inst_t *ranging_inst;
+ int i, ret = BCME_OK;
+ uint32 status;
- WL_DBG(("Trigger range response\n"));
for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
- if (cfg->svc_info[i].ranging_required) {
- svc = &cfg->svc_info[i];
- }
- }
- if (!svc) {
- ranging_inst = wl_cfgnan_get_ranging_inst(cfg, &rng_ind->peer_m_addr,
- 0, TRUE);
- ranging_inst->range_id = rng_ind->rng_id;
- ret = wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg), cfg,
- ranging_inst, NULL, NAN_RANGE_REQ_EVNT);
- if (unlikely(ret)) {
- WL_ERR(("Failed to trigger range response, ret = (%d)\n", ret));
- memset(ranging_inst, 0, sizeof(*ranging_inst));
- goto exit;
- }
- } else {
- ranging_inst = wl_cfgnan_get_ranging_inst(cfg, &rng_ind->peer_m_addr,
- svc->svc_id, TRUE);
- if (ranging_inst && ranging_inst->range_status != NAN_RANGING_IN_PROGRESS) {
- ranging_inst->range_id = rng_ind->rng_id;
- ret = wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg), cfg,
- ranging_inst, svc, NAN_RANGE_REQ_EVNT);
- if (unlikely(ret)) {
- WL_ERR(("Failed to trigger range response, ret = (%d)\n", ret));
- memset(ranging_inst, 0, sizeof(*ranging_inst));
- goto exit;
+ ranging_inst = &cfg->nan_ranging_info[i];
+ if (ranging_inst->range_id && ranging_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
+ if (ranging_inst->range_status == NAN_RANGING_IN_PROGRESS) {
+ ret = wl_cfgnan_cancel_ranging(ndev, cfg, ranging_inst->range_id,
+ NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("nan range cancel failed ret = %d status = %d\n",
+ ret, status));
+ }
}
- } else {
- WL_INFORM(("Ranging for the peer already in progress"));
+ wl_cfgnan_reset_geofence_ranging(cfg, ranging_inst,
+ RTT_SHCED_HOST_DIRECTED_TERM);
}
}
-
-exit:
return ret;
}
+#endif /* RTT_SUPPORT */
-/* ranging quest and response iovar handler */
+/*
+ * suspend ongoing geofence ranging session
+ * with a peer if on-going ranging is with given peer
+ * If peer NULL,
+ * Suspend on-going ranging blindly
+ * Do nothing on:
+ * If ranging is not in progress
+ * If ranging in progress but not with given peer
+ */
int
-wl_cfgnan_trigger_ranging(struct net_device *ndev, struct bcm_cfg80211 *cfg,
- void *ranging_ctxt, nan_svc_info_t *svc, uint8 range_cmd)
+wl_cfgnan_suspend_geofence_rng_session(struct net_device *ndev,
+ struct ether_addr *peer, int suspend_reason, u8 cancel_flags)
{
- s32 ret = BCME_OK;
- bcm_iov_batch_buf_t *nan_buf = NULL;
- wl_nan_range_req_t *range_req = NULL;
- wl_nan_range_resp_t *range_resp = NULL;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ int ret = BCME_OK;
uint32 status;
- uint8 resp_buf[NAN_IOCTL_BUF_SIZE_MED];
- nan_ranging_inst_t *ranging_inst = (nan_ranging_inst_t *)ranging_ctxt;
- nan_avail_cmd_data cmd_data;
-
- NAN_DBG_ENTER();
+ nan_ranging_inst_t *ranging_inst = NULL;
+ struct ether_addr* peer_addr = NULL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+#ifdef RTT_SUPPORT
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+ rtt_geofence_target_info_t *geofence_target_info;
- memset(&cmd_data, 0, sizeof(cmd_data));
- memcpy(&cmd_data.peer_nmi, &ranging_inst->peer_addr, ETHER_ADDR_LEN);
- cmd_data.avail_period = NAN_RANGING_PERIOD;
- ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
- cfg, &cmd_data, WL_AVAIL_LOCAL);
- if (unlikely(ret)) {
- WL_ERR(("Failed to set avail value with type [WL_AVAIL_LOCAL]\n"));
- goto fail;
+ geofence_target_info = dhd_rtt_get_geofence_current_target(dhd);
+ if (!geofence_target_info) {
+ WL_DBG(("No Geofencing Targets, suspend req dropped\n"));
+ goto exit;
}
+ peer_addr = &geofence_target_info->peer_addr;
- ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
- cfg, &cmd_data, WL_AVAIL_RANGING);
- if (unlikely(ret)) {
- WL_ERR(("Failed to set avail value with type [WL_AVAIL_RANGING]\n"));
- goto fail;
+ ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
+ if (dhd_rtt_get_geofence_rtt_state(dhd) == FALSE) {
+ WL_DBG(("Geofencing Ranging not in progress, suspend req dropped\n"));
+ goto exit;
}
- nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
- if (!nan_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
+ if (peer && memcmp(peer_addr, peer, ETHER_ADDR_LEN)) {
+ if (suspend_reason == RTT_GEO_SUSPN_HOST_NDP_TRIGGER ||
+ suspend_reason == RTT_GEO_SUSPN_PEER_NDP_TRIGGER) {
+ /* NDP and Ranging can coexist with different Peers */
+ WL_DBG(("Geofencing Ranging not in progress with given peer,"
+ " suspend req dropped\n"));
+ goto exit;
+ }
}
+#endif /* RTT_SUPPORT */
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
+ ASSERT((ranging_inst != NULL));
+ if (ranging_inst) {
+ if (ranging_inst->range_status != NAN_RANGING_IN_PROGRESS) {
+ WL_DBG(("Ranging Inst with peer not in progress, "
+ " suspend req dropped\n"));
+ goto exit;
+ }
+ cancel_flags |= NAN_RNG_TERM_FLAG_IMMEDIATE;
+ ret = wl_cfgnan_cancel_ranging(ndev, cfg,
+ ranging_inst->range_id, cancel_flags, &status);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("Geofence Range suspended failed, err = %d, status = %d,"
+ " range_id = %d, suspend_reason = %d, " MACDBG " \n",
+ ret, status, ranging_inst->range_id,
+ suspend_reason, MAC2STRDBG(peer_addr)));
+ }
+ ranging_inst->range_status = NAN_RANGING_REQUIRED;
+ WL_INFORM_MEM(("Geofence Range suspended, range_id = %d,"
+ " suspend_reason = %d, " MACDBG " \n", ranging_inst->range_id,
+ suspend_reason, MAC2STRDBG(peer_addr)));
+#ifdef RTT_SUPPORT
+ /* Set geofence RTT in progress state to false */
+ dhd_rtt_set_geofence_rtt_state(dhd, FALSE);
+#endif /* RTT_SUPPORT */
+ }
+
+exit:
+ /* Post pending discovery results */
+ if (ranging_inst &&
+ ((suspend_reason == RTT_GEO_SUSPN_HOST_NDP_TRIGGER) ||
+ (suspend_reason == RTT_GEO_SUSPN_PEER_NDP_TRIGGER))) {
+ wl_cfgnan_disc_result_on_geofence_cancel(cfg, ranging_inst);
+ }
+
+ return ret;
+}
+
+static void
+wl_cfgnan_clear_svc_cache(struct bcm_cfg80211 *cfg,
+ wl_nan_instance_id svc_id)
+{
+ nan_svc_info_t *svc;
+ svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);
+ if (svc) {
+ WL_DBG(("clearing cached svc info for svc id %d\n", svc_id));
+ memset(svc, 0, sizeof(*svc));
+ }
+}
+
+/*
+ * Terminate given ranging instance
+ * if no pending ranging sub service
+ */
+static void
+wl_cfgnan_terminate_ranging_session(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t *ranging_inst)
+{
+ int ret = BCME_OK;
+ uint32 status;
+#ifdef RTT_SUPPORT
+ rtt_geofence_target_info_t* geofence_target = NULL;
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+ int8 index;
+#endif /* RTT_SUPPORT */
+
+ if (ranging_inst->range_id == 0) {
+ /* Make sure, range inst is valid in caller */
+ return;
+ }
+
+ if (ranging_inst->num_svc_ctx != 0) {
+ /*
+ * Make sure to remove all svc_insts for range_inst
+ * in order to cancel ranging and remove target in caller
+ */
+ return;
+ }
+
+ /* Cancel Ranging if in progress for rang_inst */
+ if (ranging_inst->range_status == NAN_RANGING_IN_PROGRESS) {
+ ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg),
+ cfg, ranging_inst->range_id,
+ NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("%s:nan range cancel failed ret = %d status = %d\n",
+ __FUNCTION__, ret, status));
+ } else {
+ WL_DBG(("Range cancelled \n"));
+ /* Set geofence RTT in progress state to false */
+#ifdef RTT_SUPPORT
+ dhd_rtt_set_geofence_rtt_state(dhd, FALSE);
+#endif /* RTT_SUPPORT */
+ }
+ }
+
+#ifdef RTT_SUPPORT
+ geofence_target = dhd_rtt_get_geofence_target(dhd,
+ &ranging_inst->peer_addr, &index);
+ if (geofence_target) {
+ dhd_rtt_remove_geofence_target(dhd, &geofence_target->peer_addr);
+ WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
+ MAC2STRDBG(&(ranging_inst->peer_addr))));
+ bzero(ranging_inst, sizeof(nan_ranging_inst_t));
+ }
+#endif /* RTT_SUPPORT */
+}
+
+/*
+ * Terminate all ranging sessions
+ * with no pending ranging sub service
+ */
+static void
+wl_cfgnan_terminate_all_obsolete_ranging_sessions(
+ struct bcm_cfg80211 *cfg)
+{
+ /* cancel all related ranging instances */
+ uint8 i = 0;
+ nan_ranging_inst_t *ranging_inst = NULL;
+
+ for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
+ ranging_inst = &cfg->nan_ranging_info[i];
+ if (ranging_inst->in_use) {
+ wl_cfgnan_terminate_ranging_session(cfg, ranging_inst);
+ }
+ }
+
+ return;
+}
+
+/*
+ * Store svc_ctx for processing during RNG_RPT
+ * Return BCME_OK only when svc is added
+ */
+static int
+wl_cfgnan_update_ranging_svc_inst(nan_ranging_inst_t *ranging_inst,
+ nan_svc_info_t *svc)
+{
+ int ret = BCME_OK;
+ int i = 0;
+
+ for (i = 0; i < MAX_SUBSCRIBES; i++) {
+ if (ranging_inst->svc_idx[i] == svc) {
+ WL_DBG(("SVC Ctx for ranging already present, "
+ " Duplication not supported: sub_id: %d\n", svc->svc_id));
+ ret = BCME_UNSUPPORTED;
+ goto done;
+ }
+ }
+ for (i = 0; i < MAX_SUBSCRIBES; i++) {
+ if (ranging_inst->svc_idx[i]) {
+ continue;
+ } else {
+ WL_DBG(("Adding SVC Ctx for ranging..svc_id %d\n", svc->svc_id));
+ ranging_inst->svc_idx[i] = svc;
+ ranging_inst->num_svc_ctx++;
+ ret = BCME_OK;
+ goto done;
+ }
+ }
+ if (i == MAX_SUBSCRIBES) {
+ WL_ERR(("wl_cfgnan_update_ranging_svc_inst: "
+ "No resource to hold Ref SVC ctx..svc_id %d\n", svc->svc_id));
+ ret = BCME_NORESOURCE;
+ goto done;
+ }
+done:
+ return ret;
+}
+
+#ifdef RTT_SUPPORT
+int
+wl_cfgnan_trigger_geofencing_ranging(struct net_device *dev,
+ struct ether_addr *peer_addr)
+{
+ int ret = BCME_OK;
+ int err_at = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ int8 index = -1;
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+ rtt_geofence_target_info_t* geofence_target;
+ nan_ranging_inst_t *ranging_inst;
+ ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
+
+ if (!ranging_inst) {
+ WL_INFORM_MEM(("Ranging Entry for peer:" MACDBG ", not found\n",
+ MAC2STRDBG(peer_addr)));
+ ASSERT(0);
+ /* Ranging inst should have been added before adding target */
+ dhd_rtt_remove_geofence_target(dhd, peer_addr);
+ ret = BCME_ERROR;
+ err_at = 1;
+ goto exit;
+ }
+
+ ASSERT(ranging_inst->range_status !=
+ NAN_RANGING_IN_PROGRESS);
+
+ if (ranging_inst->range_status !=
+ NAN_RANGING_IN_PROGRESS) {
+ WL_DBG(("Trigger range request with first svc in svc list of range inst\n"));
+ ret = wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg),
+ cfg, ranging_inst, ranging_inst->svc_idx[0],
+ NAN_RANGE_REQ_CMD, TRUE);
+ if (ret != BCME_OK) {
+ /* Unsupported is for already ranging session for peer */
+ if (ret == BCME_BUSY) {
+ /* TODO: Attempt again over a timer */
+ err_at = 2;
+ } else {
+ /* Remove target and clean ranging inst */
+ geofence_target = dhd_rtt_get_geofence_target(dhd,
+ &ranging_inst->peer_addr, &index);
+ if (geofence_target) {
+ dhd_rtt_remove_geofence_target(dhd,
+ &geofence_target->peer_addr);
+ }
+ bzero(ranging_inst, sizeof(nan_ranging_inst_t));
+ err_at = 3;
+ goto exit;
+ }
+ }
+ } else {
+ /* already in progress..This should not happen */
+ ASSERT(0);
+ ret = BCME_ERROR;
+ err_at = 4;
+ goto exit;
+ }
+
+exit:
+ if (ret) {
+ WL_ERR(("wl_cfgnan_trigger_geofencing_ranging: Failed to "
+ "trigger ranging, peer: " MACDBG " ret"
+ " = (%d), err_at = %d\n", MAC2STRDBG(peer_addr),
+ ret, err_at));
+ }
+ return ret;
+}
+#endif /* RTT_SUPPORT */
+
+static int
+wl_cfgnan_check_disc_result_for_ranging(struct bcm_cfg80211 *cfg,
+ nan_event_data_t* nan_event_data)
+{
+ nan_svc_info_t *svc;
+ int ret = BCME_OK;
+#ifdef RTT_SUPPORT
+ rtt_geofence_target_info_t geofence_target;
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+ uint8 index;
+#endif /* RTT_SUPPORT */
+ bool add_target;
+
+ svc = wl_cfgnan_get_svc_inst(cfg, nan_event_data->sub_id, 0);
+
+ if (svc && svc->ranging_required) {
+ nan_ranging_inst_t *ranging_inst;
+ ranging_inst = wl_cfgnan_get_ranging_inst(cfg,
+ &nan_event_data->remote_nmi,
+ NAN_RANGING_ROLE_INITIATOR);
+ if (!ranging_inst) {
+ ret = BCME_NORESOURCE;
+ goto exit;
+ }
+ ASSERT(ranging_inst->range_role != NAN_RANGING_ROLE_INVALID);
+
+ /* For responder role, range state should be in progress only */
+ ASSERT(ranging_inst->range_role == NAN_RANGING_ROLE_INITIATOR ||
+ ranging_inst->range_status == NAN_RANGING_IN_PROGRESS);
+
+ /*
+	 * On receiving disc result with ranging required, add target, if
+ * ranging role is responder (range state has to be in prog always)
+ * Or ranging role is initiator and ranging is not already in prog
+ */
+ add_target = ((ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) ||
+ ((ranging_inst->range_role == NAN_RANGING_ROLE_INITIATOR) &&
+ (ranging_inst->range_status != NAN_RANGING_IN_PROGRESS)));
+ if (add_target) {
+ WL_DBG(("Add Range request to geofence target list\n"));
+#ifdef RTT_SUPPORT
+ memcpy(&geofence_target.peer_addr, &nan_event_data->remote_nmi,
+ ETHER_ADDR_LEN);
+ /* check if target is already added */
+ if (!dhd_rtt_get_geofence_target(dhd, &nan_event_data->remote_nmi, &index))
+ {
+ ret = dhd_rtt_add_geofence_target(dhd, &geofence_target);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to add geofence Tgt, ret = (%d)\n", ret));
+ bzero(ranging_inst, sizeof(*ranging_inst));
+ goto exit;
+ } else {
+ WL_INFORM_MEM(("Geofence Tgt Added:" MACDBG " sub_id:%d\n",
+ MAC2STRDBG(&geofence_target.peer_addr),
+ svc->svc_id));
+ }
+ ranging_inst->range_type = RTT_TYPE_NAN_GEOFENCE;
+ }
+#endif /* RTT_SUPPORT */
+ if (wl_cfgnan_update_ranging_svc_inst(ranging_inst, svc)
+ != BCME_OK) {
+ goto exit;
+ }
+#ifdef RTT_SUPPORT
+ if (ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
+ /* Adding RTT target while responder, leads to role concurrency */
+ dhd_rtt_set_role_concurrency_state(dhd, TRUE);
+ }
+ else {
+ /* Trigger/Reset geofence RTT */
+ wl_cfgnan_reset_geofence_ranging(cfg, ranging_inst,
+ RTT_SCHED_SUB_MATCH);
+ }
+#endif /* RTT_SUPPORT */
+ } else {
+ /* Target already added, check & add svc_inst ref to rang_inst */
+ wl_cfgnan_update_ranging_svc_inst(ranging_inst, svc);
+ }
+		/* Disc event will be given on receiving range_rpt event */
+ WL_TRACE(("Disc event will given when Range RPT event is recvd"));
+ } else {
+ ret = BCME_UNSUPPORTED;
+ }
+
+exit:
+ return ret;
+}
+
+bool
+wl_cfgnan_ranging_allowed(struct bcm_cfg80211 *cfg)
+{
+ int i = 0;
+ uint8 rng_progress_count = 0;
+ nan_ranging_inst_t *ranging_inst = NULL;
+
+ for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
+ ranging_inst = &cfg->nan_ranging_info[i];
+ if (ranging_inst->range_status == NAN_RANGING_IN_PROGRESS) {
+ rng_progress_count++;
+ }
+ }
+
+ ASSERT(rng_progress_count <= NAN_MAX_RANGING_SSN_ALLOWED);
+ if (rng_progress_count == NAN_MAX_RANGING_SSN_ALLOWED) {
+ return FALSE;
+ }
+ return TRUE;
+}
+
+uint8
+wl_cfgnan_cancel_rng_responders(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg)
+{
+ int i = 0;
+ uint8 num_resp_cancelled = 0;
+ int status, ret;
+ nan_ranging_inst_t *ranging_inst = NULL;
+
+ for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
+ ranging_inst = &cfg->nan_ranging_info[i];
+ if (ranging_inst->range_status == NAN_RANGING_IN_PROGRESS &&
+ ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
+ num_resp_cancelled++;
+ WL_ERR((" Cancelling responder\n"));
+ ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg), cfg,
+ ranging_inst->range_id, NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("wl_cfgnan_cancel_rng_responders: Failed to cancel"
+ " existing ranging, ret = (%d)\n", ret));
+ }
+ WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
+ MAC2STRDBG(&(ranging_inst->peer_addr))));
+ bzero(ranging_inst, sizeof(*ranging_inst));
+ }
+ }
+ return num_resp_cancelled;
+}
+
+#ifdef RTT_SUPPORT
+/* ranging request event handler */
+static int
+wl_cfgnan_handle_ranging_ind(struct bcm_cfg80211 *cfg,
+ wl_nan_ev_rng_req_ind_t *rng_ind)
+{
+ int ret = BCME_OK;
+ nan_ranging_inst_t *ranging_inst = NULL;
+ uint32 status;
+ uint8 cancel_flags = 0;
+ bool accept = TRUE;
+ nan_ranging_inst_t tmp_rng_inst;
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ WL_DBG(("Trigger range response\n"));
+
+ /* check if we are already having any ranging session with peer.
+ * If so below are the policies
+ * If we are already a Geofence Initiator or responder w.r.t the peer
+ * then silently teardown the current session and accept the REQ.
+ * If we are in direct rtt initiator role then reject.
+ */
+ ranging_inst = wl_cfgnan_check_for_ranging(cfg, &(rng_ind->peer_m_addr));
+ if (ranging_inst) {
+ if (ranging_inst->range_type == RTT_TYPE_NAN_GEOFENCE ||
+ ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
+ WL_INFORM_MEM(("Already responder/geofence for the Peer, cancel current"
+ " ssn and accept new one, range_type = %d, role = %d\n",
+ ranging_inst->range_type, ranging_inst->range_role));
+ cancel_flags = NAN_RNG_TERM_FLAG_IMMEDIATE |
+ NAN_RNG_TERM_FLAG_SILIENT_TEARDOWN;
+
+ if (ranging_inst->range_type == RTT_TYPE_NAN_GEOFENCE &&
+ ranging_inst->range_role == NAN_RANGING_ROLE_INITIATOR) {
+ wl_cfgnan_suspend_geofence_rng_session(ndev,
+ &(rng_ind->peer_m_addr), RTT_GEO_SUSPN_PEER_RTT_TRIGGER,
+ cancel_flags);
+ } else {
+ ret = wl_cfgnan_cancel_ranging(ndev, cfg,
+ ranging_inst->range_id, cancel_flags, &status);
+ if (unlikely(ret)) {
+ WL_ERR(("wl_cfgnan_handle_ranging_ind: Failed to cancel"
+ " existing ranging, ret = (%d)\n", ret));
+ goto done;
+ }
+ }
+ ranging_inst->range_status = NAN_RANGING_REQUIRED;
+ ranging_inst->range_role = NAN_RANGING_ROLE_RESPONDER;
+ ranging_inst->range_type = 0;
+ } else {
+ WL_ERR(("Reject the RNG_REQ_IND in direct rtt initiator role\n"));
+ ret = BCME_BUSY;
+ goto done;
+ }
+ } else {
+ /* Check if new Ranging session is allowed */
+ if (!wl_cfgnan_ranging_allowed(cfg)) {
+ WL_ERR(("Cannot allow more ranging sessions \n"));
+ ret = BCME_NORESOURCE;
+ goto done;
+ }
+
+ ranging_inst = wl_cfgnan_get_ranging_inst(cfg, &rng_ind->peer_m_addr,
+ NAN_RANGING_ROLE_RESPONDER);
+ if (!ranging_inst) {
+ WL_ERR(("Failed to create ranging instance \n"));
+ ASSERT(0);
+ ret = BCME_NORESOURCE;
+ goto done;
+ }
+ }
+
+done:
+ if (ret != BCME_OK) {
+ /* reject the REQ using temp ranging instance */
+ bzero(&tmp_rng_inst, sizeof(tmp_rng_inst));
+ ranging_inst = &tmp_rng_inst;
+ (void)memcpy_s(&tmp_rng_inst.peer_addr, ETHER_ADDR_LEN,
+ &rng_ind->peer_m_addr, ETHER_ADDR_LEN);
+ accept = FALSE;
+ }
+
+ ranging_inst->range_id = rng_ind->rng_id;
+
+ WL_DBG(("Trigger Ranging at Responder\n"));
+ ret = wl_cfgnan_trigger_ranging(ndev, cfg, ranging_inst,
+ NULL, NAN_RANGE_REQ_EVNT, accept);
+ if (unlikely(ret) || !accept) {
+ WL_ERR(("Failed to handle range request, ret = (%d) accept %d\n",
+ ret, accept));
+ bzero(ranging_inst, sizeof(*ranging_inst));
+ }
+
+ return ret;
+}
+#endif /* RTT_SUPPORT */
+/* ranging quest and response iovar handler */
+int
+wl_cfgnan_trigger_ranging(struct net_device *ndev, struct bcm_cfg80211 *cfg,
+ void *ranging_ctxt, nan_svc_info_t *svc,
+ uint8 range_cmd, bool accept_req)
+{
+ s32 ret = BCME_OK;
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ wl_nan_range_req_t *range_req = NULL;
+ wl_nan_range_resp_t *range_resp = NULL;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ uint32 status;
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE_MED];
+ nan_ranging_inst_t *ranging_inst = (nan_ranging_inst_t *)ranging_ctxt;
+ nan_avail_cmd_data cmd_data;
+
+ NAN_DBG_ENTER();
+
+ memset_s(&cmd_data, sizeof(cmd_data),
+ 0, sizeof(cmd_data));
+ ret = memcpy_s(&cmd_data.peer_nmi, ETHER_ADDR_LEN,
+ &ranging_inst->peer_addr, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy ranging peer addr\n"));
+ goto fail;
+ }
+
+ cmd_data.avail_period = NAN_RANGING_PERIOD;
+ ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
+ cfg, &cmd_data, WL_AVAIL_LOCAL);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to set avail value with type [WL_AVAIL_LOCAL]\n"));
+ goto fail;
+ }
+
+ ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
+ cfg, &cmd_data, WL_AVAIL_RANGING);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to set avail value with type [WL_AVAIL_RANGING]\n"));
+ goto fail;
+ }
+
+ nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
if (svc) {
range_req->interval = svc->ranging_interval;
/* Limits are in cm from host */
- range_req->ingress = (svc->ingress_limit*10);
- range_req->egress = (svc->egress_limit*10);
- range_req->indication = svc->ranging_ind;
- } else {
- range_req->indication = NAN_RANGING_INDICATE_CONTINUOUS_MASK;
+ range_req->ingress = svc->ingress_limit;
+ range_req->egress = svc->egress_limit;
}
+ range_req->indication = NAN_RANGING_INDICATE_CONTINUOUS_MASK;
} else {
/* range response config */
sub_cmd->id = htod16(WL_NAN_CMD_RANGE_RESPONSE);
range_resp = (wl_nan_range_resp_t *)(sub_cmd->data);
range_resp->range_id = ranging_inst->range_id;
range_resp->indication = NAN_RANGING_INDICATE_CONTINUOUS_MASK;
- if (svc) {
- range_resp->status = svc->status;
- } else {
+ if (accept_req) {
range_resp->status = NAN_RNG_REQ_ACCEPTED_BY_HOST;
+ } else {
+ range_resp->status = NAN_RNG_REQ_REJECTED_BY_HOST;
}
nan_buf->is_set = true;
}
OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
nan_buf->count++;
- memset(resp_buf, 0, sizeof(resp_buf));
+ memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
&status,
(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
if (unlikely(ret) || unlikely(status)) {
WL_ERR(("nan ranging failed ret = %d status = %d\n",
ret, status));
- ret = status;
+ ret = (ret == BCME_OK) ? status : ret;
goto fail;
}
WL_TRACE(("nan ranging trigger successful\n"));
+ if (range_cmd == NAN_RANGE_REQ_CMD) {
+ WL_MEM(("Ranging Req Triggered"
+ " peer: " MACDBG ", ind : %d, ingress : %d, egress : %d\n",
+ MAC2STRDBG(&ranging_inst->peer_addr), range_req->indication,
+ range_req->ingress, range_req->egress));
+ } else {
+ WL_MEM(("Ranging Resp Triggered"
+ " peer: " MACDBG ", ind : %d, ingress : %d, egress : %d\n",
+ MAC2STRDBG(&ranging_inst->peer_addr), range_resp->indication,
+ range_resp->ingress, range_resp->egress));
+ }
/* check the response buff for request */
if (range_cmd == NAN_RANGE_REQ_CMD) {
ret = process_resp_buf(resp_buf + WL_NAN_OBUF_DATA_OFFSET,
&ranging_inst->range_id, WL_NAN_CMD_RANGE_REQUEST);
- WL_TRACE(("\n ranging instance returned %d\n", ranging_inst->range_id));
+ WL_INFORM_MEM(("ranging instance returned %d\n", ranging_inst->range_id));
}
/* Preventing continuous range requests */
ranging_inst->range_status = NAN_RANGING_IN_PROGRESS;
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
static uint wl_nan_hash(void *ctx, uint index, const uint8 *input, uint input_len)
{
uint8* filter_idx = (uint8*)ctx;
/* Steps 1 and 2 as explained in Section 6.2 */
/* Concatenate index to input and run CRC32 by calling hndcrc32 twice */
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
b = hndcrc32(&i, sizeof(uint8), CRC32_INIT_VALUE);
b = hndcrc32((uint8*)input, input_len, b);
+ GCC_DIAGNOSTIC_POP();
/* Obtain the last 2 bytes of the CRC32 output */
b &= NAN_BLOOM_CRC32_MASK;
if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
(cmd_data->svc_hash.data)) {
- memcpy((uint8*)sd_params->svc_hash, cmd_data->svc_hash.data,
+ ret = memcpy_s((uint8*)sd_params->svc_hash,
+ sizeof(sd_params->svc_hash),
+ cmd_data->svc_hash.data,
cmd_data->svc_hash.dlen);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc hash\n"));
+ goto fail;
+ }
#ifdef WL_NAN_DEBUG
prhex("hashed svc name", cmd_data->svc_hash.data,
cmd_data->svc_hash.dlen);
if (cmd_data->sde_svc_info.data && cmd_data->sde_svc_info.dlen) {
WL_TRACE(("optional sdea svc_info present, pack it, %d\n",
- cmd_data->sde_svc_info.dlen));
+ cmd_data->sde_svc_info.dlen));
ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
WL_NAN_XTLV_SD_SDE_SVC_INFO,
cmd_data->sde_svc_info.dlen,
ret = -ENOMEM;
goto fail;
}
- memcpy(srf_mac, &srf_control, NAN_SRF_CTRL_FIELD_LEN);
- memcpy(srf_mac+1, cmd_data->mac_list.list,
+ ret = memcpy_s(srf_mac, NAN_SRF_CTRL_FIELD_LEN,
+ &srf_control, NAN_SRF_CTRL_FIELD_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy srf control\n"));
+ goto fail;
+ }
+ ret = memcpy_s(srf_mac+1, (srf_size - NAN_SRF_CTRL_FIELD_LEN),
+ cmd_data->mac_list.list,
(srf_size - NAN_SRF_CTRL_FIELD_LEN));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy srf control mac list\n"));
+ goto fail;
+ }
ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
WL_NAN_XTLV_CFG_SR_FILTER, srf_size, srf_mac,
BCM_XTLV_OPTION_ALIGN32);
}
/* Bloom filter */
srf_control |= 0x1;
- /* Instance id must be from 0 to 254, 255 is vendor specific */
- if (sd_params->instance_id <= NAN_ID_MIN ||
- sd_params->instance_id > (NAN_ID_MAX - 1)) {
- WL_ERR(("Invalid instance id\n"));
+ /* Instance id must be from 1 to 255, 0 is Reserved */
+ if (sd_params->instance_id == NAN_ID_RESERVED) {
+ WL_ERR(("Invalid instance id: %d\n",
+ sd_params->instance_id));
ret = BCME_BADARG;
goto fail;
}
srftmp += ETHER_ADDR_LEN;
}
- memcpy(srf, &srf_control, NAN_SRF_CTRL_FIELD_LEN);
+ ret = memcpy_s(srf, NAN_SRF_CTRL_FIELD_LEN,
+ &srf_control, NAN_SRF_CTRL_FIELD_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy srf control\n"));
+ goto fail;
+ }
ret = bcm_bloom_get_filter_data(bp, bloom_len,
(srf + NAN_SRF_CTRL_FIELD_LEN),
&bloom_size);
WL_NAN_XTLV_CFG_SR_FILTER, srf_ctrl_size,
srf, BCM_XTLV_OPTION_ALIGN32);
if (ret != BCME_OK) {
+ WL_ERR(("Failed to pack SR FILTER data, ret = %d\n", ret));
goto fail;
}
} else {
}
}
- if (cmd_data->sde_control_flag) {
+ if (cmd_data->sde_control_config) {
ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
WL_NAN_XTLV_SD_SDE_CONTROL,
sizeof(uint16), (uint8*)&cmd_data->sde_control_flag,
*data_size += ALIGN_SIZE(cmd_data->key.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
if (cmd_data->scid.dlen)
*data_size += ALIGN_SIZE(cmd_data->scid.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
- if (cmd_data->sde_control_flag)
+ if (cmd_data->sde_control_config)
*data_size += ALIGN_SIZE(sizeof(uint16) + NAN_XTLV_ID_LEN_SIZE, 4);
if (cmd_data->life_count)
*data_size += ALIGN_SIZE(sizeof(cmd_data->life_count) + NAN_XTLV_ID_LEN_SIZE, 4);
sub_cmd->id = htod16(cmd_id);
sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(instance_id);
sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- memcpy(sub_cmd->data, &instance_id, sizeof(instance_id));
+
+ ret = memcpy_s(sub_cmd->data, (data_size - WL_NAN_OBUF_DATA_OFFSET),
+ &instance_id, sizeof(instance_id));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy instance id, ret = %d\n", ret));
+ goto fail;
+ }
+
ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
&(cmd_data->status), resp_buf, NAN_IOCTL_BUF_SIZE_LARGE);
if (cmd_data->svc_update) {
ret = wl_cfgnan_svc_get_handler(ndev, cfg, cmd_id, cmd_data);
if (ret != BCME_OK) {
+ WL_ERR(("Failed to update svc handler, ret = %d\n", ret));
goto fail;
} else {
/* Ignoring any other svc get error */
if (cmd_data->status == WL_NAN_E_BAD_INSTANCE) {
+ WL_ERR(("Bad instance status, failed to update svc handler\n"));
goto fail;
}
}
} else {
WL_DBG(("nan svc successful\n"));
#ifdef WL_NAN_DISC_CACHE
- if (!cmd_data->svc_update) { /* cache new service */
- ret = wl_cfgnan_cache_svc_info(cfg, cmd_data, cmd_id);
- if (ret < 0) {
- WL_ERR(("%s: fail to cache svc info, ret=%d\n",
- __FUNCTION__, ret));
- goto fail;
- }
- } else {
- WL_DBG(("skipping caching for update of svc %d\n", cmd_id));
+ ret = wl_cfgnan_cache_svc_info(cfg, cmd_data, cmd_id, cmd_data->svc_update);
+ if (ret < 0) {
+ WL_ERR(("%s: fail to cache svc info, ret=%d\n",
+ __FUNCTION__, ret));
+ goto fail;
}
#endif /* WL_NAN_DISC_CACHE */
}
struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
{
int ret = BCME_OK;
+
NAN_DBG_ENTER();
NAN_MUTEX_LOCK();
-
/*
* proceed only if mandatory arguments are present - subscriber id,
* service hash
struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
{
int ret = BCME_OK;
+#ifdef WL_NAN_DISC_CACHE
+ nan_svc_info_t *svc_info;
+ uint8 upd_ranging_required;
+#endif /* WL_NAN_DISC_CACHE */
+#ifdef RTT_GEOFENCE_CONT
+#ifdef RTT_SUPPORT
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+#endif /* RTT_SUPPORT */
+#endif /* RTT_GEOFENCE_CONT */
+
NAN_DBG_ENTER();
NAN_MUTEX_LOCK();
goto fail;
}
+ /* Check for ranging sessions if any */
+ if (cmd_data->svc_update) {
+#ifdef WL_NAN_DISC_CACHE
+ svc_info = wl_cfgnan_get_svc_inst(cfg, cmd_data->sub_id, 0);
+ if (svc_info) {
+ wl_cfgnan_clear_svc_from_all_ranging_inst(cfg, cmd_data->sub_id);
+ /* terminate ranging sessions for this svc, avoid clearing svc cache */
+ wl_cfgnan_terminate_all_obsolete_ranging_sessions(cfg);
+ WL_DBG(("Ranging sessions handled for svc update\n"));
+ upd_ranging_required = !!(cmd_data->sde_control_flag &
+ NAN_SDE_CF_RANGING_REQUIRED);
+ if ((svc_info->ranging_required ^ upd_ranging_required) ||
+ (svc_info->ingress_limit != cmd_data->ingress_limit) ||
+ (svc_info->egress_limit != cmd_data->egress_limit)) {
+ /* Clear cache info in Firmware */
+ ret = wl_cfgnan_clear_disc_cache(cfg, cmd_data->sub_id);
+ if (ret != BCME_OK) {
+ WL_ERR(("couldn't send clear cache to FW \n"));
+ goto fail;
+ }
+ /* Invalidate local cache info */
+ wl_cfgnan_remove_disc_result(cfg, cmd_data->sub_id);
+ }
+ }
+#endif /* WL_NAN_DISC_CACHE */
+ }
+
+#ifdef RTT_GEOFENCE_CONT
+#ifdef RTT_SUPPORT
+ /* Override ranging Indication */
+ if (rtt_status->geofence_cfg.geofence_cont) {
+ if (cmd_data->ranging_indication !=
+ NAN_RANGE_INDICATION_NONE) {
+ cmd_data->ranging_indication = NAN_RANGE_INDICATION_CONT;
+ }
+ }
+#endif /* RTT_SUPPORT */
+#endif /* RTT_GEOFENCE_CONT */
ret = wl_cfgnan_svc_handler(ndev, cfg, WL_NAN_CMD_SD_SUBSCRIBE, cmd_data);
if (ret < 0) {
WL_ERR(("%s: fail to handle svc, ret=%d\n", __FUNCTION__, ret));
sub_cmd->id = htod16(cmd_id);
sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(instance_id);
sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- memcpy(sub_cmd->data, &instance_id, sizeof(instance_id));
+ ret = memcpy_s(sub_cmd->data, *nan_buf_size,
+ &instance_id, sizeof(instance_id));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy instance id, ret = %d\n", ret));
+ goto fail;
+ }
/* adjust iov data len to the end of last data record */
*nan_buf_size -= (sub_cmd->len +
OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
}
#ifdef WL_NAN_DISC_CACHE
- /* terminate ranging sessions for this svc */
- wl_cfgnan_terminate_ranging_sessions(ndev, cfg, cmd_data->pub_id);
+ wl_cfgnan_clear_svc_cache(cfg, cmd_data->pub_id);
#endif /* WL_NAN_DISC_CACHE */
ret = wl_cfgnan_cancel_handler(cmd_data, WL_NAN_CMD_SD_CANCEL_PUBLISH,
&nan_buf->cmds[0], &nan_buf_size);
nan_buf->is_set = true;
nan_buf->count++;
- memset(resp_buf, 0, sizeof(resp_buf));
+ memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
&(cmd_data->status),
(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
goto fail;
}
WL_DBG(("nan cancel publish successfull\n"));
+ wl_cfgnan_remove_inst_id(cfg, cmd_data->pub_id);
fail:
if (nan_buf) {
MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
#ifdef WL_NAN_DISC_CACHE
/* terminate ranging sessions for this svc */
- wl_cfgnan_terminate_ranging_sessions(ndev, cfg, cmd_data->sub_id);
+ wl_cfgnan_clear_svc_from_all_ranging_inst(cfg, cmd_data->sub_id);
+ wl_cfgnan_terminate_all_obsolete_ranging_sessions(cfg);
+ /* clear svc cache for the service */
+ wl_cfgnan_clear_svc_cache(cfg, cmd_data->sub_id);
wl_cfgnan_remove_disc_result(cfg, cmd_data->sub_id);
#endif /* WL_NAN_DISC_CACHE */
nan_buf->is_set = true;
nan_buf->count++;
- memset(resp_buf, 0, sizeof(resp_buf));
+ memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
&(cmd_data->status),
(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
goto fail;
}
WL_DBG(("subscribe cancel successfull\n"));
+ wl_cfgnan_remove_inst_id(cfg, cmd_data->sub_id);
fail:
if (nan_buf) {
MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
sd_xmit = (wl_nan_sd_transmit_t *)(sub_cmd->data);
- /* local instance id must be from 0 to 254, 255 is vendor specific */
- if (cmd_data->local_id <= NAN_ID_MIN ||
- cmd_data->local_id > (NAN_ID_MAX - 1)) {
- WL_ERR(("Invalid local instance id\n"));
+ /* local instance id must be from 1 to 255, 0 is reserved */
+ if (cmd_data->local_id == NAN_ID_RESERVED) {
+ WL_ERR(("Invalid local instance id: %d\n", cmd_data->local_id));
ret = BCME_BADARG;
goto fail;
}
sd_xmit->local_service_id = cmd_data->local_id;
is_lcl_id = TRUE;
- /* remote instance id must be from 0 to 254, 255 is vendor specific */
- if (cmd_data->remote_id <= NAN_ID_MIN ||
- cmd_data->remote_id > (NAN_ID_MAX - 1)) {
- WL_ERR(("Invalid remote instance id\n"));
+ /* remote instance id must be from 1 to 255, 0 is reserved */
+ if (cmd_data->remote_id == NAN_ID_RESERVED) {
+ WL_ERR(("Invalid remote instance id: %d\n", cmd_data->remote_id));
ret = BCME_BADARG;
goto fail;
}
is_dest_id = TRUE;
if (!ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
- memcpy(&sd_xmit->destination_addr, &cmd_data->mac_addr, ETHER_ADDR_LEN);
+ ret = memcpy_s(&sd_xmit->destination_addr, ETHER_ADDR_LEN,
+ &cmd_data->mac_addr, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy dest mac address\n"));
+ goto fail;
+ }
} else {
WL_ERR(("Invalid ether addr provided\n"));
ret = BCME_BADARG;
sd_xmit->token, ret, cmd_data->status));
goto fail;
}
- WL_MEM(("nan transmit successful for token %d\n", sd_xmit->token));
+ WL_INFORM_MEM(("nan transmit successful for token %d\n", sd_xmit->token));
fail:
if (nan_buf) {
MFREE(cfg->osh, nan_buf, data_size);
bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
const bcm_xtlv_t *xtlv;
- uint16 type = 0, len = 0;
+ uint16 type = 0;
+ int len = 0;
NAN_DBG_ENTER();
nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
WL_ERR(("xtlv not found: err = %d\n", ret));
goto fail;
}
- bcm_xtlv_unpack_xtlv(xtlv, &type, &len, NULL, BCM_XTLV_OPTION_ALIGN32);
+ bcm_xtlv_unpack_xtlv(xtlv, &type, (uint16*)&len, NULL, BCM_XTLV_OPTION_ALIGN32);
do
{
switch (type) {
ret = BCME_BADARG;
goto fail;
}
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
fw_cap = (wl_nan_fw_cap_t*)xtlv->data;
+ GCC_DIAGNOSTIC_POP();
break;
default:
- WL_ERR(("Unknown xtlv: id %d ", type));
+ WL_ERR(("Unknown xtlv: id %u\n", type));
ret = BCME_ERROR;
break;
}
if (ret != BCME_OK) {
goto fail;
}
- } while ((xtlv = bcm_next_xtlv(xtlv, (int *)&len, BCM_XTLV_OPTION_ALIGN32)));
+ } while ((xtlv = bcm_next_xtlv(xtlv, &len, BCM_XTLV_OPTION_ALIGN32)));
memset(capabilities, 0, sizeof(nan_hal_capabilities_t));
capabilities->max_publishes = fw_cap->max_svc_publishes;
nan_buf->count++;
nan_buf->is_set = true;
- memset(resp_buf, 0, sizeof(resp_buf));
+ memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg,
nan_buf, nan_buf_size, &status,
(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
return ret;
}
+void
+wl_cfgnan_deinit_cleanup(struct bcm_cfg80211 *cfg)
+{
+ uint8 i = 0;
+ cfg->nan_dp_count = 0;
+ cfg->nan_init_state = false;
+#ifdef WL_NAN_DISC_CACHE
+ if (cfg->nan_disc_cache) {
+ for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
+ if (cfg->nan_disc_cache[i].tx_match_filter.data) {
+ MFREE(cfg->osh, cfg->nan_disc_cache[i].tx_match_filter.data,
+ cfg->nan_disc_cache[i].tx_match_filter.dlen);
+ }
+ if (cfg->nan_disc_cache[i].svc_info.data) {
+ MFREE(cfg->osh, cfg->nan_disc_cache[i].svc_info.data,
+ cfg->nan_disc_cache[i].svc_info.dlen);
+ }
+ }
+ MFREE(cfg->osh, cfg->nan_disc_cache,
+ NAN_MAX_CACHE_DISC_RESULT * sizeof(nan_disc_result_cache));
+ cfg->nan_disc_cache = NULL;
+ }
+ cfg->nan_disc_count = 0;
+ memset_s(cfg->svc_info, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t),
+ 0, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t));
+ memset_s(cfg->nan_ranging_info, NAN_MAX_RANGING_INST * sizeof(nan_ranging_inst_t),
+ 0, NAN_MAX_RANGING_INST * sizeof(nan_ranging_inst_t));
+#endif /* WL_NAN_DISC_CACHE */
+ return;
+}
+
int
wl_cfgnan_deinit(struct bcm_cfg80211 *cfg, uint8 busstate)
{
uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
uint8 buf[NAN_IOCTL_BUF_SIZE];
bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t*)buf;
- uint8 i = 0;
NAN_DBG_ENTER();
NAN_MUTEX_LOCK();
} else {
nan_buf->count++;
nan_buf->is_set = true;
- memset(resp_buf, 0, sizeof(resp_buf));
+ memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
ret = wl_cfgnan_execute_ioctl(cfg->wdev->netdev, cfg,
nan_buf, nan_buf_size, &status,
(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
}
}
}
+ wl_cfgnan_deinit_cleanup(cfg);
- for (i = 0; i < NAN_MAX_NDI; i++) {
- /* clean NDI data */
- cfg->nancfg.ndi[i].in_use = false;
- cfg->nancfg.ndi[i].created = false;
- memset(&cfg->nancfg.ndi[i].ifname, 0x0, IFNAMSIZ);
- }
-
- cfg->nan_dp_mask = 0;
- cfg->nan_init_state = false;
-#ifdef WL_NAN_DISC_CACHE
- if (cfg->nan_disc_cache) {
- for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
- if (cfg->nan_disc_cache[i].tx_match_filter.data) {
- MFREE(cfg->osh, cfg->nan_disc_cache[i].tx_match_filter.data,
- cfg->nan_disc_cache[i].tx_match_filter.dlen);
- }
- if (cfg->nan_disc_cache[i].svc_info.data) {
- MFREE(cfg->osh, cfg->nan_disc_cache[i].svc_info.data,
- cfg->nan_disc_cache[i].svc_info.dlen);
- }
- }
- MFREE(cfg->osh, cfg->nan_disc_cache,
- NAN_MAX_CACHE_DISC_RESULT * sizeof(nan_disc_result_cache));
- cfg->nan_disc_cache = NULL;
- }
- cfg->nan_disc_count = 0;
- memset(cfg->svc_info, 0, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t));
- memset(cfg->nan_ranging_info, 0, NAN_MAX_RANGING_INST * sizeof(nan_ranging_inst_t));
-#endif /* WL_NAN_DISC_CACHE */
fail:
- if (!cfg->nancfg.mac_rand) {
+ if (!cfg->nancfg.mac_rand && !ETHER_ISNULLADDR(cfg->nan_nmi_mac)) {
wl_release_vif_macaddr(cfg, cfg->nan_nmi_mac, WL_IF_TYPE_NAN_NMI);
}
NAN_MUTEX_UNLOCK();
if (wl_get_vif_macaddr(cfg, WL_IF_TYPE_NAN,
mac_addr) != BCME_OK) {
ret = -EINVAL;
+ WL_ERR(("Failed to get mac addr for NDI\n"));
goto fail;
}
}
{
u8 mac_addr[ETH_ALEN];
s32 ret = BCME_OK;
+ s32 idx;
+ struct wireless_dev *wdev;
NAN_DBG_ENTER();
if (busstate != DHD_BUS_DOWN) {
if (type == NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE) {
+ if ((idx = wl_cfgnan_get_ndi_idx(cfg)) < 0) {
+ WL_ERR(("No free idx for NAN NDI\n"));
+ ret = BCME_NORESOURCE;
+ goto fail;
+ }
+
ret = wl_cfgnan_get_ndi_macaddr(cfg, mac_addr);
if (ret != BCME_OK) {
WL_ERR(("Couldn't get mac addr for NDI ret %d\n", ret));
goto fail;
}
- if (wl_cfg80211_add_if(cfg, ndev, WL_IF_TYPE_NAN,
- ifname, mac_addr) == NULL) {
+ wdev = wl_cfg80211_add_if(cfg, ndev, WL_IF_TYPE_NAN,
+ ifname, mac_addr);
+ if (!wdev) {
ret = -ENODEV;
+ WL_ERR(("Failed to create NDI iface = %s, wdev is NULL\n", ifname));
goto fail;
}
+ /* Store the iface name to pub data so that it can be used
+ * during NAN enable
+ */
+ wl_cfgnan_add_ndi_data(cfg, idx, ifname);
+ cfg->nancfg.ndi[idx].created = true;
+ /* Store nan ndev */
+ cfg->nancfg.ndi[idx].nan_ndev = wdev_to_ndev(wdev);
+
} else if (type == NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE) {
ret = wl_cfg80211_del_if(cfg, ndev, NULL, ifname);
+ if (ret == BCME_OK) {
+ if (wl_cfgnan_del_ndi_data(cfg, ifname) < 0) {
+ WL_ERR(("Failed to find matching data for ndi:%s\n",
+ ifname));
+ }
+ } else if (ret == -ENODEV) {
+ WL_INFORM(("Already deleted: %s\n", ifname));
+ ret = BCME_OK;
+ } else if (ret != BCME_OK) {
+ WL_ERR(("failed to delete NDI[%d]\n", ret));
+ }
}
} else {
ret = -ENODEV;
return ret;
}
+/*
+ * Return data peer from peer list
+ * for peer_addr
+ * NULL if not found
+ */
+nan_ndp_peer_t *
+wl_cfgnan_data_get_peer(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer_addr)
+{
+ uint8 i;
+ nan_ndp_peer_t* peer = cfg->nancfg.nan_ndp_peer_info;
+
+ if (!peer) {
+ WL_ERR(("wl_cfgnan_data_get_peer: nan_ndp_peer_info is NULL\n"));
+ goto exit;
+ }
+ for (i = 0; i < cfg->nancfg.max_ndp_count; i++) {
+ if (peer[i].peer_dp_state != NAN_PEER_DP_NOT_CONNECTED &&
+ (!memcmp(peer_addr, &peer[i].peer_addr, ETHER_ADDR_LEN))) {
+ return &peer[i];
+ }
+ }
+
+exit:
+ return NULL;
+}
+
+/*
+ * Returns True if
+ * datapath exists for nan cfg
+ * for any peer
+ */
+bool
+wl_cfgnan_data_dp_exists(struct bcm_cfg80211 *cfg)
+{
+ bool ret = FALSE;
+ uint8 i;
+ nan_ndp_peer_t* peer = NULL;
+
+ if ((cfg->nan_init_state == FALSE) ||
+ (cfg->nan_enable == FALSE)) {
+ goto exit;
+ }
+
+ if (!cfg->nancfg.nan_ndp_peer_info) {
+ goto exit;
+ }
+
+ peer = cfg->nancfg.nan_ndp_peer_info;
+ for (i = 0; i < cfg->nancfg.max_ndp_count; i++) {
+ if (peer[i].peer_dp_state != NAN_PEER_DP_NOT_CONNECTED) {
+ ret = TRUE;
+ break;
+ }
+ }
+
+exit:
+ return ret;
+}
+
+/*
+ * Returns True if
+ * datapath exists for nan cfg
+ * for given peer
+ */
+bool
+wl_cfgnan_data_dp_exists_with_peer(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer_addr)
+{
+ bool ret = FALSE;
+ nan_ndp_peer_t* peer = NULL;
+
+ if ((cfg->nan_init_state == FALSE) ||
+ (cfg->nan_enable == FALSE)) {
+ goto exit;
+ }
+
+ /* check for peer exist */
+ peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
+ if (peer) {
+ ret = TRUE;
+ }
+
+exit:
+ return ret;
+}
+
+/*
+ * As of now API only available
+ * for setting state to CONNECTED
+ * if applicable
+ */
+void
+wl_cfgnan_data_set_peer_dp_state(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer_addr, nan_peer_dp_state_t state)
+{
+ nan_ndp_peer_t* peer = NULL;
+ /* check for peer exist */
+ peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
+ if (!peer) {
+ goto end;
+ }
+ peer->peer_dp_state = state;
+end:
+ return;
+}
+
+/* Adds peer to nan data peer list */
+void
+wl_cfgnan_data_add_peer(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer_addr)
+{
+ uint8 i;
+ nan_ndp_peer_t* peer = NULL;
+ /* check for peer exist */
+ peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
+ if (peer) {
+ peer->dp_count++;
+ goto end;
+ }
+ peer = cfg->nancfg.nan_ndp_peer_info;
+ for (i = 0; i < cfg->nancfg.max_ndp_count; i++) {
+ if (peer[i].peer_dp_state == NAN_PEER_DP_NOT_CONNECTED) {
+ break;
+ }
+ }
+ if (i == NAN_MAX_NDP_PEER) {
+ WL_DBG(("DP Peer list full, Droopping add peer req\n"));
+ goto end;
+ }
+ /* Add peer to list */
+ memcpy(&peer[i].peer_addr, peer_addr, ETHER_ADDR_LEN);
+ peer[i].dp_count = 1;
+ peer[i].peer_dp_state = NAN_PEER_DP_CONNECTING;
+
+end:
+ return;
+}
+
+/* Removes nan data peer from peer list */
+void
+wl_cfgnan_data_remove_peer(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer_addr)
+{
+ nan_ndp_peer_t* peer = NULL;
+ /* check for peer exist */
+ peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
+ if (!peer) {
+ WL_DBG(("DP Peer not present in list, "
+ "Droopping remove peer req\n"));
+ goto end;
+ }
+ peer->dp_count--;
+ if (peer->dp_count == 0) {
+ /* No more NDPs, delete entry */
+ memset(peer, 0, sizeof(nan_ndp_peer_t));
+ } else {
+		/* Set peer dp state to connected if any ndp still exists */
+ peer->peer_dp_state = NAN_PEER_DP_CONNECTED;
+ }
+end:
+ return;
+}
+
int
wl_cfgnan_data_path_request_handler(struct net_device *ndev,
struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data,
uint16 buflen_avail;
uint8 *pxtlv;
struct wireless_dev *wdev;
-
uint16 nan_buf_size;
uint8 *resp_buf = NULL;
/* Considering fixed params */
nan_buf_size = data_size;
NAN_DBG_ENTER();
+
+ mutex_lock(&cfg->if_sync);
NAN_MUTEX_LOCK();
+#ifdef WL_IFACE_MGMT
+ if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN)) < 0) {
+ WL_ERR(("Conflicting iface found to be active\n"));
+ ret = BCME_UNSUPPORTED;
+ goto fail;
+ }
+#endif /* WL_IFACE_MGMT */
+
+#ifdef RTT_SUPPORT
+ /* cancel any ongoing RTT session with peer
+	 * as we do not support DP and RNG to the same peer
+ */
+ wl_cfgnan_clear_peer_ranging(cfg, &cmd_data->mac_addr,
+ RTT_GEO_SUSPN_HOST_NDP_TRIGGER);
+#endif /* RTT_SUPPORT */
nan_buf = MALLOCZ(cfg->osh, data_size);
if (!nan_buf) {
}
if (!ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
- memcpy(&datareq->peer_mac, &cmd_data->mac_addr, ETHER_ADDR_LEN);
+ ret = memcpy_s(&datareq->peer_mac, ETHER_ADDR_LEN,
+ &cmd_data->mac_addr, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy ether addr provided\n"));
+ goto fail;
+ }
} else {
WL_ERR(("Invalid ether addr provided\n"));
ret = BCME_BADARG;
(char *)cmd_data->ndp_iface);
if (!wdev || ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
ret = -EINVAL;
+ WL_ERR(("Failed to retrieve wdev/dev addr for ndp_iface = %s\n",
+ (char *)cmd_data->ndp_iface));
goto fail;
}
if (!ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
- memcpy(&datareq->ndi, wdev->netdev->dev_addr, ETHER_ADDR_LEN);
+ ret = memcpy_s(&datareq->ndi, ETHER_ADDR_LEN,
+ wdev->netdev->dev_addr, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy ether addr provided\n"));
+ goto fail;
+ }
WL_TRACE(("%s: Retrieved ndi mac " MACDBG "\n",
__FUNCTION__, MAC2STRDBG(datareq->ndi.octet)));
} else {
}
WL_INFORM_MEM(("[NAN] DP request successfull (ndp_id:%d)\n",
cmd_data->ndp_instance_id));
+ /* Add peer to data ndp peer list */
+ wl_cfgnan_data_add_peer(cfg, &datareq->peer_mac);
fail:
if (nan_buf) {
if (resp_buf) {
MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
}
-
NAN_MUTEX_UNLOCK();
+ mutex_unlock(&cfg->if_sync);
NAN_DBG_EXIT();
return ret;
}
nan_buf_size = data_size;
NAN_DBG_ENTER();
+
+ mutex_lock(&cfg->if_sync);
NAN_MUTEX_LOCK();
+#ifdef WL_IFACE_MGMT
+ if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN)) < 0) {
+ WL_ERR(("Conflicting iface found to be active\n"));
+ ret = BCME_UNSUPPORTED;
+ goto fail;
+ }
+#endif /* WL_IFACE_MGMT */
nan_buf = MALLOCZ(cfg->osh, data_size);
if (!nan_buf) {
dataresp->status = cmd_data->rsp_code ^= 1;
dataresp->reason_code = 0;
- /* ndp instance id must be from 0 to 255 */
- if (cmd_data->ndp_instance_id <= NAN_ID_MIN ||
+ /* ndp instance id must be from 1 to 255, 0 is reserved */
+ if (cmd_data->ndp_instance_id < NAN_ID_MIN ||
cmd_data->ndp_instance_id > NAN_ID_MAX) {
- WL_ERR(("Invalid ndp instance id\n"));
+ WL_ERR(("Invalid ndp instance id: %d\n", cmd_data->ndp_instance_id));
ret = BCME_BADARG;
goto fail;
}
/* Retrieved initiator ndi from NanDataPathRequestInd */
if (!ETHER_ISNULLADDR(&cfg->initiator_ndi.octet)) {
- memcpy(&dataresp->mac_addr, &cfg->initiator_ndi, ETHER_ADDR_LEN);
- } else {
- WL_ERR(("Invalid ether addr retrieved\n"));
- ret = BCME_BADARG;
- goto fail;
- }
-
- /* Retrieve mac from given iface name */
- wdev = wl_cfg80211_get_wdev_from_ifname(cfg,
- (char *)cmd_data->ndp_iface);
- if (!wdev || ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
- ret = -EINVAL;
- goto fail;
- }
-
- if (!ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
- memcpy(&dataresp->ndi, wdev->netdev->dev_addr, ETHER_ADDR_LEN);
- WL_TRACE(("%s: Retrieved ndi mac " MACDBG "\n",
- __FUNCTION__, MAC2STRDBG(dataresp->ndi.octet)));
+ ret = memcpy_s(&dataresp->mac_addr, ETHER_ADDR_LEN,
+ &cfg->initiator_ndi, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy initiator ndi\n"));
+ goto fail;
+ }
} else {
- WL_ERR(("Invalid NDI addr retrieved\n"));
+ WL_ERR(("Invalid ether addr retrieved\n"));
ret = BCME_BADARG;
goto fail;
}
+ /* Interface is not mandatory, when it is a reject from framework */
+ if (dataresp->status != WL_NAN_DP_STATUS_REJECTED) {
+ /* Retrieve mac from given iface name */
+ wdev = wl_cfg80211_get_wdev_from_ifname(cfg,
+ (char *)cmd_data->ndp_iface);
+ if (!wdev || ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
+ ret = -EINVAL;
+ WL_ERR(("Failed to retrieve wdev/dev addr for ndp_iface = %s\n",
+ (char *)cmd_data->ndp_iface));
+ goto fail;
+ }
+
+ if (!ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
+ ret = memcpy_s(&dataresp->ndi, ETHER_ADDR_LEN,
+ wdev->netdev->dev_addr, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy responder ndi\n"));
+ goto fail;
+ }
+ WL_TRACE(("%s: Retrieved ndi mac " MACDBG "\n",
+ __FUNCTION__, MAC2STRDBG(dataresp->ndi.octet)));
+ } else {
+ WL_ERR(("Invalid NDI addr retrieved\n"));
+ ret = BCME_BADARG;
+ goto fail;
+ }
+ }
+
dataresp->ndl_qos.min_slots = NAN_NDL_QOS_MIN_SLOT_NO_PREF;
dataresp->ndl_qos.max_latency = NAN_NDL_QOS_MAX_LAT_NO_PREF;
if (resp_buf) {
MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
}
-
NAN_MUTEX_UNLOCK();
+ mutex_unlock(&cfg->if_sync);
+
NAN_DBG_EXIT();
return ret;
}
int wl_cfgnan_data_path_end_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data)
+ struct bcm_cfg80211 *cfg, nan_data_path_id ndp_instance_id,
+ int *status)
{
bcm_iov_batch_buf_t *nan_buf = NULL;
wl_nan_dp_end_t *dataend = NULL;
goto fail;
}
- /* ndp instance id must be from 0 to 255 */
- if (cmd_data->ndp_instance_id <= NAN_ID_MIN ||
- cmd_data->ndp_instance_id > NAN_ID_MAX) {
- WL_ERR(("Invalid ndp instance id\n"));
+ /* ndp instance id must be from 1 to 255, 0 is reserved */
+ if (ndp_instance_id < NAN_ID_MIN ||
+ ndp_instance_id > NAN_ID_MAX) {
+ WL_ERR(("Invalid ndp instance id: %d\n", ndp_instance_id));
ret = BCME_BADARG;
goto fail;
}
sizeof(*dataend);
sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- dataend->lndp_id = cmd_data->ndp_instance_id;
+ dataend->lndp_id = ndp_instance_id;
/*
* Currently fw requires ndp_id and reason to end the data path
nan_buf_size -= (sub_cmd->len +
OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
- memset(resp_buf, 0, sizeof(resp_buf));
+ memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
- &(cmd_data->status),
- (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret) || unlikely(cmd_data->status)) {
+ status, (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(*status)) {
WL_ERR(("nan data path end handler failed, error = %d status %d\n",
- ret, cmd_data->status));
+ ret, *status));
goto fail;
}
WL_INFORM_MEM(("[NAN] DP end successfull (ndp_id:%d)\n",
dataend->lndp_id));
-
fail:
if (nan_buf) {
MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
if (cmd_data->pub_id && !ETHER_ISNULLADDR(&cmd_data->mac_addr)) {
disc_cache = wl_cfgnan_get_disc_result(cfg,
cmd_data->pub_id, &cmd_data->mac_addr);
+ WL_DBG(("datapath request: PUB ID: = %d\n",
+ cmd_data->pub_id));
if (disc_cache) {
- WL_TRACE(("svc hash present, pack it\n"));
- memcpy(nan_req_resp->svc_hash, disc_cache->svc_hash, WL_NAN_SVC_HASH_LEN);
+ (void)memcpy_s(nan_req_resp->svc_hash, WL_NAN_SVC_HASH_LEN,
+ disc_cache->svc_hash, WL_NAN_SVC_HASH_LEN);
ret = BCME_OK;
+ } else {
+ WL_ERR(("disc_cache is NULL\n"));
+ goto fail;
}
- } else {
- WL_ERR(("Missing mandatory info..pub id %d & pub_mac "MACDBG"\n",
- cmd_data->pub_id, MAC2STRDBG(cmd_data->mac_addr.octet)));
- ret = BCME_BADARG;
}
/* datapath response context */
if (cmd_data->ndp_instance_id) {
+ WL_DBG(("datapath response: NDP ID: = %d\n",
+ cmd_data->ndp_instance_id));
svc_info = wl_cfgnan_get_svc_inst(cfg, 0, cmd_data->ndp_instance_id);
/* Note: svc_info will not be present in OOB cases
* In such case send NMI alone and let HAL handle if
* svc_hash is mandatory
*/
if (svc_info) {
- WL_TRACE(("svc hash present, pack it\n"));
- memcpy(nan_req_resp->svc_hash, svc_info->svc_hash, WL_NAN_SVC_HASH_LEN);
+ WL_DBG(("svc hash present, pack it\n"));
+ (void)memcpy_s(nan_req_resp->svc_hash, WL_NAN_SVC_HASH_LEN,
+ svc_info->svc_hash, WL_NAN_SVC_HASH_LEN);
} else {
- WL_MEM(("svc_info not present..assuming OOB DP\n"));
+ WL_INFORM_MEM(("svc_info not present..assuming OOB DP\n"));
}
/* Always send NMI */
- memcpy(nan_req_resp->pub_nmi, cfg->nan_nmi_mac, ETHER_ADDR_LEN);
+ (void)memcpy_s(nan_req_resp->pub_nmi, ETHER_ADDR_LEN,
+ cfg->nan_nmi_mac, ETHER_ADDR_LEN);
ret = BCME_OK;
- } else {
- WL_ERR(("Invalid ndp id\n"));
}
fail:
NAN_MUTEX_UNLOCK();
nan_event_data->sub_id = cache->sub_id;
nan_event_data->publish_rssi = cache->publish_rssi;
nan_event_data->peer_cipher_suite = cache->peer_cipher_suite;
- memcpy(&nan_event_data->remote_nmi, &cache->peer, ETHER_ADDR_LEN);
+ ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN,
+ &cache->peer, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy cached peer nan nmi\n"));
+ goto fail;
+ }
if (cache->svc_info.dlen && cache->svc_info.data) {
nan_event_data->svc_info.dlen = cache->svc_info.dlen;
ret = -ENOMEM;
goto fail;
}
- memcpy(nan_event_data->svc_info.data,
+ ret = memcpy_s(nan_event_data->svc_info.data, nan_event_data->svc_info.dlen,
cache->svc_info.data, cache->svc_info.dlen);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy cached svc info data\n"));
+ goto fail;
+ }
}
if (cache->tx_match_filter.dlen && cache->tx_match_filter.data) {
nan_event_data->tx_match_filter.dlen = cache->tx_match_filter.dlen;
ret = -ENOMEM;
goto fail;
}
- memcpy(nan_event_data->tx_match_filter.data,
+ ret = memcpy_s(nan_event_data->tx_match_filter.data,
+ nan_event_data->tx_match_filter.dlen,
cache->tx_match_filter.data, cache->tx_match_filter.dlen);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy cached tx match filter data\n"));
+ goto fail;
+ }
}
fail:
NAN_DBG_EXIT();
return ret;
}
#endif /* WL_NAN_DISC_CACHE */
+
+/* API to cancel ranging with a peer.
+* For a geofence initiator, suspend ranging.
+* For a directed RTT initiator, report a fail result, cancel ranging
+* and clear the ranging instance.
+* For a responder, cancel ranging and clear the ranging instance.
+*/
+#ifdef RTT_SUPPORT
+static s32
+wl_cfgnan_clear_peer_ranging(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer, int reason)
+{
+ uint32 status = 0;
+ nan_ranging_inst_t *rng_inst = NULL;
+ int err = BCME_OK;
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+ rng_inst = wl_cfgnan_check_for_ranging(cfg, peer);
+ if (rng_inst) {
+ if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE) {
+ err = wl_cfgnan_suspend_geofence_rng_session(ndev,
+ peer, reason, 0);
+ } else {
+ if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
+ dhd_rtt_handle_nan_rtt_session_end(dhdp,
+ peer);
+ }
+ /* responder */
+ err = wl_cfgnan_cancel_ranging(ndev, cfg,
+ rng_inst->range_id,
+ NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
+ bzero(rng_inst, sizeof(*rng_inst));
+ }
+ }
+
+ if (err) {
+ WL_ERR(("Failed to stop ranging with peer %d\n", err));
+ }
+
+ return err;
+}
+#endif /* RTT_SUPPORT */
+
static s32
wl_nan_dp_cmn_event_data(struct bcm_cfg80211 *cfg, void *event_data,
uint16 data_len, uint16 *tlvs_offset,
wl_nan_ev_datapath_cmn_t *ev_dp;
nan_svc_info_t *svc_info;
bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
+#ifdef RTT_SUPPORT
+ nan_ranging_inst_t *rng_inst = NULL;
+#endif /* RTT_SUPPORT */
+
if (xtlv->id == WL_NAN_XTLV_DATA_DP_INFO) {
ev_dp = (wl_nan_ev_datapath_cmn_t *)xtlv->data;
NAN_DBG_ENTER();
nan_event_data->security = ev_dp->security;
/* Store initiator_ndi, required for data_path_response_request */
- memcpy(&cfg->initiator_ndi, &ev_dp->initiator_ndi,
- ETHER_ADDR_LEN);
+ ret = memcpy_s(&cfg->initiator_ndi, ETHER_ADDR_LEN,
+ &ev_dp->initiator_ndi, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy event's initiator addr\n"));
+ goto fail;
+ }
if (ev_dp->type == NAN_DP_SESSION_UNICAST) {
WL_INFORM_MEM(("NDP ID: %d\n", ev_dp->ndp_id));
nan_event_data->ndp_id = ev_dp->ndp_id;
MAC2STRDBG(ev_dp->responder_ndi.octet)));
WL_TRACE(("PEER NMI: " MACDBG "\n",
MAC2STRDBG(ev_dp->peer_nmi.octet)));
- memcpy(&nan_event_data->remote_nmi, &ev_dp->peer_nmi,
- ETHER_ADDR_LEN);
+ ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN,
+ &ev_dp->peer_nmi, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy event's peer nmi\n"));
+ goto fail;
+ }
} else {
/* type is multicast */
WL_INFORM_MEM(("NDP ID: %d\n", ev_dp->mc_id));
nan_event_data->ndp_id = ev_dp->mc_id;
WL_TRACE(("PEER NMI: " MACDBG "\n",
MAC2STRDBG(ev_dp->peer_nmi.octet)));
- memcpy(&nan_event_data->remote_nmi, &ev_dp->peer_nmi,
+ ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN,
+ &ev_dp->peer_nmi,
ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy event's peer nmi\n"));
+ goto fail;
+ }
}
*tlvs_offset = OFFSETOF(wl_nan_ev_datapath_cmn_t, opt_tlvs) +
OFFSETOF(bcm_xtlv_t, data);
goto fail;
}
svc_info->ndp_id[i] = nan_event_data->ndp_id;
+ /* Add peer to data ndp peer list */
+ wl_cfgnan_data_add_peer(cfg, &ev_dp->peer_nmi);
+#ifdef RTT_SUPPORT
+ /* cancel any ongoing RTT session with peer
+			 * as we do not support DP and RNG to the same peer
+ */
+ wl_cfgnan_clear_peer_ranging(cfg, &ev_dp->peer_nmi,
+ RTT_GEO_SUSPN_PEER_NDP_TRIGGER);
+#endif /* RTT_SUPPORT */
ret = BCME_OK;
}
#endif /* WL_NAN_DISC_CACHE */
} else if (event_num == WL_NAN_EVENT_DATAPATH_ESTB) {
*hal_event_id = GOOGLE_NAN_EVENT_DATA_CONFIRMATION;
if (ev_dp->role == NAN_DP_ROLE_INITIATOR) {
- memcpy(&nan_event_data->responder_ndi, &ev_dp->responder_ndi,
+ ret = memcpy_s(&nan_event_data->responder_ndi, ETHER_ADDR_LEN,
+ &ev_dp->responder_ndi,
ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy event's responder ndi\n"));
+ goto fail;
+ }
WL_TRACE(("REMOTE_NDI: " MACDBG "\n",
MAC2STRDBG(ev_dp->responder_ndi.octet)));
WL_TRACE(("Initiator status %d\n", nan_event_data->status));
} else {
- memcpy(&nan_event_data->responder_ndi, &ev_dp->initiator_ndi,
+ ret = memcpy_s(&nan_event_data->responder_ndi, ETHER_ADDR_LEN,
+ &ev_dp->initiator_ndi,
ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy event's responder ndi\n"));
+ goto fail;
+ }
WL_TRACE(("REMOTE_NDI: " MACDBG "\n",
MAC2STRDBG(ev_dp->initiator_ndi.octet)));
}
if (ev_dp->status == NAN_NDP_STATUS_ACCEPT) {
nan_event_data->status = NAN_DP_REQUEST_ACCEPT;
+ wl_cfgnan_data_set_peer_dp_state(cfg, &ev_dp->peer_nmi,
+ NAN_PEER_DP_CONNECTED);
+ wl_cfgnan_update_dp_info(cfg, true, nan_event_data->ndp_id);
} else if (ev_dp->status == NAN_NDP_STATUS_REJECT) {
nan_event_data->status = NAN_DP_REQUEST_REJECT;
+ /* Remove peer from data ndp peer list */
+ wl_cfgnan_data_remove_peer(cfg, &ev_dp->peer_nmi);
+#ifdef RTT_SUPPORT
+ rng_inst = wl_cfgnan_check_for_ranging(cfg, &ev_dp->peer_nmi);
+ if (rng_inst) {
+ /* Trigger/Reset geofence RTT */
+ wl_cfgnan_reset_geofence_ranging(cfg,
+ rng_inst, RTT_SCHED_DP_REJECTED);
+ }
+#endif /* RTT_SUPPORT */
} else {
WL_ERR(("%s:Status code = %x not expected\n",
__FUNCTION__, ev_dp->status));
goto fail;
}
WL_TRACE(("Responder status %d\n", nan_event_data->status));
- wl_cfgnan_update_dp_mask(cfg, true, nan_event_data->ndp_id);
} else if (event_num == WL_NAN_EVENT_DATAPATH_END) {
/* Mapping to common struct between DHD and HAL */
*hal_event_id = GOOGLE_NAN_EVENT_DATA_END;
- wl_cfgnan_update_dp_mask(cfg, false, nan_event_data->ndp_id);
#ifdef WL_NAN_DISC_CACHE
if (ev_dp->role != NAN_DP_ROLE_INITIATOR) {
/* Only at Responder side,
}
}
#endif /* WL_NAN_DISC_CACHE */
+ /* Remove peer from data ndp peer list */
+ wl_cfgnan_data_remove_peer(cfg, &ev_dp->peer_nmi);
+ wl_cfgnan_update_dp_info(cfg, false, nan_event_data->ndp_id);
+#ifdef RTT_SUPPORT
+ WL_INFORM_MEM(("DP_END for REMOTE_NMI: " MACDBG "\n",
+ MAC2STRDBG(&ev_dp->peer_nmi)));
+ rng_inst = wl_cfgnan_check_for_ranging(cfg, &ev_dp->peer_nmi);
+ if (rng_inst) {
+ /* Trigger/Reset geofence RTT */
+ WL_INFORM_MEM(("sched geofence rtt from DP_END ctx: " MACDBG "\n",
+ MAC2STRDBG(&rng_inst->peer_addr)));
+ wl_cfgnan_reset_geofence_ranging(cfg, rng_inst,
+ RTT_SCHED_DP_END);
+ }
+#endif /* RTT_SUPPORT */
}
} else {
/* Follow though, not handling other IDs as of now */
NAN_DBG_EXIT();
return ret;
}
+#define IN_GEOFENCE(ingress, egress, distance) (((distance) <= (ingress)) && \
+ ((distance) >= (egress)))
+#define IS_INGRESS_VAL(ingress, distance) ((distance) < (ingress))
+#define IS_EGRESS_VAL(egress, distance) ((distance) > (egress))
+
+static bool
+wl_cfgnan_check_ranging_cond(nan_svc_info_t *svc_info, uint32 distance,
+ uint8 *ranging_ind, uint32 prev_distance)
+{
+ uint8 svc_ind = svc_info->ranging_ind;
+ bool notify = FALSE;
+ bool range_rep_ev_once =
+ !!(svc_info->svc_range_status & SVC_RANGE_REP_EVENT_ONCE);
+ uint32 ingress_limit = svc_info->ingress_limit;
+ uint32 egress_limit = svc_info->egress_limit;
+
+ WL_DBG(("wl_cfgnan_check_ranging_cond: Checking the svc ranging cnd %d"
+ " distance %d prev_distance %d, range_rep_ev_once %d\n",
+ svc_ind, distance, prev_distance, range_rep_ev_once));
+ WL_DBG(("wl_cfgnan_check_ranging_cond: Checking the SVC ingress and"
+ " egress limits %d %d\n", ingress_limit, egress_limit));
+ if (svc_ind & NAN_RANGE_INDICATION_CONT) {
+ *ranging_ind = NAN_RANGE_INDICATION_CONT;
+ notify = TRUE;
+ WL_ERR(("\n%s :Svc has continous Ind %d\n",
+ __FUNCTION__, __LINE__));
+ goto done;
+ }
+ if (svc_ind == (NAN_RANGE_INDICATION_INGRESS |
+ NAN_RANGE_INDICATION_EGRESS)) {
+ if (IN_GEOFENCE(ingress_limit, egress_limit, distance)) {
+ /* if not already in geofence */
+ if ((range_rep_ev_once == FALSE) ||
+ (!IN_GEOFENCE(ingress_limit, egress_limit,
+ prev_distance))) {
+ notify = TRUE;
+ if (distance < ingress_limit) {
+ *ranging_ind = NAN_RANGE_INDICATION_INGRESS;
+ } else {
+ *ranging_ind = NAN_RANGE_INDICATION_EGRESS;
+ }
+ WL_ERR(("\n%s :Svc has geofence Ind %d res_ind %d\n",
+ __FUNCTION__, __LINE__, *ranging_ind));
+ }
+ }
+ goto done;
+ }
+
+ if (svc_ind == NAN_RANGE_INDICATION_INGRESS) {
+ if (IS_INGRESS_VAL(ingress_limit, distance)) {
+ if ((range_rep_ev_once == FALSE) ||
+ (prev_distance == INVALID_DISTANCE) ||
+ !IS_INGRESS_VAL(ingress_limit, prev_distance)) {
+ notify = TRUE;
+ *ranging_ind = NAN_RANGE_INDICATION_INGRESS;
+ WL_ERR(("\n%s :Svc has ingress Ind %d\n",
+ __FUNCTION__, __LINE__));
+ }
+ }
+ goto done;
+ }
+ if (svc_ind == NAN_RANGE_INDICATION_EGRESS) {
+ if (IS_EGRESS_VAL(egress_limit, distance)) {
+ if ((range_rep_ev_once == FALSE) ||
+ (prev_distance == INVALID_DISTANCE) ||
+ !IS_EGRESS_VAL(egress_limit, prev_distance)) {
+ notify = TRUE;
+ *ranging_ind = NAN_RANGE_INDICATION_EGRESS;
+ WL_ERR(("\n%s :Svc has egress Ind %d\n",
+ __FUNCTION__, __LINE__));
+ }
+ }
+ goto done;
+ }
+done:
+ svc_info->svc_range_status |= SVC_RANGE_REP_EVENT_ONCE;
+ return notify;
+}
+
+static int
+wl_cfgnan_event_disc_result(struct bcm_cfg80211 *cfg,
+ nan_event_data_t *nan_event_data)
+{
+ int ret = BCME_OK;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
+ ret = wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
+ GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH, nan_event_data);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to send event to nan hal\n"));
+ }
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
+ return ret;
+}
+
+static int32
+wl_cfgnan_notify_disc_with_ranging(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t *rng_inst, nan_event_data_t *nan_event_data, uint32 distance)
+{
+ nan_svc_info_t *svc_info;
+ bool notify_svc = FALSE;
+ nan_disc_result_cache *disc_res = cfg->nan_disc_cache;
+ uint8 ranging_ind = 0;
+ int ret = BCME_OK;
+ int i = 0, j = 0;
+
+ for (i = 0; i < MAX_SUBSCRIBES; i++) {
+ svc_info = rng_inst->svc_idx[i];
+ if (svc_info) {
+ if (nan_event_data->ranging_result_present) {
+ notify_svc = wl_cfgnan_check_ranging_cond(svc_info, distance,
+ &ranging_ind, rng_inst->prev_distance_mm);
+ nan_event_data->ranging_ind = ranging_ind;
+ } else {
+ /* Report only if ranging was needed */
+ notify_svc = svc_info->ranging_required;
+ }
+ WL_DBG(("wl_cfgnan_notify_disc_with_ranging: Ranging notify for"
+ " svc_id %d, notify %d and ind %d\n",
+ svc_info->svc_id, notify_svc, ranging_ind));
+ } else {
+ continue;
+ }
+ if (notify_svc) {
+ for (j = 0; j < NAN_MAX_CACHE_DISC_RESULT; j++) {
+ if (!memcmp(&disc_res[j].peer,
+ &(rng_inst->peer_addr), ETHER_ADDR_LEN) &&
+ (svc_info->svc_id == disc_res[j].sub_id)) {
+ ret = wl_nan_cache_to_event_data(&disc_res[j],
+ nan_event_data, cfg->osh);
+ ret = wl_cfgnan_event_disc_result(cfg, nan_event_data);
+ /* If its not match once, clear it as the FW indicates
+ * again.
+ */
+ if (!(svc_info->flags & WL_NAN_MATCH_ONCE)) {
+ wl_cfgnan_remove_disc_result(cfg, svc_info->svc_id);
+ }
+ }
+ }
+ }
+ }
+ WL_DBG(("notify_disc_with_ranging done ret %d\n", ret));
+ return ret;
+}
+
+#ifdef RTT_SUPPORT
+static int32
+wl_cfgnan_handle_directed_rtt_report(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t *rng_inst, uint8 rng_id)
+{
+ int ret = BCME_OK;
+ uint32 status;
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+
+ ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg), cfg,
+ rng_id, NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("%s:nan range cancel failed ret = %d status = %d\n",
+ __FUNCTION__, ret, status));
+ }
+ dhd_rtt_handle_nan_rtt_session_end(dhd, &rng_inst->peer_addr);
+
+ wl_cfgnan_reset_geofence_ranging(cfg, rng_inst, RTT_SCHED_RNG_RPT_DIRECTED);
+
+ WL_DBG(("Ongoing ranging session is cancelled \n"));
+ return ret;
+}
+#endif /* RTT_SUPPORT */
+
+static void
+wl_cfgnan_disc_result_on_geofence_cancel(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t *rng_inst)
+{
+ nan_event_data_t *nan_event_data = NULL;
+
+ nan_event_data = MALLOCZ(cfg->osh, sizeof(*nan_event_data));
+ if (!nan_event_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ goto exit;
+ }
+
+ wl_cfgnan_notify_disc_with_ranging(cfg, rng_inst, nan_event_data, 0);
+
+exit:
+ wl_cfgnan_clear_nan_event_data(cfg, nan_event_data);
+
+ return;
+}
+
+#ifdef RTT_SUPPORT
+void
+wl_cfgnan_process_range_report(struct bcm_cfg80211 *cfg,
+ wl_nan_ev_rng_rpt_ind_t *range_res)
+{
+ nan_ranging_inst_t *rng_inst = NULL;
+ nan_event_data_t nan_event_data;
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+
+ UNUSED_PARAMETER(nan_event_data);
+ rng_inst = wl_cfgnan_check_for_ranging(cfg, &range_res->peer_m_addr);
+ if (!rng_inst) {
+ WL_ERR(("wl_cfgnan_process_range_report: No ranging instance "
+ "but received RNG RPT event..check \n"));
+ goto exit;
+ }
+#ifdef NAN_RTT_DBG
+ DUMP_NAN_RTT_INST(rng_inst);
+ DUMP_NAN_RTT_RPT(range_res);
+#endif // endif
+ range_res->rng_id = rng_inst->range_id;
+ bzero(&nan_event_data, sizeof(nan_event_data));
+ nan_event_data.ranging_result_present = 1;
+ nan_event_data.range_measurement_cm = range_res->dist_mm;
+ (void)memcpy_s(&nan_event_data.remote_nmi, ETHER_ADDR_LEN,
+ &range_res->peer_m_addr, ETHER_ADDR_LEN);
+ nan_event_data.ranging_ind = range_res->indication;
+ if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE) {
+ /* check in cache and event match to host */
+ wl_cfgnan_notify_disc_with_ranging(cfg, rng_inst, &nan_event_data,
+ range_res->dist_mm);
+ rng_inst->prev_distance_mm = range_res->dist_mm;
+ /* Reset resp reject count on valid measurement */
+ rng_inst->geof_retry_count = 0;
+#ifdef RTT_GEOFENCE_INTERVAL
+ if (rtt_status->geofence_cfg.geofence_rtt_interval < 0) {
+ ; /* Do Nothing */
+ } else
+#endif /* RTT_GEOFENCE_INTERVAL */
+ {
+ wl_cfgnan_suspend_geofence_rng_session(bcmcfg_to_prmry_ndev(cfg),
+ &rng_inst->peer_addr, RTT_GEO_SUSPN_RANGE_RES_REPORTED, 0);
+ GEOFENCE_RTT_LOCK(rtt_status);
+ dhd_rtt_move_geofence_cur_target_idx_to_next(dhd);
+ GEOFENCE_RTT_UNLOCK(rtt_status);
+ wl_cfgnan_reset_geofence_ranging(cfg,
+ rng_inst, RTT_SCHED_RNG_RPT_GEOFENCE);
+ }
+ } else if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
+ wl_cfgnan_handle_directed_rtt_report(cfg, rng_inst, range_res->rng_id);
+ }
+
+exit:
+ return;
+}
+#endif /* RTT_SUPPORT */
static void
wl_nan_print_status(wl_nan_conf_status_t *nstatus)
break;
}
- printf("> social channels: %d, %d\n",
- nstatus->social_chans[0], nstatus->social_chans[1]);
- printf("> master_rank: " NMRSTR "\n", NMR2STR(nstatus->mr));
- printf("> amr : " NMRSTR "\n", NMR2STR(nstatus->amr));
- printf("> hop_count: %d\n", nstatus->hop_count);
- printf("> ambtt: %d\n", nstatus->ambtt);
+ printf("> social channels: %d, %d\n",
+ nstatus->social_chans[0], nstatus->social_chans[1]);
+ printf("> master_rank: " NMRSTR "\n", NMR2STR(nstatus->mr));
+ printf("> amr : " NMRSTR "\n", NMR2STR(nstatus->amr));
+ printf("> hop_count: %d\n", nstatus->hop_count);
+ printf("> ambtt: %d\n", nstatus->ambtt);
+}
+
+static void
+wl_cfgnan_clear_nan_event_data(struct bcm_cfg80211 *cfg,
+ nan_event_data_t *nan_event_data)
+{
+ if (nan_event_data) {
+ if (nan_event_data->tx_match_filter.data) {
+ MFREE(cfg->osh, nan_event_data->tx_match_filter.data,
+ nan_event_data->tx_match_filter.dlen);
+ nan_event_data->tx_match_filter.data = NULL;
+ }
+ if (nan_event_data->rx_match_filter.data) {
+ MFREE(cfg->osh, nan_event_data->rx_match_filter.data,
+ nan_event_data->rx_match_filter.dlen);
+ nan_event_data->rx_match_filter.data = NULL;
+ }
+ if (nan_event_data->svc_info.data) {
+ MFREE(cfg->osh, nan_event_data->svc_info.data,
+ nan_event_data->svc_info.dlen);
+ nan_event_data->svc_info.data = NULL;
+ }
+ if (nan_event_data->sde_svc_info.data) {
+ MFREE(cfg->osh, nan_event_data->sde_svc_info.data,
+ nan_event_data->sde_svc_info.dlen);
+ nan_event_data->sde_svc_info.data = NULL;
+ }
+ MFREE(cfg->osh, nan_event_data, sizeof(*nan_event_data));
+ }
+
+}
+
+#ifdef RTT_SUPPORT
+/*
+ * Triggers the RTT work thread
+ * if a geofence RTT target is still pending;
+ * otherwise clears the
+ * ranging instance
+ */
+void
+wl_cfgnan_reset_geofence_ranging(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t * rng_inst, int sched_reason)
+{
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+ u8 rtt_invalid_reason = RTT_STATE_VALID;
+ rtt_geofence_target_info_t *geofence_target = NULL;
+ rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
+ int8 cur_idx = DHD_RTT_INVALID_TARGET_INDEX;
+ int8 index = DHD_RTT_INVALID_TARGET_INDEX;
+ bool geofence_state = dhd_rtt_get_geofence_rtt_state(dhd);
+ bool retry = FALSE;
+
+ WL_INFORM_MEM(("wl_cfgnan_reset_geofence_ranging, sched_reason = %d, cur_idx = %d, "
+ "geofence_interval = %d\n", sched_reason, rtt_status->geofence_cfg.cur_target_idx,
+ rtt_status->geofence_cfg.geofence_rtt_interval));
+ cur_idx = dhd_rtt_get_geofence_cur_target_idx(dhd);
+ if (cur_idx == -1) {
+ WL_INFORM_MEM(("wl_cfgnan_reset_geofence_ranging, "
+ "Removing Ranging Instance " MACDBG "\n",
+ MAC2STRDBG(&(rng_inst->peer_addr))));
+ bzero(rng_inst, sizeof(*rng_inst));
+ /* Cancel pending retry timer if any */
+ if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
+ cancel_delayed_work(&rtt_status->rtt_retry_timer);
+ }
+ goto exit;
+ }
+
+ /* Get current geofencing target */
+ geofence_target = dhd_rtt_get_geofence_current_target(dhd);
+
+ /* get target index for cur ranging inst */
+ dhd_rtt_get_geofence_target(dhd,
+ &rng_inst->peer_addr, &index);
+ if ((sched_reason == RTT_SCHED_RTT_RETRY_GEOFENCE) &&
+ (rng_inst->range_status == NAN_RANGING_IN_PROGRESS)) {
+		/* if we are already in progress with the peer
+		 * (responder or directed RTT initiator)
+		 * retry later if sched_reason = timeout
+		 */
+ retry = TRUE;
+ } else if (cur_idx == index) {
+ /* Reset incoming Ranging instance */
+ rng_inst->range_type = RTT_TYPE_NAN_GEOFENCE;
+ rng_inst->range_status = NAN_RANGING_REQUIRED;
+ rng_inst->range_role = NAN_RANGING_ROLE_INITIATOR;
+ if ((sched_reason != RTT_SCHED_RNG_RPT_GEOFENCE) &&
+ (sched_reason != RTT_SCHED_RTT_RETRY_GEOFENCE)) {
+ rng_inst->prev_distance_mm = INVALID_DISTANCE;
+ }
+ } else {
+ if (index == DHD_RTT_INVALID_TARGET_INDEX) {
+ /* Remove incoming Ranging instance */
+ WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
+ MAC2STRDBG(&(rng_inst->peer_addr))));
+ bzero(rng_inst, sizeof(*rng_inst));
+ } else {
+ /* Reset incoming Ranging instance */
+ rng_inst->range_type = RTT_TYPE_NAN_GEOFENCE;
+ rng_inst->range_status = NAN_RANGING_REQUIRED;
+ rng_inst->range_role = NAN_RANGING_ROLE_INITIATOR;
+ if ((sched_reason != RTT_SCHED_RNG_RPT_GEOFENCE) &&
+ (sched_reason != RTT_SCHED_RTT_RETRY_GEOFENCE)) {
+ rng_inst->prev_distance_mm = INVALID_DISTANCE;
+ }
+ }
+ /* Create range inst if not present and reset explicitly */
+ rng_inst = wl_cfgnan_get_ranging_inst(cfg,
+ &geofence_target->peer_addr, NAN_RANGING_ROLE_INITIATOR);
+ }
+
+ /* Avoid schedule if
+ * already geofence running
+ * or Directed RTT in progress
+ * or Invalid RTT state like
+ * NDP with Peer
+ */
+ if ((geofence_state == TRUE) ||
+ (!RTT_IS_STOPPED(rtt_status)) ||
+ (rtt_invalid_reason != RTT_STATE_VALID)) {
+ /* Not in valid RTT state, avoid schedule */
+ goto exit;
+ }
+
+ if ((cur_idx == 0) && ((sched_reason == RTT_SCHED_RNG_RPT_GEOFENCE) ||
+ (sched_reason == RTT_SCHED_RNG_TERM))) {
+ /* First Target again after all done, retry over a timer */
+ retry = TRUE;
+ }
+
+ if (retry && (rtt_status->geofence_cfg.geofence_rtt_interval >= 0)) {
+ /* Move to first target and retry over a timer */
+ WL_DBG(("Retry over a timer, cur_idx = %d\n",
+ rtt_status->geofence_cfg.cur_target_idx));
+ /* schedule proxd retry timer */
+ schedule_delayed_work(&rtt_status->rtt_retry_timer,
+ msecs_to_jiffies(rtt_status->geofence_cfg.geofence_rtt_interval));
+ goto exit;
+
+ }
+
+ /* schedule RTT */
+ dhd_rtt_schedule_rtt_work_thread(dhd, sched_reason);
+
+exit:
+ return;
+}
+
+static bool
+wl_check_range_role_concurrency(dhd_pub_t *dhd, nan_ranging_inst_t *rng_inst)
+{
+ ASSERT(rng_inst);
+ if ((dhd_rtt_get_role_concurrency_state(dhd) == TRUE) &&
+ (rng_inst->num_svc_ctx > 0)) {
+ return TRUE;
+ } else {
+ return FALSE;
+ }
+}
+
+static void
+wl_cfgnan_resolve_ranging_role_concurrecny(dhd_pub_t *dhd,
+ nan_ranging_inst_t *rng_inst)
+{
+ /* Update rang_inst to initiator and resolve role concurrency */
+ rng_inst->range_role = NAN_RANGING_ROLE_INITIATOR;
+ dhd_rtt_set_role_concurrency_state(dhd, FALSE);
+}
+#endif /* RTT_SUPPORT */
+
+static bool
+wl_cfgnan_geofence_retry_check(nan_ranging_inst_t *rng_inst, uint8 reason_code)
+{
+ bool geof_retry = FALSE;
+
+ switch (reason_code) {
+ case NAN_RNG_TERM_IDLE_TIMEOUT:
+ /* Fallthrough: Keep adding more reason code if needed */
+ case NAN_RNG_TERM_RNG_RESP_TIMEOUT:
+ case NAN_RNG_TERM_RNG_RESP_REJ:
+ case NAN_RNG_TERM_RNG_TXS_FAIL:
+ if (rng_inst->geof_retry_count <
+ NAN_RNG_GEOFENCE_MAX_RETRY_CNT) {
+ rng_inst->geof_retry_count++;
+ geof_retry = TRUE;
+ }
+ break;
+ default:
+ /* FALSE for any other case */
+ break;
+ }
+
+ return geof_retry;
}
s32
NAN_DBG_ENTER();
NAN_MUTEX_LOCK();
- if (!cfg->nan_init_state) {
- WL_ERR(("nan is not in initialized state, dropping nan related events\n"));
- ret = BCME_OK;
- goto exit;
- }
if (!event || !event_data) {
WL_ERR(("event data is NULL\n"));
ret = -EINVAL;
event_type = ntoh32(event->event_type);
event_num = ntoh32(event->reason);
data_len = ntoh32(event->datalen);
- nan_event_data = MALLOCZ(cfg->osh, sizeof(*nan_event_data));
- if (!nan_event_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- goto exit;
- }
if (NAN_INVALID_EVENT(event_num)) {
WL_ERR(("unsupported event, num: %d, event type: %d\n", event_num, event_type));
goto exit;
}
WL_DBG((">> Nan Event Received: %s (num=%d, len=%d)\n",
- nan_event_to_str(event_num), event_num, data_len));
+ nan_event_to_str(event_num), event_num, data_len));
#ifdef WL_NAN_DEBUG
prhex("nan_event_data:", event_data, data_len);
#endif /* WL_NAN_DEBUG */
+ if (!cfg->nan_init_state) {
+ WL_ERR(("nan is not in initialized state, dropping nan related events\n"));
+ ret = BCME_OK;
+ goto exit;
+ }
+
+ nan_event_data = MALLOCZ(cfg->osh, sizeof(*nan_event_data));
+ if (!nan_event_data) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ goto exit;
+ }
+
nan_event_ctx.cfg = cfg;
nan_event_ctx.nan_evt_data = nan_event_data;
/*
WL_INFORM_MEM(("Nan Device Role %s\n", nan_role_to_str(nstatus->role)));
/* Mapping to common struct between DHD and HAL */
nan_event_data->enabled = nstatus->enabled;
- memcpy(&nan_event_data->local_nmi, &nstatus->nmi,
- ETHER_ADDR_LEN);
- memcpy(&nan_event_data->clus_id, &nstatus->cid,
- ETHER_ADDR_LEN);
+ ret = memcpy_s(&nan_event_data->local_nmi, ETHER_ADDR_LEN,
+ &nstatus->nmi, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy nmi\n"));
+ goto exit;
+ }
+ ret = memcpy_s(&nan_event_data->clus_id, ETHER_ADDR_LEN,
+ &nstatus->cid, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy cluster id\n"));
+ goto exit;
+ }
nan_event_data->nan_de_evt_type = event_num;
#ifdef WL_NAN_DEBUG
wl_nan_print_status(nstatus);
#endif /* WL_NAN_DEBUG */
if (event_num == WL_NAN_EVENT_START) {
- cfg->nan_enable = true;
OSL_SMP_WMB();
cfg->nancfg.nan_event_recvd = true;
OSL_SMP_WMB();
hal_event_id = GOOGLE_NAN_EVENT_DE_EVENT;
break;
}
-
- case WL_NAN_EVENT_STOP: {
- WL_INFORM_MEM((">> Nan Mac Stop Event Received\n"));
- hal_event_id = GOOGLE_NAN_EVENT_DISABLED;
- OSL_SMP_WMB();
- cfg->nancfg.nan_event_recvd = true;
- OSL_SMP_WMB();
- wake_up(&cfg->nancfg.nan_event_wait);
- cfg->nancfg.inst_id_start = 0;
- memset(cfg->nancfg.svc_inst_id_mask, 0, sizeof(cfg->nancfg.svc_inst_id_mask));
- memset(cfg->svc_info, 0, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t));
- if (cfg->nancfg.disable_reason == NAN_USER_INITIATED) {
- /* do not event to host if command is from host */
- goto exit;
- } else if (cfg->nancfg.disable_reason == NAN_CONCURRENCY_CONFLICT) {
- nan_event_data->status = NAN_STATUS_UNSUPPORTED_CONCURRENCY_NAN_DISABLED;
- } else {
- nan_event_data->status = NAN_STATUS_SUCCESS;
- }
- break;
- }
case WL_NAN_EVENT_TERMINATED: {
bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
wl_nan_ev_terminated_t *pev = (wl_nan_ev_terminated_t *)xtlv->data;
pev->reason == NAN_TERM_REASON_USER_REQ ||
pev->reason == NAN_TERM_REASON_COUNT_REACHED) {
nan_event_data->status = NAN_STATUS_SUCCESS;
- memcpy(nan_event_data->nan_reason, "NAN_STATUS_SUCCESS",
+ ret = memcpy_s(nan_event_data->nan_reason,
+ sizeof(nan_event_data->nan_reason),
+ "NAN_STATUS_SUCCESS",
strlen("NAN_STATUS_SUCCESS"));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy nan_reason\n"));
+ goto exit;
+ }
} else {
nan_event_data->status = NAN_STATUS_INTERNAL_FAILURE;
- memcpy(nan_event_data->nan_reason, "NAN_STATUS_INTERNAL_FAILURE",
+ ret = memcpy_s(nan_event_data->nan_reason,
+ sizeof(nan_event_data->nan_reason),
+ "NAN_STATUS_INTERNAL_FAILURE",
strlen("NAN_STATUS_INTERNAL_FAILURE"));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy nan_reason\n"));
+ goto exit;
+ }
}
if (pev->svctype == NAN_SC_SUBSCRIBE) {
hal_event_id = GOOGLE_NAN_EVENT_PUBLISH_TERMINATED;
}
#ifdef WL_NAN_DISC_CACHE
- /* terminate ranging sessions */
- wl_cfgnan_terminate_ranging_sessions(bcmcfg_to_prmry_ndev(cfg),
- cfg, pev->instance_id);
+ if (pev->reason != NAN_TERM_REASON_USER_REQ) {
+ wl_cfgnan_clear_svc_from_all_ranging_inst(cfg, pev->instance_id);
+ /* terminate ranging sessions */
+ wl_cfgnan_terminate_all_obsolete_ranging_sessions(cfg);
+ }
#endif /* WL_NAN_DISC_CACHE */
break;
}
wl_nan_event_txs_t *txs = (wl_nan_event_txs_t *)xtlv->data;
wl_nan_event_sd_txs_t *txs_sd = NULL;
if (txs->status == WL_NAN_TXS_SUCCESS) {
- WL_MEM(("TXS success for type %d token %d",
+ WL_INFORM_MEM(("TXS success for type %d token %d\n",
txs->type, txs->host_seq));
nan_event_data->status = NAN_STATUS_SUCCESS;
- memcpy(nan_event_data->nan_reason, "NAN_STATUS_SUCCESS",
+ ret = memcpy_s(nan_event_data->nan_reason,
+ sizeof(nan_event_data->nan_reason),
+ "NAN_STATUS_SUCCESS",
strlen("NAN_STATUS_SUCCESS"));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy nan_reason\n"));
+ goto exit;
+ }
} else {
/* TODO : populate status based on reason codes
For now adding it as no ACK, so that app/framework can retry
*/
- WL_INFORM_MEM(("TXS failed for type %d status %d token %d",
+ WL_INFORM_MEM(("TXS failed for type %d status %d token %d\n",
txs->type, txs->status, txs->host_seq));
nan_event_data->status = NAN_STATUS_NO_OTA_ACK;
- memcpy(nan_event_data->nan_reason, "NAN_STATUS_NO_OTA_ACK",
+ ret = memcpy_s(nan_event_data->nan_reason,
+ sizeof(nan_event_data->nan_reason),
+ "NAN_STATUS_NO_OTA_ACK",
strlen("NAN_STATUS_NO_OTA_ACK"));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy nan_reason\n"));
+ goto exit;
+ }
}
nan_event_data->reason = txs->reason_code;
nan_event_data->token = txs->host_seq;
break;
}
#ifdef WL_NAN_DISC_CACHE
- case WL_NAN_EVENT_RNG_RPT_IND: {
+ case WL_NAN_EVENT_DISC_CACHE_TIMEOUT: {
bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
- wl_nan_ev_rng_rpt_ind_t *range_res = (wl_nan_ev_rng_rpt_ind_t *)xtlv->data;
- nan_disc_result_cache *cache;
- nan_event_data->ranging_result_present = 1;
- nan_event_data->range_measurement_cm = range_res->dist_mm/10;
- memcpy(&nan_event_data->remote_nmi, &range_res->peer_m_addr, ETHER_ADDR_LEN);
- nan_event_data->ranging_ind = range_res->indication;
- WL_INFORM(("ranging ind = %d\n", range_res->indication));
- /* check in cache */
- cache = wl_cfgnan_get_disc_result(cfg,
- 0, &range_res->peer_m_addr);
- if (!cache) {
- ret = BCME_ERROR;
- WL_ERR(("Disc Cache entry not present for peer: " MACDBG "\n",
- MAC2STRDBG(range_res->peer_m_addr.octet)));
- goto exit;
- }
- WL_TRACE(("Disc cache entry, populate it\n"));
- ret = wl_nan_cache_to_event_data(cache,
- nan_event_data, cfg->osh);
- if (ret != BCME_OK) {
- goto exit;
- }
-#ifdef RTT_SUPPORT
- if (cfg->nancfg.range_type == LEGACY_NAN_RTT) {
- ret = wl_cfgvendor_send_as_rtt_legacy_event(cfg->wdev->wiphy,
- bcmcfg_to_prmry_ndev(cfg), range_res, 0);
- if (ret != BCME_OK) {
- goto exit;
- }
- if (range_res->rng_id) {
- ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg), cfg,
- range_res->rng_id, &status);
- if (unlikely(ret) || unlikely(status)) {
- WL_ERR(("%s:nan range cancel failed ret = %d status = %d\n",
- __FUNCTION__, ret, status));
- }
- WL_INFORM(("Ongoing ranging session is cancelled \n"));
+ wl_nan_ev_disc_cache_timeout_t *cache_data =
+ (wl_nan_ev_disc_cache_timeout_t *)xtlv->data;
+ wl_nan_disc_expired_cache_entry_t *cache_entry = NULL;
+ uint16 xtlv_len = xtlv->len;
+ uint8 entry_idx = 0;
+
+ if (xtlv->id == WL_NAN_XTLV_SD_DISC_CACHE_TIMEOUT) {
+ xtlv_len = xtlv_len -
+ OFFSETOF(wl_nan_ev_disc_cache_timeout_t, cache_exp_list);
+ while ((entry_idx < cache_data->count) &&
+ (xtlv_len >= sizeof(*cache_entry))) {
+ cache_entry = &cache_data->cache_exp_list[entry_idx];
+ /* Handle ranging cases for cache timeout */
+ wl_cfgnan_ranging_clear_publish(cfg, &cache_entry->r_nmi_addr,
+ cache_entry->l_sub_id);
+ /* Invalidate local cache info */
+ wl_cfgnan_remove_disc_result(cfg, cache_entry->l_sub_id);
+ xtlv_len = xtlv_len - sizeof(*cache_entry);
+ entry_idx++;
}
}
-#endif /* RTT_SUPPORT */
break;
}
case WL_NAN_EVENT_RNG_REQ_IND: {
nan_opts_len = data_len;
rng_ind = (wl_nan_ev_rng_req_ind_t *)xtlv->data;
xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
- WL_INFORM(("Received WL_NAN_EVENT_RNG_REQ_IND range_id %d\n",
- rng_ind->rng_id));
+ WL_INFORM_MEM(("Received WL_NAN_EVENT_RNG_REQ_IND range_id %d"
+ " peer:" MACDBG "\n", rng_ind->rng_id,
+ MAC2STRDBG(&rng_ind->peer_m_addr)));
+#ifdef RTT_SUPPORT
ret = wl_cfgnan_handle_ranging_ind(cfg, rng_ind);
+#endif /* RTT_SUPPORT */
/* no need to event to HAL */
goto exit;
}
case WL_NAN_EVENT_RNG_TERM_IND: {
bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
nan_ranging_inst_t *rng_inst;
wl_nan_ev_rng_term_ind_t *range_term = (wl_nan_ev_rng_term_ind_t *)xtlv->data;
- WL_TRACE(("Peer_NMI: " MACDBG "\n",
- MAC2STRDBG(range_term->peer_m_addr.octet)));
- WL_TRACE(("Reason code:%d\n", range_term->reason_code));
- WL_TRACE(("Received WL_NAN_EVENT_RNG_TERM_IND\n"));
- rng_inst = wl_cfgnan_check_for_ranging(cfg, &range_term->peer_m_addr);
+#ifdef RTT_SUPPORT
+ int8 index = -1;
+ rtt_geofence_target_info_t* geofence_target;
+ rtt_status_info_t *rtt_status;
+ int rng_sched_reason = 0;
+#endif /* RTT_SUPPORT */
+ BCM_REFERENCE(dhd);
+ WL_INFORM_MEM(("Received WL_NAN_EVENT_RNG_TERM_IND peer: " MACDBG ", "
+ " Range ID:%d Reason Code:%d\n", MAC2STRDBG(&range_term->peer_m_addr),
+ range_term->rng_id, range_term->reason_code));
+ rng_inst = wl_cfgnan_get_rng_inst_by_id(cfg, range_term->rng_id);
if (rng_inst) {
- /* clear ranging instance */
- WL_TRACE(("reset the ranging instance"));
- memset(rng_inst, 0, sizeof(*rng_inst));
+#ifdef RTT_SUPPORT
+ rng_sched_reason = RTT_SCHED_RNG_TERM;
+ if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
+ dhd_rtt_handle_nan_rtt_session_end(dhd, &rng_inst->peer_addr);
+ } else if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE) {
+ if (wl_cfgnan_geofence_retry_check(rng_inst,
+ range_term->reason_code)) {
+ rtt_status = GET_RTTSTATE(dhd);
+ GEOFENCE_RTT_LOCK(rtt_status);
+ dhd_rtt_move_geofence_cur_target_idx_to_next(dhd);
+ GEOFENCE_RTT_UNLOCK(rtt_status);
+ } else {
+ /* Report on ranging failure */
+ wl_cfgnan_disc_result_on_geofence_cancel(cfg,
+ rng_inst);
+ WL_TRACE(("Reset the state on terminate\n"));
+ geofence_target = dhd_rtt_get_geofence_target(dhd,
+ &rng_inst->peer_addr, &index);
+ if (geofence_target) {
+ dhd_rtt_remove_geofence_target(dhd,
+ &geofence_target->peer_addr);
+ }
+ }
+ /* Set geofence RTT in progress state to false */
+ dhd_rtt_set_geofence_rtt_state(dhd, FALSE);
+ }
+ if (rng_inst->range_role == NAN_RANGING_ROLE_RESPONDER &&
+ wl_check_range_role_concurrency(dhd, rng_inst)) {
+ /* Resolve role concurrency */
+ wl_cfgnan_resolve_ranging_role_concurrecny(dhd, rng_inst);
+ /* Override sched reason if role concurrency just resolved */
+ rng_sched_reason = RTT_SCHED_RNG_TERM_PEND_ROLE_CHANGE;
+ }
+ /* Reset Ranging Instance and trigger ranging if applicable */
+ wl_cfgnan_reset_geofence_ranging(cfg, rng_inst, rng_sched_reason);
+#endif /* RTT_SUPPORT */
}
break;
}
break;
}
default:
- WL_ERR(("WARNING: unimplemented NAN APP EVENT = %d\n", event_num));
+ WL_ERR_RLMT(("WARNING: unimplemented NAN APP EVENT = %d\n", event_num));
ret = BCME_ERROR;
goto exit;
}
#ifdef WL_NAN_DISC_CACHE
if (hal_event_id == GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH) {
+#ifdef RTT_SUPPORT
+ u8 rtt_invalid_reason = RTT_STATE_VALID;
+ bool role_concur_state = 0;
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+#endif /* RTT_SUPPORT */
+ u16 update_flags = 0;
WL_TRACE(("Cache disc res\n"));
- ret = wl_cfgnan_cache_disc_result(cfg, nan_event_data);
+ ret = wl_cfgnan_cache_disc_result(cfg, nan_event_data, &update_flags);
if (ret) {
WL_ERR(("Failed to cache disc result ret %d\n", ret));
}
if (nan_event_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) {
- ret = wl_cfgnan_check_disc_res_for_ranging(cfg, nan_event_data);
+ ret = wl_cfgnan_check_disc_result_for_ranging(cfg, nan_event_data);
if (ret == BCME_OK) {
- /* disc result to HAL will be given on ranging report */
- goto exit;
+#ifdef RTT_SUPPORT
+ rtt_invalid_reason = dhd_rtt_invalid_states
+ (bcmcfg_to_prmry_ndev(cfg), &nan_event_data->remote_nmi);
+ role_concur_state = dhd_rtt_get_role_concurrency_state(dhd);
+			/*
+			 * If an instant RTT is not possible (e.g. the RTT
+			 * state is invalid because an NDP is
+			 * connected/connecting, or role concurrency is
+			 * active), send the discovery result to the host
+			 * immediately; otherwise the disc result is
+			 * posted later with the ranging report event.
+			 */
+ if (rtt_invalid_reason == RTT_STATE_VALID &&
+ role_concur_state == FALSE) {
+ /* Avoid sending disc result instantly */
+ goto exit;
+ }
+#endif /* RTT_SUPPORT */
} else {
/* TODO: should we terminate service if ranging fails ? */
- WL_ERR(("Ranging failed or not required"));
+ WL_INFORM_MEM(("Ranging failed or not required, " MACDBG
+ " sub_id:%d , pub_id:%d\n",
+ MAC2STRDBG(&nan_event_data->remote_nmi),
+ nan_event_data->sub_id, nan_event_data->pub_id));
}
} else {
- WL_TRACE(("Ranging not required\n"));
+ nan_svc_info_t *svc_info = wl_cfgnan_get_svc_inst(cfg,
+ nan_event_data->sub_id, 0);
+ if (svc_info && svc_info->ranging_required &&
+ (update_flags & NAN_DISC_CACHE_PARAM_SDE_CONTROL)) {
+ wl_cfgnan_ranging_clear_publish(cfg,
+ &nan_event_data->remote_nmi, nan_event_data->sub_id);
+ }
}
+
/*
* If tx match filter is present as part of active subscribe, keep same filter
* values in discovery results also.
ret = -ENOMEM;
goto exit;
}
- memcpy(nan_event_data->tx_match_filter.data,
- svc->tx_match_filter, svc->tx_match_filter_len);
+ ret = memcpy_s(nan_event_data->tx_match_filter.data,
+ nan_event_data->tx_match_filter.dlen,
+ svc->tx_match_filter, svc->tx_match_filter_len);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy tx match filter data\n"));
+ goto exit;
+ }
}
}
}
#endif /* WL_NAN_DISC_CACHE */
- /* Send up range result as subscribe match event */
- if (event_num == WL_NAN_EVENT_RNG_RPT_IND) {
- WL_TRACE(("Send up range result as subscribe match event\n"));
- hal_event_id = GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH;
- }
-
WL_TRACE(("Send up %s (%d) data to HAL, hal_event_id=%d\n",
nan_event_to_str(event_num), event_num, hal_event_id));
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
exit:
- if (nan_event_data) {
- if (nan_event_data->tx_match_filter.data) {
- MFREE(cfg->osh, nan_event_data->tx_match_filter.data,
- nan_event_data->tx_match_filter.dlen);
- nan_event_data->tx_match_filter.data = NULL;
- }
- if (nan_event_data->rx_match_filter.data) {
- MFREE(cfg->osh, nan_event_data->rx_match_filter.data,
- nan_event_data->rx_match_filter.dlen);
- nan_event_data->rx_match_filter.data = NULL;
- }
- if (nan_event_data->svc_info.data) {
- MFREE(cfg->osh, nan_event_data->svc_info.data,
- nan_event_data->svc_info.dlen);
- nan_event_data->svc_info.data = NULL;
- }
- if (nan_event_data->sde_svc_info.data) {
- MFREE(cfg->osh, nan_event_data->sde_svc_info.data,
- nan_event_data->sde_svc_info.dlen);
- nan_event_data->sde_svc_info.data = NULL;
- }
- MFREE(cfg->osh, nan_event_data, sizeof(*nan_event_data));
- }
+ wl_cfgnan_clear_nan_event_data(cfg, nan_event_data);
NAN_MUTEX_UNLOCK();
NAN_DBG_EXIT();
#ifdef WL_NAN_DISC_CACHE
static int
-wl_cfgnan_cache_disc_result(struct bcm_cfg80211 *cfg, void * data)
+wl_cfgnan_cache_disc_result(struct bcm_cfg80211 *cfg, void * data,
+ u16 *disc_cache_update_flags)
{
nan_event_data_t* disc = (nan_event_data_t*)data;
int i, add_index = 0;
int ret = BCME_OK;
nan_disc_result_cache *disc_res = cfg->nan_disc_cache;
+ *disc_cache_update_flags = 0;
if (!cfg->nan_enable) {
WL_DBG(("nan not enabled"));
}
if (!memcmp(&disc_res[i].peer, &disc->remote_nmi, ETHER_ADDR_LEN) &&
!memcmp(disc_res[i].svc_hash, disc->svc_name, WL_NAN_SVC_HASH_LEN)) {
- WL_TRACE(("cache entry already present"));
+ WL_DBG(("cache entry already present, i = %d", i));
+ /* Update needed parameters here */
+ if (disc_res[i].sde_control_flag != disc->sde_control_flag) {
+ disc_res[i].sde_control_flag = disc->sde_control_flag;
+ *disc_cache_update_flags |= NAN_DISC_CACHE_PARAM_SDE_CONTROL;
+ }
ret = BCME_OK; /* entry already present */
goto done;
}
}
- WL_TRACE(("adding cache entry"));
+ WL_DBG(("adding cache entry: add_index = %d\n", add_index));
disc_res[add_index].valid = 1;
disc_res[add_index].pub_id = disc->pub_id;
disc_res[add_index].sub_id = disc->sub_id;
disc_res[add_index].publish_rssi = disc->publish_rssi;
disc_res[add_index].peer_cipher_suite = disc->peer_cipher_suite;
- memcpy(&disc_res[add_index].peer, &disc->remote_nmi, ETHER_ADDR_LEN);
- memcpy(disc_res[add_index].svc_hash, disc->svc_name, WL_NAN_SVC_HASH_LEN);
+ disc_res[add_index].sde_control_flag = disc->sde_control_flag;
+ ret = memcpy_s(&disc_res[add_index].peer, ETHER_ADDR_LEN,
+ &disc->remote_nmi, ETHER_ADDR_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy remote nmi\n"));
+ goto done;
+ }
+ ret = memcpy_s(disc_res[add_index].svc_hash, WL_NAN_SVC_HASH_LEN,
+ disc->svc_name, WL_NAN_SVC_HASH_LEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc hash\n"));
+ goto done;
+ }
if (disc->svc_info.dlen && disc->svc_info.data) {
disc_res[add_index].svc_info.dlen = disc->svc_info.dlen;
ret = BCME_NOMEM;
goto done;
}
- memcpy(disc_res[add_index].svc_info.data,
- disc->svc_info.data, disc->svc_info.dlen);
+ ret = memcpy_s(disc_res[add_index].svc_info.data, disc_res[add_index].svc_info.dlen,
+ disc->svc_info.data, disc->svc_info.dlen);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc info\n"));
+ goto done;
+ }
}
if (disc->tx_match_filter.dlen && disc->tx_match_filter.data) {
disc_res[add_index].tx_match_filter.dlen = disc->tx_match_filter.dlen;
ret = BCME_NOMEM;
goto done;
}
- memcpy(disc_res[add_index].tx_match_filter.data,
+ ret = memcpy_s(disc_res[add_index].tx_match_filter.data,
+ disc_res[add_index].tx_match_filter.dlen,
disc->tx_match_filter.data, disc->tx_match_filter.dlen);
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy tx match filter\n"));
+ goto done;
+ }
}
cfg->nan_disc_count++;
+ WL_DBG(("cfg->nan_disc_count = %d\n", cfg->nan_disc_count));
done:
return ret;
}
+/* Sending command to FW for clearing discovery cache info in FW */
+static int
+wl_cfgnan_clear_disc_cache(struct bcm_cfg80211 *cfg, wl_nan_instance_id_t sub_id)
+{
+ s32 ret = BCME_OK;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ uint32 status;
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+ uint8 buf[NAN_IOCTL_BUF_SIZE];
+ bcm_iov_batch_buf_t *nan_buf;
+ bcm_iov_batch_subcmd_t *sub_cmd;
+ uint16 subcmd_len;
+
+ /* Same src and dest len here */
+ memset_s(buf, sizeof(buf), 0, sizeof(buf));
+
+ nan_buf = (bcm_iov_batch_buf_t*)buf;
+
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+
+ sub_cmd = (bcm_iov_batch_subcmd_t *)(&nan_buf->cmds[0]);
+ ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
+ sizeof(sub_id), &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ goto fail;
+ }
+
+ /* Fill the sub_command block */
+ sub_cmd->id = htod16(WL_NAN_CMD_SD_DISC_CACHE_CLEAR);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(sub_id);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ /* Data size len vs buffer len check is already done above.
+ * So, short buffer error is impossible.
+ */
+ (void)memcpy_s(sub_cmd->data, (nan_buf_size - OFFSETOF(bcm_iov_batch_subcmd_t, data)),
+ &sub_id, sizeof(sub_id));
+ /* adjust iov data len to the end of last data record */
+ nan_buf_size -= (subcmd_len);
+
+ nan_buf->count++;
+ nan_buf->is_set = true;
+ nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;
+ /* Same src and dest len here */
+ memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
+ ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg,
+ nan_buf, nan_buf_size, &status,
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("Disc cache clear handler failed ret %d status %d\n",
+ ret, status));
+ goto fail;
+ }
+
+fail:
+ return ret;
+}
+
static int wl_cfgnan_remove_disc_result(struct bcm_cfg80211 *cfg,
uint8 local_subid)
{
goto done;
}
for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
- if (disc_res[i].sub_id == local_subid) {
+ if ((disc_res[i].valid) && (disc_res[i].sub_id == local_subid)) {
WL_TRACE(("make cache entry invalid\n"));
- disc_res[i].valid = 0;
+ if (disc_res[i].tx_match_filter.data) {
+ MFREE(cfg->osh, disc_res[i].tx_match_filter.data,
+ disc_res[i].tx_match_filter.dlen);
+ }
+ if (disc_res[i].svc_info.data) {
+ MFREE(cfg->osh, disc_res[i].svc_info.data,
+ disc_res[i].svc_info.dlen);
+ }
+ memset_s(&disc_res[i], sizeof(disc_res[i]), 0, sizeof(disc_res[i]));
cfg->nan_disc_count--;
ret = BCME_OK;
}
for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
if ((disc_res[i].pub_id == remote_pubid) &&
!memcmp(&disc_res[i].peer, peer, ETHER_ADDR_LEN)) {
- WL_TRACE(("Found entry"));
+ WL_DBG(("Found entry: i = %d\n", i));
return &disc_res[i];
}
}
} else {
for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
if (!memcmp(&disc_res[i].peer, peer, ETHER_ADDR_LEN)) {
- WL_TRACE(("Found entry"));
+ WL_DBG(("Found entry: %d\n", i));
return &disc_res[i];
}
}
}
#endif /* WL_NAN_DISC_CACHE */
-static void
-wl_cfgnan_update_dp_mask(struct bcm_cfg80211 *cfg, bool enable, u8 nan_dp_id)
+void
+wl_cfgnan_update_dp_info(struct bcm_cfg80211 *cfg, bool add,
+ nan_data_path_id ndp_id)
{
+ uint8 i;
+ bool match_found = false;
#ifdef ARP_OFFLOAD_SUPPORT
dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
#endif /* ARP_OFFLOAD_SUPPORT */
return;
}
- if (enable) {
- /* On first NAN DP indication, disable ARP. */
+ if (add) {
+ /* On first NAN DP establishment, disable ARP. */
#ifdef ARP_OFFLOAD_SUPPORT
- if (!cfg->nan_dp_mask) {
+ if (!cfg->nan_dp_count) {
dhd_arp_offload_set(dhd, 0);
dhd_arp_offload_enable(dhd, false);
}
#endif /* ARP_OFFLOAD_SUPPORT */
- cfg->nan_dp_mask |= (0x1 << nan_dp_id);
+ for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
+ if (!cfg->nancfg.ndp_id[i]) {
+ WL_TRACE(("Found empty field\n"));
+ break;
+ }
+ }
+
+ if (i == NAN_MAX_NDP_PEER) {
+ WL_ERR(("%s:cannot accommodate ndp id\n", __FUNCTION__));
+ return;
+ }
+ if (ndp_id) {
+ cfg->nan_dp_count++;
+ cfg->nancfg.ndp_id[i] = ndp_id;
+ WL_DBG(("%s:Added ndp id = [%d] at i = %d\n",
+ __FUNCTION__, cfg->nancfg.ndp_id[i], i));
+ }
} else {
- cfg->nan_dp_mask &= ~(0x1 << nan_dp_id);
+ ASSERT(cfg->nan_dp_count);
+ if (ndp_id) {
+ for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
+ if (cfg->nancfg.ndp_id[i] == ndp_id) {
+ cfg->nancfg.ndp_id[i] = 0;
+ WL_DBG(("%s:Removed ndp id = [%d] from i = %d\n",
+ __FUNCTION__, ndp_id, i));
+ match_found = true;
+ if (cfg->nan_dp_count) {
+ cfg->nan_dp_count--;
+ }
+ break;
+ } else {
+ WL_DBG(("couldn't find entry for ndp id = %d\n",
+ ndp_id));
+ }
+ }
+ if (match_found == false) {
+ WL_ERR(("Received unsaved NDP Id = %d !!\n", ndp_id));
+ }
+ }
+
#ifdef ARP_OFFLOAD_SUPPORT
- if (!cfg->nan_dp_mask) {
+ if (!cfg->nan_dp_count) {
/* If NAN DP count becomes zero and if there
* are no conflicts, enable back ARP offload.
* As of now, the conflicting interfaces are AP
}
#endif /* ARP_OFFLOAD_SUPPORT */
}
- WL_INFORM_MEM(("NAN_DP_MASK:0x%x\n", cfg->nan_dp_mask));
+ WL_INFORM_MEM(("NAN_DP_COUNT: %d\n", cfg->nan_dp_count));
}
bool
}
cfg = wiphy_priv(ndev->ieee80211_ptr->wiphy);
- nan_dp = cfg->nan_dp_mask ? true : false;
+ nan_dp = cfg->nan_dp_count ? true : false;
WL_DBG(("NAN DP status:%d\n", nan_dp));
return nan_dp;
len = MIN(strlen(name), IFNAMSIZ);
for (i = 0; i < NAN_MAX_NDI; i++) {
if (strncmp(cfg->nancfg.ndi[i].ifname, name, len) == 0) {
- memset(&cfg->nancfg.ndi[i].ifname, 0x0, IFNAMSIZ);
+ memset_s(&cfg->nancfg.ndi[i].ifname, IFNAMSIZ,
+ 0x0, IFNAMSIZ);
cfg->nancfg.ndi[i].in_use = false;
cfg->nancfg.ndi[i].created = false;
+ cfg->nancfg.ndi[i].nan_ndev = NULL;
return i;
}
}
}
return NULL;
}
+
+s32
+wl_cfgnan_delete_ndp(struct bcm_cfg80211 *cfg,
+ struct net_device *nan_ndev)
+{
+ s32 ret = BCME_OK;
+ uint8 i = 0;
+ for (i = 0; i < NAN_MAX_NDI; i++) {
+ if (cfg->nancfg.ndi[i].in_use &&
+ cfg->nancfg.ndi[i].created &&
+ (cfg->nancfg.ndi[i].nan_ndev == nan_ndev)) {
+ WL_INFORM_MEM(("iface name: %s, cfg->nancfg.ndi[i].nan_ndev = %p"
+ " and nan_ndev = %p\n",
+ (char*)cfg->nancfg.ndi[i].ifname,
+ cfg->nancfg.ndi[i].nan_ndev, nan_ndev));
+ ret = _wl_cfg80211_del_if(cfg, nan_ndev, NULL,
+ (char*)cfg->nancfg.ndi[i].ifname);
+ if (ret) {
+ WL_ERR(("failed to del ndi [%d]\n", ret));
+ goto exit;
+ }
+ /* After successful delete of interface,
+ * clear up the ndi data
+ */
+ if (wl_cfgnan_del_ndi_data(cfg,
+ (char*)cfg->nancfg.ndi[i].ifname) < 0) {
+ WL_ERR(("Failed to find matching data for ndi:%s\n",
+ (char*)cfg->nancfg.ndi[i].ifname));
+ }
+ }
+ }
+ exit:
+ return ret;
+}
+
+int
+wl_cfgnan_get_status(struct net_device *ndev, wl_nan_conf_status_t *nan_status)
+{
+ bcm_iov_batch_buf_t *nan_buf = NULL;
+ uint16 subcmd_len;
+ bcm_iov_batch_subcmd_t *sub_cmd = NULL;
+ bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
+ uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
+ wl_nan_conf_status_t *nstatus = NULL;
+ uint32 status;
+ s32 ret = BCME_OK;
+ uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ NAN_DBG_ENTER();
+
+ nan_buf = MALLOCZ(cfg->osh, NAN_IOCTL_BUF_SIZE);
+ if (!nan_buf) {
+ WL_ERR(("%s: memory allocation failed\n", __func__));
+ ret = BCME_NOMEM;
+ goto fail;
+ }
+
+ nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
+ nan_buf->count = 0;
+ nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
+ sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);
+
+ ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
+ sizeof(*nstatus), &subcmd_len);
+ if (unlikely(ret)) {
+ WL_ERR(("nan_sub_cmd check failed\n"));
+ goto fail;
+ }
+
+ nstatus = (wl_nan_conf_status_t *)sub_cmd->data;
+ sub_cmd->id = htod16(WL_NAN_CMD_CFG_STATUS);
+ sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*nstatus);
+ sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
+ nan_buf_size -= subcmd_len;
+ nan_buf->count = 1;
+ nan_buf->is_set = false;
+
+ memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
+ ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
+ (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
+ if (unlikely(ret) || unlikely(status)) {
+ WL_ERR(("get nan status failed ret %d status %d \n",
+ ret, status));
+ goto fail;
+ }
+ sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
+ /* WL_NAN_CMD_CFG_STATUS return value doesn't use xtlv package */
+ nstatus = ((wl_nan_conf_status_t *)&sub_cmd_resp->data[0]);
+ ret = memcpy_s(nan_status, sizeof(wl_nan_conf_status_t),
+ nstatus, sizeof(wl_nan_conf_status_t));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy tx match filter\n"));
+ goto fail;
+ }
+
+fail:
+ if (nan_buf) {
+ MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
+ }
+ NAN_DBG_EXIT();
+ return ret;
+}
#endif /* WL_NAN */
/*
* Neighbor Awareness Networking
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
+ *
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfgnan.h 769296 2018-06-25 14:48:17Z $
+ * $Id: wl_cfgnan.h 825970 2019-06-18 05:28:31Z $
*/
#ifndef _wl_cfgnan_h_
/* NAN structs versioning b/w DHD and HAL
* define new version if any change in any of the shared structs
*/
-#define NAN_HAL_VERSION_1 0x1
+#define NAN_HAL_VERSION_1 0x2
#define NAN_EVENT_BUFFER_SIZE_LARGE 1024u
+#define NAN_RANGE_EXT_CANCEL_SUPPORT_VER 2
#define WL_NAN_IOV_BATCH_VERSION 0x8000
#define WL_NAN_AVAIL_REPEAT_INTVL 0x0200
#define WL_NAN_AVAIL_START_INTVL 160
#define NAN_MAX_AWAKE_DW_INTERVAL 5
#define NAN_MAXIMUM_ID_NUMBER 255
#define NAN_MAXIMUM_MASTER_PREFERENCE 254
-#define NAN_ID_MIN 0
+#define NAN_ID_RESERVED 0
+#define NAN_ID_MIN 1
#define NAN_ID_MAX 255
#define NAN_DEF_SOCIAL_CHAN_2G 6
#define NAN_DEF_SOCIAL_CHAN_5G 149
#define WL_AVAIL_BANDWIDTH_5G WL_CHANSPEC_BW_80
#define NAN_RANGING_PERIOD WL_AVAIL_PERIOD_1024
#define NAN_SYNC_DEF_AWAKE_DW 1
+#define NAN_RNG_TERM_FLAG_NONE 0
#define NAN_BLOOM_LENGTH_DEFAULT 240u
#define NAN_SRF_MAX_MAC (NAN_BLOOM_LENGTH_DEFAULT / ETHER_ADDR_LEN)
(num >= WL_NAN_EVENT_INVALID))
#define NAN_INVALID_PROXD_EVENT(num) (num != WLC_E_PROXD_NAN_EVENT)
#define NAN_EVENT_BIT(event) (1U << (event - WL_NAN_EVENT_START))
+#define NAN_EVENT_MAP(event) ((event) - WL_NAN_EVENT_START)
#define NAME_TO_STR(name) #name
#define NAN_ID_CTRL_SIZE ((NAN_MAXIMUM_ID_NUMBER/8) + 1)
#define MAX_SCID_LEN 0
#define IS_NDP_SECURITY_SUPPORTED true
#define NDP_SUPPORTED_BANDS 2
-
#define NAN_MAX_RANGING_INST 8u
+#define NAN_MAX_RANGING_SSN_ALLOWED 1u
#define NAN_MAX_SVC_INST (MAX_PUBLISHES + MAX_SUBSCRIBES)
#define NAN_SVC_INST_SIZE 32u
#define NAN_START_STOP_TIMEOUT 5000
+#define NAN_MAX_NDP_PEER 8u
+#define NAN_DISABLE_CMD_DELAY_TIMER 4000u
#ifdef WL_NAN_DEBUG
#define NAN_MUTEX_LOCK() {WL_DBG(("Mutex Lock: Enter: %s\n", __FUNCTION__)); \
#define NAN_RANGING_INDICATE_CONTINUOUS_MASK 0x01
#define NAN_RANGE_REQ_CMD 0
#define NAN_RNG_REQ_ACCEPTED_BY_HOST 1
+#define NAN_RNG_REQ_REJECTED_BY_HOST 0
+
+#define NAN_RNG_GEOFENCE_MAX_RETRY_CNT 3u
typedef uint32 nan_data_path_id;
NAN_BUS_IS_DOWN = 2,
NAN_DEINITIALIZED = 3,
NAN_COUNTRY_CODE_CHANGE = 4
-
} nan_stop_reason_code_t;
-typedef enum nan_range_types {
- LEGACY_RTT = 0,
- LEGACY_NAN_RTT = 1,
- GEOFENCE_NAN_RTT = 2
-} nan_range_types_t;
-
typedef enum nan_range_status {
NAN_RANGING_INVALID = 0,
NAN_RANGING_REQUIRED = 1,
NAN_RANGING_IN_PROGRESS = 2
} nan_range_status_t;
+typedef enum nan_range_role {
+ NAN_RANGING_ROLE_INVALID = 0,
+ NAN_RANGING_ROLE_INITIATOR = 1,
+ NAN_RANGING_ROLE_RESPONDER = 2
+} nan_range_role_t;
+
typedef struct nan_svc_inst {
uint8 inst_id; /* publisher/subscriber id */
uint8 inst_type; /* publisher/subscriber */
} nan_svc_inst_t;
+/* Range Status Flag bits for svc info */
+#define SVC_RANGE_REP_EVENT_ONCE 0x01
+
+/* Range Status Flag bits for svc info */
+#define SVC_RANGE_REP_EVENT_ONCE 0x01
+
typedef struct nan_svc_info {
bool valid;
nan_data_path_id ndp_id[NAN_MAX_SVC_INST];
uint32 ranging_interval;
uint32 ingress_limit;
uint32 egress_limit;
+ uint32 flags;
uint8 tx_match_filter[MAX_MATCH_FILTER_LEN]; /* TX match filter */
uint8 tx_match_filter_len;
+ uint8 svc_range_status; /* For managing any svc range status flags */
} nan_svc_info_t;
+/* NAN Peer DP state */
+typedef enum {
+ NAN_PEER_DP_NOT_CONNECTED = 0,
+ NAN_PEER_DP_CONNECTING = 1,
+ NAN_PEER_DP_CONNECTED = 2
+} nan_peer_dp_state_t;
+
+typedef struct nan_ndp_peer {
+ uint8 peer_dp_state;
+ uint8 dp_count;
+ struct ether_addr peer_addr;
+} nan_ndp_peer_t;
+
+#define INVALID_DISTANCE 0xFFFFFFFF
typedef struct nan_ranging_inst {
- uint8 svc_inst_id;
uint8 range_id;
nan_range_status_t range_status;
struct ether_addr peer_addr;
+ int range_type;
+ uint8 num_svc_ctx;
+ nan_svc_info_t *svc_idx[MAX_SUBSCRIBES];
+ uint32 prev_distance_mm;
+ nan_range_role_t range_role;
+ bool in_use;
+ uint8 geof_retry_count;
} nan_ranging_inst_t;
+#define DUMP_NAN_RTT_INST(inst) { printf("svc instance ID %d", (inst)->svc_inst_id); \
+ printf("Range ID %d", (inst)->range_id); \
+ printf("range_status %d", (inst)->range_status); \
+ printf("Range Type %d", (inst)->range_type); \
+ printf("Peer MAC "MACDBG"\n", MAC2STRDBG((inst)->peer_addr.octet)); \
+ }
+
+#define DUMP_NAN_RTT_RPT(rpt) { printf("Range ID %d", (rpt)->rng_id); \
+ printf("Distance in MM %d", (rpt)->dist_mm); \
+ printf("range_indication %d", (rpt)->indication); \
+ printf("Peer MAC "MACDBG"\n", MAC2STRDBG((rpt)->peer_m_addr.octet)); \
+ }
/*
* Data request Initiator/Responder
* app/service related info
uint32 ttl; /* time to live */
uint32 period; /* publish period */
uint32 flags; /* Flag bits */
+ bool sde_control_config; /* whether sde_control present */
uint16 sde_control_flag;
uint16 token; /* transmit fup token id */
uint8 csid; /* cipher suite type */
bool is_ndp_security_supported;
uint32 max_sdea_service_specific_info_len;
uint32 max_subscribe_address;
+ uint32 ndpe_attr_supported;
} nan_hal_capabilities_t;
typedef struct _nan_hal_resp {
int32 value;
/* Identifier for the instance of the NDP */
uint16 ndp_instance_id;
- nan_hal_capabilities_t capabilities;
/* Publisher NMI */
uint8 pub_nmi[NAN_MAC_ADDR_LEN];
/* SVC_HASH */
uint8 svc_hash[WL_NAN_SVC_HASH_LEN];
char nan_reason[NAN_ERROR_STR_LEN]; /* Describe the NAN reason type */
+ char pad[3];
+ nan_hal_capabilities_t capabilities;
} nan_hal_resp_t;
typedef struct wl_nan_iov {
uint8 event_ind_flag, bool disable_events);
extern int wl_cfgnan_start_handler(struct net_device *ndev,
struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data, uint32 nan_attr_mask);
-extern int wl_cfgnan_stop_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, bool disable_events);
+extern int wl_cfgnan_stop_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg);
+extern void wl_cfgnan_delayed_disable(struct work_struct *work);
extern int wl_cfgnan_config_handler(struct net_device *ndev,
struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data, uint32 nan_attr_mask);
extern int wl_cfgnan_support_handler(struct net_device *ndev,
struct bcm_cfg80211 *cfg, nan_hal_capabilities_t *capabilities);
extern int wl_cfgnan_data_path_iface_create_delete_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, char *ifname, uint16 type, uint8 busstate);
+ struct bcm_cfg80211 *cfg, char *ifname, uint16 type, uint8 busstate);
extern int wl_cfgnan_data_path_request_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data,
- uint8 *ndp_instance_id);
+ struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data,
+ uint8 *ndp_instance_id);
extern int wl_cfgnan_data_path_response_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data);
+ struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data);
extern int wl_cfgnan_data_path_end_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data);
+ struct bcm_cfg80211 *cfg, nan_data_path_id ndp_instance_id,
+ int *status);
#ifdef WL_NAN_DISC_CACHE
extern int wl_cfgnan_sec_info_handler(struct bcm_cfg80211 *cfg,
nan_datapath_sec_info_cmd_data_t *cmd_data, nan_hal_resp_t *nan_req_resp);
/* ranging quest and response iovar handler */
extern int wl_cfgnan_trigger_ranging(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, void *event_data, nan_svc_info_t *svc, uint8 range_req);
+ struct bcm_cfg80211 *cfg, void *event_data, nan_svc_info_t *svc,
+ uint8 range_req, bool accept_req);
#endif /* WL_NAN_DISC_CACHE */
+void wl_cfgnan_disable_cleanup(struct bcm_cfg80211 *cfg);
+void wl_cfgnan_deinit_cleanup(struct bcm_cfg80211 *cfg);
+
extern bool wl_cfgnan_is_dp_active(struct net_device *ndev);
extern s32 wl_cfgnan_get_ndi_idx(struct bcm_cfg80211 *cfg);
extern s32 wl_cfgnan_add_ndi_data(struct bcm_cfg80211 *cfg, s32 idx, char *name);
extern s32 wl_cfgnan_del_ndi_data(struct bcm_cfg80211 *cfg, char *name);
extern struct wl_ndi_data *wl_cfgnan_get_ndi_data(struct bcm_cfg80211 *cfg, char *name);
-extern int wl_cfgnan_disable(struct bcm_cfg80211 *cfg, nan_stop_reason_code_t reason);
+extern int wl_cfgnan_disable(struct bcm_cfg80211 *cfg);
extern nan_ranging_inst_t *wl_cfgnan_get_ranging_inst(struct bcm_cfg80211 *cfg,
- struct ether_addr *peer, uint8 svc_id, bool create);
+ struct ether_addr *peer, nan_range_role_t range_role);
+extern nan_ranging_inst_t* wl_cfgnan_check_for_ranging(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer);
+#ifdef RTT_SUPPORT
+extern int wl_cfgnan_trigger_geofencing_ranging(struct net_device *dev,
+ struct ether_addr *peer_addr);
+#endif /* RTT_SUPPORT */
+extern int wl_cfgnan_suspend_geofence_rng_session(struct net_device *ndev,
+ struct ether_addr *peer, int suspend_reason, u8 cancel_flags);
+extern nan_ndp_peer_t* wl_cfgnan_data_get_peer(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer_addr);
+bool wl_cfgnan_data_dp_exists(struct bcm_cfg80211 *cfg);
+bool wl_cfgnan_data_dp_exists_with_peer(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer_addr);
+extern s32 wl_cfgnan_delete_ndp(struct bcm_cfg80211 *cfg, struct net_device *nan_ndev);
+void wl_cfgnan_data_set_peer_dp_state(struct bcm_cfg80211 *cfg,
+ struct ether_addr *peer_addr, nan_peer_dp_state_t state);
+#ifdef RTT_SUPPORT
+int wl_cfgnan_terminate_directed_rtt_sessions(struct net_device *ndev, struct bcm_cfg80211 *cfg);
+void wl_cfgnan_reset_geofence_ranging(struct bcm_cfg80211 *cfg,
+ nan_ranging_inst_t * rng_inst, int sched_reason);
+void wl_cfgnan_process_range_report(struct bcm_cfg80211 *cfg,
+ wl_nan_ev_rng_rpt_ind_t *range_res);
+#endif /* RTT_SUPPORT */
+int wl_cfgnan_cancel_ranging(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, uint8 range_id, uint8 flags, uint32 *status);
+bool wl_cfgnan_ranging_allowed(struct bcm_cfg80211 *cfg);
+uint8 wl_cfgnan_cancel_rng_responders(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg);
+extern int wl_cfgnan_get_status(struct net_device *ndev, wl_nan_conf_status_t *nan_status);
+extern void wl_cfgnan_update_dp_info(struct bcm_cfg80211 *cfg, bool add,
+ nan_data_path_id ndp_id);
+nan_status_type_t wl_cfgvendor_brcm_to_nanhal_status(int32 vendor_status);
typedef enum {
NAN_ATTRIBUTE_HEADER = 100,
NAN_ATTRIBUTE_SVC_RESPONDER_POLICY = 217,
NAN_ATTRIBUTE_EVENT_MASK = 218,
NAN_ATTRIBUTE_SUB_SID_BEACON = 219,
- NAN_ATTRIBUTE_RANDOMIZATION_INTERVAL = 220
+ NAN_ATTRIBUTE_RANDOMIZATION_INTERVAL = 220,
+ NAN_ATTRIBUTE_CMD_RESP_DATA = 221
} NAN_ATTRIBUTE;
+
+enum geofence_suspend_reason {
+ RTT_GEO_SUSPN_HOST_DIR_RTT_TRIG = 0,
+ RTT_GEO_SUSPN_PEER_RTT_TRIGGER = 1,
+ RTT_GEO_SUSPN_HOST_NDP_TRIGGER = 2,
+ RTT_GEO_SUSPN_PEER_NDP_TRIGGER = 3,
+ RTT_GEO_SUSPN_RANGE_RES_REPORTED = 4
+};
#endif /* _wl_cfgnan_h_ */
/*
* Linux cfgp2p driver
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfgp2p.c 767848 2018-06-15 09:33:44Z $
+ * $Id: wl_cfgp2p.c 819430 2019-05-13 11:38:06Z $
*
*/
#include <typedefs.h>
#include <asm/uaccess.h>
#include <bcmutils.h>
+#include <bcmstdlib_s.h>
#include <bcmendian.h>
#include <ethernet.h>
#include <802.11.h>
#include <wl_cfg80211.h>
#include <wl_cfgp2p.h>
+#include <wl_cfgscan.h>
#include <wldev_common.h>
#include <wl_android.h>
#include <dngl_stats.h>
#include <dhdioctl.h>
#include <wlioctl.h>
#include <dhd_cfg80211.h>
+#include <dhd_bus.h>
#include <dhd_config.h>
-#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
-extern int dhd_bus_mem_dump(dhd_pub_t *dhd);
-#endif /* BCMPCIE && DHD_FW_COREDUMP */
static s8 scanparambuf[WLC_IOCTL_SMLEN];
-static bool
-wl_cfgp2p_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type);
+static bool wl_cfgp2p_has_ie(const bcm_tlv_t *ie, const u8 **tlvs, u32 *tlvs_len,
+ const u8 *oui, u32 oui_len, u8 type);
static s32 wl_cfgp2p_cancel_listen(struct bcm_cfg80211 *cfg, struct net_device *ndev,
struct wireless_dev *wdev, bool notify);
s32
wl_cfgp2p_init_priv(struct bcm_cfg80211 *cfg)
{
- CFGP2P_INFO(("In\n"));
+#ifdef WL_P2P_USE_RANDMAC
+ struct ether_addr primary_mac;
+#endif /* WL_P2P_USE_RANDMAC */
cfg->p2p = MALLOCZ(cfg->osh, sizeof(struct p2p_info));
if (cfg->p2p == NULL) {
CFGP2P_ERR(("struct p2p_info allocation failed\n"));
return -ENOMEM;
}
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- cfg->p2p->cfg = cfg;
-#endif
+#ifdef WL_P2P_USE_RANDMAC
+ get_primary_mac(cfg, &primary_mac);
+ wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
+#endif /* WL_P2P_USE_RANDMAC */
wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY) = bcmcfg_to_prmry_ndev(cfg);
wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY) = 0;
wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
wl_cfgp2p_set_firm_p2p(struct bcm_cfg80211 *cfg)
{
struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
- struct ether_addr null_eth_addr = { { 0, 0, 0, 0, 0, 0 } };
s32 ret = BCME_OK;
s32 val = 0;
+#ifdef WL_P2P_USE_RANDMAC
+ struct ether_addr *p2p_dev_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE);
+#else
+ struct ether_addr null_eth_addr = { { 0, 0, 0, 0, 0, 0 } };
+ struct ether_addr *p2p_dev_addr = &null_eth_addr;
+#endif // endif
/* Do we have to check whether APSTA is enabled or not ? */
ret = wldev_iovar_getint(ndev, "apsta", &val);
if (ret < 0) {
* After Initializing firmware, we have to set current mac address to
* firmware for P2P device address
*/
- ret = wldev_iovar_setbuf_bsscfg(ndev, "p2p_da_override", &null_eth_addr,
- sizeof(null_eth_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, 0, &cfg->ioctl_buf_sync);
+ ret = wldev_iovar_setbuf_bsscfg(ndev, "p2p_da_override", p2p_dev_addr,
+ sizeof(*p2p_dev_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, 0, &cfg->ioctl_buf_sync);
if (ret && ret != BCME_UNSUPPORTED) {
CFGP2P_ERR(("failed to update device address ret %d\n", ret));
}
wl_cfgp2p_ifdel(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
{
s32 ret;
+#ifdef WL_DISABLE_HE_P2P
+ s32 bssidx = 0;
+#endif /* WL_DISABLE_HE_P2P */
struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
CFGP2P_ERR(("------ cfg p2p_ifdel "MACDBG" dev->ifindex:%d\n",
if (unlikely(ret < 0)) {
printk("'cfg p2p_ifdel' error %d\n", ret);
}
+#ifdef WL_DISABLE_HE_P2P
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, netdev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find index failed\n"));
+ ret = BCME_ERROR;
+ return ret;
+ }
+ WL_DBG(("Enabling back HE for P2P\n"));
+ wl_cfg80211_set_he_mode(netdev, cfg, bssidx, WL_IF_TYPE_P2P_DISC, TRUE);
+ if (ret < 0) {
+ WL_ERR(("failed to set he features, error=%d\n", ret));
+ }
+#endif /* WL_DISABLE_HE_P2P */
+
return ret;
}
BCM_REFERENCE(ndev);
CFGP2P_DBG(("enter\n"));
+
if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) > 0) {
CFGP2P_ERR(("do nothing, already initialized\n"));
- return ret;
+ goto exit;
}
ret = wl_cfgp2p_set_discovery(cfg, 1);
/* Set the initial discovery state to SCAN */
ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+
if (unlikely(ret != 0)) {
CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
wl_cfgp2p_set_discovery(cfg, 0);
bcm_struct_cfgdev *cfgdev;
CFGP2P_DBG(("enter\n"));
+ mutex_lock(&cfg->if_sync);
+#ifdef WL_IFACE_MGMT
+ if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_P2P_DISC)) != BCME_OK) {
+ WL_ERR(("secondary iface is active, p2p enable discovery is not supported\n"));
+ goto exit;
+ }
+#endif /* WL_IFACE_MGMT */
+
if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
CFGP2P_DBG((" DISCOVERY is already initialized, we have nothing to do\n"));
goto set_ie;
bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
} else if ((bssidx = wl_get_bssidx_by_wdev(cfg, cfg->p2p_wdev)) < 0) {
WL_ERR(("Find p2p index from wdev(%p) failed\n", cfg->p2p_wdev));
- return BCME_ERROR;
+ ret = BCME_ERROR;
+ goto exit;
}
#if defined(WL_CFG80211_P2P_DEV_IF)
if (ret) {
wl_flush_fw_log_buffer(dev, FW_LOGSET_MASK_ALL);
}
+ mutex_unlock(&cfg->if_sync);
return ret;
}
return ret;
}
+/* Scan parameters */
+#define P2PAPI_SCAN_NPROBES 1
+#define P2PAPI_SCAN_DWELL_TIME_MS 80
+#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40
+#define P2PAPI_SCAN_HOME_TIME_MS 60
+#define P2PAPI_SCAN_NPROBS_TIME_MS 30
+#define P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS 100
s32
-wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active,
+wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active_scan,
u32 num_chans, u16 *channels,
s32 search_state, u16 action, u32 bssidx, struct ether_addr *tx_dst_addr,
p2p_scan_purpose_t p2p_scan_purpose)
s8 *memblk;
wl_p2p_scan_t *p2p_params;
wl_escan_params_t *eparams;
+ wl_escan_params_v2_t *eparams_v2;
wlc_ssid_t ssid;
- /* Scan parameters */
-#define P2PAPI_SCAN_NPROBES 1
-#define P2PAPI_SCAN_DWELL_TIME_MS 80
-#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40
-#define P2PAPI_SCAN_HOME_TIME_MS 60
-#define P2PAPI_SCAN_NPROBS_TIME_MS 30
-#define P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS 100
-
- struct net_device *pri_dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+ u32 sync_id = 0;
+ s32 nprobes = 0;
+ s32 active_time = 0;
+ const struct ether_addr *mac_addr = NULL;
+ u32 scan_type = 0;
+ struct net_device *pri_dev = NULL;
+
+ pri_dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
/* Allocate scan params which need space for 3 channels and 0 ssids */
- eparams_size = (WL_SCAN_PARAMS_FIXED_SIZE +
- OFFSETOF(wl_escan_params_t, params)) +
- num_chans * sizeof(eparams->params.channel_list[0]);
+ if (cfg->scan_params_v2) {
+ eparams_size = (WL_SCAN_PARAMS_V2_FIXED_SIZE +
+ OFFSETOF(wl_escan_params_v2_t, params)) +
+ num_chans * sizeof(eparams->params.channel_list[0]);
+ } else {
+ eparams_size = (WL_SCAN_PARAMS_FIXED_SIZE +
+ OFFSETOF(wl_escan_params_t, params)) +
+ num_chans * sizeof(eparams->params.channel_list[0]);
+ }
memsize = sizeof(wl_p2p_scan_t) + eparams_size;
memblk = scanparambuf;
memsize, sizeof(scanparambuf)));
return -1;
}
- memset(memblk, 0, memsize);
- memset(cfg->ioctl_buf, 0, WLC_IOCTL_MAXLEN);
+ bzero(memblk, memsize);
+ bzero(cfg->ioctl_buf, WLC_IOCTL_MAXLEN);
if (search_state == WL_P2P_DISC_ST_SEARCH) {
/*
* If we in SEARCH STATE, we don't need to set SSID explictly
wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SEARCH, 0, 0, bssidx);
/* use null ssid */
ssid.SSID_len = 0;
- memset(&ssid.SSID, 0, sizeof(ssid.SSID));
+ bzero(&ssid.SSID, sizeof(ssid.SSID));
} else if (search_state == WL_P2P_DISC_ST_SCAN) {
/* SCAN STATE 802.11 SCAN
* WFD Supplicant has p2p_find command with (type=progressive, type= full)
wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, bssidx);
/* use wild card ssid */
ssid.SSID_len = WL_P2P_WILDCARD_SSID_LEN;
- memset(&ssid.SSID, 0, sizeof(ssid.SSID));
+ bzero(&ssid.SSID, sizeof(ssid.SSID));
memcpy(&ssid.SSID, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN);
} else {
CFGP2P_ERR((" invalid search state %d\n", search_state));
/* Fill in the P2P scan structure at the start of the iovar param block */
p2p_params = (wl_p2p_scan_t*) memblk;
p2p_params->type = 'E';
- /* Fill in the Scan structure that follows the P2P scan structure */
- eparams = (wl_escan_params_t*) (p2p_params + 1);
- eparams->params.bss_type = DOT11_BSSTYPE_ANY;
- if (active)
- eparams->params.scan_type = DOT11_SCANTYPE_ACTIVE;
- else
- eparams->params.scan_type = DOT11_SCANTYPE_PASSIVE;
-
- if (tx_dst_addr == NULL)
- memcpy(&eparams->params.bssid, ðer_bcast, ETHER_ADDR_LEN);
- else
- memcpy(&eparams->params.bssid, tx_dst_addr, ETHER_ADDR_LEN);
- if (ssid.SSID_len)
- memcpy(&eparams->params.ssid, &ssid, sizeof(wlc_ssid_t));
+ if (!active_scan) {
+ scan_type = WL_SCANFLAGS_PASSIVE;
+ }
- eparams->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS);
+ if (tx_dst_addr == NULL) {
+ mac_addr = ðer_bcast;
+ } else {
+ mac_addr = tx_dst_addr;
+ }
switch (p2p_scan_purpose) {
case P2P_SCAN_SOCIAL_CHANNEL:
- eparams->params.active_time = htod32(P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS);
+ active_time = P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS;
break;
case P2P_SCAN_AFX_PEER_NORMAL:
case P2P_SCAN_AFX_PEER_REDUCED:
- eparams->params.active_time = htod32(P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS);
+ active_time = P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS;
break;
case P2P_SCAN_CONNECT_TRY:
- eparams->params.active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
+ active_time = WL_SCAN_CONNECT_DWELL_TIME_MS;
break;
- default :
- if (wl_get_drv_status_all(cfg, CONNECTED))
- eparams->params.active_time = -1;
- else
- eparams->params.active_time = htod32(P2PAPI_SCAN_DWELL_TIME_MS);
+ default:
+ active_time = wl_get_drv_status_all(cfg, CONNECTED) ?
+ -1 : P2PAPI_SCAN_DWELL_TIME_MS;
break;
}
- if (p2p_scan_purpose == P2P_SCAN_CONNECT_TRY)
- eparams->params.nprobes = htod32(eparams->params.active_time /
- WL_SCAN_JOIN_PROBE_INTERVAL_MS);
- else
- eparams->params.nprobes = htod32((eparams->params.active_time /
- P2PAPI_SCAN_NPROBS_TIME_MS));
-
- if (eparams->params.nprobes <= 0)
- eparams->params.nprobes = 1;
- CFGP2P_DBG(("nprobes # %d, active_time %d\n",
- eparams->params.nprobes, eparams->params.active_time));
- eparams->params.passive_time = htod32(-1);
- eparams->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
- (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
-
- for (i = 0; i < num_chans; i++) {
- eparams->params.channel_list[i] =
- wl_ch_host_to_driver(channels[i]);
- }
- eparams->version = htod32(ESCAN_REQ_VERSION);
- eparams->action = htod16(action);
- wl_escan_set_sync_id(eparams->sync_id, cfg);
+ if (p2p_scan_purpose == P2P_SCAN_CONNECT_TRY) {
+ nprobes = active_time /
+ WL_SCAN_JOIN_PROBE_INTERVAL_MS;
+ } else {
+ nprobes = active_time /
+ P2PAPI_SCAN_NPROBS_TIME_MS;
+ }
+
+ if (nprobes <= 0) {
+ nprobes = 1;
+ }
+
+ wl_escan_set_sync_id(sync_id, cfg);
+ /* Fill in the Scan structure that follows the P2P scan structure */
+ if (cfg->scan_params_v2) {
+ eparams_v2 = (wl_escan_params_v2_t*) (p2p_params + 1);
+ eparams_v2->version = htod16(ESCAN_REQ_VERSION_V2);
+ eparams_v2->action = htod16(action);
+ eparams_v2->params.version = htod16(WL_SCAN_PARAMS_VERSION_V2);
+ eparams_v2->params.length = htod16(sizeof(wl_scan_params_v2_t));
+ eparams_v2->params.bss_type = DOT11_BSSTYPE_ANY;
+ eparams_v2->params.scan_type = htod32(scan_type);
+ (void)memcpy_s(&eparams_v2->params.bssid, ETHER_ADDR_LEN, mac_addr, ETHER_ADDR_LEN);
+ eparams_v2->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS);
+ eparams_v2->params.active_time = htod32(active_time);
+ eparams_v2->params.nprobes = htod32(nprobes);
+ eparams_v2->params.passive_time = htod32(-1);
+ eparams_v2->sync_id = sync_id;
+ for (i = 0; i < num_chans; i++) {
+ eparams_v2->params.channel_list[i] =
+ wl_ch_host_to_driver(channels[i]);
+ }
+ eparams_v2->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+ if (ssid.SSID_len)
+ (void)memcpy_s(&eparams_v2->params.ssid,
+ sizeof(wlc_ssid_t), &ssid, sizeof(wlc_ssid_t));
+ sync_id = eparams_v2->sync_id;
+ } else {
+ eparams = (wl_escan_params_t*) (p2p_params + 1);
+ eparams->version = htod32(ESCAN_REQ_VERSION);
+ eparams->action = htod16(action);
+ eparams->params.bss_type = DOT11_BSSTYPE_ANY;
+ eparams->params.scan_type = htod32(scan_type);
+ (void)memcpy_s(&eparams->params.bssid, ETHER_ADDR_LEN, mac_addr, ETHER_ADDR_LEN);
+ eparams->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS);
+ eparams->params.active_time = htod32(active_time);
+ eparams->params.nprobes = htod32(nprobes);
+ eparams->params.passive_time = htod32(-1);
+ eparams->sync_id = sync_id;
+ for (i = 0; i < num_chans; i++) {
+ eparams->params.channel_list[i] =
+ wl_ch_host_to_driver(channels[i]);
+ }
+ eparams->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+ if (ssid.SSID_len)
+ (void)memcpy_s(&eparams->params.ssid,
+ sizeof(wlc_ssid_t), &ssid, sizeof(wlc_ssid_t));
+ sync_id = eparams->sync_id;
+ }
+
wl_escan_set_type(cfg, WL_SCANTYPE_P2P);
- CFGP2P_DBG(("SCAN CHANNELS : "));
+ CFGP2P_DBG(("nprobes:%d active_time:%d\n", nprobes, active_time));
+ CFGP2P_DBG(("SCAN CHANNELS : "));
CFGP2P_DBG(("%d", channels[0]));
for (i = 1; i < num_chans; i++) {
CFGP2P_DBG((",%d", channels[i]));
ret = wldev_iovar_setbuf_bsscfg(pri_dev, "p2p_scan",
memblk, memsize, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
- WL_MSG(dev->name, "P2P_SEARCH sync ID: %d, bssidx: %d\n", eparams->sync_id, bssidx);
- if (ret == BCME_OK)
+ WL_MSG(dev->name, "P2P_SEARCH sync ID: %d, bssidx: %d\n", sync_id, bssidx);
+ if (ret == BCME_OK) {
wl_set_p2p_status(cfg, SCANNING);
+ }
return ret;
}
* not update the tlvs buffer pointer/length.
*/
static bool
-wl_cfgp2p_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type)
+wl_cfgp2p_has_ie(const bcm_tlv_t *ie, const u8 **tlvs, u32 *tlvs_len,
+ const u8 *oui, u32 oui_len, u8 type)
{
/* If the contents match the OUI and the type */
- if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
- !bcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
- type == ie[TLV_BODY_OFF + oui_len]) {
+ if (ie->len >= oui_len + 1 &&
+ !bcmp(ie->data, oui, oui_len) &&
+ type == ie->data[oui_len]) {
return TRUE;
}
- if (tlvs == NULL)
- return FALSE;
/* point to the next ie */
- ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
- /* calculate the length of the rest of the buffer */
- *tlvs_len -= (int)(ie - *tlvs);
- /* update the pointer to the start of the buffer */
- *tlvs = ie;
+ if (tlvs != NULL) {
+ bcm_tlv_buffer_advance_past(ie, tlvs, tlvs_len);
+ }
return FALSE;
}
const bcm_tlv_t *ie;
while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_VS_ID))) {
- if (wl_cfgp2p_is_wpa_ie((const u8*)ie, (u8 const **)&parse, &len)) {
+ if (wl_cfgp2p_is_wpa_ie(ie, &parse, &len)) {
return (const wpa_ie_fixed_t *)ie;
}
}
const bcm_tlv_t *ie;
while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_VS_ID))) {
- if (wl_cfgp2p_is_wps_ie((const u8*)ie, (u8 const **)&parse, &len)) {
+ if (wl_cfgp2p_is_wps_ie(ie, &parse, &len)) {
return (const wpa_ie_fixed_t *)ie;
}
}
bcm_tlv_t *ie;
while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_VS_ID))) {
- if (wl_cfgp2p_is_p2p_ie((const uint8*)ie, (u8 const **)&parse, &len)) {
+ if (wl_cfgp2p_is_p2p_ie(ie, &parse, &len)) {
return (wifi_p2p_ie_t *)ie;
}
}
const bcm_tlv_t *ie;
while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_VS_ID))) {
- if (wl_cfgp2p_is_wfd_ie((const uint8*)ie, (u8 const **)&parse, &len)) {
+ if (wl_cfgp2p_is_wfd_ie(ie, &parse, &len)) {
return (const wifi_wfd_ie_t *)ie;
}
}
/* Validate the pktflag parameter */
if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
- VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG))) {
+ VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG |
+ VNDR_IE_DISASSOC_FLAG))) {
CFGP2P_ERR(("p2pwl_vndr_ie: Invalid packet flag 0x%x\n", pktflag));
return -1;
}
/* Copy the vndr_ie SET command ("add"/"del") to the buffer */
- strncpy(hdr.cmd, add_del_cmd, VNDR_IE_CMD_LEN - 1);
- hdr.cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+ strlcpy(hdr.cmd, add_del_cmd, sizeof(hdr.cmd));
/* Set the IE count - the buffer contains only 1 IE */
iecount = htod32(1);
memcpy((void *)&hdr.vndr_ie_buffer.iecount, &iecount, sizeof(s32));
+ /* For vendor ID DOT11_MNG_ID_EXT_ID, need to set pkt flag to VNDR_IE_CUSTOM_FLAG */
+ if (ie_id == DOT11_MNG_ID_EXT_ID) {
+ pktflag = pktflag | VNDR_IE_CUSTOM_FLAG;
+ }
+
/* Copy packet flags that indicate which packets will contain this IE */
pktflag = htod32(pktflag);
memcpy((void *)&hdr.vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag,
CFGP2P_ERR((" argument is invalid\n"));
goto exit;
}
-
if (!cfg->p2p) {
CFGP2P_ERR(("p2p if does not exist\n"));
goto exit;
}
-
for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
if (bssidx == wl_to_p2p_bss_bssidx(cfg, i)) {
*type = i;
* so lets do it from thread context.
*/
void
-wl_cfgp2p_listen_expired(
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- struct timer_list *t
-#else
- unsigned long data
-#endif
-)
+wl_cfgp2p_listen_expired(unsigned long data)
{
wl_event_msg_t msg;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- struct p2p_info *p2p = from_timer(p2p, t, listen_timer);
- struct bcm_cfg80211 *cfg = p2p->cfg;
-#else
struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *) data;
-#endif
struct net_device *ndev;
CFGP2P_DBG((" Enter\n"));
{
#define EXTRA_DELAY_TIME 100
s32 ret = BCME_OK;
- struct timer_list *_timer;
+ timer_list_compat_t *_timer;
s32 extra_delay;
struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
CFGP2P_DBG((" WLC_E_ACTION_FRAME_COMPLETE is received : %d\n", status));
if (status == WLC_E_STATUS_SUCCESS) {
wl_set_p2p_status(cfg, ACTION_TX_COMPLETED);
- CFGP2P_DBG(("WLC_E_ACTION_FRAME_COMPLETE : ACK\n"));
+ CFGP2P_ACTION(("TX actfrm : ACK\n"));
if (!cfg->need_wait_afrx && cfg->af_sent_channel) {
CFGP2P_DBG(("no need to wait next AF.\n"));
wl_stop_wait_next_action_frame(cfg, ndev, bsscfgidx);
}
else if (!wl_get_p2p_status(cfg, ACTION_TX_COMPLETED)) {
wl_set_p2p_status(cfg, ACTION_TX_NOACK);
- CFGP2P_DBG(("WLC_E_ACTION_FRAME_COMPLETE : NO ACK\n"));
+ if (status == WLC_E_STATUS_SUPPRESS) {
+ CFGP2P_ACTION(("TX actfrm : SUPPRES\n"));
+ } else {
+ CFGP2P_ACTION(("TX actfrm : NO ACK\n"));
+ }
wl_stop_wait_next_action_frame(cfg, ndev, bsscfgidx);
}
} else {
cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
if (ret < 0) {
- CFGP2P_ERR((" sending action frame is failed\n"));
+ CFGP2P_ACTION(("TX actfrm : ERROR\n"));
goto exit;
}
struct ether_addr *mac_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE);
struct ether_addr *int_addr;
+#ifdef WL_P2P_USE_RANDMAC
+ dhd_generate_mac_addr(mac_addr);
+#else
memcpy(mac_addr, primary_addr, sizeof(struct ether_addr));
mac_addr->octet[0] |= 0x02;
WL_DBG(("P2P Discovery address:"MACDBG "\n", MAC2STRDBG(mac_addr->octet)));
+#endif /* WL_P2P_USE_RANDMAC */
int_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_CONNECTION1);
memcpy(int_addr, mac_addr, sizeof(struct ether_addr));
int iovar_len = sizeof(dongle_noa);
CFGP2P_DBG((" Enter\n"));
- memset(&dongle_noa, 0, sizeof(dongle_noa));
+ bzero(&dongle_noa, sizeof(dongle_noa));
if (wl_cfgp2p_vif_created(cfg)) {
cfg->p2p->noa.desc[0].start = 0;
return BCME_ERROR;
}
+ memset_s(&csa_arg, sizeof(csa_arg), 0, sizeof(csa_arg));
csa_arg.mode = DOT11_CSA_MODE_ADVISORY;
csa_arg.count = P2P_ECSA_CNT;
csa_arg.reg = 0;
const u8*
wl_cfgp2p_find_attrib_in_all_p2p_Ies(const u8 *parse, u32 len, u32 attrib)
{
- const bcm_tlv_t *ie;
+ bcm_tlv_t *ie;
const u8* pAttrib;
+ uint ie_len;
CFGP2P_DBG(("Starting parsing parse %p attrib %d remaining len %d ", parse, attrib, len));
- while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_VS_ID))) {
- if (wl_cfgp2p_is_p2p_ie((const uint8*)ie, (u8 const **)&parse, &len) == TRUE) {
+ ie_len = len;
+ while ((ie = bcm_parse_tlvs(parse, ie_len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgp2p_is_p2p_ie(ie, &parse, &ie_len) == TRUE) {
/* Have the P2p ie. Now check for attribute */
if ((pAttrib = wl_cfgp2p_retreive_p2pattrib(ie, attrib)) != NULL) {
CFGP2P_DBG(("P2P attribute %d was found at parse %p",
return pAttrib;
}
else {
- parse += (ie->len + TLV_HDR_LEN);
- len -= (ie->len + TLV_HDR_LEN);
+ /* move to next IE */
+ bcm_tlv_buffer_advance_past(ie, &parse, &ie_len);
+
CFGP2P_INFO(("P2P Attribute %d not found Moving parse"
- " to %p len to %d", attrib, parse, len));
+ " to %p len to %d", attrib, parse, ie_len));
}
}
else {
/* It was not p2p IE. parse will get updated automatically to next TLV */
- CFGP2P_INFO(("IT was NOT P2P IE parse %p len %d", parse, len));
+ CFGP2P_INFO(("IT was NOT P2P IE parse %p len %d", parse, ie_len));
}
}
CFGP2P_ERR(("P2P attribute %d was NOT found", attrib));
return ERR_PTR(-ENOMEM);
}
- memset(&primary_mac, 0, sizeof(primary_mac));
+ bzero(&primary_mac, sizeof(primary_mac));
get_primary_mac(cfg, &primary_mac);
+#ifndef WL_P2P_USE_RANDMAC
wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
+#endif /* WL_P2P_USE_RANDMAC */
wdev->wiphy = cfg->wdev->wiphy;
wdev->iftype = NL80211_IFTYPE_P2P_DEVICE;
wl_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev)
{
int ret = 0;
+ struct net_device *ndev = NULL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
if (!cfg)
CFGP2P_DBG(("Enter\n"));
+ /* Check if cfg80211 interface is already down */
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+ if (!wl_get_drv_status(cfg, READY, ndev)) {
+ WL_DBG(("cfg80211 interface is already down\n"));
+ return; /* it is even not ready */
+ }
+
ret = wl_cfg80211_scan_stop(cfg, wdev);
if (unlikely(ret < 0)) {
CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret));
}
- if (!cfg->p2p)
+ if (!p2p_is_on(cfg)) {
return;
+ }
+
+#ifdef P2P_LISTEN_OFFLOADING
+ wl_cfg80211_p2plo_deinit(cfg);
+#endif /* P2P_LISTEN_OFFLOADING */
/* Cancel any on-going listen */
wl_cfgp2p_cancel_listen(cfg, bcmcfg_to_prmry_ndev(cfg), wdev, TRUE);
bool rollback_lock = false;
if (!wdev || !cfg) {
- WL_ERR(("null ptr. wdev:%p cfg:%p\n", wdev, cfg));
+ WL_ERR(("wdev or cfg is NULL\n"));
return -EINVAL;
}
WL_INFORM(("Enter\n"));
+ if (!cfg->p2p_wdev) {
+ WL_ERR(("Already deleted p2p_wdev\n"));
+ return -EINVAL;
+ }
+
if (!rtnl_is_locked()) {
rtnl_lock();
rollback_lock = true;
/*
* Linux cfgp2p driver
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfgp2p.h 710886 2017-07-14 08:39:03Z $
+ * $Id: wl_cfgp2p.h 794110 2018-12-12 05:03:21Z $
*/
#ifndef _wl_cfgp2p_h_
#define _wl_cfgp2p_h_
};
struct p2p_info {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- struct bcm_cfg80211 *cfg;
-#endif
bool on; /**< p2p on/off switch */
bool scan;
int16 search_state;
s8 vir_ifname[IFNAMSIZ];
unsigned long status;
struct p2p_bss bss[P2PAPI_BSSCFG_MAX];
- struct timer_list listen_timer;
+ timer_list_compat_t listen_timer;
wl_p2p_sched_t noa;
wl_p2p_ops_t ops;
wlc_ssid_t ssid;
DHD_LOG_DUMP_WRITE args; \
} \
} while (0)
+#define CFGP2P_ACTION(args) \
+ do { \
+ if (wl_dbg_level & WL_DBG_P2P_ACTION) { \
+ printk(KERN_DEBUG "CFGP2P-ACTION) %s :", __func__); \
+ printk args; \
+ DHD_LOG_DUMP_WRITE("[%s] %s: ", \
+ dhd_log_dump_get_timestamp(), __func__); \
+ DHD_LOG_DUMP_WRITE args; \
+ } \
+ } while (0)
#else
#define CFGP2P_ERR(args) \
do { \
printk args; \
} \
} while (0)
-#endif /* DHD_LOG_DUMP */
-#define CFGP2P_DBG(args) \
+#define CFGP2P_ACTION(args) \
do { \
- if (wl_dbg_level & WL_DBG_DBG) { \
- printk(KERN_INFO "CFGP2P-DEBUG) %s :", __func__); \
+ if (wl_dbg_level & WL_DBG_P2P_ACTION) { \
+ printk(KERN_INFO "CFGP2P-ACTION) %s :", __func__); \
printk args; \
} \
} while (0)
+#endif /* DHD_LOG_DUMP */
-#define CFGP2P_ACTION(args) \
+#define CFGP2P_DBG(args) \
do { \
- if (wl_dbg_level & WL_DBG_P2P_ACTION) { \
- printk(KERN_INFO "CFGP2P-ACTION) %s :", __func__); \
+ if (wl_dbg_level & WL_DBG_DBG) { \
+ printk(KERN_INFO "CFGP2P-DEBUG) %s :", __func__); \
printk args; \
} \
} while (0)
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
-#define INIT_TIMER(timer, func, duration, extra_delay) \
- do { \
- timer_setup(timer, func, 0); \
- timer->expires = jiffies + msecs_to_jiffies(duration + extra_delay); \
- add_timer(timer); \
- } while (0);
-#else
+
#define INIT_TIMER(timer, func, duration, extra_delay) \
do { \
- init_timer(timer); \
- timer->function = func; \
- timer->expires = jiffies + msecs_to_jiffies(duration + extra_delay); \
- timer->data = (unsigned long) cfg; \
+ init_timer_compat(timer, func, cfg); \
+ timer_expires(timer) = jiffies + msecs_to_jiffies(duration + extra_delay); \
add_timer(timer); \
} while (0);
-#endif
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 8))
#ifdef WL_SUPPORT_BACKPORTED_KPATCHES
#define P2P_ECSA_CNT 50
extern void
-wl_cfgp2p_listen_expired(
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- struct timer_list *t
-#else
- unsigned long data
-#endif
-);
+wl_cfgp2p_listen_expired(unsigned long data);
extern bool
wl_cfgp2p_is_pub_action(void *frame, u32 frame_len);
extern bool
--- /dev/null
+/*
+ * Linux cfg80211 driver scan related code
+ *
+ * Copyright (C) 1999-2019, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+/* */
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+
+#include <bcmutils.h>
+#include <bcmstdlib_s.h>
+#include <bcmwifi_channels.h>
+#include <bcmendian.h>
+#include <ethernet.h>
+#include <802.11.h>
+#include <bcmiov.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <ethernet.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/wait.h>
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+
+#include <wlioctl.h>
+#include <bcmevent.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <wl_cfgscan.h>
+#include <wl_cfgp2p.h>
+#include <bcmdevs.h>
+#include <wl_android.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_linux.h>
+#include <dhd_debug.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <dhd_cfg80211.h>
+#include <dhd_bus.h>
+#include <wl_cfgvendor.h>
+#ifdef BCMPCIE
+#include <dhd_flowring.h>
+#endif // endif
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif /* PNO_SUPPORT */
+#ifdef RTT_SUPPORT
+#include "dhd_rtt.h"
+#endif /* RTT_SUPPORT */
+#include <dhd_config.h>
+
+#define ACTIVE_SCAN 1
+#define PASSIVE_SCAN 0
+
+/* Sanity bounds for the length field of a received P2P IE (see
+ * wl_find_listen_channel below, which rejects IEs outside this range).
+ */
+#define MIN_P2P_IE_LEN	8	/* p2p_ie->OUI(3) + p2p_ie->oui_type(1) +
+				 * Attribute ID(1) + Length(2) + 1(Minimum length:1)
+				 */
+#define MAX_P2P_IE_LEN	251	/* Up To 251 */
+
+/* WPS "Request Type" attribute id and its Enrollee value */
+#define WPS_ATTR_REQ_TYPE 0x103a
+#define WPS_REQ_TYPE_ENROLLEE 0x01
+
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+#define FIRST_SCAN_ACTIVE_DWELL_TIME_MS 40
+/* TRUE until the first broadcast scan has been issued after driver start */
+bool g_first_broadcast_scan = TRUE;
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+#ifdef P2P_LISTEN_OFFLOADING
+void wl_cfg80211_cancel_p2plo(struct bcm_cfg80211 *cfg);
+#endif /* P2P_LISTEN_OFFLOADING */
+static void _wl_notify_scan_done(struct bcm_cfg80211 *cfg, bool aborted);
+
+extern int passive_channel_skip;
+
+#ifdef WL11U
+/* Locate the 802.11u Interworking IE (DOT11_MNG_INTERWORKING_ID) in a raw
+ * IE buffer. Returns the TLV on success, NULL when the IE is not present.
+ */
+static bcm_tlv_t *
+wl_cfg80211_find_interworking_ie(const u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+/* unfortunately it's too much work to dispose the const cast - bcm_parse_tlvs
+ * is used everywhere and changing its prototype to take const qualifier needs
+ * a massive change to all its callers...
+ */
+
+	if ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_INTERWORKING_ID))) {
+		return ie;
+	}
+	return NULL;
+}
+
+/* Remove a previously installed Interworking IE from the given bsscfg by
+ * pushing an "ie" iovar entry with a zero length. Returns the iovar status.
+ */
+static s32
+wl_cfg80211_clear_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx)
+{
+	ie_setbuf_t ie_setbuf;
+
+	WL_DBG(("clear interworking IE\n"));
+
+	bzero(&ie_setbuf, sizeof(ie_setbuf_t));
+
+	/* one IE entry, id = Interworking, len = 0 -> delete */
+	ie_setbuf.ie_buffer.iecount = htod32(1);
+	ie_setbuf.ie_buffer.ie_list[0].ie_data.id = DOT11_MNG_INTERWORKING_ID;
+	ie_setbuf.ie_buffer.ie_list[0].ie_data.len = 0;
+
+	return wldev_iovar_setbuf_bsscfg(ndev, "ie", &ie_setbuf, sizeof(ie_setbuf),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+}
+
+/* Install (or refresh) the 802.11u Interworking IE on the given bsscfg.
+ *
+ * Only DOT11_MNG_INTERWORKING_ID is supported. If the firmware already
+ * carries an identical IE the call is a no-op; if a different one was set
+ * before (cfg->wl11u), it is cleared first. On success cfg->wl11u is set
+ * and gratuitous ARP is enabled on the bsscfg.
+ *
+ * Returns BCME_OK, BCME_UNSUPPORTED/BCME_BADARG on bad input, -ENOMEM on
+ * allocation failure, or the iovar status.
+ */
+static s32
+wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
+	uint8 ie_id, uint8 *data, uint8 data_len)
+{
+	s32 err = BCME_OK;
+	s32 buf_len;
+	ie_setbuf_t *ie_setbuf;
+	ie_getbuf_t ie_getbufp;
+	char getbuf[WLC_IOCTL_SMLEN];
+
+	if (ie_id != DOT11_MNG_INTERWORKING_ID) {
+		WL_ERR(("unsupported (id=%d)\n", ie_id));
+		return BCME_UNSUPPORTED;
+	}
+
+	/* access network options (1 octet) is the mandatory field */
+	if (!data || data_len == 0 || data_len > IW_IES_MAX_BUF_LEN) {
+		WL_ERR(("wrong interworking IE (len=%d)\n", data_len));
+		return BCME_BADARG;
+	}
+
+	/* Validate the pktflag parameter */
+	if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
+			VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
+			VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG|
+			VNDR_IE_CUSTOM_FLAG))) {
+		WL_ERR(("invalid packet flag 0x%x\n", pktflag));
+		return BCME_BADARG;
+	}
+
+	/* ie_setbuf_t already contains one payload byte, hence the -1 */
+	buf_len = sizeof(ie_setbuf_t) + data_len - 1;
+
+	/* Skip the set if the firmware already carries an identical IE */
+	ie_getbufp.id = DOT11_MNG_INTERWORKING_ID;
+	if (wldev_iovar_getbuf_bsscfg(ndev, "ie", (void *)&ie_getbufp,
+			sizeof(ie_getbufp), getbuf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync)
+			== BCME_OK) {
+		if (!memcmp(&getbuf[TLV_HDR_LEN], data, data_len)) {
+			WL_DBG(("skip to set interworking IE\n"));
+			return BCME_OK;
+		}
+	}
+
+	/* if already set with previous values, delete it first */
+	if (cfg->wl11u) {
+		if ((err = wl_cfg80211_clear_iw_ie(cfg, ndev, bssidx)) != BCME_OK) {
+			return err;
+		}
+	}
+
+	ie_setbuf = (ie_setbuf_t *)MALLOCZ(cfg->osh, buf_len);
+	if (!ie_setbuf) {
+		WL_ERR(("Error allocating buffer for IE\n"));
+		return -ENOMEM;
+	}
+	strlcpy(ie_setbuf->cmd, "add", sizeof(ie_setbuf->cmd));
+
+	/* Buffer contains only 1 IE */
+	ie_setbuf->ie_buffer.iecount = htod32(1);
+	/* use VNDR_IE_CUSTOM_FLAG flags for none vendor IE . currently fixed value */
+	ie_setbuf->ie_buffer.ie_list[0].pktflag = htod32(pktflag);
+
+	/* Now, add the IE to the buffer */
+	ie_setbuf->ie_buffer.ie_list[0].ie_data.id = DOT11_MNG_INTERWORKING_ID;
+	ie_setbuf->ie_buffer.ie_list[0].ie_data.len = data_len;
+	/* data[] was sized for data_len bytes via buf_len above, so pass the
+	 * real destination capacity: a sizeof(uint8) (= 1) bound would make a
+	 * bounds-checked memcpy_s reject any data_len > 1 and the IE payload
+	 * would never be copied. Returning void as the bound now matches count.
+	 */
+	(void)memcpy_s((uchar *)&ie_setbuf->ie_buffer.ie_list[0].ie_data.data[0], data_len,
+			data, data_len);
+
+	if ((err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len,
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync))
+			== BCME_OK) {
+		WL_DBG(("set interworking IE\n"));
+		cfg->wl11u = TRUE;
+		err = wldev_iovar_setint_bsscfg(ndev, "grat_arp", 1, bssidx);
+	}
+
+	MFREE(cfg->osh, ie_setbuf, buf_len);
+	return err;
+}
+#endif /* WL11U */
+
+#ifdef WL_BCNRECV
+/* Beacon recv results handler sending to upper layer.
+ * Converts a WLC_E_STATUS_RXBCN bss_info into a wl_bcnrecv_result_t and
+ * forwards it via wl_android_bcnrecv_event(); other statuses are ignored.
+ * Returns BCME_OK, BCME_NORESOURCE on missing bi/wiphy, -ENOMEM on alloc
+ * failure, or the event-send status.
+ */
+static s32
+wl_bcnrecv_result_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+		wl_bss_info_v109_2_t *bi, uint32 scan_status)
+{
+	s32 err = BCME_OK;
+	struct wiphy *wiphy = NULL;
+	wl_bcnrecv_result_t *bcn_recv = NULL;
+	struct osl_timespec ts;
+	if (!bi) {
+		WL_ERR(("%s: bi is NULL\n", __func__));
+		err = BCME_NORESOURCE;
+		goto exit;
+	}
+	/* fixed portion must be at least v109_2 sized to carry the fields below */
+	if ((bi->length - bi->ie_length) < sizeof(wl_bss_info_v109_2_t)) {
+		WL_ERR(("bi info version doesn't support bcn_recv attributes\n"));
+		goto exit;
+	}
+
+	if (scan_status == WLC_E_STATUS_RXBCN) {
+		wiphy = cfg->wdev->wiphy;
+		if (!wiphy) {
+			WL_ERR(("wiphy is NULL\n"));
+			err = BCME_NORESOURCE;
+			goto exit;
+		}
+		bcn_recv = (wl_bcnrecv_result_t *)MALLOCZ(cfg->osh, sizeof(*bcn_recv));
+		if (unlikely(!bcn_recv)) {
+			WL_ERR(("Failed to allocate memory\n"));
+			return -ENOMEM;
+		}
+		/* Returning void here as copy size does not exceed dest size of SSID */
+		(void)memcpy_s((char *)bcn_recv->SSID, DOT11_MAX_SSID_LEN,
+			(char *)bi->SSID, DOT11_MAX_SSID_LEN);
+		/* Returning void here as copy size does not exceed dest size of ETH_LEN */
+		(void)memcpy_s(&bcn_recv->BSSID, ETHER_ADDR_LEN, &bi->BSSID, ETH_ALEN);
+		bcn_recv->channel = wf_chspec_ctlchan(
+			wl_chspec_driver_to_host(bi->chanspec));
+		bcn_recv->beacon_interval = bi->beacon_period;
+
+		/* kernel timestamp, in microseconds */
+		osl_get_monotonic_boottime(&ts);
+		bcn_recv->system_time = ((u64)ts.tv_sec*1000000)
+				+ ts.tv_nsec / 1000;
+		bcn_recv->timestamp[0] = bi->timestamp[0];
+		bcn_recv->timestamp[1] = bi->timestamp[1];
+		if ((err = wl_android_bcnrecv_event(cfgdev_to_wlc_ndev(cfgdev, cfg),
+				BCNRECV_ATTR_BCNINFO, 0, 0,
+				(uint8 *)bcn_recv, sizeof(*bcn_recv)))
+				!= BCME_OK) {
+			WL_ERR(("failed to send bcnrecv event, error:%d\n", err));
+		}
+	} else {
+		WL_DBG(("Ignoring Escan Event:%d \n", scan_status));
+	}
+exit:
+	if (bcn_recv) {
+		MFREE(cfg->osh, bcn_recv, sizeof(*bcn_recv));
+	}
+	return err;
+}
+#endif /* WL_BCNRECV */
+
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+#ifndef WL_DRV_AVOID_SCANCACHE
+/* Maintain a list of the BUF_OVERFLOW_MGMT_COUNT weakest-RSSI entries seen
+ * so far. The candidate array is kept sorted weakest-first: if bss is weaker
+ * than candidate[idx], entries from idx on are shifted down one slot and bss
+ * is recorded (RSSI, length, BSSID) at idx.
+ */
+static void
+wl_cfg80211_find_removal_candidate(wl_bss_info_t *bss, removal_element_t *candidate)
+{
+	int idx;
+	for (idx = 0; idx < BUF_OVERFLOW_MGMT_COUNT; idx++) {
+		/* number of slots that remain below idx (shift width) */
+		int len = BUF_OVERFLOW_MGMT_COUNT - idx - 1;
+		if (bss->RSSI < candidate[idx].RSSI) {
+			if (len) {
+				/* In the below memcpy operation the candidate array always has the
+				 * buffer space available to max 'len' calculated in the for loop.
+				 */
+				(void)memcpy_s(&candidate[idx + 1],
+					(sizeof(removal_element_t) * len),
+					&candidate[idx], sizeof(removal_element_t) * len);
+			}
+			candidate[idx].RSSI = bss->RSSI;
+			candidate[idx].length = bss->length;
+			(void)memcpy_s(&candidate[idx].BSSID, ETHER_ADDR_LEN,
+				&bss->BSSID, ETHER_ADDR_LEN);
+			return;
+		}
+	}
+}
+
+/* Free room in the escan result list for a new entry 'bi' by deleting cached
+ * entries that are weaker than it. Candidates (collected by
+ * wl_cfg80211_find_removal_candidate) are matched against the list by BSSID,
+ * RSSI and length; each match is removed via memmove and the list's
+ * buflen/count are shrunk. Stops once at least bi->length bytes are freed.
+ */
+static void
+wl_cfg80211_remove_lowRSSI_info(wl_scan_results_t *list, removal_element_t *candidate,
+	wl_bss_info_t *bi)
+{
+	int idx1, idx2;
+	int total_delete_len = 0;
+	for (idx1 = 0; idx1 < BUF_OVERFLOW_MGMT_COUNT; idx1++) {
+		int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
+		wl_bss_info_t *bss = NULL;
+		/* only entries weaker than the incoming result may be evicted */
+		if (candidate[idx1].RSSI >= bi->RSSI)
+			continue;
+		for (idx2 = 0; idx2 < list->count; idx2++) {
+			/* walk the variable-length records: first entry, then advance
+			 * by the previous record's length
+			 */
+			bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length)) :
+				list->bss_info;
+			if (!bcmp(&candidate[idx1].BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
+				candidate[idx1].RSSI == bss->RSSI &&
+				candidate[idx1].length == dtoh32(bss->length)) {
+				u32 delete_len = dtoh32(bss->length);
+				WL_DBG(("delete scan info of " MACDBG " to add new AP\n",
+					MAC2STRDBG(bss->BSSID.octet)));
+				if (idx2 < list->count -1) {
+					/* not the last record: close the gap */
+					memmove((u8 *)bss, (u8 *)bss + delete_len,
+						list->buflen - cur_len - delete_len);
+				}
+				list->buflen -= delete_len;
+				list->count--;
+				total_delete_len += delete_len;
+				/* if delete_len is greater than or equal to result length */
+				if (total_delete_len >= bi->length) {
+					return;
+				}
+				break;
+			}
+			cur_len += dtoh32(bss->length);
+		}
+	}
+}
+#endif /* WL_DRV_AVOID_SCANCACHE */
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+/* Firmware escan event handler.
+ *
+ * Dispatches on the event status:
+ *  - WLC_E_STATUS_PARTIAL: validate the single bss_info record and merge it
+ *    into the cached scan result list (dedupe/replace by BSSID+band+SSID,
+ *    preferring probe-response data and on-channel RSSI), or complete the
+ *    action-frame common-channel search if one is in progress;
+ *  - WLC_E_STATUS_SUCCESS: inform cfg80211 of the cached results and finish
+ *    the scan;
+ *  - abort-type statuses / TIMEOUT / anything else: tear the scan down.
+ * All work is done under cfg->scan_sync. Returns BCME_OK or an error from
+ * the bcnrecv/no-cache sub-handlers.
+ */
+s32
+wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 err = BCME_OK;
+	s32 status = ntoh32(e->status);
+	wl_escan_result_t *escan_result;
+	struct net_device *ndev = NULL;
+#ifndef WL_DRV_AVOID_SCANCACHE
+	wl_bss_info_t *bi;
+	u32 bi_length;
+	const wifi_p2p_ie_t * p2p_ie;
+	const u8 *p2p_dev_addr = NULL;
+	wl_scan_results_t *list;
+	wl_bss_info_t *bss = NULL;
+	u32 i;
+#endif /* WL_DRV_AVOID_SCANCACHE */
+	u16 channel;
+	struct ieee80211_supported_band *band;
+
+	WL_DBG((" enter event type : %d, status : %d \n",
+		ntoh32(e->event_type), ntoh32(e->status)));
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	mutex_lock(&cfg->scan_sync);
+	/* P2P SCAN is coming from primary interface */
+	if (wl_get_p2p_status(cfg, SCANNING)) {
+		if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM))
+			ndev = cfg->afx_hdl->dev;
+		else
+			ndev = cfg->escan_info.ndev;
+	}
+	escan_result = (wl_escan_result_t *)data;
+#ifdef WL_BCNRECV
+	if (cfg->bcnrecv_info.bcnrecv_state == BEACON_RECV_STARTED &&
+		status == WLC_E_STATUS_RXBCN) {
+		/* handle beacon recv scan results */
+		wl_bss_info_v109_2_t *bi_info;
+		bi_info = (wl_bss_info_v109_2_t *)escan_result->bss_info;
+		err = wl_bcnrecv_result_handler(cfg, cfgdev, bi_info, status);
+		goto exit;
+	}
+#endif /* WL_BCNRECV */
+	/* drop events arriving when no host scan (or sched scan) is active */
+	if (!ndev || (!wl_get_drv_status(cfg, SCANNING, ndev) && !cfg->sched_scan_running)) {
+		WL_ERR_RLMT(("escan is not ready. drv_scan_status 0x%x"
+		" e_type %d e_states %d\n",
+		wl_get_drv_status(cfg, SCANNING, ndev),
+		ntoh32(e->event_type), ntoh32(e->status)));
+		goto exit;
+	}
+
+#ifndef WL_DRV_AVOID_SCANCACHE
+	if (status == WLC_E_STATUS_PARTIAL) {
+		WL_DBG(("WLC_E_STATUS_PARTIAL \n"));
+		DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND);
+		if (!escan_result) {
+			WL_ERR(("Invalid escan result (NULL pointer)\n"));
+			goto exit;
+		}
+		if ((dtoh32(escan_result->buflen) > (int)ESCAN_BUF_SIZE) ||
+			(dtoh32(escan_result->buflen) < sizeof(wl_escan_result_t))) {
+			WL_ERR(("Invalid escan buffer len:%d\n", dtoh32(escan_result->buflen)));
+			goto exit;
+		}
+		if (dtoh16(escan_result->bss_count) != 1) {
+			WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count));
+			goto exit;
+		}
+		bi = escan_result->bss_info;
+		if (!bi) {
+			WL_ERR(("Invalid escan bss info (NULL pointer)\n"));
+			goto exit;
+		}
+		bi_length = dtoh32(bi->length);
+		if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) {
+			WL_ERR(("Invalid bss_info length %d: ignoring\n", bi_length));
+			goto exit;
+		}
+
+		/* +++++ terence 20130524: skip invalid bss */
+		channel =
+			bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(wl_chspec_driver_to_host(bi->chanspec));
+		if (channel <= CH_MAX_2G_CHANNEL)
+			band = bcmcfg_to_wiphy(cfg)->bands[IEEE80211_BAND_2GHZ];
+		else
+			band = bcmcfg_to_wiphy(cfg)->bands[IEEE80211_BAND_5GHZ];
+		if (!band) {
+			WL_ERR(("No valid band\n"));
+			goto exit;
+		}
+		if (!dhd_conf_match_channel(cfg->pub, channel))
+			goto exit;
+		/* ----- terence 20130524: skip invalid bss */
+
+		if (wl_escan_check_sync_id(status, escan_result->sync_id,
+			cfg->escan_info.cur_sync_id) < 0)
+			goto exit;
+
+		if (!(bcmcfg_to_wiphy(cfg)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) {
+			if (dtoh16(bi->capability) & DOT11_CAP_IBSS) {
+				WL_DBG(("Ignoring IBSS result\n"));
+				goto exit;
+			}
+		}
+
+		if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			/* action-frame peer search: looking for the target peer's
+			 * channel, not caching results
+			 */
+			p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length);
+			if (p2p_dev_addr && !memcmp(p2p_dev_addr,
+				cfg->afx_hdl->tx_dst_addr.octet, ETHER_ADDR_LEN)) {
+				s32 channel = wf_chspec_ctlchan(
+					wl_chspec_driver_to_host(bi->chanspec));
+
+				if ((channel > MAXCHANNEL) || (channel <= 0))
+					channel = WL_INVALID;
+				else
+					WL_ERR(("ACTION FRAME SCAN : Peer " MACDBG " found,"
+						" channel : %d\n",
+						MAC2STRDBG(cfg->afx_hdl->tx_dst_addr.octet),
+						channel));
+
+				wl_clr_p2p_status(cfg, SCANNING);
+				cfg->afx_hdl->peer_chan = channel;
+				complete(&cfg->act_frm_scan);
+				goto exit;
+			}
+
+		} else {
+			int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+			removal_element_t candidate[BUF_OVERFLOW_MGMT_COUNT];
+			int remove_lower_rssi = FALSE;
+
+			bzero(candidate, sizeof(removal_element_t)*BUF_OVERFLOW_MGMT_COUNT);
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+			list = wl_escan_get_buf(cfg, FALSE);
+			if (scan_req_match(cfg)) {
+#ifdef WL_HOST_BAND_MGMT
+				s32 channel_band = 0;
+				chanspec_t chspec;
+#endif /* WL_HOST_BAND_MGMT */
+				/* p2p scan && allow only probe response */
+				if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) &&
+					(bi->flags & WL_BSS_FLAGS_FROM_BEACON))
+					goto exit;
+				if ((p2p_ie = wl_cfgp2p_find_p2pie(((u8 *) bi) + bi->ie_offset,
+					bi->ie_length)) == NULL) {
+						WL_ERR(("Couldn't find P2PIE in probe"
+							" response/beacon\n"));
+						goto exit;
+				}
+#ifdef WL_HOST_BAND_MGMT
+				chspec = wl_chspec_driver_to_host(bi->chanspec);
+				channel_band = CHSPEC2WLC_BAND(chspec);
+
+				if ((cfg->curr_band == WLC_BAND_5G) &&
+					(channel_band == WLC_BAND_2G)) {
+					/* Avoid sending the GO results in band conflict */
+					if (wl_cfgp2p_retreive_p2pattrib(p2p_ie,
+						P2P_SEID_GROUP_ID) != NULL)
+						goto exit;
+				}
+#endif /* WL_HOST_BAND_MGMT */
+			}
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+			if (bi_length > ESCAN_BUF_SIZE - list->buflen)
+				remove_lower_rssi = TRUE;
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+			/* scan the cache for an existing entry for this BSS */
+			for (i = 0; i < list->count; i++) {
+				bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length))
+					: list->bss_info;
+				if (!bss) {
+					WL_ERR(("bss is NULL\n"));
+					goto exit;
+				}
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+				WL_DBG(("%s("MACDBG"), i=%d bss: RSSI %d list->count %d\n",
+					bss->SSID, MAC2STRDBG(bss->BSSID.octet),
+					i, bss->RSSI, list->count));
+
+				if (remove_lower_rssi)
+					wl_cfg80211_find_removal_candidate(bss, candidate);
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+				if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
+					(CHSPEC_BAND(wl_chspec_driver_to_host(bi->chanspec))
+					== CHSPEC_BAND(wl_chspec_driver_to_host(bss->chanspec))) &&
+					bi->SSID_len == bss->SSID_len &&
+					!bcmp(bi->SSID, bss->SSID, bi->SSID_len)) {
+
+					/* do not allow beacon data to update
+					 *the data recd from a probe response
+					 */
+					if (!(bss->flags & WL_BSS_FLAGS_FROM_BEACON) &&
+						(bi->flags & WL_BSS_FLAGS_FROM_BEACON))
+						goto exit;
+
+					WL_DBG(("%s("MACDBG"), i=%d prev: RSSI %d"
+						" flags 0x%x, new: RSSI %d flags 0x%x\n",
+						bss->SSID, MAC2STRDBG(bi->BSSID.octet), i,
+						bss->RSSI, bss->flags, bi->RSSI, bi->flags));
+
+					if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) ==
+						(bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL)) {
+						/* preserve max RSSI if the measurements are
+						 * both on-channel or both off-channel
+						 */
+						WL_DBG(("%s("MACDBG"), same onchan"
+						", RSSI: prev %d new %d\n",
+						bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+						bss->RSSI, bi->RSSI));
+						bi->RSSI = MAX(bss->RSSI, bi->RSSI);
+					} else if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) &&
+						(bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) == 0) {
+						/* preserve the on-channel rssi measurement
+						 * if the new measurement is off channel
+						 */
+						WL_DBG(("%s("MACDBG"), prev onchan"
+						", RSSI: prev %d new %d\n",
+						bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+						bss->RSSI, bi->RSSI));
+						bi->RSSI = bss->RSSI;
+						bi->flags |= WL_BSS_FLAGS_RSSI_ONCHANNEL;
+					}
+					if (dtoh32(bss->length) != bi_length) {
+						u32 prev_len = dtoh32(bss->length);
+
+						WL_DBG(("bss info replacement"
+							" is occured(bcast:%d->probresp%d)\n",
+							bss->ie_length, bi->ie_length));
+						WL_DBG(("%s("MACDBG"), replacement!(%d -> %d)\n",
+							bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+							prev_len, bi_length));
+
+						if ((list->buflen - prev_len) + bi_length
+							> ESCAN_BUF_SIZE) {
+							WL_ERR(("Buffer is too small: keep the"
+								" previous result of this AP\n"));
+							/* Only update RSSI */
+							bss->RSSI = bi->RSSI;
+							bss->flags |= (bi->flags
+								& WL_BSS_FLAGS_RSSI_ONCHANNEL);
+							goto exit;
+						}
+
+						if (i < list->count - 1) {
+							/* memory copy required by this case only */
+							memmove((u8 *)bss + bi_length,
+								(u8 *)bss + prev_len,
+								list->buflen - cur_len - prev_len);
+						}
+						list->buflen -= prev_len;
+						list->buflen += bi_length;
+					}
+					list->version = dtoh32(bi->version);
+					/* In the above code under check
+					 * '(dtoh32(bss->length) != bi_length)'
+					 * buffer overflow is avoided. bi_length
+					 * is already accounted in list->buflen
+					 */
+					if ((err = memcpy_s((u8 *)bss,
+						(ESCAN_BUF_SIZE - (list->buflen - bi_length)),
+						(u8 *)bi, bi_length)) != BCME_OK) {
+						WL_ERR(("Failed to copy the recent bss_info."
+							"err:%d recv_len:%d bi_len:%d\n", err,
+							ESCAN_BUF_SIZE - (list->buflen - bi_length),
+							bi_length));
+						/* This scenario should never happen. If it happens,
+						 * set list->count to zero for recovery
+						 */
+						list->count = 0;
+						list->buflen = 0;
+						ASSERT(0);
+					}
+					goto exit;
+				}
+				cur_len += dtoh32(bss->length);
+			}
+			/* new entry: make room (evicting weaker APs if allowed) */
+			if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+				wl_cfg80211_remove_lowRSSI_info(list, candidate, bi);
+				if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
+					WL_DBG(("RSSI(" MACDBG ") is too low(%d) to add Buffer\n",
+						MAC2STRDBG(bi->BSSID.octet), bi->RSSI));
+					goto exit;
+				}
+#else
+				WL_ERR(("Buffer is too small: ignoring\n"));
+				goto exit;
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+			}
+			/* In the previous step check is added to ensure the bi_length does not
+			 * exceed the ESCAN_BUF_SIZE
+			 */
+			(void)memcpy_s(&(((char *)list)[list->buflen]),
+				(ESCAN_BUF_SIZE - list->buflen), bi, bi_length);
+			list->version = dtoh32(bi->version);
+			list->buflen += bi_length;
+			list->count++;
+
+			/*
+			 * !Broadcast && number of ssid = 1 && number of channels =1
+			 * means specific scan to association
+			 */
+			if (wl_cfgp2p_is_p2p_specific_scan(cfg->scan_request)) {
+				WL_ERR(("P2P assoc scan fast aborted.\n"));
+				wl_notify_escan_complete(cfg, cfg->escan_info.ndev, false, true);
+				goto exit;
+			}
+		}
+	}
+	else if (status == WLC_E_STATUS_SUCCESS) {
+		cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+		wl_escan_print_sync_id(status, cfg->escan_info.cur_sync_id,
+			escan_result->sync_id);
+
+		if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			WL_DBG(("ACTION FRAME SCAN DONE\n"));
+			wl_clr_p2p_status(cfg, SCANNING);
+			wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+			if (cfg->afx_hdl->peer_chan == WL_INVALID)
+				complete(&cfg->act_frm_scan);
+		} else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+			WL_INFORM_MEM(("ESCAN COMPLETED\n"));
+			DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_COMPLETE);
+			cfg->bss_list = wl_escan_get_buf(cfg, FALSE);
+			if (!scan_req_match(cfg)) {
+				WL_DBG(("SCAN COMPLETED: scanned AP count=%d\n",
+					cfg->bss_list->count));
+			}
+			wl_inform_bss(cfg);
+			wl_notify_escan_complete(cfg, ndev, false, false);
+		}
+		wl_escan_increment_sync_id(cfg, SCAN_BUF_NEXT);
+	} else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) ||
+		(status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) ||
+		(status == WLC_E_STATUS_NEWASSOC)) {
+		/* Dump FW preserve buffer content */
+		if (status == WLC_E_STATUS_ABORT) {
+			wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
+		}
+		/* Handle all cases of scan abort */
+		cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+		wl_escan_print_sync_id(status, escan_result->sync_id,
+			cfg->escan_info.cur_sync_id);
+		WL_DBG(("ESCAN ABORT reason: %d\n", status));
+		if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			WL_DBG(("ACTION FRAME SCAN DONE\n"));
+			wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+			wl_clr_p2p_status(cfg, SCANNING);
+			if (cfg->afx_hdl->peer_chan == WL_INVALID)
+				complete(&cfg->act_frm_scan);
+		} else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+			WL_INFORM_MEM(("ESCAN ABORTED\n"));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+			if (p2p_scan(cfg) && cfg->scan_request &&
+				(cfg->scan_request->flags & NL80211_SCAN_FLAG_FLUSH)) {
+				WL_ERR(("scan list is changed"));
+				cfg->bss_list = wl_escan_get_buf(cfg, FALSE);
+			} else
+#endif // endif
+				cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
+
+			if (!scan_req_match(cfg)) {
+				WL_TRACE_HW4(("SCAN ABORTED: scanned AP count=%d\n",
+					cfg->bss_list->count));
+			}
+#ifdef DUAL_ESCAN_RESULT_BUFFER
+			if (escan_result->sync_id != cfg->escan_info.cur_sync_id) {
+				/* If sync_id is not matching, then the abort might have
+				 * come for the old scan req or for the in-driver initiated
+				 * scan. So do abort for scan_req for which sync_id is
+				 * matching.
+				 */
+				WL_INFORM_MEM(("sync_id mismatch (%d != %d). "
+					"Ignore the scan abort event.\n",
+					escan_result->sync_id, cfg->escan_info.cur_sync_id));
+				goto exit;
+			} else {
+				/* sync id is matching, abort the scan */
+				WL_INFORM_MEM(("scan aborted for sync_id: %d \n",
+					cfg->escan_info.cur_sync_id));
+				wl_inform_bss(cfg);
+				wl_notify_escan_complete(cfg, ndev, true, false);
+			}
+#else
+			wl_inform_bss(cfg);
+			wl_notify_escan_complete(cfg, ndev, true, false);
+#endif /* DUAL_ESCAN_RESULT_BUFFER */
+		} else {
+			/* If there is no pending host initiated scan, do nothing */
+			WL_DBG(("ESCAN ABORT: No pending scans. Ignoring event.\n"));
+		}
+		wl_escan_increment_sync_id(cfg, SCAN_BUF_CNT);
+	} else if (status == WLC_E_STATUS_TIMEOUT) {
+		WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request));
+		WL_ERR(("reason[0x%x]\n", e->reason));
+		if (e->reason == 0xFFFFFFFF) {
+			wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+		}
+	} else {
+		WL_ERR(("unexpected Escan Event %d : abort\n", status));
+		cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+		wl_escan_print_sync_id(status, escan_result->sync_id,
+			cfg->escan_info.cur_sync_id);
+		if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			WL_DBG(("ACTION FRAME SCAN DONE\n"));
+			wl_clr_p2p_status(cfg, SCANNING);
+			wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+			if (cfg->afx_hdl->peer_chan == WL_INVALID)
+				complete(&cfg->act_frm_scan);
+		} else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+			cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
+			if (!scan_req_match(cfg)) {
+				WL_TRACE_HW4(("SCAN ABORTED(UNEXPECTED): "
+					"scanned AP count=%d\n",
+					cfg->bss_list->count));
+			}
+			wl_inform_bss(cfg);
+			wl_notify_escan_complete(cfg, ndev, true, false);
+		}
+		wl_escan_increment_sync_id(cfg, 2);
+	}
+#else /* WL_DRV_AVOID_SCANCACHE */
+	err = wl_escan_without_scan_cache(cfg, escan_result, ndev, e, status);
+#endif /* WL_DRV_AVOID_SCANCACHE */
+exit:
+	mutex_unlock(&cfg->scan_sync);
+	return err;
+}
+
+/* Find listen channel.
+ * Walks the P2P IE's attribute list looking for the Listen Channel attribute
+ * (id 6) and returns its channel number if it is one of the social channels
+ * (SOCIAL_CHAN_1/2/3); returns 0 when no P2P IE, the IE length is out of
+ * range, the attribute list is malformed, or no valid listen channel exists.
+ */
+static s32 wl_find_listen_channel(struct bcm_cfg80211 *cfg,
+	const u8 *ie, u32 ie_len)
+{
+	const wifi_p2p_ie_t *p2p_ie;
+	const u8 *end, *pos;
+	s32 listen_channel;
+
+	pos = (const u8 *)ie;
+
+	p2p_ie = wl_cfgp2p_find_p2pie(pos, ie_len);
+
+	if (p2p_ie == NULL) {
+		return 0;
+	}
+
+	if (p2p_ie->len < MIN_P2P_IE_LEN || p2p_ie->len > MAX_P2P_IE_LEN) {
+		CFGP2P_ERR(("p2p_ie->len out of range - %d\n", p2p_ie->len));
+		return 0;
+	}
+	/* len covers OUI(3) + OUI type(1) before the sub-elements, hence -4 */
+	pos = p2p_ie->subelts;
+	end = p2p_ie->subelts + (p2p_ie->len - 4);
+
+	CFGP2P_DBG((" found p2p ie ! lenth %d \n",
+		p2p_ie->len));
+
+	while (pos < end) {
+		uint16 attr_len;
+		if (pos + 2 >= end) {
+			CFGP2P_DBG((" -- Invalid P2P attribute"));
+			return 0;
+		}
+		/* attribute header: id(1) + little-endian length(2) */
+		attr_len = ((uint16) (((pos + 1)[1] << 8) | (pos + 1)[0]));
+
+		if (pos + 3 + attr_len > end) {
+			CFGP2P_DBG(("P2P: Attribute underflow "
+				"(len=%u left=%d)",
+				attr_len, (int) (end - pos - 3)));
+			return 0;
+		}
+
+		/* if Listen Channel att id is 6 and the value is valid,
+		 * return the listen channel
+		 */
+		if (pos[0] == 6) {
+			/* listen channel subel length format
+			 * 1(id) + 2(len) + 3(country) + 1(op. class) + 1(chan num)
+			 */
+			listen_channel = pos[1 + 2 + 3 + 1];
+
+			if (listen_channel == SOCIAL_CHAN_1 ||
+				listen_channel == SOCIAL_CHAN_2 ||
+				listen_channel == SOCIAL_CHAN_3) {
+				CFGP2P_DBG((" Found my Listen Channel %d \n", listen_channel));
+				return listen_channel;
+			}
+		}
+		pos += 3 + attr_len;
+	}
+	return 0;
+}
+
+#ifdef WL_SCAN_TYPE
+/* Translate cfg80211 scan-request flags (NL80211_SCAN_FLAG_*) into the
+ * firmware's WL_SCANFLAGS_* bitmask. Returns 0 for a NULL request or when
+ * none of the mapped flags are set.
+ */
+static u32
+wl_cfgscan_map_nl80211_scan_type(struct bcm_cfg80211 *cfg, struct cfg80211_scan_request *request)
+{
+	u32 scan_flags = 0;
+
+	if (!request) {
+		return scan_flags;
+	}
+
+	if (request->flags & NL80211_SCAN_FLAG_LOW_SPAN) {
+		scan_flags |= WL_SCANFLAGS_LOW_SPAN;
+	}
+	if (request->flags & NL80211_SCAN_FLAG_HIGH_ACCURACY) {
+		scan_flags |= WL_SCANFLAGS_HIGH_ACCURACY;
+	}
+	if (request->flags & NL80211_SCAN_FLAG_LOW_POWER) {
+		scan_flags |= WL_SCANFLAGS_LOW_POWER_SCAN;
+	}
+	if (request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) {
+		scan_flags |= WL_SCANFLAGS_LOW_PRIO;
+	}
+
+	WL_INFORM(("scan flags. wl:%x cfg80211:%x\n", scan_flags, request->flags));
+	return scan_flags;
+}
+#endif /* WL_SCAN_TYPE */
+
+/* IEEE80211_CHAN_PASSIVE_SCAN was replaced by IEEE80211_CHAN_NO_IR in 3.14 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+#define IS_RADAR_CHAN(flags) (flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_PASSIVE_SCAN))
+#else
+#define IS_RADAR_CHAN(flags) (flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
+#endif // endif
+/* Convert the cfg80211 request's channel list into driver-endian 20MHz
+ * chanspecs in channel_list, skipping DFS/radar channels on secondary
+ * interfaces, channels rejected by dhd_conf_match_channel(), 60GHz, and
+ * channels filtered by WL_HOST_BAND_MGMT band restrictions. *num_channels
+ * receives the number of entries written; a NULL/empty request writes none
+ * (full channel scan).
+ */
+static void
+wl_cfgscan_populate_scan_channels(struct bcm_cfg80211 *cfg, u16 *channel_list,
+	struct cfg80211_scan_request *request, u32 *num_channels)
+{
+	u32 i = 0, j = 0;
+	u32 channel;
+	u32 n_channels = 0;
+	u32 chanspec = 0;
+
+	if (!request || !request->n_channels) {
+		/* Do full channel scan */
+		return;
+	}
+
+	n_channels = request->n_channels;
+	for (i = 0; i < n_channels; i++) {
+		channel = ieee80211_frequency_to_channel(request->channels[i]->center_freq);
+		/* SKIP DFS channels for Secondary interface */
+		if ((cfg->escan_info.ndev != bcmcfg_to_prmry_ndev(cfg)) &&
+			(IS_RADAR_CHAN(request->channels[i]->flags)))
+			continue;
+		if (!dhd_conf_match_channel(cfg->pub, channel))
+			continue;
+
+		chanspec = WL_CHANSPEC_BW_20;
+		if (chanspec == INVCHANSPEC) {
+			WL_ERR(("Invalid chanspec! Skipping channel\n"));
+			continue;
+		}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+		if (request->channels[i]->band == IEEE80211_BAND_60GHZ) {
+			/* Not supported */
+			continue;
+		}
+#endif /* LINUX_VER >= 3.6 */
+
+		if (request->channels[i]->band == IEEE80211_BAND_2GHZ) {
+#ifdef WL_HOST_BAND_MGMT
+			if (cfg->curr_band == WLC_BAND_5G) {
+				WL_DBG(("In 5G only mode, omit 2G channel:%d\n", channel));
+				continue;
+			}
+#endif /* WL_HOST_BAND_MGMT */
+			chanspec |= WL_CHANSPEC_BAND_2G;
+		} else {
+#ifdef WL_HOST_BAND_MGMT
+			if (cfg->curr_band == WLC_BAND_2G) {
+				WL_DBG(("In 2G only mode, omit 5G channel:%d\n", channel));
+				continue;
+			}
+#endif /* WL_HOST_BAND_MGMT */
+			chanspec |= WL_CHANSPEC_BAND_5G;
+		}
+		/* compose chanspec = channel | bandwidth | band, then convert
+		 * to firmware (driver) endianness
+		 */
+		channel_list[j] = channel;
+		channel_list[j] &= WL_CHANSPEC_CHAN_MASK;
+		channel_list[j] |= chanspec;
+		WL_SCAN(("Chan : %d, Channel spec: %x \n",
+			channel, channel_list[j]));
+		channel_list[j] = wl_chspec_host_to_driver(channel_list[j]);
+		j++;
+	}
+	*num_channels = j;
+
+}
+
+/* Copy the request's SSID list into buf_ptr as consecutive wlc_ssid_t
+ * records (SSIDs truncated to DOT11_MAX_SSID_LEN; a zero-length SSID means
+ * broadcast scan). *ssid_num receives the count written; nothing is written
+ * when request/buf_ptr is NULL, n_ssids is 0, or buf_len is too small.
+ */
+static void
+wl_cfgscan_populate_scan_ssids(struct bcm_cfg80211 *cfg, u8 *buf_ptr, u32 buf_len,
+	struct cfg80211_scan_request *request, u32 *ssid_num)
+{
+	u32 n_ssids;
+	wlc_ssid_t ssid;
+	int i, j = 0;
+
+	if (!request || !buf_ptr) {
+		/* Do full channel scan */
+		return;
+	}
+
+	n_ssids = request->n_ssids;
+	if (n_ssids > 0) {
+
+		if (buf_len < (n_ssids * sizeof(wlc_ssid_t))) {
+			WL_ERR(("buf len not sufficient for scan ssids\n"));
+			return;
+		}
+
+		for (i = 0; i < n_ssids; i++) {
+			bzero(&ssid, sizeof(wlc_ssid_t));
+			ssid.SSID_len = MIN(request->ssids[i].ssid_len, DOT11_MAX_SSID_LEN);
+			/* Returning void here, as per previous line copy length does not exceed
+			 * DOT11_MAX_SSID_LEN
+			 */
+			(void)memcpy_s(ssid.SSID, DOT11_MAX_SSID_LEN, request->ssids[i].ssid,
+				ssid.SSID_len);
+			if (!ssid.SSID_len) {
+				WL_SCAN(("%d: Broadcast scan\n", i));
+			} else {
+				WL_SCAN(("%d: scan  for  %s size =%d\n", i,
+					ssid.SSID, ssid.SSID_len));
+			}
+			/* For multiple ssid case copy the each SSID info the ptr below corresponds
+			 * to that so dest is of type wlc_ssid_t
+			 */
+			(void)memcpy_s(buf_ptr, sizeof(wlc_ssid_t), &ssid, sizeof(wlc_ssid_t));
+			buf_ptr += sizeof(wlc_ssid_t);
+			j++;
+		}
+	} else {
+		WL_SCAN(("Broadcast scan\n"));
+	}
+	*ssid_num = j;
+}
+
+/*
+ * wl_scan_prep() - fill in the fixed fields, channel list and SSID list of
+ * a scan parameter block (v1 or v2 layout, chosen by cfg->scan_params_v2).
+ *
+ * scan_params: caller-allocated buffer holding wl_scan_params_t or
+ *              wl_scan_params_v2_t followed by channel/SSID storage.
+ * len:         total size of scan_params in bytes; channel and SSID arrays
+ *              are only copied when they fit.
+ * request:     cfg80211 scan request; may be NULL, in which case only the
+ *              defaults are set and the firmware performs a full scan.
+ *
+ * Returns BCME_OK on success, BCME_ERROR on invalid arguments.
+ */
+static s32
+wl_scan_prep(struct bcm_cfg80211 *cfg, void *scan_params, u32 len,
+	struct cfg80211_scan_request *request)
+{
+	wl_scan_params_t *params = NULL;
+	wl_scan_params_v2_t *params_v2 = NULL;
+	u32 scan_type = 0;
+	u32 scan_param_size = 0;
+	u32 n_channels = 0;
+	u32 n_ssids = 0;
+	uint16 *chan_list = NULL;
+	u32 channel_offset = 0;
+	u32 cur_offset;
+
+	if (!scan_params) {
+		return BCME_ERROR;
+	}
+
+	if (cfg->active_scan == PASSIVE_SCAN) {
+		WL_INFORM_MEM(("Enforcing passive scan\n"));
+		scan_type = WL_SCANFLAGS_PASSIVE;
+	}
+
+	WL_DBG(("Preparing Scan request\n"));
+	if (cfg->scan_params_v2) {
+		params_v2 = (wl_scan_params_v2_t *)scan_params;
+		scan_param_size = sizeof(wl_scan_params_v2_t);
+		channel_offset = offsetof(wl_scan_params_v2_t, channel_list);
+	} else {
+		params = (wl_scan_params_t *)scan_params;
+		scan_param_size = sizeof(wl_scan_params_t);
+		channel_offset = offsetof(wl_scan_params_t, channel_list);
+	}
+
+	if (params_v2) {
+		/* scan params ver2 */
+#if defined(WL_SCAN_TYPE)
+		scan_type += wl_cfgscan_map_nl80211_scan_type(cfg, request);
+#endif /* WL_SCAN_TYPE */
+
+		/* Fix: restore the '&' address-of tokens that had been corrupted
+		 * into HTML-entity mojibake ("&para;" -> '¶', "&eth;" -> 'ð').
+		 */
+		(void)memcpy_s(&params_v2->bssid, ETHER_ADDR_LEN, &ether_bcast, ETHER_ADDR_LEN);
+		params_v2->version = htod16(WL_SCAN_PARAMS_VERSION_V2);
+		params_v2->length = htod16(sizeof(wl_scan_params_v2_t));
+		params_v2->bss_type = DOT11_BSSTYPE_ANY;
+		params_v2->scan_type = htod32(scan_type);
+		/* -1 requests the firmware defaults for probes/dwell times */
+		params_v2->nprobes = htod32(-1);
+		params_v2->active_time = htod32(-1);
+		params_v2->passive_time = htod32(-1);
+		params_v2->home_time = htod32(-1);
+		params_v2->channel_num = 0;
+		bzero(&params_v2->ssid, sizeof(wlc_ssid_t));
+		chan_list = params_v2->channel_list;
+	} else {
+		/* scan params ver 1 */
+		if (!params) {
+			ASSERT(0);
+			return BCME_ERROR;
+		}
+		(void)memcpy_s(&params->bssid, ETHER_ADDR_LEN, &ether_bcast, ETHER_ADDR_LEN);
+		params->bss_type = DOT11_BSSTYPE_ANY;
+		params->scan_type = 0;
+		params->nprobes = htod32(-1);
+		params->active_time = htod32(-1);
+		params->passive_time = htod32(-1);
+		params->home_time = htod32(-1);
+		params->channel_num = 0;
+		bzero(&params->ssid, sizeof(wlc_ssid_t));
+		chan_list = params->channel_list;
+	}
+
+	if (!request) {
+		/* scan_request null, do scan based on base config */
+		WL_DBG(("scan_request is null\n"));
+		return BCME_OK;
+	}
+
+	WL_INFORM(("n_channels:%d n_ssids:%d\n", request->n_channels, request->n_ssids));
+
+	cur_offset = channel_offset;
+	/* Copy channel array if applicable */
+	if ((request->n_channels > 0) && chan_list) {
+		if (len >= (scan_param_size + (request->n_channels * sizeof(u16)))) {
+			wl_cfgscan_populate_scan_channels(cfg,
+				chan_list, request, &n_channels);
+			cur_offset += (n_channels * (sizeof(u16)));
+		}
+	}
+
+	/* Copy ssid array if applicable. The SSID block starts on a u32
+	 * boundary right after the channel list.
+	 */
+	if (request->n_ssids > 0) {
+		cur_offset = (u32) roundup(cur_offset, sizeof(u32));
+		if (len > (cur_offset + (request->n_ssids * sizeof(wlc_ssid_t)))) {
+			u32 rem_len = len - cur_offset;
+			wl_cfgscan_populate_scan_ssids(cfg,
+				((u8 *)scan_params + cur_offset), rem_len, request, &n_ssids);
+		}
+	}
+
+	if (n_ssids || n_channels) {
+		/* channel_num packs the SSID count in the upper bits and the
+		 * channel count in the lower mask.
+		 */
+		u32 channel_num =
+			htod32((n_ssids << WL_SCAN_PARAMS_NSSID_SHIFT) |
+			(n_channels & WL_SCAN_PARAMS_COUNT_MASK));
+		if (params_v2) {
+			params_v2->channel_num = channel_num;
+			if (n_channels == 1) {
+				/* single-channel (join-style) scan: shorter dwell */
+				params_v2->active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
+				params_v2->nprobes = htod32(
+					params_v2->active_time / WL_SCAN_JOIN_PROBE_INTERVAL_MS);
+			}
+		} else {
+			params->channel_num = channel_num;
+			if (n_channels == 1) {
+				params->active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
+				params->nprobes = htod32(
+					params->active_time / WL_SCAN_JOIN_PROBE_INTERVAL_MS);
+			}
+		}
+	}
+
+	WL_INFORM(("scan_prep done. n_channels:%d n_ssids:%d\n", n_channels, n_ssids));
+	return BCME_OK;
+}
+
+/*
+ * wl_get_valid_channels() - query the firmware for the list of channels
+ * valid under the current regulatory setting.
+ *
+ * valid_chan_list is treated as a wl_uint32_list_t: count followed by
+ * channel entries. Returns the wldev ioctl result, or -ENOMEM for a
+ * missing buffer / non-positive size.
+ */
+static s32
+wl_get_valid_channels(struct net_device *ndev, u8 *valid_chan_list, s32 size)
+{
+	wl_uint32_list_t *chan_list;
+	s32 ret;
+
+	/* Reject a missing buffer or a non-positive length up front. */
+	if ((valid_chan_list == NULL) || (size <= 0)) {
+		return -ENOMEM;
+	}
+
+	bzero(valid_chan_list, size);
+	chan_list = (wl_uint32_list_t *)(void *)valid_chan_list;
+	chan_list->count = htod32(WL_NUMCHANNELS);
+	ret = wldev_ioctl_get(ndev, WLC_GET_VALID_CHANNELS, valid_chan_list, size);
+	if (ret != 0) {
+		WL_ERR(("get channels failed with %d\n", ret));
+	}
+
+	return ret;
+}
+
+/*
+ * wl_run_escan() - build and issue an escan request to the firmware.
+ *
+ * Two trigger paths:
+ *  - legacy scan: allocate a wl_escan_params(_v2)_t sized for the requested
+ *    channels/SSIDs, populate it via wl_scan_prep() and fire the "escan"
+ *    iovar on @ndev;
+ *  - P2P scan: build a regulatory/DFS-filtered channel list from @request
+ *    and hand it to wl_cfgp2p_escan() with a scan purpose derived from the
+ *    channel count (social channels / join attempt / progressive / normal).
+ *
+ * @request may be NULL on the legacy path (full default scan). Returns
+ * BCME_OK or a negative/BCME error code.
+ */
+static s32
+wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	struct cfg80211_scan_request *request, uint16 action)
+{
+	s32 err = BCME_OK;
+	u32 n_channels;
+	u32 n_ssids;
+	s32 params_size;
+	wl_escan_params_t *eparams = NULL;
+	wl_escan_params_v2_t *eparams_v2 = NULL;
+	u8 *scan_params = NULL;
+	u8 *params = NULL;
+	/* room for a wl_uint32_list_t count word plus WL_NUMCHANNELS entries */
+	u8 chan_buf[sizeof(u32)*(WL_NUMCHANNELS + 1)];
+	u32 num_chans = 0;
+	s32 channel;
+	u32 n_valid_chan;
+	s32 search_state = WL_P2P_DISC_ST_SCAN;
+	u32 i, j, n_nodfs = 0;
+	u16 *default_chan_list = NULL;
+	wl_uint32_list_t *list;
+	s32 bssidx = -1;
+	struct net_device *dev = NULL;
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+	bool is_first_init_2g_scan = false;
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+	p2p_scan_purpose_t p2p_scan_purpose = P2P_SCAN_PURPOSE_MIN;
+	u32 chan_mem = 0;
+	u32 sync_id = 0;
+
+	WL_DBG(("Enter \n"));
+
+	/* scan request can come with empty request : perform all default scan */
+	if (!cfg) {
+		err = -EINVAL;
+		goto exit;
+	}
+
+	/* Fixed-size portion of the escan parameter block (v1 or v2 layout);
+	 * per-channel and per-SSID storage is added below when a request is
+	 * present.
+	 */
+	if (cfg->scan_params_v2) {
+		params_size = (WL_SCAN_PARAMS_V2_FIXED_SIZE +
+			OFFSETOF(wl_escan_params_v2_t, params));
+	} else {
+		params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params));
+	}
+
+	if (!cfg->p2p_supported || !p2p_scan(cfg)) {
+		/* LEGACY SCAN TRIGGER */
+		WL_SCAN((" LEGACY E-SCAN START\n"));
+
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+		if (!request) {
+			err = -EINVAL;
+			goto exit;
+		}
+		/* First broadcast scan on the primary interface: optionally
+		 * trim the request to the 2.4GHz channels (plus at most one
+		 * 5GHz channel) and/or shorten the dwell time.
+		 */
+		if (ndev == bcmcfg_to_prmry_ndev(cfg) && g_first_broadcast_scan == true) {
+#ifdef USE_INITIAL_2G_SCAN
+			struct ieee80211_channel tmp_channel_list[CH_MAX_2G_CHANNEL];
+			/* allow one 5G channel to add previous connected channel in 5G */
+			bool allow_one_5g_channel = TRUE;
+			j = 0;
+			for (i = 0; i < request->n_channels; i++) {
+				int tmp_chan = ieee80211_frequency_to_channel
+					(request->channels[i]->center_freq);
+				if (tmp_chan > CH_MAX_2G_CHANNEL) {
+					if (allow_one_5g_channel)
+						allow_one_5g_channel = FALSE;
+					else
+						continue;
+				}
+				if (j > CH_MAX_2G_CHANNEL) {
+					WL_ERR(("Index %d exceeds max 2.4GHz channels %d"
+						" and previous 5G connected channel\n",
+						j, CH_MAX_2G_CHANNEL));
+					break;
+				}
+				bcopy(request->channels[i], &tmp_channel_list[j],
+					sizeof(struct ieee80211_channel));
+				WL_SCAN(("channel of request->channels[%d]=%d\n", i, tmp_chan));
+				j++;
+			}
+			if ((j > 0) && (j <= CH_MAX_2G_CHANNEL)) {
+				for (i = 0; i < j; i++)
+					bcopy(&tmp_channel_list[i], request->channels[i],
+						sizeof(struct ieee80211_channel));
+
+				request->n_channels = j;
+				is_first_init_2g_scan = true;
+			}
+			else
+				WL_ERR(("Invalid number of 2.4GHz channels %d\n", j));
+
+			WL_SCAN(("request->n_channels=%d\n", request->n_channels));
+#else /* USE_INITIAL_SHORT_DWELL_TIME */
+			is_first_init_2g_scan = true;
+#endif /* USE_INITIAL_2G_SCAN */
+			g_first_broadcast_scan = false;
+		}
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+
+		/* if scan request is not empty parse scan request paramters */
+		if (request != NULL) {
+			n_channels = request->n_channels;
+			n_ssids = request->n_ssids;
+			if (n_channels % 2)
+				/* If n_channels is odd, add a padd of u16 */
+				params_size += sizeof(u16) * (n_channels + 1);
+			else
+				params_size += sizeof(u16) * n_channels;
+
+			/* Allocate space for populating ssids in wl_escan_params_t struct */
+			params_size += sizeof(struct wlc_ssid) * n_ssids;
+		}
+		params = MALLOCZ(cfg->osh, params_size);
+		if (params == NULL) {
+			err = -ENOMEM;
+			goto exit;
+		}
+
+		/* NOTE(review): sync_id is passed by value and read below —
+		 * presumably wl_escan_set_sync_id is a macro that assigns it;
+		 * verify against its definition.
+		 */
+		wl_escan_set_sync_id(sync_id, cfg);
+		if (cfg->scan_params_v2) {
+			eparams_v2 = (wl_escan_params_v2_t *)params;
+			scan_params = (u8 *)&eparams_v2->params;
+			eparams_v2->version = htod32(ESCAN_REQ_VERSION_V2);
+			eparams_v2->action = htod16(action);
+			eparams_v2->sync_id = sync_id;
+		} else {
+			eparams = (wl_escan_params_t *)params;
+			scan_params = (u8 *)&eparams->params;
+			eparams->version = htod32(ESCAN_REQ_VERSION);
+			eparams->action = htod16(action);
+			eparams->sync_id = sync_id;
+		}
+
+		if (wl_scan_prep(cfg, scan_params, params_size, request) < 0) {
+			WL_ERR(("scan_prep failed\n"));
+			err = -EINVAL;
+			goto exit;
+		}
+
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+		/* Override active_time to reduce scan time if it's first bradcast scan. */
+		if (is_first_init_2g_scan) {
+			if (eparams_v2) {
+				eparams_v2->params.active_time = FIRST_SCAN_ACTIVE_DWELL_TIME_MS;
+			} else {
+				eparams->params.active_time = FIRST_SCAN_ACTIVE_DWELL_TIME_MS;
+			}
+		}
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+
+		wl_escan_set_type(cfg, WL_SCANTYPE_LEGACY);
+		/* "escan" + params must fit in the WLC_IOCTL_MEDLEN iovar buffer */
+		if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) {
+			WL_ERR(("ioctl buffer length not sufficient\n"));
+			MFREE(cfg->osh, params, params_size);
+			err = -ENOMEM;
+			goto exit;
+		}
+
+		bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+		WL_MSG(ndev->name, "LEGACY_SCAN sync ID: %d, bssidx: %d\n", sync_id, bssidx);
+		err = wldev_iovar_setbuf(ndev, "escan", params, params_size,
+			cfg->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+		if (unlikely(err)) {
+			if (err == BCME_EPERM)
+				/* Scan Not permitted at this point of time */
+				WL_DBG((" Escan not permitted at this time (%d)\n", err));
+			else
+				WL_ERR((" Escan set error (%d)\n", err));
+		} else {
+			DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_REQUESTED);
+		}
+		MFREE(cfg->osh, params, params_size);
+	}
+	else if (p2p_is_on(cfg) && p2p_scan(cfg)) {
+		/* P2P SCAN TRIGGER */
+		s32 _freq = 0;
+		n_nodfs = 0;
+
+		if (request && request->n_channels) {
+			num_chans = request->n_channels;
+			WL_SCAN((" chann number : %d\n", num_chans));
+			chan_mem = (u32)(num_chans * sizeof(*default_chan_list));
+			default_chan_list = MALLOCZ(cfg->osh, chan_mem);
+			if (default_chan_list == NULL) {
+				WL_ERR(("channel list allocation failed \n"));
+				err = -ENOMEM;
+				goto exit;
+			}
+			/* Intersect the requested channels with the firmware's
+			 * valid channel list, dropping radar/no-IR (and,
+			 * optionally, all DFS) channels.
+			 */
+			if (!wl_get_valid_channels(ndev, chan_buf, sizeof(chan_buf))) {
+#ifdef P2P_SKIP_DFS
+				int is_printed = false;
+#endif /* P2P_SKIP_DFS */
+				list = (wl_uint32_list_t *) chan_buf;
+				n_valid_chan = dtoh32(list->count);
+				if (n_valid_chan > WL_NUMCHANNELS) {
+					WL_ERR(("wrong n_valid_chan:%d\n", n_valid_chan));
+					MFREE(cfg->osh, default_chan_list, chan_mem);
+					err = -EINVAL;
+					goto exit;
+				}
+
+				for (i = 0; i < num_chans; i++)
+				{
+#ifdef WL_HOST_BAND_MGMT
+					int channel_band = 0;
+#endif /* WL_HOST_BAND_MGMT */
+					_freq = request->channels[i]->center_freq;
+					channel = ieee80211_frequency_to_channel(_freq);
+#ifdef WL_HOST_BAND_MGMT
+					channel_band = (channel > CH_MAX_2G_CHANNEL) ?
+						WLC_BAND_5G : WLC_BAND_2G;
+					if ((cfg->curr_band != WLC_BAND_AUTO) &&
+						(cfg->curr_band != channel_band) &&
+						!IS_P2P_SOCIAL_CHANNEL(channel))
+							continue;
+#endif /* WL_HOST_BAND_MGMT */
+
+					/* ignore DFS channels */
+					if (request->channels[i]->flags &
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+						(IEEE80211_CHAN_NO_IR
+						| IEEE80211_CHAN_RADAR))
+#else
+						(IEEE80211_CHAN_RADAR
+						| IEEE80211_CHAN_PASSIVE_SCAN))
+#endif // endif
+						continue;
+#ifdef P2P_SKIP_DFS
+					if (channel >= 52 && channel <= 144) {
+						if (is_printed == false) {
+							WL_ERR(("SKIP DFS CHANs(52~144)\n"));
+							is_printed = true;
+						}
+						continue;
+					}
+#endif /* P2P_SKIP_DFS */
+
+					for (j = 0; j < n_valid_chan; j++) {
+						/* allows only supported channel on
+						*  current reguatory
+						*/
+						if (n_nodfs >= num_chans) {
+							break;
+						}
+						if (channel == (dtoh32(list->element[j]))) {
+							default_chan_list[n_nodfs++] =
+								channel;
+						}
+					}
+
+				}
+			}
+			/* default_chan_list was zero-filled by MALLOCZ, so the
+			 * three-entry comparison below is safe even if fewer
+			 * channels survived filtering.
+			 */
+			if (num_chans == SOCIAL_CHAN_CNT && (
+						(default_chan_list[0] == SOCIAL_CHAN_1) &&
+						(default_chan_list[1] == SOCIAL_CHAN_2) &&
+						(default_chan_list[2] == SOCIAL_CHAN_3))) {
+				/* SOCIAL CHANNELS 1, 6, 11 */
+				search_state = WL_P2P_DISC_ST_SEARCH;
+				p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
+				WL_DBG(("P2P SEARCH PHASE START \n"));
+			} else if (((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1)) &&
+				(wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) ||
+				((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION2)) &&
+				(wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP))) {
+				/* If you are already a GO, then do SEARCH only */
+				WL_DBG(("Already a GO. Do SEARCH Only"));
+				search_state = WL_P2P_DISC_ST_SEARCH;
+				num_chans = n_nodfs;
+				p2p_scan_purpose = P2P_SCAN_NORMAL;
+
+			} else if (num_chans == 1) {
+				p2p_scan_purpose = P2P_SCAN_CONNECT_TRY;
+				WL_INFORM_MEM(("Trigger p2p join scan\n"));
+			} else if (num_chans == SOCIAL_CHAN_CNT + 1) {
+			/* SOCIAL_CHAN_CNT + 1 takes care of the Progressive scan supported by
+			 * the supplicant
+			 */
+				p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
+			} else {
+				WL_DBG(("P2P SCAN STATE START \n"));
+				num_chans = n_nodfs;
+				p2p_scan_purpose = P2P_SCAN_NORMAL;
+			}
+		} else {
+			err = -EINVAL;
+			goto exit;
+		}
+		err = wl_cfgp2p_escan(cfg, ndev, ACTIVE_SCAN, num_chans, default_chan_list,
+			search_state, action,
+			wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE), NULL,
+			p2p_scan_purpose);
+
+		if (!err)
+			cfg->p2p->search_state = search_state;
+
+		MFREE(cfg->osh, default_chan_list, chan_mem);
+	}
+exit:
+	if (unlikely(err)) {
+		/* Don't print Error incase of Scan suppress */
+		if ((err == BCME_EPERM) && cfg->scan_suppressed)
+			WL_DBG(("Escan failed: Scan Suppressed \n"));
+		else
+			WL_ERR(("scan error (%d)\n", err));
+	}
+	return err;
+}
+
+/*
+ * wl_do_escan() - drive a single escan: reset the shared result buffer,
+ * program the active/passive scan mode, optionally zero the passive dwell
+ * time (passive_channel_skip), run the escan and restore the dwell time.
+ *
+ * Returns BCME_OK on success, or the first failing ioctl/escan error.
+ */
+s32
+wl_do_escan(struct bcm_cfg80211 *cfg, struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request)
+{
+	s32 err = BCME_OK;
+	s32 ret;
+	s32 passive_scan;
+	s32 passive_scan_time;
+	s32 passive_scan_time_org;
+	wl_scan_results_t *results;
+	WL_SCAN(("Enter \n"));
+
+	/* Reset the shared escan result buffer before starting a new scan */
+	results = wl_escan_get_buf(cfg, FALSE);
+	results->version = 0;
+	results->count = 0;
+	results->buflen = WL_SCAN_RESULTS_FIXED_SIZE;
+
+	cfg->escan_info.ndev = ndev;
+	cfg->escan_info.wiphy = wiphy;
+	cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANING;
+	passive_scan = cfg->active_scan ? 0 : 1;
+	err = wldev_ioctl_set(ndev, WLC_SET_PASSIVE_SCAN,
+		&passive_scan, sizeof(passive_scan));
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		goto exit;
+	}
+
+	if (passive_channel_skip) {
+		/* Save the current passive dwell time so it can be restored
+		 * after the scan, then force it to 0 to skip passive channels.
+		 */
+		err = wldev_ioctl_get(ndev, WLC_GET_SCAN_PASSIVE_TIME,
+			&passive_scan_time_org, sizeof(passive_scan_time_org));
+		if (unlikely(err)) {
+			WL_ERR(("== error (%d)\n", err));
+			goto exit;
+		}
+
+		WL_SCAN(("PASSIVE SCAN time : %d \n", passive_scan_time_org));
+
+		passive_scan_time = 0;
+		err = wldev_ioctl_set(ndev, WLC_SET_SCAN_PASSIVE_TIME,
+			&passive_scan_time, sizeof(passive_scan_time));
+		if (unlikely(err)) {
+			WL_ERR(("== error (%d)\n", err));
+			goto exit;
+		}
+
+		WL_SCAN(("PASSIVE SCAN SKIPED!! (passive_channel_skip:%d) \n",
+			passive_channel_skip));
+	}
+
+	err = wl_run_escan(cfg, ndev, request, WL_SCAN_ACTION_START);
+
+	if (passive_channel_skip) {
+		/* Restore the saved passive dwell time. Use a separate return
+		 * code so a successful restore does not clobber an escan
+		 * failure recorded in 'err' (the original code overwrote it,
+		 * masking wl_run_escan() errors).
+		 */
+		ret = wldev_ioctl_set(ndev, WLC_SET_SCAN_PASSIVE_TIME,
+			&passive_scan_time_org, sizeof(passive_scan_time_org));
+		if (unlikely(ret)) {
+			WL_ERR(("== error (%d)\n", ret));
+			err = ret;
+			goto exit;
+		}
+
+		WL_SCAN(("PASSIVE SCAN RECOVERED!! (passive_scan_time_org:%d) \n",
+			passive_scan_time_org));
+	}
+
+exit:
+	return err;
+}
+
+/*
+ * wl_get_scan_timeout_val() - compute the scan watchdog interval in ms.
+ * The base interval is extended when NAN is enabled.
+ */
+static s32
+wl_get_scan_timeout_val(struct bcm_cfg80211 *cfg)
+{
+	u32 timeout_ms = WL_SCAN_TIMER_INTERVAL_MS;
+
+#ifdef WL_NAN
+	/* NAN operation shares the radio; give scans extra headroom. */
+	if (cfg->nan_enable) {
+		timeout_ms += WL_SCAN_TIMER_INTERVAL_MS_NAN;
+	}
+#endif /* WL_NAN */
+	WL_MEM(("scan_timer_interval_ms %d\n", timeout_ms));
+	return timeout_ms;
+}
+
+/* Number of consecutive EBUSY scan failures tolerated before escalating */
+#define SCAN_EBUSY_RETRY_LIMIT 20
+/*
+ * wl_cfgscan_handle_scanbusy() - translate a firmware scan error into the
+ * code returned to the cfg80211 stack and track consecutive EBUSY failures.
+ *
+ * On repeated EBUSY (> SCAN_EBUSY_RETRY_LIMIT) this escalates: flushes FW
+ * logs, optionally triggers a memory dump, sends a HANG notification and
+ * aborts the scan. Pass err == 0 to reset the failure counter.
+ *
+ * NOTE(review): busy_count is function-static shared state with no locking;
+ * presumably all callers run under cfg->scan_sync — verify.
+ */
+static s32
+wl_cfgscan_handle_scanbusy(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 err)
+{
+	s32 scanbusy_err = 0;
+	static u32 busy_count = 0;
+
+	if (!err) {
+		busy_count = 0;
+		return scanbusy_err;
+	}
+	/* Map firmware error codes onto errno values for cfg80211 */
+	if (err == BCME_BUSY || err == BCME_NOTREADY) {
+		WL_ERR(("Scan err = (%d), busy?%d", err, -EBUSY));
+		scanbusy_err = -EBUSY;
+	} else if ((err == BCME_EPERM) && cfg->scan_suppressed) {
+		WL_ERR(("Scan not permitted due to scan suppress\n"));
+		scanbusy_err = -EPERM;
+	} else {
+		/* For all other fw errors, use a generic error code as return
+		 * value to cfg80211 stack
+		 */
+		scanbusy_err = -EAGAIN;
+	}
+
+	if (scanbusy_err == -EBUSY) {
+		/* Flush FW preserve buffer logs for checking failure */
+		if (busy_count++ > (SCAN_EBUSY_RETRY_LIMIT/5)) {
+			wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
+		}
+		if (busy_count > SCAN_EBUSY_RETRY_LIMIT) {
+			struct ether_addr bssid;
+			s32 ret = 0;
+			dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+			/* Bus already in error state: skip further ioctls */
+			if (dhd_query_bus_erros(dhdp)) {
+				return BCME_NOTREADY;
+			}
+			dhdp->scan_busy_occurred = TRUE;
+			busy_count = 0;
+			WL_ERR(("Unusual continuous EBUSY error, %d %d %d %d %d %d %d %d %d\n",
+				wl_get_drv_status(cfg, SCANNING, ndev),
+				wl_get_drv_status(cfg, SCAN_ABORTING, ndev),
+				wl_get_drv_status(cfg, CONNECTING, ndev),
+				wl_get_drv_status(cfg, CONNECTED, ndev),
+				wl_get_drv_status(cfg, DISCONNECTING, ndev),
+				wl_get_drv_status(cfg, AP_CREATING, ndev),
+				wl_get_drv_status(cfg, AP_CREATED, ndev),
+				wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev),
+				wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev)));
+
+#if defined(DHD_DEBUG) && defined(DHD_FW_COREDUMP)
+			if (dhdp->memdump_enabled) {
+				dhdp->memdump_type = DUMP_TYPE_SCAN_BUSY;
+				dhd_bus_mem_dump(dhdp);
+			}
+#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
+			dhdp->hang_reason = HANG_REASON_SCAN_BUSY;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+			dhd_os_send_hang_message(dhdp);
+#else
+			WL_ERR(("%s: HANG event is unsupported\n", __FUNCTION__));
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
+
+			/* Log whether the FW still believes it is associated */
+			bzero(&bssid, sizeof(bssid));
+			if ((ret = wldev_ioctl_get(ndev, WLC_GET_BSSID,
+				&bssid, ETHER_ADDR_LEN)) == 0) {
+				WL_ERR(("FW is connected with " MACDBG "/n",
+					MAC2STRDBG(bssid.octet)));
+			} else {
+				WL_ERR(("GET BSSID failed with %d\n", ret));
+			}
+
+			wl_cfg80211_scan_abort(cfg);
+
+		} else {
+			/* Hold the context for 400msec, so that 10 subsequent scans
+			* can give a buffer of 4sec which is enough to
+			* cover any on-going scan in the firmware
+			*/
+			WL_DBG(("Enforcing delay for EBUSY case \n"));
+			msleep(400);
+		}
+	} else {
+		busy_count = 0;
+	}
+
+	return scanbusy_err;
+}
+
+/*
+ * __wl_cfg80211_scan() - core scan entry point shared by the cfg80211 scan
+ * op and internal callers.
+ *
+ * Validates driver state (not already scanning/aborting, not sending action
+ * frames), classifies the request as P2P or legacy based on the SSIDs,
+ * programs P2P discovery / interworking / vendor IEs as needed, then runs
+ * the escan under cfg->scan_sync. On success the request is stored in
+ * cfg->scan_request and the scan watchdog timer is armed.
+ *
+ * @this_ssid is used only when @request is NULL (IBSS scan path).
+ * Returns 0 on success or a negative errno.
+ */
+s32
+__wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request,
+	struct cfg80211_ssid *this_ssid)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct cfg80211_ssid *ssids;
+	struct ether_addr primary_mac;
+	bool p2p_ssid;
+#ifdef WL11U
+	bcm_tlv_t *interworking_ie;
+#endif // endif
+	s32 err = 0;
+	s32 bssidx = -1;
+	s32 i;
+	bool escan_req_failed = false;
+	s32 scanbusy_err = 0;
+
+	unsigned long flags;
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	struct net_device *remain_on_channel_ndev = NULL;
+#endif // endif
+	/*
+	 * Hostapd triggers scan before starting automatic channel selection
+	 * to collect channel characteristics. However firmware scan engine
+	 * doesn't support any channel characteristics collection along with
+	 * scan. Hence return scan success.
+	 */
+	if (request && (scan_req_iftype(request) == NL80211_IFTYPE_AP)) {
+		WL_DBG(("Scan Command on SoftAP Interface. Ignoring...\n"));
+// terence 20161023: let it scan in SoftAP mode
+//		return 0;
+	}
+
+	ndev = ndev_to_wlc_ndev(ndev, cfg);
+
+	if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg)) {
+		WL_ERR(("Sending Action Frames. Try it again.\n"));
+		return -EAGAIN;
+	}
+
+	WL_DBG(("Enter wiphy (%p)\n", wiphy));
+	/* A stale SCANNING status with no pending request is force-cleared */
+	if (wl_get_drv_status_all(cfg, SCANNING)) {
+		if (cfg->scan_request == NULL) {
+			wl_clr_drv_status_all(cfg, SCANNING);
+			WL_DBG(("<<<<<<<<<<<Force Clear Scanning Status>>>>>>>>>>>\n"));
+		} else {
+			WL_ERR(("Scanning already\n"));
+			return -EAGAIN;
+		}
+	}
+	if (wl_get_drv_status(cfg, SCAN_ABORTING, ndev)) {
+		WL_ERR(("Scanning being aborted\n"));
+		return -EAGAIN;
+	}
+	if (request && request->n_ssids > WL_SCAN_PARAMS_SSID_MAX) {
+		WL_ERR(("request null or n_ssids > WL_SCAN_PARAMS_SSID_MAX\n"));
+		return -EOPNOTSUPP;
+	}
+#ifdef WL_BCNRECV
+	/* check fakeapscan in progress then abort */
+	wl_android_bcnrecv_stop(ndev, WL_BCNRECV_SCANBUSY);
+#endif /* WL_BCNRECV */
+
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	mutex_lock(&cfg->scan_sync);
+	remain_on_channel_ndev = wl_cfg80211_get_remain_on_channel_ndev(cfg);
+	if (remain_on_channel_ndev) {
+		WL_DBG(("Remain_on_channel bit is set, somehow it didn't get cleared\n"));
+		wl_notify_escan_complete(cfg, remain_on_channel_ndev, true, true);
+	}
+	mutex_unlock(&cfg->scan_sync);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+#ifdef P2P_LISTEN_OFFLOADING
+	wl_cfg80211_cancel_p2plo(cfg);
+#endif /* P2P_LISTEN_OFFLOADING */
+
+	if (request) {		/* scan bss */
+		ssids = request->ssids;
+		p2p_ssid = false;
+		/* A request containing a P2P wildcard SSID selects the P2P path */
+		for (i = 0; i < request->n_ssids; i++) {
+			if (ssids[i].ssid_len &&
+				IS_P2P_SSID(ssids[i].ssid, ssids[i].ssid_len)) {
+				/* P2P Scan */
+#ifdef WL_BLOCK_P2P_SCAN_ON_STA
+				if (!(IS_P2P_IFACE(request->wdev))) {
+					/* P2P scan on non-p2p iface. Fail scan */
+					WL_ERR(("p2p_search on non p2p iface\n"));
+					goto scan_out;
+				}
+#endif /* WL_BLOCK_P2P_SCAN_ON_STA */
+				p2p_ssid = true;
+				break;
+			}
+		}
+		if (p2p_ssid) {
+			if (cfg->p2p_supported) {
+				/* p2p scan trigger */
+				if (p2p_on(cfg) == false) {
+					/* p2p on at the first time */
+					p2p_on(cfg) = true;
+					wl_cfgp2p_set_firm_p2p(cfg);
+					get_primary_mac(cfg, &primary_mac);
+#ifndef WL_P2P_USE_RANDMAC
+					wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
+#endif /* WL_P2P_USE_RANDMAC */
+#if defined(P2P_IE_MISSING_FIX)
+					cfg->p2p_prb_noti = false;
+#endif // endif
+				}
+				wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+				WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
+				p2p_scan(cfg) = true;
+			}
+		} else {
+			/* legacy scan trigger
+			 * So, we have to disable p2p discovery if p2p discovery is on
+			 */
+			if (cfg->p2p_supported) {
+				p2p_scan(cfg) = false;
+				/* If Netdevice is not equals to primary and p2p is on
+				*  , we will do p2p scan using P2PAPI_BSSCFG_DEVICE.
+				*/
+
+				/* NOTE(review): p2p_scan(cfg) was set false just
+				 * above, so this condition is always true here.
+				 */
+				if (p2p_scan(cfg) == false) {
+					if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+						err = wl_cfgp2p_discover_enable_search(cfg,
+						false);
+						if (unlikely(err)) {
+							goto scan_out;
+						}
+
+					}
+				}
+			}
+			if (!cfg->p2p_supported || !p2p_scan(cfg)) {
+				if ((bssidx = wl_get_bssidx_by_wdev(cfg,
+					ndev->ieee80211_ptr)) < 0) {
+					WL_ERR(("Find p2p index from ndev(%p) failed\n",
+						ndev));
+					err = BCME_ERROR;
+					goto scan_out;
+				}
+#ifdef WL11U
+				/* Program or clear the Hotspot 2.0 interworking IE */
+				if (request && (interworking_ie = wl_cfg80211_find_interworking_ie(
+						request->ie, request->ie_len)) != NULL) {
+					if ((err = wl_cfg80211_add_iw_ie(cfg, ndev, bssidx,
+							VNDR_IE_CUSTOM_FLAG, interworking_ie->id,
+							interworking_ie->data,
+							interworking_ie->len)) != BCME_OK) {
+						WL_ERR(("Failed to add interworking IE"));
+					}
+				} else if (cfg->wl11u) {
+					/* we have to clear IW IE and disable gratuitous APR */
+					wl_cfg80211_clear_iw_ie(cfg, ndev, bssidx);
+					err = wldev_iovar_setint_bsscfg(ndev, "grat_arp",
+						0, bssidx);
+					/* we don't care about error here
+					 * because the only failure case is unsupported,
+					 * which is fine
+					 */
+					if (unlikely(err)) {
+						WL_ERR(("Set grat_arp failed:(%d) Ignore!\n", err));
+					}
+					cfg->wl11u = FALSE;
+				}
+#endif /* WL11U */
+				if (request) {
+					err = wl_cfg80211_set_mgmt_vndr_ies(cfg,
+						ndev_to_cfgdev(ndev), bssidx, VNDR_IE_PRBREQ_FLAG,
+						request->ie, request->ie_len);
+				}
+
+				if (unlikely(err)) {
+// terence 20161023: let it scan in SoftAP mode
+//					goto scan_out;
+				}
+
+			}
+		}
+	} else {		/* scan in ibss */
+		ssids = this_ssid;
+	}
+
+	if (request && cfg->p2p_supported) {
+		WL_TRACE_HW4(("START SCAN\n"));
+		DHD_OS_SCAN_WAKE_LOCK_TIMEOUT((dhd_pub_t *)(cfg->pub),
+			SCAN_WAKE_LOCK_TIMEOUT);
+		DHD_DISABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub));
+	}
+
+	if (cfg->p2p_supported) {
+		if (request && p2p_on(cfg) && p2p_scan(cfg)) {
+
+			/* find my listen channel */
+			cfg->afx_hdl->my_listen_chan =
+				wl_find_listen_channel(cfg, request->ie,
+				request->ie_len);
+			err = wl_cfgp2p_enable_discovery(cfg, ndev,
+			request->ie, request->ie_len);
+
+			if (unlikely(err)) {
+				goto scan_out;
+			}
+		}
+	}
+
+	mutex_lock(&cfg->scan_sync);
+	err = wl_do_escan(cfg, wiphy, ndev, request);
+	if (likely(!err)) {
+		goto scan_success;
+	} else {
+		escan_req_failed = true;
+		goto scan_out;
+	}
+
+scan_success:
+	/* Reset the EBUSY counter, record the request and arm the watchdog */
+	wl_cfgscan_handle_scanbusy(cfg, ndev, BCME_OK);
+	cfg->scan_request = request;
+	wl_set_drv_status(cfg, SCANNING, ndev);
+	/* Arm the timer */
+	mod_timer(&cfg->scan_timeout,
+		jiffies + msecs_to_jiffies(wl_get_scan_timeout_val(cfg)));
+	mutex_unlock(&cfg->scan_sync);
+	return 0;
+
+scan_out:
+	if (escan_req_failed) {
+		WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+		cfg->scan_request = NULL;
+		WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+		mutex_unlock(&cfg->scan_sync);
+		/* Handling for scan busy errors */
+		scanbusy_err = wl_cfgscan_handle_scanbusy(cfg, ndev, err);
+		if (scanbusy_err == BCME_NOTREADY) {
+			/* In case of bus failures avoid ioctl calls */
+			DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
+			return -ENODEV;
+		}
+		err = scanbusy_err;
+	}
+
+	DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
+	return err;
+}
+
+/*
+ * wl_cfg80211_scan() - cfg80211 .scan operation. The signature depends on
+ * WL_CFG80211_P2P_DEV_IF (wdev-based vs ndev-based kernel API).
+ *
+ * Rejects scans while multiple P2P connections are operational on the
+ * primary interface and while a 4-way handshake is in progress, then
+ * delegates to __wl_cfg80211_scan().
+ */
+s32
+#if defined(WL_CFG80211_P2P_DEV_IF)
+wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
+#else
+wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+	s32 err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	struct net_device *ndev = wdev_to_wlc_ndev(request->wdev, cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+	WL_DBG(("Enter\n"));
+	RETURN_EIO_IF_NOT_UP(cfg);
+
+#ifdef DHD_IFDEBUG
+#ifdef WL_CFG80211_P2P_DEV_IF
+	PRINT_WDEV_INFO(request->wdev);
+#else
+	PRINT_WDEV_INFO(ndev);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#endif /* DHD_IFDEBUG */
+
+	if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+		if (wl_cfg_multip2p_operational(cfg)) {
+			WL_ERR(("wlan0 scan failed, p2p devices are operational"));
+			 return -ENODEV;
+		}
+	}
+	/* Block scans that would disturb an in-progress 4-way handshake */
+	err = wl_cfg80211_check_in4way(cfg, ndev, NO_SCAN_IN4WAY,
+		WL_EXT_STATUS_SCAN, NULL);
+	if (err)
+		return err;
+
+	err = __wl_cfg80211_scan(wiphy, ndev, request, NULL);
+	if (unlikely(err)) {
+		WL_ERR(("scan error (%d)\n", err));
+	}
+#ifdef WL_DRV_AVOID_SCANCACHE
+	/* Reset roam cache after successful scan request */
+#ifdef ROAM_CHANNEL_CACHE
+	if (!err) {
+		reset_roam_cache(cfg);
+	}
+#endif /* ROAM_CHANNEL_CACHE */
+#endif /* WL_DRV_AVOID_SCANCACHE */
+	return err;
+}
+
+/* Return the wireless_dev that owns the scan currently in progress
+ * (regular scan first, then scheduled scan), or NULL when no scan is
+ * active or cfg is invalid.
+ *
+ * Note: This API should be invoked with scan_sync mutex
+ * held so that scan_request data structures doesn't
+ * get modified in between.
+ */
+struct wireless_dev *
+wl_get_scan_wdev(struct bcm_cfg80211 *cfg)
+{
+	if (!cfg) {
+		WL_ERR(("cfg ptr null\n"));
+		return NULL;
+	}
+
+	if (!cfg->scan_request && !cfg->sched_scan_req) {
+		/* No scans in progress */
+		WL_MEM(("no scan in progress \n"));
+		return NULL;
+	}
+
+	/* A regular scan request takes precedence over a scheduled scan */
+	if (cfg->scan_request) {
+		return GET_SCAN_WDEV(cfg->scan_request);
+	}
+#ifdef WL_SCHED_SCAN
+	if (cfg->sched_scan_req) {
+		return GET_SCHED_SCAN_WDEV(cfg->sched_scan_req);
+	}
+#endif /* WL_SCHED_SCAN */
+
+	WL_MEM(("no scan in progress \n"));
+	return NULL;
+}
+
+/* Cancel any scan in progress and report completion (aborted) to the
+ * cfg80211 stack. Takes cfg->scan_sync for the whole operation.
+ */
+void wl_cfg80211_cancel_scan(struct bcm_cfg80211 *cfg)
+{
+	struct wireless_dev *wdev;
+	struct net_device *ndev;
+
+	mutex_lock(&cfg->scan_sync);
+	if (!cfg->scan_request && !cfg->sched_scan_req) {
+		/* No scans in progress */
+		WL_INFORM_MEM(("No scan in progress\n"));
+	} else if ((wdev = wl_get_scan_wdev(cfg)) == NULL) {
+		WL_ERR(("No wdev present\n"));
+	} else {
+		ndev = wdev_to_wlc_ndev(wdev, cfg);
+		/* aborted = true, fw_abort = true */
+		wl_notify_escan_complete(cfg, ndev, true, true);
+		WL_INFORM_MEM(("Scan aborted! \n"));
+	}
+	mutex_unlock(&cfg->scan_sync);
+}
+
+/*
+ * wl_cfg80211_scan_abort() - stop the firmware scan engine by issuing a
+ * WLC_SCAN with the magic channel value -1, then release any pending TDLS
+ * management frame. Skipped entirely in atomic context (ioctl may sleep).
+ */
+void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg)
+{
+	void *params = NULL;
+	s32 params_size = 0;
+	s32 err = BCME_OK;
+	struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+	u32 channel, channel_num;
+
+	if (!in_atomic()) {
+		/* Abort scan params only need space for 1 channel and 0 ssids */
+		if (cfg->scan_params_v2) {
+			params_size = WL_SCAN_PARAMS_V2_FIXED_SIZE + 1 * sizeof(uint16);
+		} else {
+			params_size = WL_SCAN_PARAMS_FIXED_SIZE + 1 * sizeof(uint16);
+		}
+		params = MALLOCZ(cfg->osh, params_size);
+		if (params == NULL) {
+			WL_ERR(("mem alloc failed (%d bytes)\n", params_size));
+			return;
+		}
+
+		/* Use magic value of channel=-1 to abort scan */
+		channel = htodchanspec(-1);
+		channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+			(1 & WL_SCAN_PARAMS_COUNT_MASK));
+		/* channel_list entries are u16, so the u32 magic value is
+		 * intentionally truncated on assignment below.
+		 */
+		if (cfg->scan_params_v2) {
+			wl_scan_params_v2_t *params_v2 = (wl_scan_params_v2_t *)params;
+			params_v2->channel_list[0] = channel;
+			params_v2->channel_num = channel_num;
+		} else {
+			wl_scan_params_t *params_v1 = (wl_scan_params_t *)params;
+			params_v1->channel_list[0] = channel;
+			params_v1->channel_num = channel_num;
+		}
+		/* Do a scan abort to stop the driver's scan engine */
+		err = wldev_ioctl_set(dev, WLC_SCAN, params, params_size);
+		if (err < 0) {
+			/* scan abort can fail if there is no outstanding scan */
+			WL_DBG(("scan abort  failed. ret:%d\n", err));
+		}
+		MFREE(cfg->osh, params, params_size);
+	}
+#ifdef WLTDLS
+	if (cfg->tdls_mgmt_frame) {
+		MFREE(cfg->osh, cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len);
+		cfg->tdls_mgmt_frame = NULL;
+		cfg->tdls_mgmt_frame_len = 0;
+	}
+#endif /* WLTDLS */
+}
+
+/*
+ * wl_notify_escan_complete() - common scan-completion path: optionally
+ * abort the firmware scan, stop the watchdog timer, publish the collected
+ * BSS list, report done to cfg80211 (regular and scheduled scans) and
+ * clear SCANNING state.
+ *
+ * @aborted:  report the scan as aborted to cfg80211.
+ * @fw_abort: also issue a firmware scan abort (skipped in atomic context).
+ *
+ * NOTE(review): callers are expected to hold cfg->scan_sync (see the
+ * "scan_sync mutex is already held" comment below) — verify at call sites.
+ */
+s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev,
+	bool aborted, bool fw_abort)
+{
+	s32 err = BCME_OK;
+	unsigned long flags;
+	struct net_device *dev;
+	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+	WL_DBG(("Enter \n"));
+	BCM_REFERENCE(dhdp);
+
+	if (!ndev) {
+		WL_ERR(("ndev is null\n"));
+		err = BCME_ERROR;
+		goto out;
+	}
+
+	if (cfg->escan_info.ndev != ndev) {
+		WL_ERR(("Outstanding scan req ndev not matching (%p:%p)\n",
+			cfg->escan_info.ndev, ndev));
+		err = BCME_ERROR;
+		goto out;
+	}
+
+	/* Resolve the net_device the completion should be reported against */
+	if (cfg->scan_request) {
+		dev = bcmcfg_to_prmry_ndev(cfg);
+#if defined(WL_ENABLE_P2P_IF)
+		if (cfg->scan_request->dev != cfg->p2p_net)
+			dev = cfg->scan_request->dev;
+#elif defined(WL_CFG80211_P2P_DEV_IF)
+		if (cfg->scan_request->wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)
+			dev = cfg->scan_request->wdev->netdev;
+#endif // endif
+	}
+	else {
+		WL_DBG(("cfg->scan_request is NULL. Internal scan scenario."
+			"doing scan_abort for ndev %p primary %p",
+			ndev, bcmcfg_to_prmry_ndev(cfg)));
+		dev = ndev;
+	}
+	if (fw_abort && !in_atomic())
+		wl_cfg80211_scan_abort(cfg);
+	if (timer_pending(&cfg->scan_timeout))
+		del_timer_sync(&cfg->scan_timeout);
+	cfg->scan_enq_time = 0;
+#if defined(ESCAN_RESULT_PATCH)
+	if (likely(cfg->scan_request)) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+		/* An aborted P2P scan with FLAG_FLUSH uses the other buffer */
+		if (aborted && p2p_scan(cfg) &&
+			(cfg->scan_request->flags & NL80211_SCAN_FLAG_FLUSH)) {
+			WL_ERR(("scan list is changed"));
+			cfg->bss_list = wl_escan_get_buf(cfg, !aborted);
+		} else
+#endif // endif
+			cfg->bss_list = wl_escan_get_buf(cfg, aborted);
+
+		wl_inform_bss(cfg);
+	}
+#endif /* ESCAN_RESULT_PATCH */
+	WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+#ifdef WL_SCHED_SCAN
+	if (cfg->sched_scan_req && !cfg->scan_request) {
+		if (!aborted) {
+			WL_INFORM_MEM(("[%s] Report sched scan done.\n", dev->name));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+			cfg80211_sched_scan_results(cfg->sched_scan_req->wiphy,
+				cfg->sched_scan_req->reqid);
+#else
+			cfg80211_sched_scan_results(cfg->sched_scan_req->wiphy);
+#endif /* LINUX_VER > 4.11 */
+		}
+
+		DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_SCAN_COMPLETE);
+		cfg->sched_scan_running = FALSE;
+		cfg->sched_scan_req = NULL;
+	}
+#endif /* WL_SCHED_SCAN */
+	if (likely(cfg->scan_request)) {
+		WL_INFORM_MEM(("[%s] Report scan done.\n", dev->name));
+		/* scan_sync mutex is already held */
+		_wl_notify_scan_done(cfg, aborted);
+		cfg->scan_request = NULL;
+	}
+	if (p2p_is_on(cfg))
+		wl_clr_p2p_status(cfg, SCANNING);
+	wl_clr_drv_status(cfg, SCANNING, dev);
+
+	DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
+	DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub));
+	WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+
+out:
+	return err;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+/* cfg80211 .abort_scan hook: issue a firmware scan abort when a scan is
+ * in progress. Completion (cfg80211_scan_done with aborted status) is
+ * reported later from the normal scan-complete path, as cfg80211 expects.
+ */
+void
+wl_cfg80211_abort_scan(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wdev->wiphy);
+
+	WL_DBG(("Enter wl_cfg80211_abort_scan\n"));
+
+	/* Check if any scan in progress only then abort */
+	if (!wl_get_drv_status_all(cfg, SCANNING)) {
+		return;
+	}
+
+	wl_cfg80211_scan_abort(cfg);
+	/* Only scan abort is issued here. As per the expectation of abort_scan
+	 * the status of abort is needed to be communicated using cfg80211_scan_done call.
+	 * Here we just issue abort request and let the scan complete path to indicate
+	 * abort to cfg80211 layer.
+	 */
+	WL_DBG(("wl_cfg80211_abort_scan: Scan abort issued to FW\n"));
+}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+
+/* Cancel any pending scan on @cfgdev and notify cfg80211 of completion
+ * (delegates both to wl_cfg80211_cancel_scan).
+ * Returns 0 on success, -EINVAL if @cfg or @cfgdev is NULL.
+ */
+int wl_cfg80211_scan_stop(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev)
+{
+	int ret = 0;
+
+	WL_TRACE(("Enter\n"));
+
+	if (!cfg || !cfgdev) {
+		return -EINVAL;
+	}
+
+	/* cancel scan and notify scan status */
+	wl_cfg80211_cancel_scan(cfg);
+
+	return ret;
+}
+
+/* This API is just meant as a wrapper for cfg80211_scan_done
+ * API. This doesn't do state mgmt. For cancelling scan,
+ * please use wl_cfg80211_cancel_scan API.
+ *
+ * NOTE(review): callers in this file invoke it under cfgdrv_lock and/or
+ * the scan_sync mutex (see wl_notify_scan_status) — confirm all call
+ * sites hold the lock protecting cfg->scan_request.
+ */
+static void
+_wl_notify_scan_done(struct bcm_cfg80211 *cfg, bool aborted)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+	struct cfg80211_scan_info info;
+#endif // endif
+
+	/* Nothing to report when no cfg80211 request is pending */
+	if (!cfg->scan_request) {
+		return;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+	/* Kernels >= 4.8 take a cfg80211_scan_info instead of a bool */
+	memset_s(&info, sizeof(struct cfg80211_scan_info), 0, sizeof(struct cfg80211_scan_info));
+	info.aborted = aborted;
+	cfg80211_scan_done(cfg->scan_request, &info);
+#else
+	cfg80211_scan_done(cfg->scan_request, aborted);
+#endif // endif
+	cfg->scan_request = NULL;
+}
+
+#ifdef WL_DRV_AVOID_SCANCACHE
+/* P2P action-frame common-channel search hook for escan events.
+ * Returns non-zero iff FINDING_COMMON_CHANNEL is set; the caller then
+ * skips normal escan result handling.  On a PARTIAL result whose P2P
+ * device address matches afx_hdl->tx_dst_addr, records the peer's control
+ * channel and completes act_frm_scan; on any other status, ends the
+ * action-frame scan (completing act_frm_scan if no peer channel found).
+ */
+static u32 wl_p2p_find_peer_channel(struct bcm_cfg80211 *cfg, s32 status, wl_bss_info_t *bi,
+	u32 bi_length)
+{
+	u32 ret;
+	u8 *p2p_dev_addr = NULL;
+
+	ret = wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL);
+	if (!ret) {
+		return ret;
+	}
+	if (status == WLC_E_STATUS_PARTIAL) {
+		p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length);
+		if (p2p_dev_addr && !memcmp(p2p_dev_addr,
+			cfg->afx_hdl->tx_dst_addr.octet, ETHER_ADDR_LEN)) {
+			s32 channel = wf_chspec_ctlchan(
+				wl_chspec_driver_to_host(bi->chanspec));
+
+			/* Reject out-of-range control channels */
+			if ((channel > MAXCHANNEL) || (channel <= 0)) {
+				channel = WL_INVALID;
+			} else {
+				WL_ERR(("ACTION FRAME SCAN : Peer " MACDBG " found,"
+					" channel : %d\n",
+					MAC2STRDBG(cfg->afx_hdl->tx_dst_addr.octet),
+					channel));
+			}
+			wl_clr_p2p_status(cfg, SCANNING);
+			cfg->afx_hdl->peer_chan = channel;
+			complete(&cfg->act_frm_scan);
+		}
+	} else {
+		WL_INFORM_MEM(("ACTION FRAME SCAN DONE\n"));
+		wl_clr_p2p_status(cfg, SCANNING);
+		wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+		/* Wake the waiter even when the peer was never seen */
+		if (cfg->afx_hdl->peer_chan == WL_INVALID)
+			complete(&cfg->act_frm_scan);
+	}
+
+	return ret;
+}
+
+/* Escan event handler path used when the driver-side scan cache is
+ * disabled (WL_DRV_AVOID_SCANCACHE): each PARTIAL result is forwarded
+ * straight to cfg80211 via wl_inform_single_bss() instead of being
+ * accumulated, and terminal statuses notify escan completion.
+ * Returns BCME_OK or the error from wl_inform_single_bss().
+ */
+static s32 wl_escan_without_scan_cache(struct bcm_cfg80211 *cfg, wl_escan_result_t *escan_result,
+	struct net_device *ndev, const wl_event_msg_t *e, s32 status)
+{
+	s32 err = BCME_OK;
+	wl_bss_info_t *bi;
+	u32 bi_length;
+	bool aborted = false;
+	bool fw_abort = false;
+	bool notify_escan_complete = false;
+
+	if (wl_escan_check_sync_id(status, escan_result->sync_id,
+		cfg->escan_info.cur_sync_id) < 0) {
+		goto exit;
+	}
+
+	wl_escan_print_sync_id(status, escan_result->sync_id,
+		cfg->escan_info.cur_sync_id);
+
+	/* Keep the escan state machine armed only while more results may
+	 * still arrive (TIMEOUT / PARTIAL); any other status ends the scan.
+	 * The previous condition "!(a == x) || !(a == y)" was a tautology
+	 * (always true), forcing IDLE even on partial results.
+	 */
+	if ((status != WLC_E_STATUS_TIMEOUT) && (status != WLC_E_STATUS_PARTIAL)) {
+		cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+	}
+
+	if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+		notify_escan_complete = true;
+	}
+
+	if (status == WLC_E_STATUS_PARTIAL) {
+		WL_DBG(("WLC_E_STATUS_PARTIAL \n"));
+		DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND);
+		if ((!escan_result) || (dtoh16(escan_result->bss_count) != 1)) {
+			WL_ERR(("Invalid escan result (NULL pointer) or invalid bss_count\n"));
+			goto exit;
+		}
+
+		/* Validate the embedded bss_info before dereferencing it
+		 * (the previous code read bi->length before its NULL check).
+		 */
+		bi = escan_result->bss_info;
+		if (!bi) {
+			WL_ERR(("Invalid escan bss info (NULL pointer)\n"));
+			goto exit;
+		}
+		bi_length = dtoh32(bi->length);
+		if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) {
+			WL_ERR(("Invalid bss_info length\n"));
+			goto exit;
+		}
+
+		/* Drop IBSS results unless the wiphy advertises ad-hoc mode */
+		if (!(bcmcfg_to_wiphy(cfg)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) {
+			if (dtoh16(bi->capability) & DOT11_CAP_IBSS) {
+				WL_DBG(("Ignoring IBSS result\n"));
+				goto exit;
+			}
+		}
+
+		if (wl_p2p_find_peer_channel(cfg, status, bi, bi_length)) {
+			goto exit;
+		} else {
+			if (scan_req_match(cfg)) {
+				/* p2p scan && allow only probe response */
+				if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) &&
+					(bi->flags & WL_BSS_FLAGS_FROM_BEACON))
+					goto exit;
+			}
+#ifdef ROAM_CHANNEL_CACHE
+			add_roam_cache(cfg, bi);
+#endif /* ROAM_CHANNEL_CACHE */
+			err = wl_inform_single_bss(cfg, bi, false);
+#ifdef ROAM_CHANNEL_CACHE
+			/* print_roam_cache(); */
+			update_roam_cache(cfg, ioctl_version);
+#endif /* ROAM_CHANNEL_CACHE */
+
+			/*
+			 * !Broadcast && number of ssid = 1 && number of channels =1
+			 * means specific scan to association
+			 */
+			if (wl_cfgp2p_is_p2p_specific_scan(cfg->scan_request)) {
+				WL_ERR(("P2P assoc scan fast aborted.\n"));
+				aborted = false;
+				fw_abort = true;
+			}
+			/* Directly exit from function here and
+			 * avoid sending notify completion to cfg80211
+			 */
+			goto exit;
+		}
+	} else if (status == WLC_E_STATUS_SUCCESS) {
+		if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
+			goto exit;
+		}
+		WL_INFORM_MEM(("ESCAN COMPLETED\n"));
+		DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_COMPLETE);
+
+		/* Update escan complete status */
+		aborted = false;
+		fw_abort = false;
+
+	} else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) ||
+		(status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) ||
+		(status == WLC_E_STATUS_NEWASSOC)) {
+		/* Handle all cases of scan abort */
+
+		WL_DBG(("ESCAN ABORT reason: %d\n", status));
+		if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
+			goto exit;
+		}
+		WL_INFORM_MEM(("ESCAN ABORTED\n"));
+
+		/* Update escan complete status */
+		aborted = true;
+		fw_abort = false;
+
+	} else if (status == WLC_E_STATUS_TIMEOUT) {
+		WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request));
+		WL_ERR(("reason[0x%x]\n", e->reason));
+		/* reason 0xFFFFFFFF is the synthetic timeout injected by
+		 * wl_scan_timeout(); force a firmware-side abort for it.
+		 */
+		if (e->reason == 0xFFFFFFFF) {
+			/* Update escan complete status */
+			aborted = true;
+			fw_abort = true;
+		}
+	} else {
+		WL_ERR(("unexpected Escan Event %d : abort\n", status));
+
+		if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
+			goto exit;
+		}
+		/* Update escan complete status */
+		aborted = true;
+		fw_abort = false;
+	}
+
+	/* Notify escan complete status */
+	if (notify_escan_complete) {
+		wl_notify_escan_complete(cfg, ndev, aborted, fw_abort);
+	}
+
+exit:
+	return err;
+
+}
+#endif /* WL_DRV_AVOID_SCANCACHE */
+
+/* Legacy (non-escan) scan-complete event handler: pulls the finished
+ * result list from firmware with WLC_SCAN_RESULTS, informs cfg80211 of
+ * each BSS, then completes the pending cfg80211 scan request.
+ * Runs under the scan_sync mutex; cfg->scan_request is touched only
+ * under cfgdrv_lock.
+ */
+s32
+wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct channel_info channel_inform;
+	struct wl_scan_results *bss_list;
+	struct net_device *ndev = NULL;
+	u32 len = WL_SCAN_BUF_MAX;
+	s32 err = 0;
+	unsigned long flags;
+
+	WL_DBG(("Enter \n"));
+	/* NOTE(review): SCANNING is tested here with ndev still NULL and the
+	 * real ndev resolved only afterwards — confirm wl_get_drv_status
+	 * tolerates a NULL device argument.
+	 */
+	if (!wl_get_drv_status(cfg, SCANNING, ndev)) {
+		WL_DBG(("scan is not ready \n"));
+		return err;
+	}
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	mutex_lock(&cfg->scan_sync);
+	wl_clr_drv_status(cfg, SCANNING, ndev);
+	bzero(&channel_inform, sizeof(channel_inform));
+	err = wldev_ioctl_get(ndev, WLC_GET_CHANNEL, &channel_inform,
+		sizeof(channel_inform));
+	if (unlikely(err)) {
+		WL_ERR(("scan busy (%d)\n", err));
+		goto scan_done_out;
+	}
+	channel_inform.scan_channel = dtoh32(channel_inform.scan_channel);
+	if (unlikely(channel_inform.scan_channel)) {
+
+		WL_DBG(("channel_inform.scan_channel (%d)\n",
+			channel_inform.scan_channel));
+	}
+	/* Fetch the complete result set into the driver scan buffer */
+	cfg->bss_list = cfg->scan_results;
+	bss_list = cfg->bss_list;
+	bzero(bss_list, len);
+	bss_list->buflen = htod32(len);
+	err = wldev_ioctl_get(ndev, WLC_SCAN_RESULTS, bss_list, len);
+	if (unlikely(err) && unlikely(!cfg->scan_suppressed)) {
+		WL_ERR(("%s Scan_results error (%d)\n", ndev->name, err));
+		err = -EINVAL;
+		goto scan_done_out;
+	}
+	/* Convert the result header fields from dongle to host byte order */
+	bss_list->buflen = dtoh32(bss_list->buflen);
+	bss_list->version = dtoh32(bss_list->version);
+	bss_list->count = dtoh32(bss_list->count);
+
+	err = wl_inform_bss(cfg);
+
+scan_done_out:
+	/* Scan finished (or failed): stop the watchdog and complete the
+	 * cfg80211 request even on the error paths above.
+	 */
+	del_timer_sync(&cfg->scan_timeout);
+	WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+	if (cfg->scan_request) {
+		_wl_notify_scan_done(cfg, false);
+		cfg->scan_request = NULL;
+	}
+	WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+	WL_DBG(("cfg80211_scan_done\n"));
+	mutex_unlock(&cfg->scan_sync);
+	return err;
+}
+
+/* Public wrapper around cfg80211_scan_done() for cfg->scan_request.
+ * Does no state management (scan_request is not cleared here — callers
+ * own that).  Now guards against a NULL scan_request, matching
+ * _wl_notify_scan_done(): cfg80211_scan_done() dereferences the request.
+ */
+void wl_notify_scan_done(struct bcm_cfg80211 *cfg, bool aborted)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+	struct cfg80211_scan_info info;
+#endif // endif
+
+	if (!cfg->scan_request) {
+		return;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+	/* Kernels >= 4.8 take a cfg80211_scan_info instead of a bool */
+	bzero(&info, sizeof(struct cfg80211_scan_info));
+	info.aborted = aborted;
+	cfg80211_scan_done(cfg->scan_request, &info);
+#else
+	cfg80211_scan_done(cfg->scan_request, aborted);
+#endif // endif
+}
+
+#ifdef WL_SCHED_SCAN
+#define PNO_TIME 30
+#define PNO_REPEAT 4
+#define PNO_FREQ_EXPO_MAX 2
+/* Return TRUE iff @ssid appears in @ssid_list (first @count entries).
+ * Matches on exact length plus exact bytes.  Uses memcmp rather than
+ * strncmp: an 802.11 SSID is an octet string and may legally contain
+ * embedded NUL bytes, which would terminate a strncmp comparison early
+ * and make distinct SSIDs compare equal.
+ */
+static bool
+is_ssid_in_list(struct cfg80211_ssid *ssid, struct cfg80211_ssid *ssid_list, int count)
+{
+	int i;
+
+	if (!ssid || !ssid_list)
+		return FALSE;
+
+	for (i = 0; i < count; i++) {
+		if (ssid->ssid_len == ssid_list[i].ssid_len) {
+			if (memcmp(ssid->ssid, ssid_list[i].ssid, ssid->ssid_len) == 0)
+				return TRUE;
+		}
+	}
+	return FALSE;
+}
+
+/* cfg80211 .sched_scan_start op: program firmware PNO from the request's
+ * match sets (capped at MAX_PFN_LIST_COUNT).  SSIDs that also appear in
+ * request->ssids are marked hidden so firmware probes actively for them.
+ * Each programmed SSID is mirrored to the DHD event debug ring when that
+ * ring is active.  Returns 0 on success or a negative errno.
+ */
+int
+wl_cfg80211_sched_scan_start(struct wiphy *wiphy,
+	struct net_device *dev,
+	struct cfg80211_sched_scan_request *request)
+{
+	ushort pno_time = PNO_TIME;
+	int pno_repeat = PNO_REPEAT;
+	int pno_freq_expo_max = PNO_FREQ_EXPO_MAX;
+	wlc_ssid_ext_t ssids_local[MAX_PFN_LIST_COUNT];
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+	struct cfg80211_ssid *ssid = NULL;
+	struct cfg80211_ssid *hidden_ssid_list = NULL;
+	log_conn_event_t *event_data = NULL;
+	tlv_log *tlv_data = NULL;
+	u32 alloc_len, tlv_len;
+	u32 payload_len;
+	int ssid_cnt = 0;
+	int i;
+	int ret = 0;
+	unsigned long flags;
+
+	if (!request) {
+		WL_ERR(("Sched scan request was NULL\n"));
+		return -EINVAL;
+	}
+
+	WL_DBG(("Enter \n"));
+	WL_PNO((">>> SCHED SCAN START\n"));
+	WL_PNO(("Enter n_match_sets:%d n_ssids:%d \n",
+		request->n_match_sets, request->n_ssids));
+	WL_PNO(("ssids:%d pno_time:%d pno_repeat:%d pno_freq:%d \n",
+		request->n_ssids, pno_time, pno_repeat, pno_freq_expo_max));
+
+	/* Both a hidden-SSID list and at least one match set are required */
+	if (!request->n_ssids || !request->n_match_sets) {
+		WL_ERR(("Invalid sched scan req!! n_ssids:%d \n", request->n_ssids));
+		return -EINVAL;
+	}
+
+	bzero(&ssids_local, sizeof(ssids_local));
+
+	if (request->n_ssids > 0) {
+		hidden_ssid_list = request->ssids;
+	}
+
+	/* Pre-allocate the debug-ring record (one SSID TLV) if logging is on */
+	if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+		alloc_len = sizeof(log_conn_event_t) + DOT11_MAX_SSID_LEN;
+		event_data = (log_conn_event_t *)MALLOC(cfg->osh, alloc_len);
+		if (!event_data) {
+			WL_ERR(("%s: failed to allocate log_conn_event_t with "
+				"length(%d)\n", __func__, alloc_len));
+			return -ENOMEM;
+		}
+		bzero(event_data, alloc_len);
+		event_data->tlvs = NULL;
+		tlv_len = sizeof(tlv_log);
+		event_data->tlvs = (tlv_log *)MALLOC(cfg->osh, tlv_len);
+		if (!event_data->tlvs) {
+			WL_ERR(("%s: failed to allocate log_tlv with "
+				"length(%d)\n", __func__, tlv_len));
+			MFREE(cfg->osh, event_data, alloc_len);
+			return -ENOMEM;
+		}
+	}
+	/* Build the firmware PFN list from the match sets */
+	for (i = 0; i < request->n_match_sets && ssid_cnt < MAX_PFN_LIST_COUNT; i++) {
+		ssid = &request->match_sets[i].ssid;
+		/* No need to include null ssid */
+		if (ssid->ssid_len) {
+			ssids_local[ssid_cnt].SSID_len = MIN(ssid->ssid_len,
+				(uint32)DOT11_MAX_SSID_LEN);
+			/* In previous step max SSID_len is limited to DOT11_MAX_SSID_LEN,
+			 * returning void
+			 */
+			(void)memcpy_s(ssids_local[ssid_cnt].SSID, DOT11_MAX_SSID_LEN, ssid->ssid,
+				ssids_local[ssid_cnt].SSID_len);
+			if (is_ssid_in_list(ssid, hidden_ssid_list, request->n_ssids)) {
+				ssids_local[ssid_cnt].hidden = TRUE;
+				WL_PNO((">>> PNO hidden SSID (%s) \n", ssid->ssid));
+			} else {
+				ssids_local[ssid_cnt].hidden = FALSE;
+				WL_PNO((">>> PNO non-hidden SSID (%s) \n", ssid->ssid));
+			}
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 15, 0))
+			/* Per-match RSSI threshold is only available on 3.15+ */
+			if (request->match_sets[i].rssi_thold != NL80211_SCAN_RSSI_THOLD_OFF) {
+				ssids_local[ssid_cnt].rssi_thresh =
+					(int8)request->match_sets[i].rssi_thold;
+			}
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 15, 0)) */
+			ssid_cnt++;
+		}
+	}
+
+	if (ssid_cnt) {
+		if ((ret = dhd_dev_pno_set_for_ssid(dev, ssids_local, ssid_cnt,
+			pno_time, pno_repeat, pno_freq_expo_max, NULL, 0)) < 0) {
+			WL_ERR(("PNO setup failed!! ret=%d \n", ret));
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		/* Mirror each programmed SSID to the event debug ring */
+		if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+			for (i = 0; i < ssid_cnt; i++) {
+				payload_len = sizeof(log_conn_event_t);
+				event_data->event = WIFI_EVENT_DRIVER_PNO_ADD;
+				tlv_data = event_data->tlvs;
+				/* ssid */
+				tlv_data->tag = WIFI_TAG_SSID;
+				tlv_data->len = ssids_local[i].SSID_len;
+				(void)memcpy_s(tlv_data->value, DOT11_MAX_SSID_LEN,
+					ssids_local[i].SSID, ssids_local[i].SSID_len);
+				payload_len += TLV_LOG_SIZE(tlv_data);
+
+				dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
+					event_data, payload_len);
+			}
+		}
+
+		WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+		cfg->sched_scan_req = request;
+		WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+	} else {
+		ret = -EINVAL;
+	}
+exit:
+	/* event_data is non-NULL only when both allocations succeeded */
+	if (event_data) {
+		MFREE(cfg->osh, event_data->tlvs, tlv_len);
+		MFREE(cfg->osh, event_data, alloc_len);
+	}
+	return ret;
+}
+
+/* cfg80211 .sched_scan_stop op: tear down firmware PNO, abort any scan
+ * the sched-scan machinery still has running, and clear the sched-scan
+ * bookkeeping under cfgdrv_lock.  Always returns 0 (a PNO-stop failure
+ * is only logged).  The reqid parameter (4.12+) is unused.
+ */
+int
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 11, 0))
+wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, u64 reqid)
+#else
+wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev)
+#endif /* LINUX_VER > 4.11 */
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+	unsigned long flags;
+
+	WL_DBG(("Enter \n"));
+	WL_PNO((">>> SCHED SCAN STOP\n"));
+
+	if (dhd_dev_pno_stop_for_ssid(dev) < 0) {
+		WL_ERR(("PNO Stop for SSID failed"));
+	} else {
+		DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_REMOVE);
+	}
+
+	/* The targeted escan triggered by a PNO hit may still be running */
+	if (cfg->sched_scan_req || cfg->sched_scan_running) {
+		WL_PNO((">>> Sched scan running. Aborting it..\n"));
+		wl_cfg80211_cancel_scan(cfg);
+	}
+	WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+	cfg->sched_scan_req = NULL;
+	cfg->sched_scan_running = FALSE;
+	WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+	return 0;
+}
+#endif /* WL_SCHED_SCAN */
+
+/* scan_timeout timer callback: fires when an escan did not complete in
+ * time.  Dumps diagnostics (partial scan buffer, bus interrupt counters,
+ * optional scheduler-delay analysis) and then injects a synthetic
+ * WLC_E_ESCAN_RESULT event with status WLC_E_STATUS_TIMEOUT and reason
+ * 0xFFFFFFFF so the normal escan path tears the scan down.
+ */
+static void wl_scan_timeout(unsigned long data)
+{
+	wl_event_msg_t msg;
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+	struct wireless_dev *wdev = NULL;
+	struct net_device *ndev = NULL;
+	struct wl_scan_results *bss_list;
+	wl_bss_info_t *bi = NULL;
+	s32 i;
+	u32 channel;
+	u64 cur_time = OSL_LOCALTIME_NS();
+	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+	unsigned long flags;
+#ifdef RTT_SUPPORT
+	rtt_status_info_t *rtt_status = NULL;
+	UNUSED_PARAMETER(rtt_status);
+#endif /* RTT_SUPPORT */
+
+	UNUSED_PARAMETER(cur_time);
+	/* Snapshot the wdev under cfgdrv_lock; bail if the request vanished */
+	WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+	if (!(cfg->scan_request)) {
+		WL_ERR(("timer expired but no scan request\n"));
+		WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+		return;
+	}
+
+	wdev = GET_SCAN_WDEV(cfg->scan_request);
+	WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+
+	if (!wdev) {
+		WL_ERR(("No wireless_dev present\n"));
+		return;
+	}
+
+	/* Skip diagnostics/injection if the bus is already in an error state */
+	if (dhd_query_bus_erros(dhdp)) {
+		return;
+	}
+#if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
+	/* If the event was enqueued but never dequeued (or DPC scheduling
+	 * errors are reported), the timeout is a host scheduling problem:
+	 * dump timing/lock state and panic for a memdump.
+	 */
+	if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON &&
+		((cfg->scan_deq_time < cfg->scan_enq_time) ||
+		dhd_bus_query_dpc_sched_errors(dhdp))) {
+		WL_ERR(("****SCAN event timeout due to scheduling problem\n"));
+		/* change g_assert_type to trigger Kernel panic */
+		g_assert_type = 2;
+#ifdef RTT_SUPPORT
+		rtt_status = GET_RTTSTATE(dhdp);
+#endif /* RTT_SUPPORT */
+		WL_ERR(("***SCAN event timeout. WQ state:0x%x scan_enq_time:"SEC_USEC_FMT
+			" evt_hdlr_entry_time:"SEC_USEC_FMT" evt_deq_time:"SEC_USEC_FMT
+			"\nscan_deq_time:"SEC_USEC_FMT" scan_hdlr_cmplt_time:"SEC_USEC_FMT
+			" scan_cmplt_time:"SEC_USEC_FMT" evt_hdlr_exit_time:"SEC_USEC_FMT
+			"\ncurrent_time:"SEC_USEC_FMT"\n", work_busy(&cfg->event_work),
+			GET_SEC_USEC(cfg->scan_enq_time), GET_SEC_USEC(cfg->wl_evt_hdlr_entry_time),
+			GET_SEC_USEC(cfg->wl_evt_deq_time), GET_SEC_USEC(cfg->scan_deq_time),
+			GET_SEC_USEC(cfg->scan_hdlr_cmplt_time), GET_SEC_USEC(cfg->scan_cmplt_time),
+			GET_SEC_USEC(cfg->wl_evt_hdlr_exit_time), GET_SEC_USEC(cur_time)));
+		if (cfg->scan_enq_time) {
+			WL_ERR(("Elapsed time(ns): %llu\n", (cur_time - cfg->scan_enq_time)));
+		}
+		WL_ERR(("lock_states:[%d:%d:%d:%d:%d:%d]\n",
+			mutex_is_locked(&cfg->if_sync),
+			mutex_is_locked(&cfg->usr_sync),
+			mutex_is_locked(&cfg->pm_sync),
+			mutex_is_locked(&cfg->scan_sync),
+			spin_is_locked(&cfg->cfgdrv_lock),
+			spin_is_locked(&cfg->eq_lock)));
+#ifdef RTT_SUPPORT
+		WL_ERR(("RTT lock_state:[%d]\n",
+			mutex_is_locked(&rtt_status->rtt_mutex)));
+#ifdef WL_NAN
+		WL_ERR(("RTT and Geofence lock_states:[%d:%d]\n",
+			mutex_is_locked(&cfg->nancfg.nan_sync),
+			mutex_is_locked(&(rtt_status)->geofence_mutex)));
+#endif /* WL_NAN */
+#endif /* RTT_SUPPORT */
+
+		/* use ASSERT() to trigger panic */
+		ASSERT(0);
+	}
+#endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
+	dhd_bus_intr_count_dump(dhdp);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) && !defined(CONFIG_MODULES)
+	/* Print WQ states. Enable only for in-built drivers as the symbol is not exported */
+	show_workqueue_state();
+#endif /* LINUX_VER >= 4.1 && !CONFIG_MODULES */
+
+	/* Dump whatever partial results reached the escan buffer so far */
+	bss_list = wl_escan_get_buf(cfg, FALSE);
+	if (!bss_list) {
+		WL_ERR(("bss_list is null. Didn't receive any partial scan results\n"));
+	} else {
+		WL_ERR(("Dump scan buffer:\n"
+			"scanned AP count (%d)\n", bss_list->count));
+
+		bi = next_bss(bss_list, bi);
+		for_each_bss(bss_list, bi, i) {
+			channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
+			WL_ERR(("SSID :%s Channel :%d\n", bi->SSID, channel));
+		}
+	}
+
+	ndev = wdev_to_wlc_ndev(wdev, cfg);
+	bzero(&msg, sizeof(wl_event_msg_t));
+	WL_ERR(("timer expired\n"));
+	dhdp->scan_timeout_occurred = TRUE;
+#ifdef BCMPCIE
+	(void)dhd_pcie_dump_int_regs(dhdp);
+	dhd_pcie_dump_rc_conf_space_cap(dhdp);
+#endif /* BCMPCIE */
+#if 0
+	if (dhdp->memdump_enabled) {
+		dhdp->memdump_type = DUMP_TYPE_SCAN_TIMEOUT;
+		dhd_bus_mem_dump(dhdp);
+	}
+	/*
+	 * For the memdump sanity, blocking bus transactions for a while
+	 * Keeping it TRUE causes the sequential private cmd error
+	 */
+	dhdp->scan_timeout_occurred = FALSE;
+#endif
+	/* Inject a synthetic timeout event; reason 0xFFFFFFFF tells the
+	 * escan handler this came from the watchdog, not firmware.
+	 */
+	msg.event_type = hton32(WLC_E_ESCAN_RESULT);
+	msg.status = hton32(WLC_E_STATUS_TIMEOUT);
+	msg.reason = 0xFFFFFFFF;
+	wl_cfg80211_event(ndev, &msg, NULL);
+}
+
+/* One-time scan-subsystem init: register the escan event handler, reset
+ * the escan state machine and sync id, and arm (but do not start) the
+ * scan watchdog timer with wl_scan_timeout as its callback.
+ * Always returns 0.
+ */
+s32 wl_init_scan(struct bcm_cfg80211 *cfg)
+{
+	int err = 0;
+
+	cfg->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler;
+	cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+	wl_escan_init_sync_id(cfg);
+
+	/* Init scan_timeout timer */
+	init_timer_compat(&cfg->scan_timeout, wl_scan_timeout, cfg);
+
+	wl_cfg80211_set_bcmcfg(cfg);
+
+	return err;
+}
+
+#ifdef WL_SCHED_SCAN
+/* If target scan is not reliable, set the below define to "1" to do a
+ * full escan
+ */
+#define FULL_ESCAN_ON_PFN_NET_FOUND 0
+/* PNO (sched-scan) network-found event handler.  Parses v1 or v2
+ * wl_pfn_scanresults, builds a synthetic cfg80211_scan_request from the
+ * reported SSIDs/channels, and launches a targeted escan so results are
+ * delivered to cfg80211 with full IEs (a raw PFN result lacks them).
+ * Mirrors each found network to the DHD event debug ring when active.
+ * Returns 0 on success or a negative errno (alloc-failure paths now set
+ * -ENOMEM instead of silently returning 0).
+ */
+static s32
+wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	wl_pfn_net_info_v1_t *netinfo, *pnetinfo;
+	wl_pfn_net_info_v2_t *netinfo_v2, *pnetinfo_v2;
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+	int err = 0;
+	struct cfg80211_scan_request *request = NULL;
+	struct cfg80211_ssid ssid[MAX_PFN_LIST_COUNT];
+	struct ieee80211_channel *channel = NULL;
+	int channel_req = 0;
+	int band = 0;
+	wl_pfn_scanresults_v1_t *pfn_result_v1 = (wl_pfn_scanresults_v1_t *)data;
+	wl_pfn_scanresults_v2_t *pfn_result_v2 = (wl_pfn_scanresults_v2_t *)data;
+	int n_pfn_results = 0;
+	log_conn_event_t *event_data = NULL;
+	tlv_log *tlv_data = NULL;
+	u32 alloc_len, tlv_len;
+	u32 payload_len;
+	u8 tmp_buf[DOT11_MAX_SSID_LEN + 1];
+
+	WL_DBG(("Enter\n"));
+
+	/* These static asserts guarantee v1/v2 net_info and subnet_info are compatible
+	 * in size and SSID offset, allowing v1 to be used below except for the results
+	 * fields themselves (status, count, offset to netinfo).
+	 */
+	STATIC_ASSERT(sizeof(wl_pfn_net_info_v1_t) == sizeof(wl_pfn_net_info_v2_t));
+	STATIC_ASSERT(sizeof(wl_pfn_lnet_info_v1_t) == sizeof(wl_pfn_lnet_info_v2_t));
+	STATIC_ASSERT(sizeof(wl_pfn_subnet_info_v1_t) == sizeof(wl_pfn_subnet_info_v2_t));
+	STATIC_ASSERT(OFFSETOF(wl_pfn_subnet_info_v1_t, SSID) ==
+		OFFSETOF(wl_pfn_subnet_info_v2_t, u.SSID));
+
+	/* Extract the version-specific items */
+	if (pfn_result_v1->version == PFN_SCANRESULT_VERSION_V1) {
+		n_pfn_results = pfn_result_v1->count;
+		pnetinfo = pfn_result_v1->netinfo;
+		WL_INFORM_MEM(("PFN NET FOUND event. count:%d \n", n_pfn_results));
+
+		if (n_pfn_results > 0) {
+			int i;
+
+			if (n_pfn_results > MAX_PFN_LIST_COUNT)
+				n_pfn_results = MAX_PFN_LIST_COUNT;
+
+			bzero(&ssid, sizeof(ssid));
+
+			request = (struct cfg80211_scan_request *)MALLOCZ(cfg->osh,
+				sizeof(*request) + sizeof(*request->channels) * n_pfn_results);
+			channel = (struct ieee80211_channel *)MALLOCZ(cfg->osh,
+				(sizeof(struct ieee80211_channel) * n_pfn_results));
+			if (!request || !channel) {
+				WL_ERR(("No memory"));
+				err = -ENOMEM;
+				goto out_err;
+			}
+
+			request->wiphy = wiphy;
+
+			if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+				alloc_len = sizeof(log_conn_event_t) + DOT11_MAX_SSID_LEN +
+					sizeof(uint16) + sizeof(int16);
+				event_data = (log_conn_event_t *)MALLOC(cfg->osh, alloc_len);
+				if (!event_data) {
+					WL_ERR(("%s: failed to allocate the log_conn_event_t with "
+						"length(%d)\n", __func__, alloc_len));
+					/* was: fell through with err == 0 */
+					err = -ENOMEM;
+					goto out_err;
+				}
+				tlv_len = 3 * sizeof(tlv_log);
+				event_data->tlvs = (tlv_log *)MALLOC(cfg->osh, tlv_len);
+				if (!event_data->tlvs) {
+					WL_ERR(("%s: failed to allocate the tlv_log with "
+						"length(%d)\n", __func__, tlv_len));
+					/* was: fell through with err == 0 */
+					err = -ENOMEM;
+					goto out_err;
+				}
+			}
+
+			for (i = 0; i < n_pfn_results; i++) {
+				netinfo = &pnetinfo[i];
+				if (!netinfo) {
+					WL_ERR(("Invalid netinfo ptr. index:%d", i));
+					err = -EINVAL;
+					goto out_err;
+				}
+				if (netinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
+					WL_ERR(("Wrong SSID length:%d\n",
+						netinfo->pfnsubnet.SSID_len));
+					err = -EINVAL;
+					goto out_err;
+				}
+				/* In previous step max SSID_len limited to DOT11_MAX_SSID_LEN
+				 * and tmp_buf size is DOT11_MAX_SSID_LEN+1
+				 */
+				(void)memcpy_s(tmp_buf, DOT11_MAX_SSID_LEN,
+					netinfo->pfnsubnet.SSID, netinfo->pfnsubnet.SSID_len);
+				tmp_buf[netinfo->pfnsubnet.SSID_len] = '\0';
+				WL_PNO((">>> SSID:%s Channel:%d \n",
+					tmp_buf, netinfo->pfnsubnet.channel));
+				/* PFN result doesn't have all the info which are required by
+				 * the supplicant. (For e.g IEs) Do a target Escan so that
+				 * sched scan results are reported via wl_inform_single_bss in
+				 * the required format. Escan does require the scan request in
+				 * the form of cfg80211_scan_request. For timebeing, create
+				 * cfg80211_scan_request one out of the received PNO event.
+				 */
+
+				ssid[i].ssid_len = netinfo->pfnsubnet.SSID_len;
+				/* Returning void as ssid[i].ssid_len is limited to max of
+				 * DOT11_MAX_SSID_LEN
+				 */
+				(void)memcpy_s(ssid[i].ssid, IEEE80211_MAX_SSID_LEN,
+					netinfo->pfnsubnet.SSID, ssid[i].ssid_len);
+				request->n_ssids++;
+
+				channel_req = netinfo->pfnsubnet.channel;
+				band = (channel_req <= CH_MAX_2G_CHANNEL) ? NL80211_BAND_2GHZ
+					: NL80211_BAND_5GHZ;
+				channel[i].center_freq =
+					ieee80211_channel_to_frequency(channel_req, band);
+				channel[i].band = band;
+				channel[i].flags |= IEEE80211_CHAN_NO_HT40;
+				request->channels[i] = &channel[i];
+				request->n_channels++;
+
+				if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+					payload_len = sizeof(log_conn_event_t);
+					event_data->event = WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND;
+					tlv_data = event_data->tlvs;
+
+					/* ssid */
+					tlv_data->tag = WIFI_TAG_SSID;
+					tlv_data->len = ssid[i].ssid_len;
+					(void)memcpy_s(tlv_data->value, DOT11_MAX_SSID_LEN,
+						ssid[i].ssid, ssid[i].ssid_len);
+					payload_len += TLV_LOG_SIZE(tlv_data);
+					tlv_data = TLV_LOG_NEXT(tlv_data);
+
+					/* channel */
+					tlv_data->tag = WIFI_TAG_CHANNEL;
+					tlv_data->len = sizeof(uint16);
+					(void)memcpy_s(tlv_data->value, sizeof(uint16),
+						&channel_req, sizeof(uint16));
+					payload_len += TLV_LOG_SIZE(tlv_data);
+					tlv_data = TLV_LOG_NEXT(tlv_data);
+
+					/* rssi */
+					tlv_data->tag = WIFI_TAG_RSSI;
+					tlv_data->len = sizeof(int16);
+					(void)memcpy_s(tlv_data->value, sizeof(int16),
+						&netinfo->RSSI, sizeof(int16));
+					payload_len += TLV_LOG_SIZE(tlv_data);
+					tlv_data = TLV_LOG_NEXT(tlv_data);
+
+					dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
+						&event_data->event, payload_len);
+				}
+			}
+
+			/* assign parsed ssid array */
+			if (request->n_ssids)
+				request->ssids = &ssid[0];
+
+			if (wl_get_drv_status_all(cfg, SCANNING)) {
+				/* Abort any on-going scan */
+				wl_cfg80211_cancel_scan(cfg);
+			}
+
+			if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+				WL_PNO((">>> P2P discovery was ON. Disabling it\n"));
+				err = wl_cfgp2p_discover_enable_search(cfg, false);
+				if (unlikely(err)) {
+					wl_clr_drv_status(cfg, SCANNING, ndev);
+					goto out_err;
+				}
+				p2p_scan(cfg) = false;
+			}
+			wl_set_drv_status(cfg, SCANNING, ndev);
+#if FULL_ESCAN_ON_PFN_NET_FOUND
+			WL_PNO((">>> Doing Full ESCAN on PNO event\n"));
+			err = wl_do_escan(cfg, wiphy, ndev, NULL);
+#else
+			WL_PNO((">>> Doing targeted ESCAN on PNO event\n"));
+			err = wl_do_escan(cfg, wiphy, ndev, request);
+#endif // endif
+			if (err) {
+				wl_clr_drv_status(cfg, SCANNING, ndev);
+				goto out_err;
+			}
+			DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED);
+			cfg->sched_scan_running = TRUE;
+		}
+		else {
+			WL_ERR(("FALSE PNO Event. (pfn_count == 0) \n"));
+		}
+
+	} else if (pfn_result_v2->version == PFN_SCANRESULT_VERSION_V2) {
+		n_pfn_results = pfn_result_v2->count;
+		pnetinfo_v2 = (wl_pfn_net_info_v2_t *)pfn_result_v2->netinfo;
+
+		if (e->event_type == WLC_E_PFN_NET_LOST) {
+			WL_PNO(("Do Nothing %d\n", e->event_type));
+			return 0;
+		}
+
+		WL_INFORM_MEM(("PFN NET FOUND event. count:%d \n", n_pfn_results));
+
+		if (n_pfn_results > 0) {
+			int i;
+
+			if (n_pfn_results > MAX_PFN_LIST_COUNT)
+				n_pfn_results = MAX_PFN_LIST_COUNT;
+
+			bzero(&ssid, sizeof(ssid));
+
+			request = (struct cfg80211_scan_request *)MALLOCZ(cfg->osh,
+				sizeof(*request) + sizeof(*request->channels) * n_pfn_results);
+			channel = (struct ieee80211_channel *)MALLOCZ(cfg->osh,
+				(sizeof(struct ieee80211_channel) * n_pfn_results));
+			if (!request || !channel) {
+				WL_ERR(("No memory"));
+				err = -ENOMEM;
+				goto out_err;
+			}
+
+			request->wiphy = wiphy;
+
+			if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+				alloc_len = sizeof(log_conn_event_t) + DOT11_MAX_SSID_LEN +
+					sizeof(uint16) + sizeof(int16);
+				event_data = (log_conn_event_t *)MALLOC(cfg->osh, alloc_len);
+				if (!event_data) {
+					WL_ERR(("%s: failed to allocate the log_conn_event_t with "
+						"length(%d)\n", __func__, alloc_len));
+					/* was: fell through with err == 0 */
+					err = -ENOMEM;
+					goto out_err;
+				}
+				tlv_len = 3 * sizeof(tlv_log);
+				event_data->tlvs = (tlv_log *)MALLOC(cfg->osh, tlv_len);
+				if (!event_data->tlvs) {
+					WL_ERR(("%s: failed to allocate the tlv_log with "
+						"length(%d)\n", __func__, tlv_len));
+					/* was: fell through with err == 0 */
+					err = -ENOMEM;
+					goto out_err;
+				}
+			}
+
+			for (i = 0; i < n_pfn_results; i++) {
+				netinfo_v2 = &pnetinfo_v2[i];
+				if (!netinfo_v2) {
+					WL_ERR(("Invalid netinfo ptr. index:%d", i));
+					err = -EINVAL;
+					goto out_err;
+				}
+				WL_PNO((">>> SSID:%s Channel:%d \n",
+					netinfo_v2->pfnsubnet.u.SSID,
+					netinfo_v2->pfnsubnet.channel));
+				/* PFN result doesn't have all the info which are required by the
+				 * supplicant. (For e.g IEs) Do a target Escan so that sched scan
+				 * results are reported via wl_inform_single_bss in the required
+				 * format. Escan does require the scan request in the form of
+				 * cfg80211_scan_request. For timebeing, create
+				 * cfg80211_scan_request one out of the received PNO event.
+				 */
+				ssid[i].ssid_len = MIN(DOT11_MAX_SSID_LEN,
+					netinfo_v2->pfnsubnet.SSID_len);
+				/* max ssid_len as in previous step DOT11_MAX_SSID_LEN is same
+				 * as DOT11_MAX_SSID_LEN = 32
+				 */
+				(void)memcpy_s(ssid[i].ssid, IEEE80211_MAX_SSID_LEN,
+					netinfo_v2->pfnsubnet.u.SSID, ssid[i].ssid_len);
+				request->n_ssids++;
+
+				channel_req = netinfo_v2->pfnsubnet.channel;
+				band = (channel_req <= CH_MAX_2G_CHANNEL) ? NL80211_BAND_2GHZ
+					: NL80211_BAND_5GHZ;
+				channel[i].center_freq =
+					ieee80211_channel_to_frequency(channel_req, band);
+				channel[i].band = band;
+				channel[i].flags |= IEEE80211_CHAN_NO_HT40;
+				request->channels[i] = &channel[i];
+				request->n_channels++;
+
+				if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+					payload_len = sizeof(log_conn_event_t);
+					event_data->event = WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND;
+					tlv_data = event_data->tlvs;
+
+					/* ssid: len must match the clamped byte count actually
+					 * copied below (was the raw, unclamped SSID_len)
+					 */
+					tlv_data->tag = WIFI_TAG_SSID;
+					tlv_data->len = ssid[i].ssid_len;
+					(void)memcpy_s(tlv_data->value, DOT11_MAX_SSID_LEN,
+						ssid[i].ssid, ssid[i].ssid_len);
+					payload_len += TLV_LOG_SIZE(tlv_data);
+					tlv_data = TLV_LOG_NEXT(tlv_data);
+
+					/* channel */
+					tlv_data->tag = WIFI_TAG_CHANNEL;
+					tlv_data->len = sizeof(uint16);
+					(void)memcpy_s(tlv_data->value, sizeof(uint16),
+						&channel_req, sizeof(uint16));
+					payload_len += TLV_LOG_SIZE(tlv_data);
+					tlv_data = TLV_LOG_NEXT(tlv_data);
+
+					/* rssi */
+					tlv_data->tag = WIFI_TAG_RSSI;
+					tlv_data->len = sizeof(int16);
+					(void)memcpy_s(tlv_data->value, sizeof(int16),
+						&netinfo_v2->RSSI, sizeof(int16));
+					payload_len += TLV_LOG_SIZE(tlv_data);
+					tlv_data = TLV_LOG_NEXT(tlv_data);
+
+					dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
+						&event_data->event, payload_len);
+				}
+			}
+
+			/* assign parsed ssid array */
+			if (request->n_ssids)
+				request->ssids = &ssid[0];
+
+			if (wl_get_drv_status_all(cfg, SCANNING)) {
+				/* Abort any on-going scan */
+				wl_cfg80211_cancel_scan(cfg);
+			}
+
+			if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+				WL_PNO((">>> P2P discovery was ON. Disabling it\n"));
+				err = wl_cfgp2p_discover_enable_search(cfg, false);
+				if (unlikely(err)) {
+					wl_clr_drv_status(cfg, SCANNING, ndev);
+					goto out_err;
+				}
+				p2p_scan(cfg) = false;
+			}
+
+			wl_set_drv_status(cfg, SCANNING, ndev);
+#if FULL_ESCAN_ON_PFN_NET_FOUND
+			WL_PNO((">>> Doing Full ESCAN on PNO event\n"));
+			err = wl_do_escan(cfg, wiphy, ndev, NULL);
+#else
+			WL_PNO((">>> Doing targeted ESCAN on PNO event\n"));
+			err = wl_do_escan(cfg, wiphy, ndev, request);
+#endif // endif
+			if (err) {
+				wl_clr_drv_status(cfg, SCANNING, ndev);
+				goto out_err;
+			}
+			DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED);
+			cfg->sched_scan_running = TRUE;
+		}
+		else {
+			WL_ERR(("FALSE PNO Event. (pfn_count == 0) \n"));
+		}
+	} else {
+		WL_ERR(("Unsupported version %d, expected %d or %d\n", pfn_result_v1->version,
+			PFN_SCANRESULT_VERSION_V1, PFN_SCANRESULT_VERSION_V2));
+		return 0;
+	}
+out_err:
+	/* The synthetic request/channel arrays are only consumed synchronously
+	 * by wl_do_escan above, so they can be freed unconditionally here.
+	 */
+	if (request) {
+		MFREE(cfg->osh, request,
+			sizeof(*request) + sizeof(*request->channels) * n_pfn_results);
+	}
+	if (channel) {
+		MFREE(cfg->osh, channel,
+			(sizeof(struct ieee80211_channel) * n_pfn_results));
+	}
+
+	if (event_data) {
+		if (event_data->tlvs) {
+			MFREE(cfg->osh, event_data->tlvs, tlv_len);
+		}
+		MFREE(cfg->osh, event_data, alloc_len);
+	}
+	return err;
+}
+#endif /* WL_SCHED_SCAN */
+
+#ifdef PNO_SUPPORT
+/* Top-level PNO event dispatcher.  With GSCAN_SUPPORT, first routes the
+ * event through the ePNO vendor path and returns early unless legacy PNO
+ * is also enabled.  Legacy results are then reported either through the
+ * sched-scan path (WL_SCHED_SCAN) or via a synthetic disconnect that
+ * nudges the supplicant to rescan.  Always returns 0.
+ */
+s32
+wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct net_device *ndev = NULL;
+#ifdef GSCAN_SUPPORT
+	void *ptr;
+	int send_evt_bytes = 0;
+	u32 event = be32_to_cpu(e->event_type);
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+#endif /* GSCAN_SUPPORT */
+
+	WL_INFORM_MEM((">>> PNO Event\n"));
+
+	if (!data) {
+		WL_ERR(("Data received is NULL!\n"));
+		return 0;
+	}
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+#ifdef GSCAN_SUPPORT
+	ptr = dhd_dev_process_epno_result(ndev, data, event, &send_evt_bytes);
+	if (ptr) {
+		wl_cfgvendor_send_async_event(wiphy, ndev,
+			GOOGLE_SCAN_EPNO_EVENT, ptr, send_evt_bytes);
+		MFREE(cfg->osh, ptr, send_evt_bytes);
+	}
+	if (!dhd_dev_is_legacy_pno_enabled(ndev))
+		return 0;
+#endif /* GSCAN_SUPPORT */
+
+#ifndef WL_SCHED_SCAN
+	mutex_lock(&cfg->usr_sync);
+	/* TODO: Use cfg80211_sched_scan_results(wiphy); */
+	CFG80211_DISCONNECTED(ndev, 0, NULL, 0, false, GFP_KERNEL);
+	mutex_unlock(&cfg->usr_sync);
+#else
+	/* If cfg80211 scheduled scan is supported, report the pno results via sched
+	 * scan results
+	 */
+	wl_notify_sched_scan_results(cfg, ndev, e, data);
+#endif /* WL_SCHED_SCAN */
+	return 0;
+}
+#endif /* PNO_SUPPORT */
+
+#ifdef GSCAN_SUPPORT
+s32
+wl_notify_gscan_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+ u32 event = be32_to_cpu(e->event_type);
+ void *ptr = NULL;
+ int send_evt_bytes = 0;
+ int event_type;
+ struct net_device *ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ u32 len = ntoh32(e->datalen);
+ u32 buf_len = 0;
+
+ switch (event) {
+ case WLC_E_PFN_BEST_BATCHING:
+ err = dhd_dev_retrieve_batch_scan(ndev);
+ if (err < 0) {
+ WL_ERR(("Batch retrieval already in progress %d\n", err));
+ } else {
+ event_type = WIFI_SCAN_THRESHOLD_NUM_SCANS;
+ if (data && len) {
+ event_type = *((int *)data);
+ }
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_GSCAN_BATCH_SCAN_EVENT,
+ &event_type, sizeof(int));
+ }
+ break;
+ case WLC_E_PFN_SCAN_COMPLETE:
+ event_type = WIFI_SCAN_COMPLETE;
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_SCAN_COMPLETE_EVENT,
+ &event_type, sizeof(int));
+ break;
+ case WLC_E_PFN_BSSID_NET_FOUND:
+ ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes,
+ HOTLIST_FOUND, &buf_len);
+ if (ptr) {
+ wl_cfgvendor_send_hotlist_event(wiphy, ndev,
+ ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT);
+ dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_FOUND);
+ } else {
+ err = -ENOMEM;
+ }
+ break;
+ case WLC_E_PFN_BSSID_NET_LOST:
+	/* WLC_E_PFN_BSSID_NET_LOST shares its event code with WLC_E_PFN_SCAN_ALLGONE.
+ * We currently do not use WLC_E_PFN_SCAN_ALLGONE, so if we get it, ignore
+ */
+ if (len) {
+ ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes,
+ HOTLIST_LOST, &buf_len);
+ if (ptr) {
+ wl_cfgvendor_send_hotlist_event(wiphy, ndev,
+ ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_LOST_EVENT);
+ dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_LOST);
+ MFREE(cfg->osh, ptr, buf_len);
+ } else {
+ err = -ENOMEM;
+ }
+ } else {
+ err = -EINVAL;
+ }
+ break;
+ case WLC_E_PFN_GSCAN_FULL_RESULT:
+ ptr = dhd_dev_process_full_gscan_result(ndev, data, len, &send_evt_bytes);
+ if (ptr) {
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_SCAN_FULL_RESULTS_EVENT, ptr, send_evt_bytes);
+ MFREE(cfg->osh, ptr, send_evt_bytes);
+ } else {
+ err = -ENOMEM;
+ }
+ break;
+ case WLC_E_PFN_SSID_EXT:
+ ptr = dhd_dev_process_epno_result(ndev, data, event, &send_evt_bytes);
+ if (ptr) {
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_SCAN_EPNO_EVENT, ptr, send_evt_bytes);
+ MFREE(cfg->osh, ptr, send_evt_bytes);
+ } else {
+ err = -ENOMEM;
+ }
+ break;
+ default:
+ WL_ERR(("Unknown event %d\n", event));
+ break;
+ }
+ return err;
+}
+#endif /* GSCAN_SUPPORT */
+
+void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ if (strcmp(command, "SCAN-ACTIVE") == 0) {
+ cfg->active_scan = 1;
+ } else if (strcmp(command, "SCAN-PASSIVE") == 0) {
+ cfg->active_scan = 0;
+ } else
+ WL_ERR(("Unknown command \n"));
+ return;
+}
--- /dev/null
+/*
+ * Header for Linux cfg80211 scan
+ *
+ * Copyright (C) 1999-2019, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef _wl_cfgscan_h_
+#define _wl_cfgscan_h_
+
+#include <linux/wireless.h>
+#include <typedefs.h>
+#include <ethernet.h>
+#include <wlioctl.h>
+#include <linux/wireless.h>
+#include <net/cfg80211.h>
+#include <linux/rfkill.h>
+#include <osl.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+#define GET_SCAN_WDEV(scan_request) \
+ (scan_request && scan_request->dev) ? scan_request->dev->ieee80211_ptr : NULL;
+#else
+#define GET_SCAN_WDEV(scan_request) \
+ scan_request ? scan_request->wdev : NULL;
+#endif // endif
+#ifdef WL_SCHED_SCAN
+#define GET_SCHED_SCAN_WDEV(scan_request) \
+ (scan_request && scan_request->dev) ? scan_request->dev->ieee80211_ptr : NULL;
+#endif /* WL_SCHED_SCAN */
+
+extern s32 wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+extern s32 wl_do_escan(struct bcm_cfg80211 *cfg, struct wiphy *wiphy,
+ struct net_device *ndev, struct cfg80211_scan_request *request);
+extern s32 __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request, struct cfg80211_ssid *this_ssid);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+extern s32 wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request);
+#else
+extern s32 wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request);
+extern int wl_cfg80211_scan_stop(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+extern void wl_cfg80211_abort_scan(struct wiphy *wiphy, struct wireless_dev *wdev);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+extern void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
+extern s32 wl_init_scan(struct bcm_cfg80211 *cfg);
+extern int wl_cfg80211_scan_stop(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev);
+extern s32 wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+extern void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command);
+#ifdef PNO_SUPPORT
+extern s32 wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* PNO_SUPPORT */
+#ifdef GSCAN_SUPPORT
+extern s32 wl_notify_gscan_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* GSCAN_SUPPORT */
+
+#ifdef WL_SCHED_SCAN
+extern int wl_cfg80211_sched_scan_start(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_sched_scan_request *request);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 11, 0))
+extern int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, u64 reqid);
+#else
+extern int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev);
+#endif /* LINUX_VER > 4.11 */
+#endif /* WL_SCHED_SCAN */
+extern void wl_notify_scan_done(struct bcm_cfg80211 *cfg, bool aborted);
+#endif /* _wl_cfgscan_h_ */
/*
* Linux cfg80211 Vendor Extension Code
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfgvendor.c 769910 2018-06-28 12:19:16Z $
+ * $Id: wl_cfgvendor.c 825970 2019-06-18 05:28:31Z $
*/
/*
#include <linux/vmalloc.h>
#include <bcmutils.h>
-#include <bcmstdlib_s.h>
#include <bcmwifi_channels.h>
#include <bcmendian.h>
#include <ethernet.h>
char*
wl_get_kernel_timestamp(void)
{
- static char buf[16];
+ static char buf[32];
u64 ts_nsec;
unsigned long rem_nsec;
goto exit;
}
- nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET, MAX_FEATURE_SET_CONCURRRENT_GROUPS);
+ err = nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET,
+ MAX_FEATURE_SET_CONCURRRENT_GROUPS);
+ if (unlikely(err)) {
+ kfree_skb(skb);
+ goto exit;
+ }
for (i = 0; i < MAX_FEATURE_SET_CONCURRRENT_GROUPS; i++) {
reply = dhd_dev_get_feature_set_matrix(bcmcfg_to_prmry_ndev(cfg), i);
if (reply != WIFI_FEATURE_INVALID) {
- nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_FEATURE_SET, reply);
+ err = nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_FEATURE_SET,
+ reply);
+ if (unlikely(err)) {
+ kfree_skb(skb);
+ goto exit;
+ }
}
}
wl_cfgvendor_set_rand_mac_oui(struct wiphy *wiphy,
struct wireless_dev *wdev, const void *data, int len)
{
- int err = 0;
+ int err = -EINVAL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
int type;
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ goto exit;
+ }
+
+ if (len <= 0) {
+ WL_ERR(("invalid len %d\n", len));
+ goto exit;
+ }
+
type = nla_type(data);
if (type == ANDR_WIFI_ATTRIBUTE_RANDOM_MAC_OUI) {
if (nla_len(data) != DOT11_OUI_LEN) {
WL_ERR(("nla_len not matched.\n"));
- err = -EINVAL;
goto exit;
}
err = dhd_dev_cfg_rand_mac_oui(bcmcfg_to_prmry_ndev(cfg), nla_data(data));
if (unlikely(err))
WL_ERR(("Bad OUI, could not set:%d \n", err));
-
- } else {
- err = -EINVAL;
}
exit:
return err;
wl_cfgvendor_set_nodfs_flag(struct wiphy *wiphy,
struct wireless_dev *wdev, const void *data, int len)
{
- int err = 0;
+ int err = -EINVAL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
int type;
u32 nodfs;
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ return -EINVAL;
+ }
+
+ if (len <= 0) {
+ WL_ERR(("invalid len %d\n", len));
+ return -EINVAL;
+ }
+
type = nla_type(data);
if (type == ANDR_WIFI_ATTRIBUTE_NODFS_SET) {
nodfs = nla_get_u32(data);
err = dhd_dev_set_nodfs(bcmcfg_to_prmry_ndev(cfg), nodfs);
- } else {
- err = -1;
}
+
return err;
}
#endif /* CUSTOM_FORCE_NODFS_FLAG */
}
err = wl_cfgvendor_send_cmd_reply(wiphy, reply, reply_len);
-
if (unlikely(err)) {
WL_ERR(("Vendor Command reply failed ret:%d \n", err));
}
}
results = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg),
DHD_PNO_GET_BATCH_RESULTS, NULL, &reply_len);
+
if (!results) {
WL_ERR(("No results to send %d\n", err));
err = wl_cfgvendor_send_cmd_reply(wiphy, results, 0);
iter = results;
complete_flag = nla_reserve(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE,
sizeof(is_done));
+
+ if (unlikely(!complete_flag)) {
+ WL_ERR(("complete_flag could not be reserved"));
+ kfree_skb(skb);
+ dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
+ return -ENOMEM;
+ }
mem_needed = mem_needed - (SCAN_RESULTS_COMPLETE_FLAG_LEN + VENDOR_REPLY_OVERHEAD);
while (iter) {
is_done = 0;
break;
}
- nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_ID, iter->scan_id);
- nla_put_u8(skb, GSCAN_ATTRIBUTE_SCAN_FLAGS, iter->flag);
- nla_put_u32(skb, GSCAN_ATTRIBUTE_CH_BUCKET_BITMASK, iter->scan_ch_bucket);
-
+ err = nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_ID, iter->scan_id);
+ if (unlikely(err)) {
+ goto fail;
+ }
+ err = nla_put_u8(skb, GSCAN_ATTRIBUTE_SCAN_FLAGS, iter->flag);
+ if (unlikely(err)) {
+ goto fail;
+ }
+ err = nla_put_u32(skb, GSCAN_ATTRIBUTE_CH_BUCKET_BITMASK, iter->scan_ch_bucket);
+ if (unlikely(err)) {
+ goto fail;
+ }
num_results_iter = iter->tot_count - iter->tot_consumed;
- nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_OF_RESULTS, num_results_iter);
+ err = nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_OF_RESULTS, num_results_iter);
+ if (unlikely(err)) {
+ goto fail;
+ }
if (num_results_iter) {
ptr = &iter->results[iter->tot_consumed];
- iter->tot_consumed += num_results_iter;
- nla_put(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS,
+ err = nla_put(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS,
num_results_iter * sizeof(wifi_gscan_result_t), ptr);
+ if (unlikely(err)) {
+ goto fail;
+ }
+ iter->tot_consumed += num_results_iter;
}
nla_nest_end(skb, scan_hdr);
mem_needed -= GSCAN_BATCH_RESULT_HDR_LEN +
(num_results_iter * sizeof(wifi_gscan_result_t));
iter = iter->next;
}
- MFREE(cfg->osh, results, reply_len);
- /* Returns TRUE if all result consumed */
+ /* Cleans up consumed results and returns TRUE if all results are consumed */
is_done = dhd_dev_gscan_batch_cache_cleanup(bcmcfg_to_prmry_ndev(cfg));
memcpy(nla_data(complete_flag), &is_done, sizeof(is_done));
dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
return cfg80211_vendor_cmd_reply(skb);
+fail:
+ /* Free up consumed results which will now not be sent */
+ (void)dhd_dev_gscan_batch_cache_cleanup(bcmcfg_to_prmry_ndev(cfg));
+ kfree_skb(skb);
+ dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
+ return err;
}
static int
int type;
bool real_time = FALSE;
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ return -EINVAL;
+ }
+
+ if (len <= 0) {
+ WL_ERR(("invalid len %d\n", len));
+ return -EINVAL;
+ }
+
type = nla_type(data);
if (type == GSCAN_ATTRIBUTE_ENABLE_FULL_SCAN_RESULTS) {
uint16 *reply = NULL;
uint32 reply_len = 0, num_channels, mem_needed;
struct sk_buff *skb;
+ dhd_pub_t *dhdp;
+ struct net_device *ndev = wdev->netdev;
- type = nla_type(data);
+ if (!ndev) {
+ WL_ERR(("ndev null\n"));
+ return -EINVAL;
+ }
+
+ dhdp = wl_cfg80211_get_dhdp(ndev);
+ if (!dhdp) {
+ WL_ERR(("dhdp null\n"));
+ return -EINVAL;
+ }
+
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ return -EINVAL;
+ }
+
+ if (len <= 0) {
+ WL_ERR(("invalid len %d\n", len));
+ return -EINVAL;
+ }
+ type = nla_type(data);
if (type == GSCAN_ATTRIBUTE_BAND) {
band = nla_get_u32(data);
} else {
return -EINVAL;
}
- reply = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg),
+ reply = dhd_pno_get_gscan(dhdp,
DHD_PNO_GET_CHANNEL_LIST, &band, &reply_len);
-
if (!reply) {
WL_ERR(("Could not get channel list\n"));
err = -EINVAL;
}
#endif /* RSSI_MONITOR_SUPPORT */
-#ifdef DHDTCPACK_SUPPRESS
-static int
-wl_cfgvendor_set_tcpack_sup_mode(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
-{
- int err = BCME_OK, type;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct net_device *ndev = wdev_to_wlc_ndev(wdev, cfg);
- uint8 enable = 0;
-
- if (len <= 0) {
- WL_ERR(("Length of the nlattr is not valid len : %d\n", len));
- err = BCME_BADARG;
- goto exit;
- }
-
- type = nla_type(data);
- if (type == ANDR_WIFI_ATTRIBUTE_TCPACK_SUP_VALUE) {
- enable = (uint8)nla_get_u32(data);
- err = dhd_dev_set_tcpack_sup_mode_cfg(ndev, enable);
- if (unlikely(err)) {
- WL_ERR(("Could not set TCP Ack Suppress mode cfg: %d\n", err));
- }
- } else {
- err = BCME_BADARG;
- }
-
-exit:
- return err;
-}
-#endif /* DHDTCPACK_SUPPRESS */
-
#ifdef DHD_WAKE_STATUS
static int
wl_cfgvendor_get_wake_reason_stats(struct wiphy *wiphy,
#if defined(DHD_DEBUG) && defined(DHD_WAKE_EVENT_STATUS)
int flowid;
#endif /* DHD_DEBUG && DHD_WAKE_EVENT_STATUS */
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
WL_DBG(("Recv get wake status info cmd.\n"));
}
#ifdef DHD_WAKE_EVENT_STATUS
WL_ERR(("pwake_count_info->rcwake %d\n", pwake_count_info->rcwake));
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_TOTAL_CMD_EVENT, pwake_count_info->rcwake);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_CMD_EVENT_COUNT_USED, WLC_E_LAST);
- nla_put(skb, WAKE_STAT_ATTRIBUTE_CMD_EVENT_WAKE, (WLC_E_LAST * sizeof(uint)),
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_TOTAL_CMD_EVENT, pwake_count_info->rcwake);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total count of CMD event, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_CMD_EVENT_COUNT_USED, WLC_E_LAST);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Max count of event used, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put(skb, WAKE_STAT_ATTRIBUTE_CMD_EVENT_WAKE, (WLC_E_LAST * sizeof(uint)),
pwake_count_info->rc_event);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Event wake data, ret=%d\n", ret));
+ goto exit;
+ }
#ifdef DHD_DEBUG
for (flowid = 0; flowid < WLC_E_LAST; flowid++) {
if (pwake_count_info->rc_event[flowid] != 0) {
#endif /* DHD_WAKE_EVENT_STATUS */
#ifdef DHD_WAKE_RX_STATUS
WL_ERR(("pwake_count_info->rxwake %d\n", pwake_count_info->rxwake));
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_TOTAL_RX_DATA_WAKE, pwake_count_info->rxwake);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_UNICAST_COUNT, pwake_count_info->rx_ucast);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_MULTICAST_COUNT, pwake_count_info->rx_mcast);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_BROADCAST_COUNT, pwake_count_info->rx_bcast);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP_PKT, pwake_count_info->rx_arp);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_PKT, pwake_count_info->rx_icmpv6);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_RA, pwake_count_info->rx_icmpv6_ra);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_NA, pwake_count_info->rx_icmpv6_na);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_NS, pwake_count_info->rx_icmpv6_ns);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_IPV4_RX_MULTICAST_ADD_CNT,
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_TOTAL_RX_DATA_WAKE, pwake_count_info->rxwake);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total Wake due RX data, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_UNICAST_COUNT, pwake_count_info->rx_ucast);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to RX unicast, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_MULTICAST_COUNT, pwake_count_info->rx_mcast);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due RX multicast, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_BROADCAST_COUNT, pwake_count_info->rx_bcast);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to RX broadcast, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP_PKT, pwake_count_info->rx_arp);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to ICMP pkt, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_PKT, pwake_count_info->rx_icmpv6);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due ICMPV6 pkt, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_RA, pwake_count_info->rx_icmpv6_ra);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to ICMPV6_RA, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_NA, pwake_count_info->rx_icmpv6_na);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to ICMPV6_NA, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_NS, pwake_count_info->rx_icmpv6_ns);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to ICMPV6_NS, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_IPV4_RX_MULTICAST_ADD_CNT,
pwake_count_info->rx_multi_ipv4);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_IPV6_RX_MULTICAST_ADD_CNT,
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to RX IPV4 MULTICAST, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_IPV6_RX_MULTICAST_ADD_CNT,
pwake_count_info->rx_multi_ipv6);
- nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_OTHER_RX_MULTICAST_ADD_CNT,
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to RX IPV6 MULTICAST, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_OTHER_RX_MULTICAST_ADD_CNT,
pwake_count_info->rx_multi_other);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put Total wake due to Other RX Multicast, ret=%d\n", ret));
+ goto exit;
+ }
#endif /* #ifdef DHD_WAKE_RX_STATUS */
ret = cfg80211_vendor_cmd_reply(skb);
if (unlikely(ret)) {
WL_ERR(("Vendor cmd reply for -get wake status failed:%d \n", ret));
}
+ /* On cfg80211_vendor_cmd_reply() skb is consumed and freed in case of success or failure */
+ return ret;
+
exit:
+ /* Free skb memory */
+ if (skb) {
+ kfree_skb(skb);
+ }
return ret;
}
#endif /* DHD_WAKE_STATUS */
+#ifdef DHDTCPACK_SUPPRESS
+static int
+wl_cfgvendor_set_tcpack_sup_mode(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = BCME_OK, type;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *ndev = wdev_to_wlc_ndev(wdev, cfg);
+ uint8 enable = 0;
+
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ if (len <= 0) {
+ WL_ERR(("Length of the nlattr is not valid len : %d\n", len));
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ type = nla_type(data);
+ if (type == ANDR_WIFI_ATTRIBUTE_TCPACK_SUP_VALUE) {
+ enable = (uint8) nla_get_u32(data);
+ err = dhd_dev_set_tcpack_sup_mode_cfg(ndev, enable);
+ if (unlikely(err)) {
+ WL_ERR(("Could not set TCP Ack Suppress mode cfg: %d\n", err));
+ }
+ } else {
+ err = BCME_BADARG;
+ }
+
+exit:
+ return err;
+}
+#endif /* DHDTCPACK_SUPPRESS */
+
+#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
+static int
+wl_cfgvendor_notify_dump_completion(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhd_pub = cfg->pub;
+ unsigned long flags = 0;
+
+ WL_INFORM(("%s, [DUMP] received file dump notification from HAL\n", __FUNCTION__));
+
+ DHD_GENERAL_LOCK(dhd_pub, flags);
+ /* call wmb() to synchronize with the previous memory operations */
+ OSL_SMP_WMB();
+ DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhd_pub);
+ /* Call another wmb() to make sure wait_for_dump_completion value
+ * gets updated before waking up waiting context.
+ */
+ OSL_SMP_WMB();
+ dhd_os_busbusy_wake(dhd_pub);
+ DHD_GENERAL_UNLOCK(dhd_pub, flags);
+
+ return BCME_OK;
+}
+#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
+
+#if defined(WL_CFG80211)
+static int
+wl_cfgvendor_set_hal_started(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ WL_INFORM(("%s,[DUMP] HAL STARTED\n", __FUNCTION__));
+
+ cfg->hal_started = true;
+ return BCME_OK;
+}
+
+static int
+wl_cfgvendor_stop_hal(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ WL_INFORM(("%s,[DUMP] HAL STOPPED\n", __FUNCTION__));
+
+ cfg->hal_started = false;
+ return BCME_OK;
+}
+#endif /* WL_CFG80211 */
+
#ifdef RTT_SUPPORT
void
wl_cfgvendor_rtt_evt(void *ctx, void *rtt_data)
{
struct wireless_dev *wdev = (struct wireless_dev *)ctx;
struct wiphy *wiphy;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
uint32 evt_complete = 0;
gfp_t kflags;
rtt_result_t *rtt_result;
rtt_results_header_t *rtt_header;
struct list_head *rtt_cache_list;
struct nlattr *rtt_nl_hdr;
+ int ret = BCME_OK;
wiphy = wdev->wiphy;
WL_DBG(("In\n"));
return;
}
evt_complete = 1;
- nla_put_u32(skb, RTT_ATTRIBUTE_RESULTS_COMPLETE, evt_complete);
+ ret = nla_put_u32(skb, RTT_ATTRIBUTE_RESULTS_COMPLETE, evt_complete);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULTS_COMPLETE\n"));
+ goto free_mem;
+ }
cfg80211_vendor_event(skb, kflags);
return;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
list_for_each_entry(rtt_header, rtt_cache_list, list) {
/* Alloc the SKB for vendor_event */
#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
if (list_is_last(&rtt_header->list, rtt_cache_list)) {
evt_complete = 1;
}
- nla_put_u32(skb, RTT_ATTRIBUTE_RESULTS_COMPLETE, evt_complete);
+ ret = nla_put_u32(skb, RTT_ATTRIBUTE_RESULTS_COMPLETE, evt_complete);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULTS_COMPLETE\n"));
+ goto free_mem;
+ }
rtt_nl_hdr = nla_nest_start(skb, RTT_ATTRIBUTE_RESULTS_PER_TARGET);
if (!rtt_nl_hdr) {
WL_ERR(("rtt_nl_hdr is NULL\n"));
+ dev_kfree_skb_any(skb);
break;
}
- nla_put(skb, RTT_ATTRIBUTE_TARGET_MAC, ETHER_ADDR_LEN, &rtt_header->peer_mac);
- nla_put_u32(skb, RTT_ATTRIBUTE_RESULT_CNT, rtt_header->result_cnt);
+ ret = nla_put(skb, RTT_ATTRIBUTE_TARGET_MAC, ETHER_ADDR_LEN,
+ &rtt_header->peer_mac);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_TARGET_MAC, ret:%d\n", ret));
+ goto free_mem;
+ }
+ ret = nla_put_u32(skb, RTT_ATTRIBUTE_RESULT_CNT, rtt_header->result_cnt);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULT_CNT, ret:%d\n", ret));
+ goto free_mem;
+ }
list_for_each_entry(rtt_result, &rtt_header->result_list, list) {
- nla_put(skb, RTT_ATTRIBUTE_RESULT,
+ ret = nla_put(skb, RTT_ATTRIBUTE_RESULT,
rtt_result->report_len, &rtt_result->report);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULT, ret:%d\n", ret));
+ goto free_mem;
+ }
+ ret = nla_put(skb, RTT_ATTRIBUTE_RESULT_DETAIL,
+ rtt_result->detail_len, &rtt_result->rtt_detail);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULT_DETAIL, ret:%d\n",
+ ret));
+ goto free_mem;
+ }
}
nla_nest_end(skb, rtt_nl_hdr);
cfg80211_vendor_event(skb, kflags);
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
+
+ return;
+
+free_mem:
+ /* Free skb memory */
+ if (skb) {
+ kfree_skb(skb);
+ }
}
static int
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
rtt_capabilities_t capability;
- memset(&rtt_param, 0, sizeof(rtt_param));
+ bzero(&rtt_param, sizeof(rtt_param));
WL_DBG(("In\n"));
err = dhd_dev_rtt_register_noti_callback(wdev->netdev, wdev, wl_cfgvendor_rtt_evt);
type = nla_type(iter);
switch (type) {
case RTT_ATTRIBUTE_TARGET_CNT:
+ if (target_cnt != 0) {
+ WL_ERR(("attempt to overwrite target_cnt"));
+ err = -EINVAL;
+ goto exit;
+ }
target_cnt = nla_get_u8(iter);
if ((target_cnt <= 0) || (target_cnt > RTT_MAX_TARGET_CNT)) {
WL_ERR(("target_cnt is not valid : %d\n",
}
rtt_target = rtt_param.target_info;
nla_for_each_nested(iter1, iter, rem1) {
+ if ((uint8 *)rtt_target >= ((uint8 *)rtt_param.target_info +
+ TARGET_INFO_SIZE(target_cnt))) {
+ WL_ERR(("rtt_target increased over its max size"));
+ err = -EINVAL;
+ goto exit;
+ }
nla_for_each_nested(iter2, iter1, rem2) {
type = nla_type(iter2);
switch (type) {
case RTT_ATTRIBUTE_TARGET_MAC:
+ if (nla_len(iter2) != ETHER_ADDR_LEN) {
+ WL_ERR(("mac_addr length not match\n"));
+ err = -EINVAL;
+ goto exit;
+ }
memcpy(&rtt_target->addr, nla_data(iter2),
ETHER_ADDR_LEN);
break;
if (rtt_param.target_info) {
MFREE(cfg->osh, rtt_param.target_info,
TARGET_INFO_SIZE(target_cnt));
- rtt_param.target_info = NULL;
}
return err;
}
}
}
cancel:
- if (dhd_dev_rtt_cancel_cfg(bcmcfg_to_prmry_ndev(cfg), mac_list, target_cnt) < 0) {
+ if (mac_list && dhd_dev_rtt_cancel_cfg(
+ bcmcfg_to_prmry_ndev(cfg), mac_list, target_cnt) < 0) {
WL_ERR(("Could not cancel RTT configuration\n"));
err = -EINVAL;
}
WL_DBG(("Recv -get_avail_ch command \n"));
- memset(&responder_info, 0, sizeof(responder_info));
+ bzero(&responder_info, sizeof(responder_info));
err = get_responder_info(cfg, &responder_info);
if (unlikely(err)) {
WL_ERR(("Failed to get responder info:%d \n", err));
WL_DBG(("Recv rtt -enable_resp cmd.\n"));
- memset(&responder_info, 0, sizeof(responder_info));
+ bzero(&responder_info, sizeof(responder_info));
/*
*Passing channel as NULL until implementation
int type;
uint32 lazy_roam_enable_flag;
- type = nla_type(data);
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ return -EINVAL;
+ }
+
+ if (len <= 0) {
+		WL_ERR(("invalid len %d\n", len));
+ return -EINVAL;
+ }
+
+ type = nla_type(data);
if (type == GSCAN_ATTRIBUTE_LAZY_ROAM_ENABLE) {
lazy_roam_enable_flag = nla_get_u32(data);
if (unlikely(err))
WL_ERR(("Could not enable lazy roam:%d \n", err));
}
+
return err;
}
wlc_roam_exp_params_t roam_param;
const struct nlattr *iter;
- memset(&roam_param, 0, sizeof(roam_param));
+ bzero(&roam_param, sizeof(roam_param));
nla_for_each_attr(iter, data, len, tmp) {
type = nla_type(iter);
WL_ERR(("not matching ssid count:%d to expected:%d\n",
ssid_whitelist->ssid_count, num));
err = -EINVAL;
+ goto exit;
}
err = dhd_dev_set_whitelist_ssid(bcmcfg_to_prmry_ndev(cfg),
ssid_whitelist, mem_needed, flush);
+ if (err == BCME_UNSUPPORTED) {
+ /* If firmware doesn't support feature, ignore the error
+ * Android framework doesn't populate/use whitelist ssids
+ * as of now, but invokes whitelist as part of roam config
+ * API. so this handler cannot be compiled out. but its
+ * safe to ignore.
+ */
+		WL_ERR(("whitelist ssid not supported. Ignore."));
+ err = BCME_OK;
+ }
exit:
MFREE(cfg->osh, ssid_whitelist, mem_needed);
return err;
int type;
int err = 0;
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ return -EINVAL;
+ }
+
+ if (len <= 0) {
+ WL_ERR(("invalid len %d\n", len));
+ return -EINVAL;
+ }
+
/* Get the requested fw roaming state */
type = nla_type(data);
if (type != GSCAN_ATTRIBUTE_ROAM_STATE_SET) {
struct sk_buff *reply;
dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(wdev->netdev);
- WL_DBG(("entry: cmd = %d\n", nlioc->cmd));
-
/* send to dongle only if we are not waiting for reload already */
if (dhdp && dhdp->hang_was_sent) {
WL_INFORM(("Bus down. HANG was sent up earlier\n"));
return OSL_ERROR(BCME_DONGLE_DOWN);
}
+ if (!data) {
+ WL_ERR(("data is not available\n"));
+ return BCME_BADARG;
+ }
+
+ if (len <= 0) {
+ WL_ERR(("invalid len %d\n", len));
+ return BCME_BADARG;
+ }
+
+ WL_DBG(("entry: cmd = %d\n", nlioc->cmd));
+
+ if (nlioc->offset != sizeof(struct bcm_nlmsg_hdr) ||
+ len <= sizeof(struct bcm_nlmsg_hdr)) {
+ WL_ERR(("invalid offset %d\n", nlioc->offset));
+ return BCME_BADARG;
+ }
len -= sizeof(struct bcm_nlmsg_hdr);
ret_len = nlioc->len;
if (ret_len > 0 || len > 0) {
- if (len > DHD_IOCTL_MAXLEN) {
+ if (len >= DHD_IOCTL_MAXLEN) {
WL_ERR(("oversize input buffer %d\n", len));
- len = DHD_IOCTL_MAXLEN;
+ len = DHD_IOCTL_MAXLEN - 1;
}
- if (ret_len > DHD_IOCTL_MAXLEN) {
+ if (ret_len >= DHD_IOCTL_MAXLEN) {
WL_ERR(("oversize return buffer %d\n", ret_len));
- ret_len = DHD_IOCTL_MAXLEN;
+ ret_len = DHD_IOCTL_MAXLEN - 1;
}
+
payload = max(ret_len, len) + 1;
buf = vzalloc(payload);
if (!buf) {
return -ENOMEM;
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
memcpy(buf, (void *)((char *)nlioc + nlioc->offset), len);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
*((char *)buf + len) = '\0';
}
}
cur = buf;
while (ret_len > 0) {
- msglen = nlioc->len > maxmsglen ? maxmsglen : ret_len;
+ msglen = ret_len > maxmsglen ? maxmsglen : ret_len;
ret_len -= msglen;
payload = msglen + sizeof(msglen);
reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload);
struct net_device *
wl_cfgvendor_get_ndev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev,
- const void *data, unsigned long int *out_addr)
+ const char *data, unsigned long int *out_addr)
{
char *pos, *pos1;
char ifname[IFNAMSIZ + 1] = {0};
struct net_info *iter, *next;
struct net_device *ndev = NULL;
+ ulong ifname_len;
*out_addr = (unsigned long int) data; /* point to command str by default */
/* check whether ifname=<ifname> is provided in the command */
WL_ERR(("command format error \n"));
return NULL;
}
- memcpy(ifname, pos, (pos1 - pos));
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+
+ ifname_len = pos1 - pos;
+ if (memcpy_s(ifname, (sizeof(ifname) - 1), pos, ifname_len) != BCME_OK) {
+ WL_ERR(("Failed to copy data. len: %ld\n", ifname_len));
+ return NULL;
+ }
+ GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
if (iter->ndev) {
if (strncmp(iter->ndev->name, ifname,
}
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ GCC_DIAGNOSTIC_POP();
WL_ERR(("Couldn't find ifname:%s in the netinfo list \n",
ifname));
return NULL;
return ndev;
}
-/* Max length for the reply buffer. For BRCM_ATTR_DRIVER_CMD, the reply
- * would be a formatted string and reply buf would be the size of the
- * string.
- */
-#define WL_DRIVER_PRIV_CMD_LEN 512
-
#ifdef WL_SAE
static int
wl_cfgvendor_set_sae_password(struct wiphy *wiphy,
wsec_pmk_t pmk;
s32 bssidx;
+ /* clear the content of pmk structure before usage */
+ (void)memset_s(&pmk, sizeof(wsec_pmk_t), 0x0, sizeof(wsec_pmk_t));
+
if ((bssidx = wl_get_bssidx_by_wdev(cfg, net->ieee80211_ptr)) < 0) {
WL_ERR(("Find p2p index from wdev(%p) failed\n", net->ieee80211_ptr));
return BCME_ERROR;
}
- if (len < WSEC_MIN_PSK_LEN || len >= WSEC_MAX_PSK_LEN) {
- WL_ERR(("Invalid passphrase length %d..should be >=8 and <=63\n",
+ if ((len < WSEC_MIN_PSK_LEN) || (len >= WSEC_MAX_PASSPHRASE_LEN)) {
+ WL_ERR(("Invalid passphrase length %d..should be >= 8 and < 256\n",
len));
err = BCME_BADLEN;
goto done;
goto done;
}
pmk.key_len = htod16(len);
- bcopy((u8*)data, pmk.key, len);
+ bcopy((const u8*)data, pmk.key, len);
pmk.flags = htod16(WSEC_PASSPHRASE);
err = wldev_ioctl_set(net, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
if (err) {
WL_ERR(("\n failed to set pmk %d\n", err));
goto done;
+ } else {
+ WL_MEM(("sae passphrase set successfully\n"));
}
done:
return err;
}
#endif /* WL_SAE */
+#ifdef BCM_PRIV_CMD_SUPPORT
+/* strlen("ifname=") + IFNAMSIZ + strlen(" ") + '\0' */
+#define ANDROID_PRIV_CMD_IF_PREFIX_LEN (7 + IFNAMSIZ + 2)
+/* Max length for the reply buffer. For BRCM_ATTR_DRIVER_CMD, the reply
+ * would be a formatted string and reply buf would be the size of the
+ * string.
+ */
+#define WL_DRIVER_PRIV_CMD_LEN 512
static int
wl_cfgvendor_priv_bcm_handler(struct wiphy *wiphy,
struct wireless_dev *wdev, const void *data, int len)
int err = 0;
int data_len = 0, cmd_len = 0, tmp = 0, type = 0;
struct net_device *ndev = wdev->netdev;
- char *reply_buf = NULL;
char *cmd = NULL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
int bytes_written;
struct net_device *net = NULL;
unsigned long int cmd_out = 0;
- u32 reply_len = WL_DRIVER_PRIV_CMD_LEN;
+#if defined(WL_ANDROID_PRIV_CMD_OVER_NL80211)
+ u32 cmd_buf_len = WL_DRIVER_PRIV_CMD_LEN;
+ char cmd_prefix[ANDROID_PRIV_CMD_IF_PREFIX_LEN + 1] = {0};
+ char *cmd_buf = NULL;
+ char *current_pos;
+ u32 cmd_offset;
+#endif /* WL_ANDROID_PRIV_CMD_OVER_NL80211 && OEM_ANDROID */
WL_DBG(("%s: Enter \n", __func__));
goto exit;
}
+#if defined(WL_ANDROID_PRIV_CMD_OVER_NL80211)
if (type == BRCM_ATTR_DRIVER_CMD) {
- if (cmd_len >= WL_DRIVER_PRIV_CMD_LEN) {
- WL_ERR(("Unexpected command length. Ignore the command\n"));
+ if ((cmd_len >= WL_DRIVER_PRIV_CMD_LEN) ||
+ (cmd_len < ANDROID_PRIV_CMD_IF_PREFIX_LEN)) {
+ WL_ERR(("Unexpected command length (%u)."
+ "Ignore the command\n", cmd_len));
err = -EINVAL;
goto exit;
}
- net = wl_cfgvendor_get_ndev(cfg, wdev, cmd, &cmd_out);
+
+ /* check whether there is any ifname prefix provided */
+ if (memcpy_s(cmd_prefix, (sizeof(cmd_prefix) - 1),
+ cmd, ANDROID_PRIV_CMD_IF_PREFIX_LEN) != BCME_OK) {
+ WL_ERR(("memcpy failed for cmd buffer. len:%d\n", cmd_len));
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ net = wl_cfgvendor_get_ndev(cfg, wdev, cmd_prefix, &cmd_out);
if (!cmd_out || !net) {
+ WL_ERR(("ndev not found\n"));
err = -ENODEV;
goto exit;
}
- cmd = (char *)cmd_out;
- reply_buf = (char *)MALLOCZ(cfg->osh, reply_len);
- if (!reply_buf) {
- WL_ERR(("memory alloc failed for %u \n", cmd_len));
+
+ /* find offset of the command */
+ current_pos = (char *)cmd_out;
+ cmd_offset = current_pos - cmd_prefix;
+
+ if (!current_pos || (cmd_offset) > ANDROID_PRIV_CMD_IF_PREFIX_LEN) {
+ WL_ERR(("Invalid len cmd_offset: %u \n", cmd_offset));
+ err = -EINVAL;
+ goto exit;
+ }
+
+	/* Private command data is expected to be in str format. To ensure that
+ * the data is null terminated, copy to a local buffer before use
+ */
+ cmd_buf = (char *)MALLOCZ(cfg->osh, cmd_buf_len);
+ if (!cmd_buf) {
+ WL_ERR(("memory alloc failed for %u \n", cmd_buf_len));
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* Point to the start of command */
+ if (memcpy_s(cmd_buf, (WL_DRIVER_PRIV_CMD_LEN - 1),
+ (const void *)(cmd + cmd_offset),
+ (cmd_len - cmd_offset - 1)) != BCME_OK) {
+ WL_ERR(("memcpy failed for cmd buffer. len:%d\n", cmd_len));
err = -ENOMEM;
goto exit;
}
- memcpy(reply_buf, cmd, cmd_len);
- WL_DBG(("vendor_command: %s len: %u \n", cmd, cmd_len));
- bytes_written = wl_handle_private_cmd(net, reply_buf, reply_len);
+ cmd_buf[WL_DRIVER_PRIV_CMD_LEN - 1] = '\0';
+
+ WL_DBG(("vendor_command: %s len: %u \n", cmd_buf, cmd_buf_len));
+ bytes_written = wl_handle_private_cmd(net, cmd_buf, cmd_buf_len);
WL_DBG(("bytes_written: %d \n", bytes_written));
if (bytes_written == 0) {
- snprintf(reply_buf, reply_len, "%s", "OK");
- data_len = strlen("OK");
+ snprintf(cmd_buf, cmd_buf_len, "%s", "OK");
+ data_len = sizeof("OK");
} else if (bytes_written > 0) {
- data_len = bytes_written > reply_len ?
- reply_len : bytes_written;
+ if (bytes_written >= (cmd_buf_len - 1)) {
+ /* Not expected */
+ ASSERT(0);
+ err = -EINVAL;
+ goto exit;
+ }
+ data_len = bytes_written;
} else {
/* -ve return value. Propagate the error back */
err = bytes_written;
goto exit;
}
+ if ((data_len > 0) && (data_len < (cmd_buf_len - 1)) && cmd_buf) {
+ err = wl_cfgvendor_send_cmd_reply(wiphy, cmd_buf, data_len);
+ if (unlikely(err)) {
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+ } else {
+ WL_DBG(("Vendor Command reply sent successfully!\n"));
+ }
+ } else {
+ /* No data to be sent back as reply */
+ WL_ERR(("Vendor_cmd: No reply expected. data_len:%u cmd_buf %p \n",
+ data_len, cmd_buf));
+ }
break;
}
- }
-
- if ((data_len > 0) && reply_buf) {
- err = wl_cfgvendor_send_cmd_reply(wiphy, reply_buf, data_len+1);
- if (unlikely(err))
- WL_ERR(("Vendor Command reply failed ret:%d \n", err));
- else
- WL_DBG(("Vendor Command reply sent successfully!\n"));
- } else {
- /* No data to be sent back as reply */
- WL_ERR(("Vendor_cmd: No reply expected. data_len:%u reply_buf %p \n",
- data_len, reply_buf));
+#endif /* WL_ANDROID_PRIV_CMD_OVER_NL80211 && OEM_ANDROID */
}
exit:
- if (reply_buf) {
- MFREE(cfg->osh, reply_buf, reply_len);
+#if defined(WL_ANDROID_PRIV_CMD_OVER_NL80211)
+ if (cmd_buf) {
+ MFREE(cfg->osh, cmd_buf, cmd_buf_len);
}
+#endif /* WL_ANDROID_PRIV_CMD_OVER_NL80211 && OEM_ANDROID */
net_os_wake_unlock(ndev);
return err;
}
+#endif /* BCM_PRIV_CMD_SUPPORT */
#ifdef WL_NAN
static const char *nan_attr_to_str(u16 cmd)
switch (attr_type) {
case NAN_ATTRIBUTE_MAC_ADDR:
- memcpy((char*)&cmd_data->mac_addr, (char*)nla_data(iter),
- ETHER_ADDR_LEN);
+ ret = memcpy_s((char*)&cmd_data->mac_addr, ETHER_ADDR_LEN,
+ (char*)nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy mac addr\n"));
+ return ret;
+ }
break;
case NAN_ATTRIBUTE_PUBLISH_ID:
cmd_data->pub_id = nla_get_u16(iter);
ret = -EINVAL;
goto exit;
}
- memcpy((char*)&cmd_data->peer_disc_mac_addr,
- (char*)nla_data(iter), ETHER_ADDR_LEN);
+ ret = memcpy_s((char*)&cmd_data->peer_disc_mac_addr,
+ ETHER_ADDR_LEN, (char*)nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy peer_disc_mac_addr\n"));
+ goto exit;
+ }
break;
case NAN_ATTRIBUTE_PEER_NDI_MAC_ADDR:
if (nla_len(iter) != ETHER_ADDR_LEN) {
ret = -EINVAL;
goto exit;
}
- memcpy((char*)&cmd_data->peer_ndi_mac_addr,
- (char*)nla_data(iter), ETHER_ADDR_LEN);
+ ret = memcpy_s((char*)&cmd_data->peer_ndi_mac_addr,
+ ETHER_ADDR_LEN, (char*)nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy peer_ndi_mac_addr\n"));
+ goto exit;
+ }
break;
case NAN_ATTRIBUTE_MAC_ADDR:
if (nla_len(iter) != ETHER_ADDR_LEN) {
ret = -EINVAL;
goto exit;
}
- memcpy((char*)&cmd_data->mac_addr, (char*)nla_data(iter),
- ETHER_ADDR_LEN);
+ ret = memcpy_s((char*)&cmd_data->mac_addr, ETHER_ADDR_LEN,
+ (char*)nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy mac_addr\n"));
+ goto exit;
+ }
break;
case NAN_ATTRIBUTE_IF_ADDR:
if (nla_len(iter) != ETHER_ADDR_LEN) {
ret = -EINVAL;
goto exit;
}
- memcpy((char*)&cmd_data->if_addr, (char*)nla_data(iter),
- ETHER_ADDR_LEN);
+ ret = memcpy_s((char*)&cmd_data->if_addr, ETHER_ADDR_LEN,
+ (char*)nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy if_addr\n"));
+ goto exit;
+ }
break;
case NAN_ATTRIBUTE_ENTRY_CONTROL:
if (nla_len(iter) != sizeof(uint8)) {
ret = -ENOMEM;
goto exit;
}
- memcpy(cmd_data->svc_hash.data, nla_data(iter),
- cmd_data->svc_hash.dlen);
+ ret = memcpy_s(cmd_data->svc_hash.data, cmd_data->svc_hash.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc hash data\n"));
+ goto exit;
+ }
break;
case NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO_LEN:
if (nla_len(iter) != sizeof(uint16)) {
}
cmd_data->svc_info.dlen = nla_get_u16(iter);
if (cmd_data->svc_info.dlen > MAX_APP_INFO_LEN) {
- WL_ERR(("Not allowed beyond :%d\n", MAX_APP_INFO_LEN));
+ WL_ERR_RLMT(("Not allowed beyond :%d\n", MAX_APP_INFO_LEN));
ret = -EINVAL;
goto exit;
}
ret = -ENOMEM;
goto exit;
}
- memcpy(cmd_data->svc_info.data,
- nla_data(iter), cmd_data->svc_info.dlen);
+ ret = memcpy_s(cmd_data->svc_info.data, cmd_data->svc_info.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc info\n"));
+ goto exit;
+ }
break;
case NAN_ATTRIBUTE_PUBLISH_ID:
if (nla_len(iter) != sizeof(uint32)) {
ret = -ENOMEM;
goto exit;
}
- memcpy(cmd_data->key.data, nla_data(iter),
- MIN(cmd_data->key.dlen, NAN_MAX_PMK_LEN));
+ ret = memcpy_s(cmd_data->key.data, NAN_MAX_PMK_LEN,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to key data\n"));
+ goto exit;
+ }
break;
default:
ret = -EINVAL;
goto exit;
}
- memcpy((char*)&cmd_data->mac_addr, (char*)nla_data(iter),
- ETHER_ADDR_LEN);
+ ret = memcpy_s((char*)&cmd_data->mac_addr, ETHER_ADDR_LEN,
+ (char*)nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy mac addr\n"));
+ return ret;
+ }
break;
case NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO_LEN:
if (nla_len(iter) != sizeof(uint16)) {
}
cmd_data->svc_info.dlen = nla_get_u16(iter);
if (cmd_data->svc_info.dlen > NAN_MAX_SERVICE_SPECIFIC_INFO_LEN) {
- WL_ERR(("Not allowed beyond :%d\n",
- NAN_MAX_SERVICE_SPECIFIC_INFO_LEN));
+ WL_ERR_RLMT(("Not allowed beyond :%d\n",
+ NAN_MAX_SERVICE_SPECIFIC_INFO_LEN));
ret = -EINVAL;
goto exit;
}
ret = -ENOMEM;
goto exit;
}
- memcpy(cmd_data->svc_info.data,
- nla_data(iter), cmd_data->svc_info.dlen);
+ ret = memcpy_s(cmd_data->svc_info.data, cmd_data->svc_info.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc info\n"));
+ return ret;
+ }
break;
case NAN_ATTRIBUTE_SUBSCRIBE_ID:
if (nla_len(iter) != sizeof(uint16)) {
ret = -ENOMEM;
goto exit;
}
- memcpy(cmd_data->svc_hash.data, nla_data(iter),
- cmd_data->svc_hash.dlen);
+ ret = memcpy_s(cmd_data->svc_hash.data, cmd_data->svc_hash.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy svc hash data\n"));
+ return ret;
+ }
break;
case NAN_ATTRIBUTE_PEER_ID:
if (nla_len(iter) != sizeof(uint32)) {
cmd_data->rx_match.dlen = nla_get_u16(iter);
if (cmd_data->rx_match.dlen > MAX_MATCH_FILTER_LEN) {
ret = -EINVAL;
- WL_ERR(("Not allowed beyond %d\n", MAX_MATCH_FILTER_LEN));
+ WL_ERR_RLMT(("Not allowed beyond %d\n", MAX_MATCH_FILTER_LEN));
goto exit;
}
break;
ret = -ENOMEM;
goto exit;
}
- memcpy(cmd_data->rx_match.data, nla_data(iter),
- cmd_data->rx_match.dlen);
+ ret = memcpy_s(cmd_data->rx_match.data, cmd_data->rx_match.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy rx match data\n"));
+ return ret;
+ }
break;
case NAN_ATTRIBUTE_TX_MATCH_FILTER_LEN:
if (nla_len(iter) != sizeof(uint16)) {
cmd_data->tx_match.dlen = nla_get_u16(iter);
if (cmd_data->tx_match.dlen > MAX_MATCH_FILTER_LEN) {
ret = -EINVAL;
- WL_ERR(("Not allowed beyond %d\n", MAX_MATCH_FILTER_LEN));
+ WL_ERR_RLMT(("Not allowed beyond %d\n", MAX_MATCH_FILTER_LEN));
goto exit;
}
break;
ret = -EINVAL;
goto exit;
}
- memcpy(cmd_data->tx_match.data, nla_data(iter),
- cmd_data->tx_match.dlen);
+ ret = memcpy_s(cmd_data->tx_match.data, cmd_data->tx_match.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy tx match data\n"));
+ return ret;
+ }
break;
case NAN_ATTRIBUTE_MAC_ADDR_LIST_NUM_ENTRIES:
if (nla_len(iter) != sizeof(uint16)) {
ret = -ENOMEM;
goto exit;
}
- memcpy(cmd_data->mac_list.list, nla_data(iter),
- (cmd_data->mac_list.num_mac_addr * ETHER_ADDR_LEN));
+ ret = memcpy_s(cmd_data->mac_list.list,
+ (cmd_data->mac_list.num_mac_addr * ETHER_ADDR_LEN),
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy list of mac addresses\n"));
+ return ret;
+ }
break;
case NAN_ATTRIBUTE_TX_TYPE:
if (nla_len(iter) != sizeof(uint8)) {
ret = -EINVAL;
goto exit;
}
+ cmd_data->sde_control_config = TRUE;
if (nla_get_u8(iter) == 1) {
cmd_data->sde_control_flag
|= NAN_SDE_CF_RANGING_REQUIRED;
ret = -ENOMEM;
goto exit;
}
- memcpy(cmd_data->key.data, nla_data(iter),
- MIN(cmd_data->key.dlen, NAN_MAX_PMK_LEN));
+ ret = memcpy_s(cmd_data->key.data, NAN_MAX_PMK_LEN,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to key data\n"));
+ return ret;
+ }
break;
case NAN_ATTRIBUTE_RSSI_THRESHOLD_FLAG:
if (nla_len(iter) != sizeof(uint8)) {
cmd_data->sde_svc_info.dlen = nla_get_u16(iter);
if (cmd_data->sde_svc_info.dlen > MAX_SDEA_SVC_INFO_LEN) {
ret = -EINVAL;
- WL_ERR(("Not allowed beyond %d\n", MAX_SDEA_SVC_INFO_LEN));
+ WL_ERR_RLMT(("Not allowed beyond %d\n", MAX_SDEA_SVC_INFO_LEN));
goto exit;
}
break;
ret = -ENOMEM;
goto exit;
}
- memcpy(cmd_data->sde_svc_info.data,
- nla_data(iter), cmd_data->sde_svc_info.dlen);
+ ret = memcpy_s(cmd_data->sde_svc_info.data,
+ cmd_data->sde_svc_info.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to sdea info data\n"));
+ return ret;
+ }
break;
case NAN_ATTRIBUTE_SECURITY:
if (nla_len(iter) != sizeof(uint8)) {
cmd_data->scid.dlen = nla_get_u32(iter);
if (cmd_data->scid.dlen > MAX_SCID_LEN) {
ret = -EINVAL;
- WL_ERR(("Not allowed beyond %d\n", MAX_SCID_LEN));
+ WL_ERR_RLMT(("Not allowed beyond %d\n", MAX_SCID_LEN));
goto exit;
}
WL_TRACE(("valid scid length = %u\n", cmd_data->scid.dlen));
ret = -EINVAL;
goto exit;
}
+
cmd_data->scid.data = MALLOCZ(cfg->osh, cmd_data->scid.dlen);
if (cmd_data->scid.data == NULL) {
WL_ERR(("failed to allocate scid, len=%d\n",
ret = -ENOMEM;
goto exit;
}
- memcpy(cmd_data->scid.data, nla_data(iter), cmd_data->scid.dlen);
+ ret = memcpy_s(cmd_data->scid.data, cmd_data->scid.dlen,
+ nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to scid data\n"));
+ return ret;
+ }
break;
case NAN_ATTRIBUTE_2G_AWAKE_DW:
if (nla_len(iter) != sizeof(uint32)) {
ret = -EINVAL;
goto exit;
}
- memcpy((char*)&cmd_data->mac_addr, (char*)nla_data(iter),
- ETHER_ADDR_LEN);
+ ret = memcpy_s((char*)&cmd_data->mac_addr, ETHER_ADDR_LEN,
+ (char*)nla_data(iter), nla_len(iter));
+ if (ret != BCME_OK) {
+ WL_ERR(("Failed to copy mac addr\n"));
+ return ret;
+ }
break;
case NAN_ATTRIBUTE_RANDOMIZATION_INTERVAL:
if (nla_len(iter) != sizeof(uint32)) {
}
}
report->status = (rtt_reason_t)status;
+ report->type = RTT_TWO_WAY;
#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
goto exit;
}
- nla_put_u32(msg, RTT_ATTRIBUTE_RESULTS_COMPLETE, 1);
+ ret = nla_put_u32(msg, RTT_ATTRIBUTE_RESULTS_COMPLETE, 1);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULTS_COMPLETE\n"));
+ goto exit;
+ }
rtt_nl_hdr = nla_nest_start(msg, RTT_ATTRIBUTE_RESULTS_PER_TARGET);
if (!rtt_nl_hdr) {
WL_ERR(("rtt_nl_hdr is NULL\n"));
ret = BCME_NOMEM;
goto exit;
}
- nla_put(msg, RTT_ATTRIBUTE_TARGET_MAC, ETHER_ADDR_LEN, &report->addr);
- nla_put_u32(msg, RTT_ATTRIBUTE_RESULT_CNT, 1);
- nla_put(msg, RTT_ATTRIBUTE_RESULT,
- sizeof(*report), report);
+ ret = nla_put(msg, RTT_ATTRIBUTE_TARGET_MAC, ETHER_ADDR_LEN, &report->addr);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_TARGET_MAC\n"));
+ goto exit;
+ }
+ ret = nla_put_u32(msg, RTT_ATTRIBUTE_RESULT_CNT, 1);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULT_CNT\n"));
+ goto exit;
+ }
+ ret = nla_put(msg, RTT_ATTRIBUTE_RESULT,
+ sizeof(*report), report);
+ if (ret < 0) {
+ WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULTS\n"));
+ goto exit;
+ }
nla_nest_end(msg, rtt_nl_hdr);
cfg80211_vendor_event(msg, kflags);
+ if (report) {
+ MFREE(cfg->osh, report, sizeof(*report));
+ }
+
return ret;
exit:
if (msg)
dev_kfree_skb_any(msg);
WL_ERR(("Failed to send event GOOGLE_RTT_COMPLETE_EVENT,"
- " -- Free skb, ret = %d\n", ret));
+ " -- Free skb, ret = %d\n", ret));
if (report)
MFREE(cfg->osh, report, sizeof(*report));
NAN_DBG_EXIT();
}
#endif /* RTT_SUPPORT */
+/*
+ * Send a NAN command response up to the HAL as an asynchronous cfg80211
+ * vendor event (rather than a synchronous command reply).
+ *
+ * wiphy/dev       - wiphy and net_device the event is raised on
+ * event_id        - vendor event id to emit
+ * nan_req_resp    - opaque response payload, attached as
+ *                   NAN_ATTRIBUTE_CMD_RESP_DATA
+ * len             - payload length in bytes
+ *
+ * Returns BCME_OK on success, -ENOMEM if the skb cannot be allocated, or
+ * the nla_put() error code (skb is freed on that path).
+ */
+static int
+wl_cfgvendor_send_nan_async_resp(struct wiphy *wiphy, struct net_device *dev,
+	int event_id, u8* nan_req_resp, u16 len)
+{
+	int ret = BCME_OK;
+	/* fixed event buffer size; must be >= len + netlink overhead */
+	int buf_len = NAN_EVENT_BUFFER_SIZE_LARGE;
+	/* may be called from atomic context, so pick allocation flags accordingly */
+	gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+
+	struct sk_buff *msg;
+
+	NAN_DBG_ENTER();
+
+	/* Allocate the skb for vendor event */
+	msg = CFG80211_VENDOR_EVENT_ALLOC(wiphy, ndev_to_wdev(dev), buf_len,
+		event_id, kflags);
+	if (!msg) {
+		WL_ERR(("%s: fail to allocate skb for vendor event\n", __FUNCTION__));
+		return -ENOMEM;
+	}
+
+	/* attach the raw response blob as a single netlink attribute */
+	ret = nla_put(msg, NAN_ATTRIBUTE_CMD_RESP_DATA,
+		len, (u8*)nan_req_resp);
+	if (unlikely(ret)) {
+		WL_ERR(("Failed to put resp data, ret=%d\n",
+			ret));
+		goto fail;
+	}
+	WL_DBG(("Event sent up to hal, event_id = %d, ret = %d\n",
+		event_id, ret));
+	/* hand the skb to cfg80211; ownership transfers on success */
+	cfg80211_vendor_event(msg, kflags);
+	NAN_DBG_EXIT();
+	return ret;
+
+fail:
+	/* nla_put failed: we still own the skb, so free it here */
+	dev_kfree_skb_any(msg);
+	WL_ERR(("Event not implemented or unknown -- Free skb, event_id = %d, ret = %d\n",
+		event_id, ret));
+	NAN_DBG_EXIT();
+	return ret;
+}
+
+/*
+ * Notify the HAL that an asynchronous (deferred) NAN disable has completed.
+ *
+ * Builds a zeroed nan_hal_resp_t with status NAN_STATUS_SUCCESS / value
+ * BCME_OK and delivers it as a NAN_ASYNC_RESPONSE_DISABLED vendor event via
+ * wl_cfgvendor_send_nan_async_resp().
+ *
+ * Returns the result of the event send.
+ */
+int
+wl_cfgvendor_nan_send_async_disable_resp(struct wireless_dev *wdev)
+{
+	int ret = BCME_OK;
+	struct wiphy *wiphy = wdev->wiphy;
+	nan_hal_resp_t nan_req_resp;
+	/* clear the response before filling the success fields */
+	bzero(&nan_req_resp, sizeof(nan_req_resp));
+	nan_req_resp.status = NAN_STATUS_SUCCESS;
+	nan_req_resp.value = BCME_OK;
+
+	ret = wl_cfgvendor_send_nan_async_resp(wiphy, wdev->netdev,
+		NAN_ASYNC_RESPONSE_DISABLED, (u8*)&nan_req_resp, sizeof(nan_req_resp));
+	WL_INFORM_MEM(("[NAN] Disable done\n"));
+	return ret;
+}
+
int
wl_cfgvendor_send_nan_event(struct wiphy *wiphy, struct net_device *dev,
int event_id, nan_event_data_t *event_data)
NAN_DBG_ENTER();
/* Allocate the skb for vendor event */
-#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
- LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
- msg = cfg80211_vendor_event_alloc(wiphy, ndev_to_wdev(dev), buf_len, event_id, kflags);
-#else
- msg = cfg80211_vendor_event_alloc(wiphy, buf_len, event_id, kflags);
-#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
- /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
-
+ msg = CFG80211_VENDOR_EVENT_ALLOC(wiphy, ndev_to_wdev(dev), buf_len,
+ event_id, kflags);
if (!msg) {
WL_ERR(("%s: fail to allocate skb for vendor event\n", __FUNCTION__));
return -ENOMEM;
goto exit;
}
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgvendor_nan_parse_discover_args(wiphy, data, len, cmd_data);
if (ret) {
WL_ERR(("failed to parse nan disc vendor args, ret = %d\n", ret));
goto exit;
}
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgvendor_nan_parse_discover_args(wiphy, data, len, cmd_data);
if (ret) {
WL_ERR(("failed to parse nan disc vendor args, ret = %d\n", ret));
ret = BCME_OK;
goto exit;
}
-
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
cmd_data->sid_beacon.sid_enable = NAN_SID_ENABLE_FLAG_INVALID; /* Setting to some default */
cmd_data->sid_beacon.sid_count = NAN_SID_BEACON_COUNT_INVALID; /* Setting to some default */
goto exit;
}
/* Initializing Instance Id List */
- memset(cfg->nan_inst_ctrl, 0, NAN_ID_CTRL_SIZE * sizeof(nan_svc_inst_t));
+ bzero(cfg->nan_inst_ctrl, NAN_ID_CTRL_SIZE * sizeof(nan_svc_inst_t));
exit:
ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_ENABLE,
&nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
return ret;
}
+/*
+ * Tear down all active NAN data-path and ranging sessions prior to a NAN
+ * disable.
+ *
+ * Walks the tracked NDP ids and ends each active data path, then cancels
+ * every active ranging instance. *ssn_exists is set to true if at least one
+ * session of either kind was found, so the caller can decide whether the
+ * disable must be deferred to let the firmware finish the teardown.
+ *
+ * NOTE(review): the return value only reflects the most recent
+ * wl_cfgnan_cancel_ranging() call; data-path end results are reported via
+ * 'status' and only logged — confirm callers rely solely on *ssn_exists.
+ */
+static int
+wl_cfgvendor_terminate_dp_rng_sessions(struct bcm_cfg80211 *cfg,
+	struct wireless_dev *wdev, bool *ssn_exists)
+{
+	int ret = 0;
+	uint8 i = 0;
+	int status = BCME_ERROR;
+	nan_ranging_inst_t *ranging_inst = NULL;
+
+	/* Cleanup active Data Paths If any */
+	for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
+		/* a non-zero slot marks an active NDP session */
+		if (cfg->nancfg.ndp_id[i]) {
+			*ssn_exists = true;
+			WL_DBG(("Found entry of ndp id = [%d], end dp associated to it\n",
+				cfg->nancfg.ndp_id[i]));
+			wl_cfgnan_data_path_end_handler(wdev->netdev, cfg,
+				cfg->nancfg.ndp_id[i], &status);
+		}
+	}
+
+	/* Cancel ranging sessions */
+	for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
+		ranging_inst = &cfg->nan_ranging_info[i];
+		/* a non-zero range_id marks an active ranging instance */
+		if (ranging_inst->range_id) {
+			*ssn_exists = true;
+			ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg), cfg,
+				ranging_inst->range_id,
+				NAN_RNG_TERM_FLAG_NONE, &status);
+			if (unlikely(ret) || unlikely(status)) {
+				/* log and continue: remaining sessions are still cancelled */
+				WL_ERR(("nan range cancel failed ret = %d status = %d\n",
+					ret, status));
+			}
+		}
+	}
+	return ret;
+}
+
static int
wl_cfgvendor_nan_stop_handler(struct wiphy *wiphy,
struct wireless_dev *wdev, const void * data, int len)
int ret = 0;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
nan_hal_resp_t nan_req_resp;
+ bool ssn_exists = false;
+
NAN_DBG_ENTER();
if (!cfg->nan_init_state) {
goto exit;
}
+ mutex_lock(&cfg->if_sync);
if (cfg->nan_enable) {
- ret = wl_cfgnan_disable(cfg, NAN_USER_INITIATED);
- if (ret) {
- WL_ERR(("failed to disable nan, error[%d]\n", ret));
+ cfg->nancfg.disable_reason = NAN_USER_INITIATED;
+ wl_cfgvendor_terminate_dp_rng_sessions(cfg, wdev, &ssn_exists);
+ if (ssn_exists == true) {
+ /*
+ * Schedule nan disable with 4sec delay to make sure
+ * fw cleans any active Data paths and
+ * notifies the peer about the dp session terminations
+ */
+ WL_INFORM_MEM(("Schedule Nan Disable Req, with 4sec\n"));
+ schedule_delayed_work(&cfg->nan_disable,
+ msecs_to_jiffies(NAN_DISABLE_CMD_DELAY_TIMER));
+ } else {
+ ret = wl_cfgnan_disable(cfg);
+ if (ret) {
+ WL_ERR(("failed to disable nan, error[%d]\n", ret));
+ }
}
}
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+ mutex_unlock(&cfg->if_sync);
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
exit:
- ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_DISABLE,
- &nan_req_resp, ret, BCME_OK);
NAN_DBG_EXIT();
return ret;
}
goto exit;
}
NAN_DBG_ENTER();
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
cmd_data->avail_params.duration = NAN_BAND_INVALID; /* Setting to some default */
cmd_data->sid_beacon.sid_enable = NAN_SID_ENABLE_FLAG_INVALID; /* Setting to some default */
}
NAN_DBG_ENTER();
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgvendor_nan_parse_discover_args(wiphy, data, len, cmd_data);
if (ret) {
}
NAN_DBG_ENTER();
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgvendor_nan_parse_discover_args(wiphy, data, len, cmd_data);
if (ret) {
}
NAN_DBG_ENTER();
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgvendor_nan_parse_discover_args(wiphy, data, len, cmd_data);
if (ret) {
NAN_DBG_ENTER();
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
-
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgnan_get_capablities_handler(wdev->netdev, cfg, &nan_req_resp.capabilities);
if (ret) {
WL_ERR(("Could not get capabilities\n"));
exit:
ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_GET_CAPABILITIES,
&nan_req_resp, ret, BCME_OK);
- wl_cfgvendor_send_cmd_reply(wiphy, &nan_req_resp, sizeof(nan_req_resp));
-
NAN_DBG_EXIT();
return ret;
}
struct wireless_dev *wdev, const void * data, int len)
{
int ret = 0;
- s32 idx;
nan_datapath_cmd_data_t *cmd_data = NULL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
nan_hal_resp_t nan_req_resp;
}
NAN_DBG_ENTER();
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgvendor_nan_parse_datapath_args(wiphy, data, len, cmd_data);
if (ret) {
goto exit;
}
- /* Store the iface name to pub data so that it can be used
- * during NAN enable
- */
- if ((idx = wl_cfgnan_get_ndi_idx(cfg)) < 0) {
- WL_ERR(("No free idx for NAN NDI\n"));
- goto exit;
- }
- wl_cfgnan_add_ndi_data(cfg, idx, (char*)cmd_data->ndp_iface);
if (cfg->nan_enable) { /* new framework Impl, iface create called after nan enab */
- wl_cfgnan_data_path_iface_create_delete_handler(wdev->netdev,
+ ret = wl_cfgnan_data_path_iface_create_delete_handler(wdev->netdev,
cfg, cmd_data->ndp_iface,
NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE, dhdp->up);
- cfg->nancfg.ndi[idx].created = true;
+ if (ret != BCME_OK) {
+ WL_ERR(("failed to create iface, ret = %d\n", ret));
+ goto exit;
+ }
}
exit:
ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE,
nan_hal_resp_t nan_req_resp;
dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(wdev->netdev);
- if (!cfg->nan_init_state) {
+ if (cfg->nan_init_state == false) {
WL_ERR(("%s: NAN is not inited or Device doesn't support NAN \n", __func__));
/* Deinit has taken care of cleaing the virtual iface */
ret = BCME_OK;
ret = BCME_NOMEM;
goto exit;
}
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgvendor_nan_parse_datapath_args(wiphy, data, len, cmd_data);
if (ret) {
WL_ERR(("failed to parse nan datapath vendor args, ret = %d\n", ret));
(char*)cmd_data->ndp_iface,
NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE, dhdp->up);
if (ret) {
- if (ret == -ENODEV) {
- if (wl_cfgnan_get_ndi_data(cfg, (char*)cmd_data->ndp_iface) != NULL) {
- /* NDIs have been removed by the NAN disable command */
- WL_DBG(("NDI removed by nan_disable\n"));
- ret = BCME_OK;
- }
- } else {
- WL_ERR(("failed to delete ndp iface [%d]\n", ret));
- goto exit;
- }
+ WL_ERR(("failed to delete ndp iface [%d]\n", ret));
+ goto exit;
}
exit:
- if (cfg->nan_init_state) {
- /* After successful delete of interface, clear up the ndi data */
- if (wl_cfgnan_del_ndi_data(cfg, (char*)cmd_data->ndp_iface) < 0) {
- WL_ERR(("Failed to find matching data for ndi:%s\n",
- (char*)cmd_data->ndp_iface));
- }
- }
-
ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE,
&nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
wl_cfgvendor_free_dp_cmd_data(cfg, cmd_data);
goto exit;
}
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgvendor_nan_parse_datapath_args(wiphy, data, len, cmd_data);
if (ret) {
WL_ERR(("failed to parse nan datapath vendor args, ret = %d\n", ret));
goto exit;
}
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgvendor_nan_parse_datapath_args(wiphy, data, len, cmd_data);
if (ret) {
WL_ERR(("failed to parse nan datapath vendor args, ret = %d\n", ret));
nan_datapath_cmd_data_t *cmd_data = NULL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
nan_hal_resp_t nan_req_resp;
+ int status = BCME_ERROR;
NAN_DBG_ENTER();
if (!cfg->nan_enable) {
goto exit;
}
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgvendor_nan_parse_datapath_args(wiphy, data, len, cmd_data);
if (ret) {
WL_ERR(("failed to parse nan datapath vendor args, ret = %d\n", ret));
goto exit;
}
- ret = wl_cfgnan_data_path_end_handler(wdev->netdev, cfg, cmd_data);
+ ret = wl_cfgnan_data_path_end_handler(wdev->netdev, cfg,
+ cmd_data->ndp_instance_id, &status);
if (ret) {
WL_ERR(("failed to end nan data path [%d]\n", ret));
goto exit;
}
exit:
ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_DATA_PATH_END,
- &nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
+ &nan_req_resp, ret, cmd_data ? status : BCME_OK);
wl_cfgvendor_free_dp_cmd_data(cfg, cmd_data);
NAN_DBG_EXIT();
return ret;
WL_ERR(("failed to parse sec info args\n"));
goto exit;
}
- memset(&nan_req_resp, 0, sizeof(nan_req_resp));
+
+ bzero(&nan_req_resp, sizeof(nan_req_resp));
ret = wl_cfgnan_sec_info_handler(cfg, cmd_data, &nan_req_resp);
if (ret) {
WL_ERR(("failed to retrieve svc hash/pub nmi error[%d]\n", ret));
RETURN_EIO_IF_NOT_UP(cfg);
/* Get the device rev info */
- memset(&revinfo, 0, sizeof(revinfo));
+ bzero(&revinfo, sizeof(revinfo));
err = wldev_ioctl_get(bcmcfg_to_prmry_ndev(cfg), WLC_GET_REVINFO, &revinfo,
sizeof(revinfo));
if (err != BCME_OK) {
return -ENOMEM;
}
- memset(&scbval, 0, sizeof(scb_val_t));
- memset(outdata, 0, WLC_IOCTL_MAXLEN);
+ bzero(&scbval, sizeof(scb_val_t));
+ bzero(outdata, WLC_IOCTL_MAXLEN);
output = outdata;
err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "radiostat", NULL, 0,
}
radio = (wifi_radio_stat *)iovar_buf;
- memset(&radio_h, 0, sizeof(wifi_radio_stat_h));
+ bzero(&radio_h, sizeof(wifi_radio_stat_h));
radio_h.on_time = radio->on_time;
radio_h.tx_time = radio->tx_time;
radio_h.rx_time = radio->rx_time;
}
#endif /* LINKSTAT_SUPPORT */
-#ifdef DEBUGABILITY
-static int wl_cfgvendor_dbg_start_logging(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
+#ifdef DHD_LOG_DUMP
+/*
+ * Extract and validate a struct buf_data pointer from a netlink attribute.
+ *
+ * *buf is pointed directly at the attribute payload (no copy). Returns
+ * BCME_BADLEN if the attribute length is not exactly sizeof(struct buf_data),
+ * BCME_ERROR if the resulting buffer pointer, its len, or its first data
+ * pointer is invalid, and BCME_OK otherwise.
+ *
+ * NOTE(review): on a length mismatch this still dereferences the attribute
+ * payload before returning the error — caller must not use *buf unless
+ * BCME_OK is returned; confirm the short-payload read is safe here.
+ */
+static int
+wl_cfgvendor_get_buf_data(const struct nlattr *iter, struct buf_data **buf)
{
-	int ret = BCME_OK, rem, type;
-	char ring_name[DBGRING_NAME_MAX] = {0};
-	int log_level = 0, flags = 0, time_intval = 0, threshold = 0;
+	int ret = BCME_OK;
+
+	/* payload must be exactly one struct buf_data */
+	if (nla_len(iter) != sizeof(struct buf_data)) {
+		WL_ERR(("Invalid len : %d\n", nla_len(iter)));
+		ret = BCME_BADLEN;
+	}
+	(*buf) = (struct buf_data *)nla_data(iter);
+	if (!(*buf) || (((*buf)->len) <= 0) || !((*buf)->data_buf[0])) {
+		WL_ERR(("Invalid buffer\n"));
+		ret = BCME_ERROR;
+	}
+	return ret;
+}
+
+static int
+wl_cfgvendor_dbg_file_dump(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = BCME_OK, rem, type = 0;
const struct nlattr *iter;
+ char *mem_buf = NULL;
+ struct sk_buff *skb = NULL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- dhd_pub_t *dhd_pub = cfg->pub;
+ struct buf_data *buf;
+ int pos = 0;
+
+ /* Alloc the SKB for vendor_event */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, CFG80211_VENDOR_CMD_REPLY_SKB_SZ);
+ if (!skb) {
+ WL_ERR(("skb allocation is failed\n"));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ WL_ERR(("%s\n", __FUNCTION__));
nla_for_each_attr(iter, data, len, rem) {
type = nla_type(iter);
+ ret = wl_cfgvendor_get_buf_data(iter, &buf);
+ if (ret)
+ goto exit;
switch (type) {
- case DEBUG_ATTRIBUTE_RING_NAME:
- strncpy(ring_name, nla_data(iter),
- MIN(sizeof(ring_name) -1, nla_len(iter)));
+ case DUMP_BUF_ATTR_MEMDUMP:
+ ret = dhd_os_get_socram_dump(bcmcfg_to_prmry_ndev(cfg), &mem_buf,
+ (uint32 *)(&(buf->len)));
+ if (ret) {
+ WL_ERR(("failed to get_socram_dump : %d\n", ret));
+ goto exit;
+ }
+ ret = dhd_export_debug_data(mem_buf, NULL, buf->data_buf[0],
+ (int)buf->len, &pos);
break;
- case DEBUG_ATTRIBUTE_LOG_LEVEL:
- log_level = nla_get_u32(iter);
+
+ case DUMP_BUF_ATTR_TIMESTAMP :
+ ret = dhd_print_time_str(buf->data_buf[0], NULL,
+ (uint32)buf->len, &pos);
break;
- case DEBUG_ATTRIBUTE_RING_FLAGS:
- flags = nla_get_u32(iter);
+#ifdef EWP_ECNTRS_LOGGING
+ case DUMP_BUF_ATTR_ECNTRS :
+ ret = dhd_print_ecntrs_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
break;
- case DEBUG_ATTRIBUTE_LOG_TIME_INTVAL:
- time_intval = nla_get_u32(iter);
+#endif /* EWP_ECNTRS_LOGGING */
+#ifdef DHD_STATUS_LOGGING
+ case DUMP_BUF_ATTR_STATUS_LOG :
+ ret = dhd_print_status_log_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
break;
- case DEBUG_ATTRIBUTE_LOG_MIN_DATA_SIZE:
- threshold = nla_get_u32(iter);
+#endif /* DHD_STATUS_LOGGING */
+#ifdef EWP_RTT_LOGGING
+ case DUMP_BUF_ATTR_RTT_LOG :
+ ret = dhd_print_rtt_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
+ break;
+#endif /* EWP_RTT_LOGGING */
+ case DUMP_BUF_ATTR_DHD_DUMP :
+ ret = dhd_print_dump_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
+ break;
+#if defined(BCMPCIE)
+ case DUMP_BUF_ATTR_EXT_TRAP :
+ ret = dhd_print_ext_trap_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
+ break;
+#endif /* BCMPCIE */
+#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
+ case DUMP_BUF_ATTR_HEALTH_CHK :
+ ret = dhd_print_health_chk_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
+ break;
+#endif // endif
+ case DUMP_BUF_ATTR_COOKIE :
+ ret = dhd_print_cookie_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
+ break;
+#ifdef DHD_DUMP_PCIE_RINGS
+ case DUMP_BUF_ATTR_FLOWRING_DUMP :
+ ret = dhd_print_flowring_data(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len, &pos);
+ break;
+#endif // endif
+ case DUMP_BUF_ATTR_GENERAL_LOG :
+ ret = dhd_get_dld_log_dump(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len,
+ DLD_BUF_TYPE_GENERAL, &pos);
+ break;
+
+ case DUMP_BUF_ATTR_PRESERVE_LOG :
+ ret = dhd_get_dld_log_dump(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len,
+ DLD_BUF_TYPE_PRESERVE, &pos);
+ break;
+
+ case DUMP_BUF_ATTR_SPECIAL_LOG :
+ ret = dhd_get_dld_log_dump(bcmcfg_to_prmry_ndev(cfg), NULL,
+ buf->data_buf[0], NULL, (uint32)buf->len,
+ DLD_BUF_TYPE_SPECIAL, &pos);
+ break;
+#ifdef DHD_SSSR_DUMP
+ case DUMP_BUF_ATTR_SSSR_C0_D11_BEFORE :
+ ret = dhd_sssr_dump_d11_buf_before(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len, 0);
+ break;
+
+ case DUMP_BUF_ATTR_SSSR_C0_D11_AFTER :
+ ret = dhd_sssr_dump_d11_buf_after(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len, 0);
break;
+
+ case DUMP_BUF_ATTR_SSSR_C1_D11_BEFORE :
+ ret = dhd_sssr_dump_d11_buf_before(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len, 1);
+ break;
+
+ case DUMP_BUF_ATTR_SSSR_C1_D11_AFTER :
+ ret = dhd_sssr_dump_d11_buf_after(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len, 1);
+ break;
+
+ case DUMP_BUF_ATTR_SSSR_DIG_BEFORE :
+ ret = dhd_sssr_dump_dig_buf_before(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len);
+ break;
+
+ case DUMP_BUF_ATTR_SSSR_DIG_AFTER :
+ ret = dhd_sssr_dump_dig_buf_after(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len);
+ break;
+#endif /* DHD_SSSR_DUMP */
+#ifdef DNGL_AXI_ERROR_LOGGING
+ case DUMP_BUF_ATTR_AXI_ERROR:
+ ret = dhd_os_get_axi_error_dump(bcmcfg_to_prmry_ndev(cfg),
+ buf->data_buf[0], (uint32)buf->len);
+ break;
+#endif /* DNGL_AXI_ERROR_LOGGING */
default:
WL_ERR(("Unknown type: %d\n", type));
- ret = BCME_BADADDR;
+ ret = BCME_ERROR;
goto exit;
}
}
- ret = dhd_os_start_logging(dhd_pub, ring_name, log_level, flags, time_intval, threshold);
+ if (ret)
+ goto exit;
+
+ ret = nla_put_u32(skb, type, (uint32)(ret));
if (ret < 0) {
- WL_ERR(("start_logging is failed ret: %d\n", ret));
+ WL_ERR(("Failed to put type, ret:%d\n", ret));
+ goto exit;
+ }
+ ret = cfg80211_vendor_cmd_reply(skb);
+ if (ret) {
+ WL_ERR(("Vendor Command reply failed ret:%d \n", ret));
}
-exit:
return ret;
-}
-
-static int wl_cfgvendor_dbg_reset_logging(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
-{
- int ret = BCME_OK;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- dhd_pub_t *dhd_pub = cfg->pub;
-
- ret = dhd_os_reset_logging(dhd_pub);
- if (ret < 0) {
- WL_ERR(("reset logging is failed ret: %d\n", ret));
+exit:
+ if (skb) {
+ /* Free skb memory */
+ kfree_skb(skb);
}
-
return ret;
}
+#endif /* DHD_LOG_DUMP */
+#ifdef DEBUGABILITY
static int
wl_cfgvendor_dbg_trigger_mem_dump(struct wiphy *wiphy,
struct wireless_dev *wdev, const void *data, int len)
{
int ret = BCME_OK;
uint32 alloc_len;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ WL_ERR(("wl_cfgvendor_dbg_trigger_mem_dump %d\n", __LINE__));
+
dhdp->memdump_type = DUMP_TYPE_CFG_VENDOR_TRIGGERED;
ret = dhd_os_socram_dump(bcmcfg_to_prmry_ndev(cfg), &alloc_len);
if (ret) {
goto exit;
}
/* Alloc the SKB for vendor_event */
- skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, CFG80211_VENDOR_CMD_REPLY_SKB_SZ);
if (!skb) {
WL_ERR(("skb allocation is failed\n"));
ret = BCME_NOMEM;
goto exit;
}
- nla_put_u32(skb, DEBUG_ATTRIBUTE_FW_DUMP_LEN, alloc_len);
+ ret = nla_put_u32(skb, DEBUG_ATTRIBUTE_FW_DUMP_LEN, alloc_len);
+
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to put fw dump length, ret=%d\n", ret));
+ goto exit;
+ }
ret = cfg80211_vendor_cmd_reply(skb);
if (ret) {
WL_ERR(("Vendor Command reply failed ret:%d \n", ret));
+ goto exit;
}
-
+ return ret;
exit:
+ /* Free skb memory */
+ if (skb) {
+ kfree_skb(skb);
+ }
return ret;
}
uintptr_t user_buf = (uintptr_t)NULL;
const struct nlattr *iter;
char *mem_buf = NULL;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
nla_for_each_attr(iter, data, len, rem) {
}
}
/* Alloc the SKB for vendor_event */
- skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, CFG80211_VENDOR_CMD_REPLY_SKB_SZ);
if (!skb) {
WL_ERR(("skb allocation is failed\n"));
ret = BCME_NOMEM;
goto free_mem;
}
/* Indicate the memdump is succesfully copied */
- nla_put(skb, DEBUG_ATTRIBUTE_FW_DUMP_DATA, sizeof(ret), &ret);
+ ret = nla_put(skb, DEBUG_ATTRIBUTE_FW_DUMP_DATA, sizeof(ret), &ret);
+ if (ret < 0) {
+ WL_ERR(("Failed to put DEBUG_ATTRIBUTE_FW_DUMP_DATA, ret:%d\n", ret));
+ goto free_mem;
+ }
ret = cfg80211_vendor_cmd_reply(skb);
if (ret) {
WL_ERR(("Vendor Command reply failed ret:%d \n", ret));
}
+ skb = NULL;
}
free_mem:
vfree(mem_buf);
+ /* Free skb memory */
+ if (skb) {
+ kfree_skb(skb);
+ }
exit:
return ret;
}
+static int wl_cfgvendor_dbg_start_logging(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = BCME_OK, rem, type;
+ char ring_name[DBGRING_NAME_MAX] = {0};
+ int log_level = 0, flags = 0, time_intval = 0, threshold = 0;
+ const struct nlattr *iter;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhd_pub = cfg->pub;
+ nla_for_each_attr(iter, data, len, rem) {
+ type = nla_type(iter);
+ switch (type) {
+ case DEBUG_ATTRIBUTE_RING_NAME:
+ strncpy(ring_name, nla_data(iter),
+ MIN(sizeof(ring_name) -1, nla_len(iter)));
+ break;
+ case DEBUG_ATTRIBUTE_LOG_LEVEL:
+ log_level = nla_get_u32(iter);
+ break;
+ case DEBUG_ATTRIBUTE_RING_FLAGS:
+ flags = nla_get_u32(iter);
+ break;
+ case DEBUG_ATTRIBUTE_LOG_TIME_INTVAL:
+ time_intval = nla_get_u32(iter);
+ break;
+ case DEBUG_ATTRIBUTE_LOG_MIN_DATA_SIZE:
+ threshold = nla_get_u32(iter);
+ break;
+ default:
+ WL_ERR(("Unknown type: %d\n", type));
+ ret = BCME_BADADDR;
+ goto exit;
+ }
+ }
+
+ ret = dhd_os_start_logging(dhd_pub, ring_name, log_level, flags, time_intval, threshold);
+ if (ret < 0) {
+ WL_ERR(("start_logging is failed ret: %d\n", ret));
+ }
+exit:
+ return ret;
+}
+
+static int wl_cfgvendor_dbg_reset_logging(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = BCME_OK;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhd_pub = cfg->pub;
+
+ ret = dhd_os_reset_logging(dhd_pub);
+ if (ret < 0) {
+ WL_ERR(("reset logging is failed ret: %d\n", ret));
+ }
+
+ return ret;
+}
+
static int wl_cfgvendor_dbg_get_ring_status(struct wiphy *wiphy,
struct wireless_dev *wdev, const void *data, int len)
{
dhd_dbg_ring_status_t ring_status;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
dhd_pub_t *dhd_pub = cfg->pub;
- memset(dbg_ring_status, 0, DBG_RING_STATUS_SIZE * DEBUG_RING_ID_MAX);
+ bzero(dbg_ring_status, DBG_RING_STATUS_SIZE * DEBUG_RING_ID_MAX);
ring_cnt = 0;
for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) {
ret = dhd_os_get_ring_status(dhd_pub, ring_id, &ring_status);
}
/* Alloc the SKB for vendor_event */
skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy,
- (DBG_RING_STATUS_SIZE * ring_cnt) + 100);
+ nla_total_size(DBG_RING_STATUS_SIZE) * ring_cnt + nla_total_size(sizeof(ring_cnt)));
if (!skb) {
WL_ERR(("skb allocation is failed\n"));
ret = BCME_NOMEM;
goto exit;
}
- nla_put_u32(skb, DEBUG_ATTRIBUTE_RING_NUM, ring_cnt);
+ /* Ignore return of nla_put_u32 and nla_put since the skb allocated
+ * above has a requested size for all payload
+ */
+ (void)nla_put_u32(skb, DEBUG_ATTRIBUTE_RING_NUM, ring_cnt);
for (i = 0; i < ring_cnt; i++) {
- nla_put(skb, DEBUG_ATTRIBUTE_RING_STATUS, DBG_RING_STATUS_SIZE,
+ (void)nla_put(skb, DEBUG_ATTRIBUTE_RING_STATUS, DBG_RING_STATUS_SIZE,
&dbg_ring_status[i]);
}
ret = cfg80211_vendor_cmd_reply(skb);
type = nla_type(iter);
switch (type) {
case DEBUG_ATTRIBUTE_RING_NAME:
- strncpy(ring_name, nla_data(iter),
- MIN(sizeof(ring_name) -1, nla_len(iter)));
+ strlcpy(ring_name, nla_data(iter), sizeof(ring_name));
break;
default:
WL_ERR(("Unknown type: %d\n", type));
nla_put(skb, DEBUG_ATTRIBUTE_RING_DATA, len, data);
cfg80211_vendor_event(skb, kflags);
}
+#endif /* DEBUGABILITY */
+
+#ifdef DHD_LOG_DUMP
+static int wl_cfgvendor_nla_put_sssr_dump_data(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ int ret = BCME_OK;
+#ifdef DHD_SSSR_DUMP
+ uint32 arr_len[DUMP_SSSR_ATTR_COUNT];
+ int i = 0, j = 0;
+#endif /* DHD_SSSR_DUMP */
+ char memdump_path[MEMDUMP_PATH_LEN];
+
+ dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
+ "sssr_dump_core_0_before_SR");
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_CORE_0_BEFORE_DUMP, memdump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put sssr core 0 before dump path, ret=%d\n", ret));
+ goto exit;
+ }
+
+ dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
+ "sssr_dump_core_0_after_SR");
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_CORE_0_AFTER_DUMP, memdump_path);
+ if (unlikely(ret)) {
+		WL_ERR(("Failed to nla put sssr core 0 after dump path, ret=%d\n", ret));
+ goto exit;
+ }
+
+ dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
+ "sssr_dump_core_1_before_SR");
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_CORE_1_BEFORE_DUMP, memdump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put sssr core 1 before dump path, ret=%d\n", ret));
+ goto exit;
+ }
+
+ dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
+ "sssr_dump_core_1_after_SR");
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_CORE_1_AFTER_DUMP, memdump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put sssr core 1 after dump path, ret=%d\n", ret));
+ goto exit;
+ }
+
+ dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
+ "sssr_dump_dig_before_SR");
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_DIG_BEFORE_DUMP, memdump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put sssr dig before dump path, ret=%d\n", ret));
+ goto exit;
+ }
+
+ dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
+ "sssr_dump_dig_after_SR");
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_DIG_AFTER_DUMP, memdump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put sssr dig after dump path, ret=%d\n", ret));
+ goto exit;
+ }
+
+#ifdef DHD_SSSR_DUMP
+ memset(arr_len, 0, sizeof(arr_len));
+ dhd_nla_put_sssr_dump_len(ndev, arr_len);
+
+ for (i = 0, j = DUMP_SSSR_ATTR_START; i < DUMP_SSSR_ATTR_COUNT; i++, j++) {
+ if (arr_len[i]) {
+ ret = nla_put_u32(skb, j, arr_len[i]);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put sssr dump len, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+ }
+#endif /* DHD_SSSR_DUMP */
+
+exit:
+ return ret;
+}
+
+static int wl_cfgvendor_nla_put_debug_dump_data(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ int ret = BCME_OK;
+ uint32 len = 0;
+ char dump_path[128];
-static void wl_cfgvendor_dbg_send_urgent_evt(void *ctx, const void *data,
+ ret = dhd_get_debug_dump_file_name(ndev, NULL, dump_path, sizeof(dump_path));
+ if (ret < 0) {
+ WL_ERR(("%s: Failed to get debug dump filename\n", __FUNCTION__));
+ goto exit;
+ }
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_DEBUG_DUMP, dump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put debug dump path, ret=%d\n", ret));
+ goto exit;
+ }
+ WL_ERR(("debug_dump path = %s%s\n", dump_path, FILE_NAME_HAL_TAG));
+ wl_print_verinfo(wl_get_cfg(ndev));
+
+ len = dhd_get_time_str_len();
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_TIMESTAMP, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put time stamp length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+
+ len = dhd_get_dld_len(DLD_BUF_TYPE_GENERAL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_GENERAL_LOG, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put general log length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#ifdef EWP_ECNTRS_LOGGING
+ len = dhd_get_ecntrs_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_ECNTRS, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put ecntrs length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#endif /* EWP_ECNTRS_LOGGING */
+ len = dhd_get_dld_len(DLD_BUF_TYPE_SPECIAL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_SPECIAL_LOG, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put special log length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+ len = dhd_get_dhd_dump_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_DHD_DUMP, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put dhd dump length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+
+#if defined(BCMPCIE)
+ len = dhd_get_ext_trap_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_EXT_TRAP, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put ext trap length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#endif /* BCMPCIE */
+
+#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
+ len = dhd_get_health_chk_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_HEALTH_CHK, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put health check length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#endif // endif
+
+ len = dhd_get_dld_len(DLD_BUF_TYPE_PRESERVE);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_PRESERVE_LOG, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put preserve log length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+
+ len = dhd_get_cookie_log_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_COOKIE, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put cookie length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#ifdef DHD_DUMP_PCIE_RINGS
+ len = dhd_get_flowring_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_FLOWRING_DUMP, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put flowring dump length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#endif // endif
+#ifdef DHD_STATUS_LOGGING
+ len = dhd_get_status_log_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_STATUS_LOG, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put status log length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#endif /* DHD_STATUS_LOGGING */
+#ifdef EWP_RTT_LOGGING
+ len = dhd_get_rtt_len(ndev, NULL);
+ if (len) {
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_RTT_LOG, len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put rtt log length, ret=%d\n", ret));
+ goto exit;
+ }
+ }
+#endif /* EWP_RTT_LOGGING */
+exit:
+ return ret;
+}
+#ifdef DNGL_AXI_ERROR_LOGGING
+static void wl_cfgvendor_nla_put_axi_error_data(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ int ret = 0;
+ char axierrordump_path[MEMDUMP_PATH_LEN];
+ int dumpsize = dhd_os_get_axi_error_dump_size(ndev);
+ if (dumpsize <= 0) {
+		WL_ERR(("Failed to calculate axi error dump len\n"));
+ return;
+ }
+ dhd_os_get_axi_error_filename(ndev, axierrordump_path, MEMDUMP_PATH_LEN);
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_AXI_ERROR_DUMP, axierrordump_path);
+ if (ret) {
+ WL_ERR(("Failed to put filename\n"));
+ return;
+ }
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_AXI_ERROR, dumpsize);
+ if (ret) {
+ WL_ERR(("Failed to put filesize\n"));
+ return;
+ }
+}
+#endif /* DNGL_AXI_ERROR_LOGGING */
+
+static int wl_cfgvendor_nla_put_memdump_data(struct sk_buff *skb,
+ struct net_device *ndev, const uint32 fw_len)
+{
+ char memdump_path[MEMDUMP_PATH_LEN];
+ int ret = BCME_OK;
+
+ dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN, "mem_dump");
+ ret = nla_put_string(skb, DUMP_FILENAME_ATTR_MEM_DUMP, memdump_path);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put mem dump path, ret=%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, DUMP_LEN_ATTR_MEMDUMP, fw_len);
+ if (unlikely(ret)) {
+ WL_ERR(("Failed to nla put mem dump length, ret=%d\n", ret));
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static void wl_cfgvendor_dbg_send_file_dump_evt(void *ctx, const void *data,
const uint32 len, const uint32 fw_len)
{
struct net_device *ndev = ctx;
struct wiphy *wiphy;
gfp_t kflags;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
+ struct bcm_cfg80211 *cfg;
+ dhd_pub_t *dhd_pub;
+ int ret = BCME_OK;
+
if (!ndev) {
WL_ERR(("ndev is NULL\n"));
return;
}
+
kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
wiphy = ndev->ieee80211_ptr->wiphy;
/* Alloc the SKB for vendor_event */
#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
- skb = cfg80211_vendor_event_alloc(wiphy, NULL, len + 100,
- GOOGLE_FW_DUMP_EVENT, kflags);
+ skb = cfg80211_vendor_event_alloc(wiphy, NULL, len + CFG80211_VENDOR_EVT_SKB_SZ,
+ GOOGLE_FILE_DUMP_EVENT, kflags);
#else
- skb = cfg80211_vendor_event_alloc(wiphy, len + 100,
- GOOGLE_FW_DUMP_EVENT, kflags);
+ skb = cfg80211_vendor_event_alloc(wiphy, len + CFG80211_VENDOR_EVT_SKB_SZ,
+ GOOGLE_FILE_DUMP_EVENT, kflags);
#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
/* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
if (!skb) {
WL_ERR(("skb alloc failed"));
return;
}
- nla_put_u32(skb, DEBUG_ATTRIBUTE_FW_DUMP_LEN, fw_len);
- nla_put(skb, DEBUG_ATTRIBUTE_RING_DATA, len, data);
+
+ cfg = wiphy_priv(wiphy);
+ dhd_pub = cfg->pub;
+#ifdef DNGL_AXI_ERROR_LOGGING
+ if (dhd_pub->smmu_fault_occurred) {
+ wl_cfgvendor_nla_put_axi_error_data(skb, ndev);
+ }
+#endif /* DNGL_AXI_ERROR_LOGGING */
+#ifdef DHD_FW_COREDUMP
+ if (dhd_pub->memdump_enabled || (dhd_pub->memdump_type == DUMP_TYPE_BY_SYSDUMP))
+#else
+ if ((dhd_pub->memdump_type == DUMP_TYPE_BY_SYSDUMP))
+#endif
+ {
+ if (((ret = wl_cfgvendor_nla_put_memdump_data(skb, ndev, fw_len)) < 0) ||
+ ((ret = wl_cfgvendor_nla_put_debug_dump_data(skb, ndev)) < 0) ||
+ ((ret = wl_cfgvendor_nla_put_sssr_dump_data(skb, ndev)) < 0)) {
+ WL_ERR(("nla put failed\n"));
+ goto done;
+ }
+ }
+ /* TODO : Similar to above function add for debug_dump, sssr_dump, and pktlog also. */
cfg80211_vendor_event(skb, kflags);
+ return;
+done:
+ if (skb) {
+ dev_kfree_skb_any(skb);
+ }
}
-#endif /* DEBUGABILITY */
+#endif /* DHD_LOG_DUMP */
static int wl_cfgvendor_dbg_get_version(struct wiphy *wiphy,
struct wireless_dev *wdev, const void *data, int len)
goto exit;
}
- nla_put_u32(skb, DEBUG_ATTRIBUTE_PKT_FATE_NUM, resp_count);
+ ret = nla_put_u32(skb, DEBUG_ATTRIBUTE_PKT_FATE_NUM, resp_count);
+ if (ret < 0) {
+ WL_ERR(("Failed to put DEBUG_ATTRIBUTE_PKT_FATE_NUM, ret:%d\n", ret));
+ goto exit;
+ }
ret = cfg80211_vendor_cmd_reply(skb);
if (unlikely(ret)) {
WL_ERR(("vendor Command reply failed ret:%d \n", ret));
}
+ return ret;
exit:
+ /* Free skb memory */
+ if (skb) {
+ kfree_skb(skb);
+ }
return ret;
}
struct wireless_dev *wdev, const void *data, int len)
{
struct net_device *ndev = wdev_to_ndev(wdev);
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
int ret, ver, max_len, mem_needed;
/* APF version */
return -ENOMEM;
}
- nla_put_u32(skb, APF_ATTRIBUTE_VERSION, ver);
- nla_put_u32(skb, APF_ATTRIBUTE_MAX_LEN, max_len);
+ ret = nla_put_u32(skb, APF_ATTRIBUTE_VERSION, ver);
+ if (ret < 0) {
+ WL_ERR(("Failed to put APF_ATTRIBUTE_VERSION, ret:%d\n", ret));
+ goto exit;
+ }
+ ret = nla_put_u32(skb, APF_ATTRIBUTE_MAX_LEN, max_len);
+ if (ret < 0) {
+ WL_ERR(("Failed to put APF_ATTRIBUTE_MAX_LEN, ret:%d\n", ret));
+ goto exit;
+ }
ret = cfg80211_vendor_cmd_reply(skb);
if (unlikely(ret)) {
WL_ERR(("vendor command reply failed, ret=%d\n", ret));
}
-
+ return ret;
+exit:
+ /* Free skb memory */
+ kfree_skb(skb);
return ret;
}
}
#endif /* NDO_CONFIG_SUPPORT */
+/* for kernel >= 4.13 NL80211 wl_cfg80211_set_pmk have to be used. */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
+static int wl_cfgvendor_set_pmk(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = 0;
+ wsec_pmk_t pmk;
+ const struct nlattr *iter;
+ int rem, type;
+ struct net_device *ndev = wdev_to_ndev(wdev);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct wl_security *sec;
+
+ nla_for_each_attr(iter, data, len, rem) {
+ type = nla_type(iter);
+ switch (type) {
+ case BRCM_ATTR_DRIVER_KEY_PMK:
+ if (nla_len(iter) > sizeof(pmk.key)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ pmk.flags = 0;
+ pmk.key_len = htod16(nla_len(iter));
+ bcopy((uint8 *)nla_data(iter), pmk.key, len);
+ break;
+ default:
+ WL_ERR(("Unknown type: %d\n", type));
+ ret = BCME_BADARG;
+ goto exit;
+ }
+ }
+
+ sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+ if ((sec->wpa_auth == WLAN_AKM_SUITE_8021X) ||
+ (sec->wpa_auth == WL_AKM_SUITE_SHA256_1X)) {
+ ret = wldev_iovar_setbuf(ndev, "okc_info_pmk", pmk.key, pmk.key_len, cfg->ioctl_buf,
+ WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (ret) {
+ /* could fail in case that 'okc' is not supported */
+ WL_INFORM_MEM(("okc_info_pmk failed, err=%d (ignore)\n", ret));
+ }
+ }
+
+ ret = wldev_ioctl_set(ndev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
+ WL_INFORM_MEM(("IOVAR set_pmk ret:%d", ret));
+exit:
+ return ret;
+}
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) */
+
+static int wl_cfgvendor_get_driver_feature(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = BCME_OK;
+ u8 supported[(BRCM_WLAN_VENDOR_FEATURES_MAX / 8) + 1] = {0};
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhd_pub = cfg->pub;
+ struct sk_buff *skb;
+ int32 mem_needed;
+
+ mem_needed = VENDOR_REPLY_OVERHEAD + NLA_HDRLEN + sizeof(supported);
+
+ BCM_REFERENCE(dhd_pub);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
+ if (FW_SUPPORTED(dhd_pub, idsup)) {
+ ret = wl_features_set(supported, sizeof(supported),
+ BRCM_WLAN_VENDOR_FEATURE_KEY_MGMT_OFFLOAD);
+ }
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) */
+
+ /* Alloc the SKB for vendor_event */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
+ if (unlikely(!skb)) {
+ WL_ERR(("skb alloc failed"));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+
+ ret = nla_put(skb, BRCM_ATTR_DRIVER_FEATURE_FLAGS, sizeof(supported), supported);
+ if (ret) {
+ kfree_skb(skb);
+ goto exit;
+ }
+ ret = cfg80211_vendor_cmd_reply(skb);
+exit:
+ return ret;
+}
+
static const struct wiphy_vendor_command wl_vendor_cmds [] = {
{
{
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
.doit = wl_cfgvendor_priv_string_handler
},
+#ifdef BCM_PRIV_CMD_SUPPORT
{
{
.vendor_id = OUI_BRCM,
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
.doit = wl_cfgvendor_priv_bcm_handler
},
+#endif /* BCM_PRIV_CMD_SUPPORT */
#ifdef WL_SAE
{
{
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
.doit = wl_cfgvendor_dbg_get_version
},
+#ifdef DHD_LOG_DUMP
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_GET_FILE_DUMP_BUF
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_dbg_file_dump
+ },
+#endif /* DHD_LOG_DUMP */
+
#ifdef DEBUGABILITY
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_START_LOGGING
+ .subcmd = DEBUG_TRIGGER_MEM_DUMP
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
- .doit = wl_cfgvendor_dbg_start_logging
+ .doit = wl_cfgvendor_dbg_trigger_mem_dump
},
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_RESET_LOGGING
+ .subcmd = DEBUG_GET_MEM_DUMP
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
- .doit = wl_cfgvendor_dbg_reset_logging
+ .doit = wl_cfgvendor_dbg_get_mem_dump
},
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_TRIGGER_MEM_DUMP
+ .subcmd = DEBUG_START_LOGGING
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
- .doit = wl_cfgvendor_dbg_trigger_mem_dump
+ .doit = wl_cfgvendor_dbg_start_logging
},
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_GET_MEM_DUMP
+ .subcmd = DEBUG_RESET_LOGGING
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
- .doit = wl_cfgvendor_dbg_get_mem_dump
+ .doit = wl_cfgvendor_dbg_reset_logging
},
{
{
.doit = wl_cfgvendor_set_rssi_monitor
},
#endif /* RSSI_MONITOR_SUPPORT */
+#ifdef DHD_WAKE_STATUS
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_GET_WAKE_REASON_STATS
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_get_wake_reason_stats
+ },
+#endif /* DHD_WAKE_STATUS */
#ifdef DHDTCPACK_SUPPRESS
{
{
.doit = wl_cfgvendor_set_tcpack_sup_mode
},
#endif /* DHDTCPACK_SUPPRESS */
-#ifdef DHD_WAKE_STATUS
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
+ {
+ {
+ .vendor_id = OUI_BRCM,
+ .subcmd = BRCM_VENDOR_SCMD_SET_PMK
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_pmk
+ },
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) */
+ {
+ {
+ .vendor_id = OUI_BRCM,
+ .subcmd = BRCM_VENDOR_SCMD_GET_FEATURES
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_get_driver_feature
+ },
+#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_GET_WAKE_REASON_STATS
+ .subcmd = DEBUG_FILE_DUMP_DONE_IND
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
- .doit = wl_cfgvendor_get_wake_reason_stats
+ .doit = wl_cfgvendor_notify_dump_completion
+ },
+#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
+#if defined(WL_CFG80211)
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_SET_HAL_START
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_hal_started
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = DEBUG_SET_HAL_STOP
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_stop_hal
}
-#endif /* DHD_WAKE_STATUS */
+#endif /* WL_CFG80211 */
};
static const struct nl80211_vendor_cmd_info wl_vendor_events [] = {
{ OUI_GOOGLE, GOOGLE_ROAM_EVENT_START},
{ OUI_BRCM, BRCM_VENDOR_EVENT_HANGED},
{ OUI_BRCM, BRCM_VENDOR_EVENT_SAE_KEY},
- { OUI_BRCM, BRCM_VENDOR_EVENT_BEACON_RECV}
+ { OUI_BRCM, BRCM_VENDOR_EVENT_BEACON_RECV},
+ { OUI_BRCM, BRCM_VENDOR_EVENT_PORT_AUTHORIZED},
+ { OUI_GOOGLE, GOOGLE_FILE_DUMP_EVENT },
+ { OUI_BRCM, BRCM_VENDOR_EVENT_CU},
+ { OUI_BRCM, BRCM_VENDOR_EVENT_WIPS},
+ { OUI_GOOGLE, NAN_ASYNC_RESPONSE_DISABLED}
};
int wl_cfgvendor_attach(struct wiphy *wiphy, dhd_pub_t *dhd)
#ifdef DEBUGABILITY
dhd_os_dbg_register_callback(FW_VERBOSE_RING_ID, wl_cfgvendor_dbg_ring_send_evt);
dhd_os_dbg_register_callback(DHD_EVENT_RING_ID, wl_cfgvendor_dbg_ring_send_evt);
- dhd_os_dbg_register_urgent_notifier(dhd, wl_cfgvendor_dbg_send_urgent_evt);
#endif /* DEBUGABILITY */
+#ifdef DHD_LOG_DUMP
+ dhd_os_dbg_register_urgent_notifier(dhd, wl_cfgvendor_dbg_send_file_dump_evt);
+#endif /* DHD_LOG_DUMP */
return 0;
}
/*
* Linux cfg80211 Vendor Extension Code
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfgvendor.h 764233 2018-05-24 08:47:34Z $
+ * $Id: wl_cfgvendor.h 825255 2019-06-13 12:26:42Z $
*/
#ifndef _wl_cfgvendor_h_
#define OUI_BRCM 0x001018
#define OUI_GOOGLE 0x001A11
+#define BRCM_VENDOR_SUBCMD_PRIV_STR 1
#define ATTRIBUTE_U32_LEN (NLA_HDRLEN + 4)
#define VENDOR_ID_OVERHEAD ATTRIBUTE_U32_LEN
#define VENDOR_SUBCMD_OVERHEAD ATTRIBUTE_U32_LEN
#define VENDOR_DATA_OVERHEAD (NLA_HDRLEN)
enum brcm_vendor_attr {
- BRCM_ATTR_DRIVER_CMD,
- BRCM_ATTR_DRIVER_MAX
+ BRCM_ATTR_DRIVER_CMD = 0,
+ BRCM_ATTR_DRIVER_KEY_PMK = 1,
+ BRCM_ATTR_DRIVER_FEATURE_FLAGS = 2,
+ BRCM_ATTR_DRIVER_MAX = 3
+};
+
+enum brcm_wlan_vendor_features {
+ BRCM_WLAN_VENDOR_FEATURE_KEY_MGMT_OFFLOAD = 0,
+ BRCM_WLAN_VENDOR_FEATURES_MAX = 1
};
#define SCAN_RESULTS_COMPLETE_FLAG_LEN ATTRIBUTE_U32_LEN
#define NAN_SID_BEACON_COUNT_INVALID 0xff
#define WL_NAN_DW_INTERVAL 512
+#define CFG80211_VENDOR_CMD_REPLY_SKB_SZ 100
+#define CFG80211_VENDOR_EVT_SKB_SZ 2048
+
typedef enum {
/* don't use 0 as a valid subcommand */
VENDOR_NL80211_SUBCMD_UNSPECIFIED,
WIFI_SUBCMD_CONFIG_TCPACK_SUP,
WIFI_SUBCMD_FW_ROAM_POLICY,
WIFI_SUBCMD_ROAM_CAPABILITY,
-
RTT_SUBCMD_SET_CONFIG = ANDROID_NL80211_SUBCMD_RTT_RANGE_START,
RTT_SUBCMD_CANCEL_CONFIG,
RTT_SUBCMD_GETCAPABILITY,
DEBUG_GET_TX_PKT_FATES,
DEBUG_GET_RX_PKT_FATES,
DEBUG_GET_WAKE_REASON_STATS,
+ DEBUG_GET_FILE_DUMP_BUF,
+ DEBUG_FILE_DUMP_DONE_IND,
+ DEBUG_SET_HAL_START,
+ DEBUG_SET_HAL_STOP,
WIFI_OFFLOAD_SUBCMD_START_MKEEP_ALIVE = ANDROID_NL80211_SUBCMD_WIFI_OFFLOAD_RANGE_START,
WIFI_OFFLOAD_SUBCMD_STOP_MKEEP_ALIVE,
RTT_ATTRIBUTE_RESULTS_COMPLETE = 30,
RTT_ATTRIBUTE_RESULTS_PER_TARGET,
RTT_ATTRIBUTE_RESULT_CNT,
- RTT_ATTRIBUTE_RESULT
+ RTT_ATTRIBUTE_RESULT,
+ RTT_ATTRIBUTE_RESULT_DETAIL
};
enum wifi_rssi_monitor_attr {
};
enum wifi_sae_key_attr {
- BRCM_SAE_KEY_ATTR_BSSID,
+ BRCM_SAE_KEY_ATTR_PEER_MAC,
BRCM_SAE_KEY_ATTR_PMK,
BRCM_SAE_KEY_ATTR_PMKID
};
DEBUG_ATTRIBUTE_LOG_LEVEL,
DEBUG_ATTRIBUTE_LOG_TIME_INTVAL,
DEBUG_ATTRIBUTE_LOG_MIN_DATA_SIZE,
+// DEBUG_ATTRIBUTE_DUMP_FILENAME,
DEBUG_ATTRIBUTE_FW_DUMP_LEN,
DEBUG_ATTRIBUTE_FW_DUMP_DATA,
DEBUG_ATTRIBUTE_RING_DATA,
DEBUG_ATTRIBUTE_PKT_FATE_DATA
};
+typedef enum {
+ DUMP_LEN_ATTR_INVALID,
+ DUMP_LEN_ATTR_MEMDUMP,
+ DUMP_LEN_ATTR_SSSR_C0_D11_BEFORE,
+ DUMP_LEN_ATTR_SSSR_C0_D11_AFTER,
+ DUMP_LEN_ATTR_SSSR_C1_D11_BEFORE,
+ DUMP_LEN_ATTR_SSSR_C1_D11_AFTER,
+ DUMP_LEN_ATTR_SSSR_DIG_BEFORE,
+ DUMP_LEN_ATTR_SSSR_DIG_AFTER,
+ DUMP_LEN_ATTR_TIMESTAMP,
+ DUMP_LEN_ATTR_GENERAL_LOG,
+ DUMP_LEN_ATTR_ECNTRS,
+ DUMP_LEN_ATTR_SPECIAL_LOG,
+ DUMP_LEN_ATTR_DHD_DUMP,
+ DUMP_LEN_ATTR_EXT_TRAP,
+ DUMP_LEN_ATTR_HEALTH_CHK,
+ DUMP_LEN_ATTR_PRESERVE_LOG,
+ DUMP_LEN_ATTR_COOKIE,
+ DUMP_LEN_ATTR_FLOWRING_DUMP,
+ DUMP_LEN_ATTR_PKTLOG,
+ DUMP_FILENAME_ATTR_DEBUG_DUMP,
+ DUMP_FILENAME_ATTR_MEM_DUMP,
+ DUMP_FILENAME_ATTR_SSSR_CORE_0_BEFORE_DUMP,
+ DUMP_FILENAME_ATTR_SSSR_CORE_0_AFTER_DUMP,
+ DUMP_FILENAME_ATTR_SSSR_CORE_1_BEFORE_DUMP,
+ DUMP_FILENAME_ATTR_SSSR_CORE_1_AFTER_DUMP,
+ DUMP_FILENAME_ATTR_SSSR_DIG_BEFORE_DUMP,
+ DUMP_FILENAME_ATTR_SSSR_DIG_AFTER_DUMP,
+ DUMP_FILENAME_ATTR_PKTLOG_DUMP,
+ DUMP_LEN_ATTR_STATUS_LOG,
+ DUMP_LEN_ATTR_AXI_ERROR,
+ DUMP_FILENAME_ATTR_AXI_ERROR_DUMP,
+ DUMP_LEN_ATTR_RTT_LOG
+} EWP_DUMP_EVENT_ATTRIBUTE;
+
+/* Attributes associated with DEBUG_GET_DUMP_BUF */
+typedef enum {
+ DUMP_BUF_ATTR_INVALID,
+ DUMP_BUF_ATTR_MEMDUMP,
+ DUMP_BUF_ATTR_SSSR_C0_D11_BEFORE,
+ DUMP_BUF_ATTR_SSSR_C0_D11_AFTER,
+ DUMP_BUF_ATTR_SSSR_C1_D11_BEFORE,
+ DUMP_BUF_ATTR_SSSR_C1_D11_AFTER,
+ DUMP_BUF_ATTR_SSSR_DIG_BEFORE,
+ DUMP_BUF_ATTR_SSSR_DIG_AFTER,
+ DUMP_BUF_ATTR_TIMESTAMP,
+ DUMP_BUF_ATTR_GENERAL_LOG,
+ DUMP_BUF_ATTR_ECNTRS,
+ DUMP_BUF_ATTR_SPECIAL_LOG,
+ DUMP_BUF_ATTR_DHD_DUMP,
+ DUMP_BUF_ATTR_EXT_TRAP,
+ DUMP_BUF_ATTR_HEALTH_CHK,
+ DUMP_BUF_ATTR_PRESERVE_LOG,
+ DUMP_BUF_ATTR_COOKIE,
+ DUMP_BUF_ATTR_FLOWRING_DUMP,
+ DUMP_BUF_ATTR_PKTLOG,
+ DUMP_BUF_ATTR_STATUS_LOG,
+ DUMP_BUF_ATTR_AXI_ERROR,
+ DUMP_BUF_ATTR_RTT_LOG
+} EWP_DUMP_CMD_ATTRIBUTE;
+
enum mkeep_alive_attributes {
MKEEP_ALIVE_ATTRIBUTE_ID,
MKEEP_ALIVE_ATTRIBUTE_IP_PKT,
};
typedef enum wl_vendor_event {
+ BRCM_VENDOR_EVENT_UNSPEC = 0,
+ BRCM_VENDOR_EVENT_PRIV_STR = 1,
GOOGLE_GSCAN_SIGNIFICANT_EVENT = 2,
GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT = 3,
GOOGLE_GSCAN_BATCH_SCAN_EVENT = 4,
GOOGLE_NAN_EVENT_TCA = 29,
GOOGLE_NAN_EVENT_SUBSCRIBE_UNMATCH = 30,
GOOGLE_NAN_EVENT_UNKNOWN = 31,
-
- GOOGLE_ROAM_EVENT_START = 32
-
+ GOOGLE_ROAM_EVENT_START = 32,
+ BRCM_VENDOR_EVENT_HANGED = 33,
+ BRCM_VENDOR_EVENT_SAE_KEY = 34,
+ BRCM_VENDOR_EVENT_BEACON_RECV = 35,
+ BRCM_VENDOR_EVENT_PORT_AUTHORIZED = 36,
+ GOOGLE_FILE_DUMP_EVENT = 37,
+ BRCM_VENDOR_EVENT_CU = 38,
+ BRCM_VENDOR_EVENT_WIPS = 39,
+ NAN_ASYNC_RESPONSE_DISABLED = 40
} wl_vendor_event_t;
enum andr_wifi_attr {
} WLAN_DRIVER_WAKE_REASON_CNT;
#endif /* DHD_WAKE_STATUS */
+#define BRCM_VENDOR_WIPS_EVENT_BUF_LEN 128
+typedef enum wl_vendor_wips_attr_type {
+ WIPS_ATTR_DEAUTH_CNT = 1,
+ WIPS_ATTR_DEAUTH_BSSID,
+ WIPS_ATTR_CURRENT_RSSI,
+ WIPS_ATTR_DEAUTH_RSSI
+} wl_vendor_wips_attr_type_t;
+
/* Chipset roaming capabilities */
typedef struct wifi_roaming_capabilities {
u32 max_blacklist_size;
/* Capture the BRCM_VENDOR_SUBCMD_PRIV_STRINGS* here */
#define BRCM_VENDOR_SCMD_CAPA "cap"
+#define MEMDUMP_PATH_LEN 128
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
extern int wl_cfgvendor_attach(struct wiphy *wiphy, dhd_pub_t *dhd);
struct net_device *dev, int event_id, const void *data, int len)
{ return 0; }
static INLINE int wl_cfgvendor_send_hotlist_event(struct wiphy *wiphy,
- struct net_device *dev, void *data, int len, wl_vendor_event_t event)
+ struct net_device *dev, void *data, int len, wl_vendor_event_t event)
{ return 0; }
#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
#define SUPP_LOG_LEN 256
#define PRINT_SUPP_LOG(fmt, ...) \
wl_cfgvendor_send_supp_eventstring(__func__, fmt, ##__VA_ARGS__);
-#define SUPP_LOG(args) PRINT_SUPP_LOG args;
+#define SUPP_LOG(args) PRINT_SUPP_LOG args;
#define SUPP_EVT_LOG(evt_name, fmt, ...) \
- wl_cfgvendor_notify_supp_event_str(evt_name, fmt, ##__VA_ARGS__);
+ wl_cfgvendor_notify_supp_event_str(evt_name, fmt, ##__VA_ARGS__);
#define SUPP_EVENT(args) SUPP_EVT_LOG args
#else
#define SUPP_LOG(x)
#define SUPP_EVENT(x)
-#endif /* WL_SUPP_EVENT && ((kernel > (3, 13, 0)) || WL_VENDOR_EXT_SUPPORT) */
+#endif /* WL_SUPP_EVENT && (kernel > (3, 13, 0)) || WL_VENDOR_EXT_SUPPORT */
#ifdef CONFIG_COMPAT
#define COMPAT_ASSIGN_VALUE(normal_structure, member, value) \
normal_structure.member = value;
#endif /* CONFIG_COMPAT */
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+#define CFG80211_VENDOR_EVENT_ALLOC(wiphy, wdev, len, type, kflags) \
+ cfg80211_vendor_event_alloc(wiphy, wdev, len, type, kflags);
+#else
+#define CFG80211_VENDOR_EVENT_ALLOC(wiphy, wdev, len, type, kflags) \
+ cfg80211_vendor_event_alloc(wiphy, len, type, kflags);
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+ /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+int wl_cfgvendor_nan_send_async_disable_resp(struct wireless_dev *wdev);
+
#endif /* _wl_cfgvendor_h_ */
* Minimal debug/trace/assert driver definitions for
* Broadcom 802.11 Networking Adapter.
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
#if defined(WL_ESCAN)
-
-#include <typedefs.h>
-#include <linuxver.h>
-#include <osl.h>
-#include <dngl_stats.h>
-#include <dhd.h>
-
-#include <bcmutils.h>
#include <bcmendian.h>
-#include <ethernet.h>
-
#include <linux/if_arp.h>
#include <asm/uaccess.h>
-
-#include <wlioctl.h>
#include <wl_android.h>
-#include <wl_iw.h>
#include <wl_escan.h>
#include <dhd_config.h>
-/* message levels */
-#define ESCAN_ERROR_LEVEL 0x0001
-#define ESCAN_SCAN_LEVEL 0x0002
-#define ESCAN_TRACE_LEVEL 0x0004
-
-#define ESCAN_ERROR(x) \
+#define ESCAN_ERROR(name, arg1, args...) \
do { \
- if (iw_msg_level & ESCAN_ERROR_LEVEL) { \
- printf(KERN_ERR "ESCAN-ERROR) %s : ", __func__); \
- printf x; \
+ if (android_msg_level & ANDROID_ERROR_LEVEL) { \
+ printk(KERN_ERR "[dhd-%s] ESCAN-ERROR) %s : " arg1, name, __func__, ## args); \
} \
} while (0)
-#define ESCAN_SCAN(x) \
+#define ESCAN_TRACE(name, arg1, args...) \
do { \
- if (iw_msg_level & ESCAN_SCAN_LEVEL) { \
- printf(KERN_ERR "ESCAN-SCAN) %s : ", __func__); \
- printf x; \
+ if (android_msg_level & ANDROID_TRACE_LEVEL) { \
+ printk(KERN_ERR "[dhd-%s] ESCAN-TRACE) %s : " arg1, name, __func__, ## args); \
} \
} while (0)
-#define ESCAN_TRACE(x) \
+#define ESCAN_SCAN(name, arg1, args...) \
do { \
- if (iw_msg_level & ESCAN_TRACE_LEVEL) { \
- printf(KERN_ERR "ESCAN-TRACE) %s : ", __func__); \
- printf x; \
+ if (android_msg_level & ANDROID_SCAN_LEVEL) { \
+ printk(KERN_ERR "[dhd-%s] ESCAN-SCAN) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+#define ESCAN_DBG(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_DBG_LEVEL) { \
+ printk(KERN_ERR "[dhd-%s] ESCAN-DBG) %s : " arg1, name, __func__, ## args); \
} \
} while (0)
}
if (wf_chspec_malformed(chspec)) {
- ESCAN_ERROR(("wl_chspec_from_legacy: output chanspec (0x%04X) malformed\n",
- chspec));
+ ESCAN_ERROR("wlan", "wl_chspec_from_legacy: output chanspec (0x%04X) malformed\n",
+ chspec);
return INVCHANSPEC;
}
chanspec_t lchspec;
if (wf_chspec_malformed(chspec)) {
- ESCAN_ERROR(("wl_chspec_to_legacy: input chanspec (0x%04X) malformed\n",
- chspec));
+ ESCAN_ERROR("wlan", "wl_chspec_to_legacy: input chanspec (0x%04X) malformed\n",
+ chspec);
return INVCHANSPEC;
}
} else {
/* cannot express the bandwidth */
char chanbuf[CHANSPEC_STR_LEN];
- ESCAN_ERROR((
- "wl_chspec_to_legacy: unable to convert chanspec %s (0x%04X) "
- "to pre-11ac format\n",
- wf_chspec_ntoa(chspec, chanbuf), chspec));
+ ESCAN_ERROR("wlan", "wl_chspec_to_legacy: unable to convert chanspec %s "
+ "(0x%04X) to pre-11ac format\n",
+ wf_chspec_ntoa(chspec, chanbuf), chspec);
return INVCHANSPEC;
}
return wl_chspec_host_to_driver(ioctl_ver, chanspec);
}
-static inline struct wl_bss_info *next_bss(struct wl_scan_results *list, struct wl_bss_info *bss)
+static inline struct wl_bss_info *next_bss(struct wl_scan_results *list,
+ struct wl_bss_info *bss)
{
return bss = bss ?
(struct wl_bss_info *)((uintptr) bss + dtoh32(bss->length)) : list->bss_info;
}
-#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
-
-static int
-rssi_to_qual(int rssi)
-{
- if (rssi <= WL_IW_RSSI_NO_SIGNAL)
- return 0;
- else if (rssi <= WL_IW_RSSI_VERY_LOW)
- return 1;
- else if (rssi <= WL_IW_RSSI_LOW)
- return 2;
- else if (rssi <= WL_IW_RSSI_GOOD)
- return 3;
- else if (rssi <= WL_IW_RSSI_VERY_GOOD)
- return 4;
- else
- return 5;
-}
-
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
-_Pragma("GCC diagnostic push") \
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
-(entry) = list_first_entry((ptr), type, member); \
-_Pragma("GCC diagnostic pop") \
-
-#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
-_Pragma("GCC diagnostic push") \
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
-entry = container_of((ptr), type, member); \
-_Pragma("GCC diagnostic pop") \
-
-#else
-#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
-(entry) = list_first_entry((ptr), type, member); \
-
-#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
-entry = container_of((ptr), type, member); \
-
-#endif /* STRICT_GCC_WARNINGS */
-
-static unsigned long wl_lock_eq(struct wl_escan_info *escan)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&escan->eq_lock, flags);
- return flags;
-}
-
-static void wl_unlock_eq(struct wl_escan_info *escan, unsigned long flags)
-{
- spin_unlock_irqrestore(&escan->eq_lock, flags);
-}
-
-static void wl_init_eq(struct wl_escan_info *escan)
-{
- spin_lock_init(&escan->eq_lock);
- INIT_LIST_HEAD(&escan->eq_list);
-}
-
-static void wl_flush_eq(struct wl_escan_info *escan)
-{
- struct escan_event_q *e;
- unsigned long flags;
-
- flags = wl_lock_eq(escan);
- while (!list_empty_careful(&escan->eq_list)) {
- BCM_SET_LIST_FIRST_ENTRY(e, &escan->eq_list, struct escan_event_q, eq_list);
- list_del(&e->eq_list);
- kfree(e);
- }
- wl_unlock_eq(escan, flags);
-}
-
-static struct escan_event_q *wl_deq_event(struct wl_escan_info *escan)
-{
- struct escan_event_q *e = NULL;
- unsigned long flags;
-
- flags = wl_lock_eq(escan);
- if (likely(!list_empty(&escan->eq_list))) {
- BCM_SET_LIST_FIRST_ENTRY(e, &escan->eq_list, struct escan_event_q, eq_list);
- list_del(&e->eq_list);
- }
- wl_unlock_eq(escan, flags);
-
- return e;
-}
-
-/*
- * push event to tail of the queue
- */
-
static s32
-wl_enq_event(struct wl_escan_info *escan, struct net_device *ndev, u32 event,
- const wl_event_msg_t *msg, void *data)
-{
- struct escan_event_q *e;
- s32 err = 0;
- uint32 evtq_size;
- uint32 data_len;
- unsigned long flags;
- gfp_t aflags;
-
- data_len = 0;
- if (data)
- data_len = ntoh32(msg->datalen);
- evtq_size = sizeof(struct escan_event_q) + data_len;
- aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
- e = kzalloc(evtq_size, aflags);
- if (unlikely(!e)) {
- ESCAN_ERROR(("event alloc failed\n"));
- return -ENOMEM;
- }
- e->etype = event;
- memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
- if (data)
- memcpy(e->edata, data, data_len);
- flags = wl_lock_eq(escan);
- list_add_tail(&e->eq_list, &escan->eq_list);
- wl_unlock_eq(escan, flags);
-
- return err;
-}
-
-static void wl_put_event(struct escan_event_q *e)
-{
- kfree(e);
-}
-
-static void wl_wakeup_event(struct wl_escan_info *escan)
-{
- dhd_pub_t *dhd = (dhd_pub_t *)(escan->pub);
-
- if (dhd->up && (escan->event_tsk.thr_pid >= 0)) {
- up(&escan->event_tsk.sema);
- }
-}
-
-static s32 wl_escan_event_handler(void *data)
-{
- struct wl_escan_info *escan = NULL;
- struct escan_event_q *e;
- tsk_ctl_t *tsk = (tsk_ctl_t *)data;
-
- escan = (struct wl_escan_info *)tsk->parent;
-
- printf("tsk Enter, tsk = 0x%p\n", tsk);
-
- while (down_interruptible (&tsk->sema) == 0) {
- SMP_RD_BARRIER_DEPENDS();
- if (tsk->terminated) {
- break;
- }
- while (escan && (e = wl_deq_event(escan))) {
- ESCAN_TRACE(("dev=%p, event type (%d), ifidx: %d bssidx: %d \n",
- escan->dev, e->etype, e->emsg.ifidx, e->emsg.bsscfgidx));
-
- if (e->emsg.ifidx > WL_MAX_IFS) {
- ESCAN_ERROR(("Event ifidx not in range. val:%d \n", e->emsg.ifidx));
- goto fail;
- }
-
- if (escan->dev && escan->evt_handler[e->etype]) {
- dhd_pub_t *dhd = (struct dhd_pub *)(escan->pub);
- if (dhd->busstate == DHD_BUS_DOWN) {
- ESCAN_ERROR((": BUS is DOWN.\n"));
- } else {
- escan->evt_handler[e->etype](escan, &e->emsg, e->edata);
- }
- } else {
- ESCAN_TRACE(("Unknown Event (%d): ignoring\n", e->etype));
- }
-fail:
- wl_put_event(e);
- DHD_EVENT_WAKE_UNLOCK(escan->pub);
- }
- }
- printf("%s: was terminated\n", __FUNCTION__);
- complete_and_exit(&tsk->completed, 0);
- return 0;
-}
-
-void
-wl_escan_event(struct net_device *dev, const wl_event_msg_t * e, void *data)
-{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- struct wl_escan_info *escan = dhd->escan;
- u32 event_type = ntoh32(e->event_type);
-
- if (!escan || !escan->dev) {
- return;
- }
-
- if (escan->event_tsk.thr_pid == -1) {
- ESCAN_ERROR(("Event handler is not created\n"));
- return;
- }
-
- if (escan == NULL) {
- ESCAN_ERROR(("Stale event ignored\n"));
- return;
- }
-
- if (event_type == WLC_E_PFN_NET_FOUND) {
- ESCAN_TRACE(("PNOEVENT: PNO_NET_FOUND\n"));
- }
- else if (event_type == WLC_E_PFN_NET_LOST) {
- ESCAN_TRACE(("PNOEVENT: PNO_NET_LOST\n"));
- }
-
- DHD_EVENT_WAKE_LOCK(escan->pub);
- if (likely(!wl_enq_event(escan, dev, event_type, e, data))) {
- wl_wakeup_event(escan);
- } else {
- DHD_EVENT_WAKE_UNLOCK(escan->pub);
- }
-}
-
-static s32 wl_escan_inform_bss(struct wl_escan_info *escan)
+wl_escan_inform_bss(struct net_device *dev, struct wl_escan_info *escan)
{
struct wl_scan_results *bss_list;
s32 err = 0;
bss_list = escan->bss_list;
+ ESCAN_SCAN(dev->name, "scanned AP count (%d)\n", bss_list->count);
+
/* Delete disconnected cache */
#if defined(BSSCACHE)
- wl_delete_disconnected_bss_cache(&escan->g_bss_cache_ctrl, (u8*)&escan->disconnected_bssid);
+ wl_delete_disconnected_bss_cache(&escan->g_bss_cache_ctrl,
+ (u8*)&escan->disconnected_bssid);
#if defined(RSSIAVG)
- wl_delete_disconnected_rssi_cache(&escan->g_rssi_cache_ctrl, (u8*)&escan->disconnected_bssid);
+ wl_delete_disconnected_rssi_cache(&escan->g_rssi_cache_ctrl,
+ (u8*)&escan->disconnected_bssid);
#endif
#endif
#if defined(RSSIAVG)
wl_update_rssi_cache(&escan->g_rssi_cache_ctrl, bss_list);
if (!in_atomic())
- wl_update_connected_rssi_cache(escan->dev, &escan->g_rssi_cache_ctrl, &rssi);
+ wl_update_connected_rssi_cache(dev, &escan->g_rssi_cache_ctrl, &rssi);
#endif
#if defined(BSSCACHE)
wl_update_bss_cache(&escan->g_bss_cache_ctrl,
wl_delete_dirty_bss_cache(&escan->g_bss_cache_ctrl);
wl_reset_bss_cache(&escan->g_bss_cache_ctrl);
if (escan->autochannel)
- wl_ext_get_best_channel(escan->dev, &escan->g_bss_cache_ctrl,
+ wl_ext_get_best_channel(dev, &escan->g_bss_cache_ctrl,
			escan->ioctl_ver, &escan->best_2g_ch, &escan->best_5g_ch);
#else
if (escan->autochannel)
- wl_ext_get_best_channel(escan->dev, bss_list, escan->ioctl_ver,
+ wl_ext_get_best_channel(dev, bss_list, escan->ioctl_ver,
&escan->best_2g_ch, &escan->best_5g_ch);
#endif
- ESCAN_TRACE(("scanned AP count (%d)\n", bss_list->count));
-
return err;
}
static wl_scan_params_t *
-wl_escan_alloc_params(struct wl_escan_info *escan, int channel,
- int nprobes, int *out_params_size)
+wl_escan_alloc_params(struct net_device *dev, struct wl_escan_info *escan,
+ int channel, int nprobes, int *out_params_size)
{
wl_scan_params_t *params;
int params_size;
params_size = WL_SCAN_PARAMS_FIXED_SIZE + 1 * sizeof(uint16);
params = (wl_scan_params_t*) kzalloc(params_size, GFP_KERNEL);
if (params == NULL) {
- ESCAN_ERROR(("mem alloc failed (%d bytes)\n", params_size));
+ ESCAN_ERROR(dev->name, "mem alloc failed (%d bytes)\n", params_size);
return params;
}
memset(params, 0, params_size);
return params;
}
-static void wl_escan_abort(struct wl_escan_info *escan)
+static void
+wl_escan_abort(struct net_device *dev, struct wl_escan_info *escan)
{
wl_scan_params_t *params = NULL;
s32 params_size = 0;
s32 err = BCME_OK;
if (!in_atomic()) {
/* Our scan params only need space for 1 channel and 0 ssids */
- params = wl_escan_alloc_params(escan, -1, 0, ¶ms_size);
+ params = wl_escan_alloc_params(dev, escan, -1, 0, ¶ms_size);
if (params == NULL) {
- ESCAN_ERROR(("scan params allocation failed \n"));
+ ESCAN_ERROR(dev->name, "scan params allocation failed \n");
err = -ENOMEM;
} else {
/* Do a scan abort to stop the driver's scan engine */
- err = wldev_ioctl(escan->dev, WLC_SCAN, params, params_size, true);
+ err = wldev_ioctl(dev, WLC_SCAN, params, params_size, true);
if (err < 0) {
- ESCAN_ERROR(("scan abort failed \n"));
+ ESCAN_ERROR(dev->name, "scan abort failed \n");
}
kfree(params);
}
}
}
-static s32 wl_notify_escan_complete(struct wl_escan_info *escan, bool fw_abort)
+static s32
+wl_escan_notify_complete(struct net_device *dev,
+ struct wl_escan_info *escan, bool fw_abort)
{
s32 err = BCME_OK;
+#if defined(WL_WIRELESS_EXT)
int cmd = 0;
#if WIRELESS_EXT > 13
union iwreq_data wrqu;
char extra[IW_CUSTOM_MAX + 1];
-
- memset(extra, 0, sizeof(extra));
+#endif
#endif
- ESCAN_TRACE(("Enter\n"));
+ ESCAN_TRACE(dev->name, "Enter\n");
- if (!escan || !escan->dev) {
- ESCAN_ERROR(("escan or dev is null\n"));
- err = BCME_ERROR;
- goto out;
- }
if (fw_abort && !in_atomic())
- wl_escan_abort(escan);
+ wl_escan_abort(dev, escan);
if (timer_pending(&escan->scan_timeout))
del_timer_sync(&escan->scan_timeout);
#if defined(ESCAN_RESULT_PATCH)
escan->bss_list = wl_escan_get_buf(escan);
- wl_escan_inform_bss(escan);
+ wl_escan_inform_bss(dev, escan);
#endif /* ESCAN_RESULT_PATCH */
+#if defined(WL_WIRELESS_EXT)
#if WIRELESS_EXT > 13
#if WIRELESS_EXT > 14
cmd = SIOCGIWSCAN;
#endif
- ESCAN_TRACE(("event WLC_E_SCAN_COMPLETE\n"));
// terence 20150224: fix "wlan0: (WE) : Wireless Event too big (65306)"
memset(&wrqu, 0, sizeof(wrqu));
+ memset(extra, 0, sizeof(extra));
if (cmd) {
if (cmd == SIOCGIWSCAN) {
- wireless_send_event(escan->dev, cmd, &wrqu, NULL);
+ wireless_send_event(dev, cmd, &wrqu, NULL);
} else
- wireless_send_event(escan->dev, cmd, &wrqu, extra);
+ wireless_send_event(dev, cmd, &wrqu, extra);
}
+#endif
#endif
-out:
return err;
}
#ifdef ESCAN_BUF_OVERFLOW_MGMT
static void
-wl_cfg80211_find_removal_candidate(wl_bss_info_t *bss, removal_element_t *candidate)
+wl_escan_find_removal_candidate(struct wl_escan_info *escan,
+ wl_bss_info_t *bss, removal_element_t *candidate)
{
int idx;
for (idx = 0; idx < BUF_OVERFLOW_MGMT_COUNT; idx++) {
}
static void
-wl_cfg80211_remove_lowRSSI_info(wl_scan_results_t *list, removal_element_t *candidate,
- wl_bss_info_t *bi)
+wl_escan_remove_lowRSSI_info(struct net_device *dev, struct wl_escan_info *escan,
+ wl_scan_results_t *list, removal_element_t *candidate, wl_bss_info_t *bi)
{
int idx1, idx2;
int total_delete_len = 0;
candidate[idx1].RSSI == bss->RSSI &&
candidate[idx1].length == dtoh32(bss->length)) {
u32 delete_len = dtoh32(bss->length);
- ESCAN_TRACE(("delete scan info of " MACDBG " to add new AP\n",
- MAC2STRDBG(bss->BSSID.octet)));
+ ESCAN_DBG(dev->name,
+ "delete scan info of %pM to add new AP\n", &bss->BSSID);
if (idx2 < list->count -1) {
memmove((u8 *)bss, (u8 *)bss + delete_len,
list->buflen - cur_len - delete_len);
}
#endif /* ESCAN_BUF_OVERFLOW_MGMT */
-static s32 wl_escan_handler(struct wl_escan_info *escan,
+s32
+wl_escan_handler2(struct net_device *dev, struct wl_escan_info *escan,
const wl_event_msg_t *e, void *data)
{
s32 err = BCME_OK;
u32 i;
u16 channel;
- ESCAN_TRACE(("enter event type : %d, status : %d \n",
- ntoh32(e->event_type), ntoh32(e->status)));
-
mutex_lock(&escan->usr_sync);
escan_result = (wl_escan_result_t *)data;
if (escan->escan_state != ESCAN_STATE_SCANING) {
- ESCAN_TRACE(("Not my scan\n"));
+ ESCAN_DBG(dev->name, "Not my scan\n");
goto exit;
}
+ ESCAN_DBG(dev->name, "enter event type : %d, status : %d \n",
+ ntoh32(e->event_type), ntoh32(e->status));
+
if (status == WLC_E_STATUS_PARTIAL) {
- ESCAN_TRACE(("WLC_E_STATUS_PARTIAL \n"));
+ ESCAN_DBG(dev->name, "WLC_E_STATUS_PARTIAL \n");
if (!escan_result) {
- ESCAN_ERROR(("Invalid escan result (NULL pointer)\n"));
+ ESCAN_ERROR(dev->name, "Invalid escan result (NULL pointer)\n");
goto exit;
}
if (dtoh16(escan_result->bss_count) != 1) {
- ESCAN_ERROR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count));
+ ESCAN_ERROR(dev->name, "Invalid bss_count %d: ignoring\n",
+ escan_result->bss_count);
goto exit;
}
bi = escan_result->bss_info;
if (!bi) {
- ESCAN_ERROR(("Invalid escan bss info (NULL pointer)\n"));
+ ESCAN_ERROR(dev->name, "Invalid escan bss info (NULL pointer)\n");
goto exit;
}
bi_length = dtoh32(bi->length);
if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) {
- ESCAN_ERROR(("Invalid bss_info length %d: ignoring\n", bi_length));
+ ESCAN_ERROR(dev->name, "Invalid bss_info length %d: ignoring\n",
+ bi_length);
goto exit;
}
/* +++++ terence 20130524: skip invalid bss */
channel =
- bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec));
+ bi->ctl_ch ? bi->ctl_ch :
+ CHSPEC_CHANNEL(wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec));
if (!dhd_conf_match_channel(escan->pub, channel))
goto exit;
/* ----- terence 20130524: skip invalid bss */
remove_lower_rssi = TRUE;
#endif /* ESCAN_BUF_OVERFLOW_MGMT */
- ESCAN_TRACE(("%s("MACDBG") RSSI %d flags 0x%x length %d\n", bi->SSID,
- MAC2STRDBG(bi->BSSID.octet), bi->RSSI, bi->flags, bi->length));
+ ESCAN_DBG(dev->name, "%s(%pM) RSSI %d flags 0x%x length %d\n",
+ bi->SSID, &bi->BSSID, bi->RSSI, bi->flags, bi->length);
for (i = 0; i < list->count; i++) {
bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length))
: list->bss_info;
#ifdef ESCAN_BUF_OVERFLOW_MGMT
- ESCAN_TRACE(("%s("MACDBG"), i=%d bss: RSSI %d list->count %d\n",
- bss->SSID, MAC2STRDBG(bss->BSSID.octet),
- i, bss->RSSI, list->count));
+ ESCAN_DBG(dev->name,
+ "%s(%pM), i=%d bss: RSSI %d list->count %d\n",
+ bss->SSID, &bss->BSSID, i, bss->RSSI, list->count);
if (remove_lower_rssi)
- wl_cfg80211_find_removal_candidate(bss, candidate);
+ wl_escan_find_removal_candidate(escan, bss, candidate);
#endif /* ESCAN_BUF_OVERFLOW_MGMT */
if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
- (CHSPEC_BAND(wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec))
- == CHSPEC_BAND(wl_chspec_driver_to_host(escan->ioctl_ver, bss->chanspec))) &&
- bi->SSID_len == bss->SSID_len &&
- !bcmp(bi->SSID, bss->SSID, bi->SSID_len)) {
+ (CHSPEC_BAND(wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec))
+ == CHSPEC_BAND(wl_chspec_driver_to_host(escan->ioctl_ver, bss->chanspec))) &&
+ bi->SSID_len == bss->SSID_len &&
+ !bcmp(bi->SSID, bss->SSID, bi->SSID_len)) {
/* do not allow beacon data to update
*the data recd from a probe response
(bi->flags & WL_BSS_FLAGS_FROM_BEACON))
goto exit;
- ESCAN_TRACE(("%s("MACDBG"), i=%d prev: RSSI %d"
- " flags 0x%x, new: RSSI %d flags 0x%x\n",
- bss->SSID, MAC2STRDBG(bi->BSSID.octet), i,
- bss->RSSI, bss->flags, bi->RSSI, bi->flags));
+ ESCAN_DBG(dev->name,
+ "%s(%pM), i=%d prev: RSSI %d flags 0x%x, "
+ "new: RSSI %d flags 0x%x\n",
+ bss->SSID, &bi->BSSID, i, bss->RSSI, bss->flags,
+ bi->RSSI, bi->flags);
if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) ==
(bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL)) {
/* preserve max RSSI if the measurements are
* both on-channel or both off-channel
*/
- ESCAN_TRACE(("%s("MACDBG"), same onchan"
- ", RSSI: prev %d new %d\n",
- bss->SSID, MAC2STRDBG(bi->BSSID.octet),
- bss->RSSI, bi->RSSI));
+ ESCAN_DBG(dev->name,
+ "%s(%pM), same onchan, RSSI: prev %d new %d\n",
+ bss->SSID, &bi->BSSID, bss->RSSI, bi->RSSI);
bi->RSSI = MAX(bss->RSSI, bi->RSSI);
} else if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) &&
(bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) == 0) {
/* preserve the on-channel rssi measurement
* if the new measurement is off channel
*/
- ESCAN_TRACE(("%s("MACDBG"), prev onchan"
- ", RSSI: prev %d new %d\n",
- bss->SSID, MAC2STRDBG(bi->BSSID.octet),
- bss->RSSI, bi->RSSI));
+ ESCAN_DBG(dev->name,
+ "%s(%pM), prev onchan, RSSI: prev %d new %d\n",
+ bss->SSID, &bi->BSSID, bss->RSSI, bi->RSSI);
bi->RSSI = bss->RSSI;
bi->flags |= WL_BSS_FLAGS_RSSI_ONCHANNEL;
}
if (dtoh32(bss->length) != bi_length) {
u32 prev_len = dtoh32(bss->length);
- ESCAN_TRACE(("bss info replacement"
- " is occured(bcast:%d->probresp%d)\n",
- bss->ie_length, bi->ie_length));
- ESCAN_TRACE(("%s("MACDBG"), replacement!(%d -> %d)\n",
- bss->SSID, MAC2STRDBG(bi->BSSID.octet),
- prev_len, bi_length));
-
- if (list->buflen - prev_len + bi_length
- > ESCAN_BUF_SIZE) {
- ESCAN_ERROR(("Buffer is too small: keep the"
- " previous result of this AP\n"));
+				ESCAN_DBG(dev->name,
+					"bss info replacement occurred(bcast:%d->probresp%d)\n",
+					bss->ie_length, bi->ie_length);
+ ESCAN_DBG(dev->name,
+ "%s(%pM), replacement!(%d -> %d)\n",
+ bss->SSID, &bi->BSSID, prev_len, bi_length);
+
+ if (list->buflen - prev_len + bi_length > ESCAN_BUF_SIZE) {
+ ESCAN_ERROR(dev->name,
+ "Buffer is too small: keep the previous result "
+ "of this AP\n");
/* Only update RSSI */
bss->RSSI = bi->RSSI;
bss->flags |= (bi->flags
}
if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
#ifdef ESCAN_BUF_OVERFLOW_MGMT
- wl_cfg80211_remove_lowRSSI_info(list, candidate, bi);
+ wl_escan_remove_lowRSSI_info(dev, escan, list, candidate, bi);
if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
- ESCAN_TRACE(("RSSI(" MACDBG ") is too low(%d) to add Buffer\n",
- MAC2STRDBG(bi->BSSID.octet), bi->RSSI));
+ ESCAN_DBG(dev->name,
+ "RSSI(%pM) is too low(%d) to add Buffer\n",
+ &bi->BSSID, bi->RSSI);
goto exit;
}
#else
- ESCAN_ERROR(("Buffer is too small: ignoring\n"));
+ ESCAN_ERROR(dev->name, "Buffer is too small: ignoring\n");
goto exit;
#endif /* ESCAN_BUF_OVERFLOW_MGMT */
}
- if (strlen(bi->SSID) == 0) { // terence: fix for hidden SSID
- ESCAN_SCAN(("Skip hidden SSID %pM\n", &bi->BSSID));
- goto exit;
- }
-
memcpy(&(((char *)list)[list->buflen]), bi, bi_length);
list->version = dtoh32(bi->version);
list->buflen += bi_length;
}
}
else if (status == WLC_E_STATUS_SUCCESS) {
- escan->escan_state = ESCAN_STATE_IDLE;
- ESCAN_TRACE(("ESCAN COMPLETED\n"));
+ ESCAN_DBG(dev->name, "ESCAN COMPLETED\n");
escan->bss_list = wl_escan_get_buf(escan);
- ESCAN_TRACE(("SCAN COMPLETED: scanned AP count=%d\n",
- escan->bss_list->count));
- wl_escan_inform_bss(escan);
- wl_notify_escan_complete(escan, false);
+ ESCAN_DBG(dev->name, "SCAN COMPLETED: scanned AP count=%d\n",
+ escan->bss_list->count);
+ wl_escan_inform_bss(dev, escan);
+ wl_escan_notify_complete(dev, escan, false);
+ escan->escan_state = ESCAN_STATE_IDLE;
} else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) ||
(status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) ||
(status == WLC_E_STATUS_NEWASSOC)) {
/* Handle all cases of scan abort */
- escan->escan_state = ESCAN_STATE_IDLE;
- ESCAN_TRACE(("ESCAN ABORT reason: %d\n", status));
+ ESCAN_DBG(dev->name, "ESCAN ABORT reason: %d\n", status);
escan->bss_list = wl_escan_get_buf(escan);
- ESCAN_TRACE(("SCAN ABORT: scanned AP count=%d\n",
- escan->bss_list->count));
- wl_escan_inform_bss(escan);
- wl_notify_escan_complete(escan, false);
+ ESCAN_DBG(dev->name, "SCAN ABORT: scanned AP count=%d\n",
+ escan->bss_list->count);
+ wl_escan_inform_bss(dev, escan);
+ wl_escan_notify_complete(dev, escan, false);
+ escan->escan_state = ESCAN_STATE_IDLE;
} else if (status == WLC_E_STATUS_TIMEOUT) {
- ESCAN_ERROR(("WLC_E_STATUS_TIMEOUT\n"));
- ESCAN_ERROR(("reason[0x%x]\n", e->reason));
+ ESCAN_ERROR(dev->name, "WLC_E_STATUS_TIMEOUT\n");
+ ESCAN_ERROR(dev->name, "reason[0x%x]\n", e->reason);
if (e->reason == 0xFFFFFFFF) {
- wl_notify_escan_complete(escan, true);
+ wl_escan_notify_complete(dev, escan, true);
}
escan->escan_state = ESCAN_STATE_IDLE;
} else {
- ESCAN_ERROR(("unexpected Escan Event %d : abort\n", status));
- escan->escan_state = ESCAN_STATE_IDLE;
+ ESCAN_ERROR(dev->name, "unexpected Escan Event %d : abort\n", status);
escan->bss_list = wl_escan_get_buf(escan);
- ESCAN_TRACE(("SCAN ABORTED(UNEXPECTED): scanned AP count=%d\n",
- escan->bss_list->count));
- wl_escan_inform_bss(escan);
- wl_notify_escan_complete(escan, false);
+ ESCAN_DBG(dev->name, "SCAN ABORTED(UNEXPECTED): scanned AP count=%d\n",
+ escan->bss_list->count);
+ wl_escan_inform_bss(dev, escan);
+ wl_escan_notify_complete(dev, escan, false);
+ escan->escan_state = ESCAN_STATE_IDLE;
}
exit:
mutex_unlock(&escan->usr_sync);
}
static int
-wl_escan_prep(struct wl_escan_info *escan, wl_uint32_list_t *list,
- wl_scan_params_t *params, wlc_ssid_t *ssid)
+wl_escan_prep(struct net_device *dev, struct wl_escan_info *escan,
+ wl_uint32_list_t *list, wl_scan_params_t *params, wlc_ssid_t *ssid, bool bcast)
{
int err = 0;
wl_scan_results_t *results;
u32 n_channels = 0;
uint channel;
chanspec_t chanspec;
+ u32 n_ssids;
results = wl_escan_get_buf(escan);
results->version = 0;
n_channels = dtoh32(list->count);
/* Copy channel array if applicable */
- ESCAN_SCAN(("### List of channelspecs to scan ###\n"));
+ ESCAN_SCAN(dev->name, "### List of channelspecs to scan ###\n");
if (n_channels > 0) {
for (i = 0; i < n_channels; i++) {
channel = dtoh32(list->element[i]);
continue;
chanspec = WL_CHANSPEC_BW_20;
if (chanspec == INVCHANSPEC) {
- ESCAN_ERROR(("Invalid chanspec! Skipping channel\n"));
+ ESCAN_ERROR(dev->name, "Invalid chanspec! Skipping channel\n");
continue;
}
if (channel <= CH_MAX_2G_CHANNEL) {
params->channel_list[j] = channel;
params->channel_list[j] &= WL_CHANSPEC_CHAN_MASK;
params->channel_list[j] |= chanspec;
- ESCAN_SCAN(("Chan : %d, Channel spec: %x \n",
- channel, params->channel_list[j]));
+ ESCAN_SCAN(dev->name, "Chan : %d, Channel spec: %x\n",
+ channel, params->channel_list[j]);
params->channel_list[j] = wl_chspec_host_to_driver(escan->ioctl_ver,
params->channel_list[j]);
j++;
}
} else {
- ESCAN_SCAN(("Scanning all channels\n"));
+ ESCAN_SCAN(dev->name, "Scanning all channels\n");
}
if (ssid && ssid->SSID_len) {
/* Copy ssid array if applicable */
- ESCAN_SCAN(("### List of SSIDs to scan ###\n"));
+ ESCAN_SCAN(dev->name, "### List of SSIDs to scan ###\n");
offset = offsetof(wl_scan_params_t, channel_list) + n_channels * sizeof(u16);
offset = roundup(offset, sizeof(u32));
ptr = (char*)params + offset;
- ESCAN_SCAN(("0: Broadcast scan\n"));
- memset(&ssid_tmp, 0, sizeof(wlc_ssid_t));
- ssid_tmp.SSID_len = 0;
- memcpy(ptr, &ssid_tmp, sizeof(wlc_ssid_t));
- ptr += sizeof(wlc_ssid_t);
+ if (bcast) {
+ n_ssids = 2;
+ ESCAN_SCAN(dev->name, "0: Broadcast scan\n");
+ memset(&ssid_tmp, 0, sizeof(wlc_ssid_t));
+ ssid_tmp.SSID_len = 0;
+ memcpy(ptr, &ssid_tmp, sizeof(wlc_ssid_t));
+ ptr += sizeof(wlc_ssid_t);
+ } else {
+ n_ssids = 1;
+ }
memset(&ssid_tmp, 0, sizeof(wlc_ssid_t));
ssid_tmp.SSID_len = ssid->SSID_len;
memcpy(ssid_tmp.SSID, ssid->SSID, ssid->SSID_len);
memcpy(ptr, &ssid_tmp, sizeof(wlc_ssid_t));
ptr += sizeof(wlc_ssid_t);
- ESCAN_SCAN(("1: scan for %s size=%d\n", ssid_tmp.SSID, ssid_tmp.SSID_len));
+ ESCAN_SCAN(dev->name, "1: scan for %s size=%d\n",
+ ssid_tmp.SSID, ssid_tmp.SSID_len);
/* Adding mask to channel numbers */
params->channel_num =
- htod32((2 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ htod32((n_ssids << WL_SCAN_PARAMS_NSSID_SHIFT) |
(n_channels & WL_SCAN_PARAMS_COUNT_MASK));
}
else {
- ESCAN_SCAN(("Broadcast scan\n"));
+ ESCAN_SCAN(dev->name, "Broadcast scan\n");
}
return err;
}
-static int wl_escan_reset(struct wl_escan_info *escan)
+static int
+wl_escan_reset(struct wl_escan_info *escan)
{
if (timer_pending(&escan->scan_timeout))
del_timer_sync(&escan->scan_timeout);
return 0;
}
-static void wl_escan_timeout(
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- struct timer_list *t
-#else
- unsigned long data
-#endif
-)
+static void
+wl_escan_timeout(unsigned long data)
{
wl_event_msg_t msg;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- struct wl_escan_info *escan = from_timer(escan, t, scan_timeout);
-#else
struct wl_escan_info *escan = (struct wl_escan_info *)data;
-#endif
struct wl_scan_results *bss_list;
struct wl_bss_info *bi = NULL;
s32 i;
u32 channel;
+ if (!escan->dev) {
+ ESCAN_ERROR("wlan", "No dev present\n");
+ return;
+ }
+
bss_list = wl_escan_get_buf(escan);
if (!bss_list) {
- ESCAN_ERROR(("bss_list is null. Didn't receive any partial scan results\n"));
+ ESCAN_ERROR(escan->dev->name,
+ "bss_list is null. Didn't receive any partial scan results\n");
} else {
- ESCAN_ERROR(("%s: scanned AP count (%d)\n", __FUNCTION__, bss_list->count));
+ ESCAN_ERROR(escan->dev->name, "scanned AP count (%d)\n", bss_list->count);
bi = next_bss(bss_list, bi);
for_each_bss(bss_list, bi, i) {
- channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec));
- ESCAN_ERROR(("SSID :%s Channel :%d\n", bi->SSID, channel));
+ channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(escan->ioctl_ver,
+ bi->chanspec));
+ ESCAN_ERROR(escan->dev->name, "SSID :%s Channel :%d\n", bi->SSID, channel);
}
}
- if (!escan->dev) {
- ESCAN_ERROR(("No dev present\n"));
- return;
- }
-
bzero(&msg, sizeof(wl_event_msg_t));
- ESCAN_ERROR(("timer expired\n"));
+ ESCAN_ERROR(escan->dev->name, "timer expired\n");
msg.event_type = hton32(WLC_E_ESCAN_RESULT);
msg.status = hton32(WLC_E_STATUS_TIMEOUT);
msg.reason = 0xFFFFFFFF;
- wl_escan_event(escan->dev, &msg, NULL);
-
- // terence 20130729: workaround to fix out of memory in firmware
-// if (dhd_conf_get_chip(dhd_get_pub(dev)) == BCM43362_CHIP_ID) {
-// ESCAN_ERROR(("Send hang event\n"));
-// net_os_send_hang_message(dev);
-// }
+ wl_ext_event_send(escan->pub->event_params, &msg, NULL);
}
int
-wl_escan_set_scan(
- struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra
-)
+wl_escan_set_scan(struct net_device *dev, dhd_pub_t *dhdp,
+ wlc_ssid_t *ssid, bool bcast)
{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- struct wl_escan_info *escan = dhd->escan;
+ struct wl_escan_info *escan = dhdp->escan;
s32 err = BCME_OK;
s32 params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params));
wl_escan_params_t *params = NULL;
scb_val_t scbval;
static int cnt = 0;
- wlc_ssid_t ssid;
u32 n_channels = 0;
wl_uint32_list_t *list;
u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)];
- s32 val = 0;
-
- ESCAN_TRACE(("Enter \n"));
- if (!escan) {
- ESCAN_ERROR(("device is not ready\n"));
- return -EIO;
- }
- if (escan->escan_state == ESCAN_STATE_DOWN) {
- ESCAN_ERROR(("STATE is down\n"));
- return -EIO;
- }
mutex_lock(&escan->usr_sync);
-
- if (!escan->ioctl_ver) {
- val = 1;
- if ((err = wldev_ioctl(dev, WLC_GET_VERSION, &val, sizeof(int), false) < 0)) {
- ESCAN_ERROR(("WLC_GET_VERSION failed, err=%d\n", err));
- goto exit;
- }
- val = dtoh32(val);
- if (val != WLC_IOCTL_VERSION && val != 1) {
- ESCAN_ERROR(("Version mismatch, please upgrade. Got %d, expected %d or 1\n",
- val, WLC_IOCTL_VERSION));
- goto exit;
- }
- escan->ioctl_ver = val;
- printf("%s: ioctl_ver=%d\n", __FUNCTION__, val);
- }
-
- /* default Broadcast scan */
- memset(&ssid, 0, sizeof(ssid));
-
-#if WIRELESS_EXT > 17
- /* check for given essid */
- if (wrqu->data.length == sizeof(struct iw_scan_req)) {
- if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
- struct iw_scan_req *req = (struct iw_scan_req *)extra;
- ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
- memcpy(ssid.SSID, req->essid, ssid.SSID_len);
- ssid.SSID_len = htod32(ssid.SSID_len);
- }
+ if (escan->escan_state == ESCAN_STATE_DOWN) {
+ ESCAN_ERROR(dev->name, "STATE is down\n");
+ err = -EIO;
+ goto exit2;
}
-#endif
if (escan->escan_state == ESCAN_STATE_SCANING) {
- ESCAN_ERROR(("Scanning already\n"));
+ ESCAN_ERROR(dev->name, "Scanning already\n");
goto exit;
}
+ ESCAN_TRACE(dev->name, "Enter \n");
+
/* if scan request is not empty parse scan request paramters */
memset(valid_chan_list, 0, sizeof(valid_chan_list));
list = (wl_uint32_list_t *)(void *) valid_chan_list;
list->count = htod32(WL_NUMCHANNELS);
- err = wldev_ioctl(escan->dev, WLC_GET_VALID_CHANNELS, valid_chan_list, sizeof(valid_chan_list), false);
+ err = wldev_ioctl(dev, WLC_GET_VALID_CHANNELS, valid_chan_list,
+ sizeof(valid_chan_list), false);
if (err != 0) {
- ESCAN_ERROR(("%s: get channels failed with %d\n", __FUNCTION__, err));
+ ESCAN_ERROR(dev->name, "get channels failed with %d\n", err);
goto exit;
}
n_channels = dtoh32(list->count);
params_size += sizeof(u16) * (n_channels + 1);
else
params_size += sizeof(u16) * n_channels;
- if (ssid.SSID_len) {
+ if (ssid && ssid->SSID_len) {
params_size += sizeof(struct wlc_ssid) * 2;
}
err = -ENOMEM;
goto exit;
}
- wl_escan_prep(escan, list, ¶ms->params, &ssid);
+ wl_escan_prep(dev, escan, list, ¶ms->params, ssid, bcast);
params->version = htod32(ESCAN_REQ_VERSION);
params->action = htod16(WL_SCAN_ACTION_START);
wl_escan_set_sync_id(params->sync_id);
if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) {
- ESCAN_ERROR(("ioctl buffer length not sufficient\n"));
+ ESCAN_ERROR(dev->name, "ioctl buffer length not sufficient\n");
kfree(params);
err = -ENOMEM;
goto exit;
}
params->params.scan_type = DOT11_SCANTYPE_ACTIVE;
- ESCAN_TRACE(("Passive scan_type %d\n", params->params.scan_type));
+ ESCAN_SCAN(dev->name, "Passive scan_type %d\n", params->params.scan_type);
+ WL_MSG(dev->name, "LEGACY_SCAN\n");
err = wldev_iovar_setbuf(dev, "escan", params, params_size,
escan->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
- printf("%s: LEGACY_SCAN\n", __FUNCTION__);
if (unlikely(err)) {
if (err == BCME_EPERM)
/* Scan Not permitted at this point of time */
- ESCAN_TRACE(("Escan not permitted at this time (%d)\n", err));
+ ESCAN_TRACE(dev->name, "Escan not permitted at this time (%d)\n", err);
else
- ESCAN_ERROR(("Escan set error (%d)\n", err));
+ ESCAN_ERROR(dev->name, "Escan set error (%d)\n", err);
wl_escan_reset(escan);
}
kfree(params);
if (unlikely(err)) {
/* Don't print Error incase of Scan suppress */
if ((err == BCME_EPERM))
- ESCAN_TRACE(("Escan failed: Scan Suppressed \n"));
+ ESCAN_TRACE(dev->name, "Escan failed: Scan Suppressed\n");
else {
cnt++;
- ESCAN_ERROR(("error (%d), cnt=%d\n", err, cnt));
+ ESCAN_ERROR(dev->name, "error (%d), cnt=%d\n", err, cnt);
// terence 20140111: send disassoc to firmware
if (cnt >= 4) {
memset(&scbval, 0, sizeof(scb_val_t));
wldev_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t), true);
- ESCAN_ERROR(("Send disassoc to break the busy dev=%p\n", dev));
+ ESCAN_ERROR(dev->name, "Send disassoc to break the busy\n");
cnt = 0;
}
}
} else {
cnt = 0;
+ escan->dev = dev;
}
+exit2:
mutex_unlock(&escan->usr_sync);
return err;
}
-int
-wl_escan_merge_scan_results(struct net_device *dev, struct iw_request_info *info,
- char *extra, wl_bss_info_t *bi, int *len, int max_size)
+#if defined(WL_WIRELESS_EXT)
+static int
+rssi_to_qual(int rssi)
+{
+ if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+ return 0;
+ else if (rssi <= WL_IW_RSSI_VERY_LOW)
+ return 1;
+ else if (rssi <= WL_IW_RSSI_LOW)
+ return 2;
+ else if (rssi <= WL_IW_RSSI_GOOD)
+ return 3;
+ else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+ return 4;
+ else
+ return 5;
+}
+
+static int
+wl_escan_merge_scan_results(struct net_device *dev, struct wl_escan_info *escan,
+ struct iw_request_info *info, char *extra, wl_bss_info_t *bi, int *len, int max_size)
{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- struct wl_escan_info *escan = dhd->escan;
s32 err = BCME_OK;
struct iw_event iwe;
int j;
rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
#endif
channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec));
- ESCAN_SCAN(("BSSID="MACSTR", channel=%d, RSSI=%d, SSID=\"%s\"\n",
- MAC2STR(bi->BSSID.octet), channel, rssi, bi->SSID));
+ ESCAN_SCAN(dev->name, "BSSID=%pM, channel=%3d, RSSI=%3d, SSID=\"%s\"\n",
+ &bi->BSSID, channel, rssi, bi->SSID);
/* First entry must be the BSSID */
iwe.cmd = SIOCGIWAP;
}
*len = event - extra;
if (*len < 0)
- ESCAN_ERROR(("==> Wrong size\n"));
+ ESCAN_ERROR(dev->name, "==> Wrong size\n");
exit:
return err;
}
int
-wl_escan_get_scan(struct net_device *dev, struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
+wl_escan_get_scan(struct net_device *dev, dhd_pub_t *dhdp,
+ struct iw_request_info *info, struct iw_point *dwrq, char *extra)
{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- struct wl_escan_info *escan = dhd->escan;
+ struct wl_escan_info *escan = dhdp->escan;
s32 err = BCME_OK;
int i = 0;
int len_prep = 0, len_ret = 0;
char *buf = NULL;
struct ether_addr cur_bssid;
- ESCAN_TRACE(("%s: %s SIOCGIWSCAN, len=%d\n", __FUNCTION__, dev->name, dwrq->length));
-
- if (!extra)
+ if (!extra) {
+ ESCAN_TRACE(dev->name, "extra is null\n");
return -EINVAL;
+ }
mutex_lock(&escan->usr_sync);
/* Check for scan in progress */
if (escan->escan_state == ESCAN_STATE_SCANING) {
- ESCAN_TRACE(("%s: SIOCGIWSCAN GET still scanning\n", dev->name));
+ ESCAN_DBG(dev->name, "SIOCGIWSCAN GET still scanning\n");
err = -EAGAIN;
goto exit;
}
if (!escan->bss_list) {
- ESCAN_ERROR(("%s: scan not ready\n", dev->name));
+ ESCAN_ERROR(dev->name, "scan not ready\n");
err = -EAGAIN;
goto exit;
}
+ if (dev != escan->dev) {
+ ESCAN_ERROR(dev->name, "not my scan from %s\n", escan->dev->name);
+ err = -EINVAL;
+ goto exit;
+ }
+
+ ESCAN_SCAN(dev->name, "SIOCGIWSCAN, len=%d\n", dwrq->length);
err = wldev_ioctl(dev, WLC_GET_BSSID, &cur_bssid, sizeof(cur_bssid), false);
if (err != BCME_NOTASSOCIATED && memcmp(ðer_null, &cur_bssid, ETHER_ADDR_LEN)) {
// merge current connected bss
buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_ATOMIC);
if (!buf) {
- ESCAN_ERROR(("buffer alloc failed.\n"));
+ ESCAN_ERROR(dev->name, "buffer alloc failed.\n");
err = BCME_NOMEM;
goto exit;
}
*(u32 *)buf = htod32(WL_EXTRA_BUF_MAX);
err = wldev_ioctl(dev, WLC_GET_BSS_INFO, buf, WL_EXTRA_BUF_MAX, false);
if (unlikely(err)) {
- ESCAN_ERROR(("Could not get bss info %d\n", err));
+ ESCAN_ERROR(dev->name, "Could not get bss info %d\n", err);
goto exit;
}
bi = (struct wl_bss_info *)(buf + 4);
len_prep = 0;
- err = wl_escan_merge_scan_results(dev, info, extra+len_ret, bi,
+ err = wl_escan_merge_scan_results(dev, escan, info, extra+len_ret, bi,
&len_prep, buflen_from_user-len_ret);
len_ret += len_prep;
if (err)
bi = node->results.bss_info;
#endif
if (!memcmp(&bi->BSSID, &cur_bssid, ETHER_ADDR_LEN)) {
- ESCAN_SCAN(("skip connected AP %pM\n", &cur_bssid));
+ ESCAN_SCAN(dev->name, "skip connected AP %pM\n", &cur_bssid);
#if defined(BSSCACHE)
node = node->next;
#endif
continue;
}
len_prep = 0;
- err = wl_escan_merge_scan_results(dev, info, extra+len_ret, bi,
+ err = wl_escan_merge_scan_results(dev, escan, info, extra+len_ret, bi,
&len_prep, buflen_from_user-len_ret);
len_ret += len_prep;
if (err)
dwrq->length = len_ret;
dwrq->flags = 0; /* todo */
+ ESCAN_SCAN(dev->name, "scanned AP count (%d)\n", i);
exit:
kfree(buf);
dwrq->length = len_ret;
- ESCAN_SCAN(("scanned AP count (%d)\n", i));
mutex_unlock(&escan->usr_sync);
return err;
}
+#endif
-s32 wl_escan_autochannel(struct net_device *dev, char* command, int total_len)
+static void
+wl_escan_deinit(struct net_device *dev, struct wl_escan_info *escan)
{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- struct wl_escan_info *escan = dhd->escan;
- int ret = 0;
- int bytes_written = -1;
-
- sscanf(command, "%*s %d", &escan->autochannel);
-
- if (escan->autochannel == 0) {
- escan->best_2g_ch = 0;
- escan->best_5g_ch = 0;
- } else if (escan->autochannel == 2) {
- bytes_written = snprintf(command, total_len, "2g=%d 5g=%d",
- escan->best_2g_ch, escan->best_5g_ch);
- ESCAN_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
- ret = bytes_written;
- }
+ ESCAN_TRACE(dev->name, "Enter\n");
- return ret;
+ del_timer_sync(&escan->scan_timeout);
+ escan->escan_state = ESCAN_STATE_DOWN;
+
+#if defined(RSSIAVG)
+ wl_free_rssi_cache(&escan->g_rssi_cache_ctrl);
+#endif
+#if defined(BSSCACHE)
+ wl_free_bss_cache(&escan->g_bss_cache_ctrl);
+#endif
}
-static s32 wl_create_event_handler(struct wl_escan_info *escan)
+static s32
+wl_escan_init(struct net_device *dev, struct wl_escan_info *escan)
{
- int ret = 0;
- ESCAN_TRACE(("Enter \n"));
+ ESCAN_TRACE(dev->name, "Enter\n");
- /* Do not use DHD in cfg driver */
- escan->event_tsk.thr_pid = -1;
+ /* Init scan_timeout timer */
+ init_timer_compat(&escan->scan_timeout, wl_escan_timeout, escan);
+ escan->escan_state = ESCAN_STATE_IDLE;
- PROC_START(wl_escan_event_handler, escan, &escan->event_tsk, 0, "wl_escan_handler");
- if (escan->event_tsk.thr_pid < 0)
- ret = -ENOMEM;
- return ret;
+ return 0;
}
-static void wl_destroy_event_handler(struct wl_escan_info *escan)
+void
+wl_escan_down(struct net_device *dev, dhd_pub_t *dhdp)
{
- if (escan->event_tsk.thr_pid >= 0)
- PROC_STOP(&escan->event_tsk);
-}
+ struct wl_escan_info *escan = dhdp->escan;
-static void wl_escan_deinit(struct wl_escan_info *escan)
-{
- printf("%s: Enter\n", __FUNCTION__);
+ ESCAN_TRACE(dev->name, "Enter\n");
if (!escan) {
- ESCAN_ERROR(("device is not ready\n"));
+ ESCAN_ERROR(dev->name, "escan is NULL\n");
return;
}
- wl_destroy_event_handler(escan);
- wl_flush_eq(escan);
- del_timer_sync(&escan->scan_timeout);
- escan->escan_state = ESCAN_STATE_DOWN;
-#if defined(RSSIAVG)
- wl_free_rssi_cache(&escan->g_rssi_cache_ctrl);
-#endif
-#if defined(BSSCACHE)
- wl_free_bss_cache(&escan->g_bss_cache_ctrl);
-#endif
- wl_ext_add_remove_pm_enable_work(&escan->conn_info, FALSE);
+ wl_escan_deinit(dev, escan);
}
-static s32 wl_escan_init(struct wl_escan_info *escan)
+int
+wl_escan_up(struct net_device *dev, dhd_pub_t *dhdp)
{
- int err = 0;
+ struct wl_escan_info *escan = dhdp->escan;
+ s32 val = 0;
+ int ret = -1;
- printf("%s: Enter\n", __FUNCTION__);
+ ESCAN_TRACE(dev->name, "Enter\n");
if (!escan) {
- ESCAN_ERROR(("device is not ready\n"));
- return -EIO;
+ ESCAN_ERROR(dev->name, "escan is NULL\n");
+ return ret;
}
- /* Init scan_timeout timer */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- timer_setup(&escan->scan_timeout, wl_escan_timeout, 0);
-#else
- init_timer(&escan->scan_timeout);
- escan->scan_timeout.data = (unsigned long) escan;
- escan->scan_timeout.function = wl_escan_timeout;
-#endif
-
- if (wl_create_event_handler(escan)) {
- ESCAN_ERROR(("device is not ready\n"));
- err = -ENOMEM;
- goto err;
+ ret = wl_escan_init(dev, escan);
+ if (ret) {
+ ESCAN_ERROR(dev->name, "wl_escan_init ret %d\n", ret);
+ return ret;
}
- memset(escan->evt_handler, 0, sizeof(escan->evt_handler));
- escan->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler;
- escan->escan_state = ESCAN_STATE_IDLE;
+ if (!escan->ioctl_ver) {
+ val = 1;
+ if ((ret = wldev_ioctl(dev, WLC_GET_VERSION, &val, sizeof(int), false) < 0)) {
+ ESCAN_ERROR(dev->name, "WLC_GET_VERSION failed, ret=%d\n", ret);
+ return ret;
+ }
+ val = dtoh32(val);
+ if (val != WLC_IOCTL_VERSION && val != 1) {
+ ESCAN_ERROR(dev->name,
+ "Version mismatch, please upgrade. Got %d, expected %d or 1\n",
+ val, WLC_IOCTL_VERSION);
+ return ret;
+ }
+ escan->ioctl_ver = val;
+ }
return 0;
-err:
- wl_escan_deinit(escan);
- return err;
}
-void wl_escan_down(dhd_pub_t *dhdp)
+int
+wl_escan_event_dettach(struct net_device *dev, dhd_pub_t *dhdp)
{
struct wl_escan_info *escan = dhdp->escan;
+ int ret = -1;
- wl_escan_deinit(escan);
+ if (!escan) {
+ ESCAN_ERROR(dev->name, "escan is NULL\n");
+ return ret;
+ }
+
+ wl_ext_event_deregister(dev, dhdp, WLC_E_ESCAN_RESULT, wl_escan_handler2);
+
+ return 0;
}
-int wl_escan_up(struct net_device *net, dhd_pub_t *dhdp)
+int
+wl_escan_event_attach(struct net_device *dev, dhd_pub_t *dhdp)
{
struct wl_escan_info *escan = dhdp->escan;
- int err;
+ int ret = -1;
- err = wl_escan_init(escan);
- if (err) {
- ESCAN_ERROR(("wl_escan_init err %d\n", err));
- return err;
+ if (!escan) {
+ ESCAN_ERROR(dev->name, "escan is NULL\n");
+ return ret;
}
- return 0;
+ ret = wl_ext_event_register(dev, dhdp, WLC_E_ESCAN_RESULT, wl_escan_handler2,
+ escan, PRIO_EVENT_ESCAN);
+ if (ret) {
+ ESCAN_ERROR(dev->name, "wl_ext_event_register err %d\n", ret);
+ }
+
+ return ret;
}
-void wl_escan_detach(dhd_pub_t *dhdp)
+void
+wl_escan_detach(struct net_device *dev, dhd_pub_t *dhdp)
{
struct wl_escan_info *escan = dhdp->escan;
- printf("%s: Enter\n", __FUNCTION__);
+ ESCAN_TRACE(dev->name, "Enter\n");
- if (!escan) {
- ESCAN_ERROR(("device is not ready\n"));
+ if (!escan)
return;
- }
-
- wl_escan_deinit(escan);
+ wl_escan_deinit(dev, escan);
if (escan->escan_ioctl_buf) {
kfree(escan->escan_ioctl_buf);
escan->escan_ioctl_buf = NULL;
}
+ wl_ext_event_deregister(dev, dhdp, WLC_E_ESCAN_RESULT, wl_escan_handler2);
+
DHD_OS_PREFREE(dhdp, escan, sizeof(struct wl_escan_info));
dhdp->escan = NULL;
}
wl_escan_attach(struct net_device *dev, dhd_pub_t *dhdp)
{
struct wl_escan_info *escan = NULL;
- int err = 0;
+ int ret = 0;
- printf("%s: Enter\n", __FUNCTION__);
+ ESCAN_TRACE(dev->name, "Enter\n");
- if (!dev)
- return 0;
- escan = (wl_escan_info_t *)DHD_OS_PREALLOC(dhdp, DHD_PREALLOC_WL_ESCAN_INFO, sizeof(struct wl_escan_info));
+ escan = (struct wl_escan_info *)DHD_OS_PREALLOC(dhdp,
+ DHD_PREALLOC_WL_ESCAN, sizeof(struct wl_escan_info));
if (!escan)
return -ENOMEM;
- dhdp->escan = (void *)escan;
memset(escan, 0, sizeof(struct wl_escan_info));
+ dhdp->escan = escan;
+
/* we only care about main interface so save a global here */
- escan->dev = dev;
escan->pub = dhdp;
- escan->conn_info.dev = dev;
- escan->conn_info.dhd = dhdp;
escan->escan_state = ESCAN_STATE_DOWN;
escan->escan_ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
if (unlikely(!escan->escan_ioctl_buf)) {
- ESCAN_ERROR(("Ioctl buf alloc failed\n"));
- goto err ;
+ ESCAN_ERROR(dev->name, "Ioctl buf alloc failed\n");
+ ret = -ENOMEM;
+ goto exit;
}
- wl_init_eq(escan);
- err = wl_escan_init(escan);
- if (err) {
- ESCAN_ERROR(("wl_escan_init err %d\n", err));
- return err;
+ ret = wl_escan_init(dev, escan);
+ if (ret) {
+ ESCAN_ERROR(dev->name, "wl_escan_init err %d\n", ret);
+ goto exit;
}
mutex_init(&escan->usr_sync);
- mutex_init(&escan->conn_info.pm_sync);
- INIT_DELAYED_WORK(&escan->conn_info.pm_enable_work, wl_ext_pm_work_handler);
return 0;
-err:
- wl_escan_detach(dhdp);
- return -ENOMEM;
+
+exit:
+ wl_escan_detach(dev, dhdp);
+ return ret;
}
#endif /* WL_ESCAN */
\r
#ifndef _wl_escan_\r
#define _wl_escan_\r
-\r
-#include <linux/wireless.h>\r
#include <wl_iw.h>\r
-#include <dngl_stats.h>\r
-#include <dhd.h>\r
-#include <linux/time.h>\r
-\r
-\r
-#ifdef DHD_MAX_IFS\r
-#define WL_MAX_IFS DHD_MAX_IFS\r
-#else\r
-#define WL_MAX_IFS 16\r
-#endif\r
\r
#define ESCAN_BUF_SIZE (64 * 1024)\r
\r
#define WL_ESCAN_TIMER_INTERVAL_MS 10000 /* Scan timeout */\r
\r
-/* event queue for cfg80211 main event */\r
-struct escan_event_q {\r
- struct list_head eq_list;\r
- u32 etype;\r
- wl_event_msg_t emsg;\r
- s8 edata[1];\r
-};\r
-\r
-struct pmk_list {\r
- pmkid_list_t pmkids;\r
- pmkid_t foo[MAXPMKID - 1];\r
-};\r
-\r
/* donlge escan state */\r
enum escan_state {\r
ESCAN_STATE_DOWN,\r
ESCAN_STATE_SCANING\r
};\r
\r
-struct wl_escan_info;\r
-\r
-typedef s32(*ESCAN_EVENT_HANDLER) (struct wl_escan_info *escan,\r
- const wl_event_msg_t *e, void *data);\r
-\r
typedef struct wl_escan_info {\r
struct net_device *dev;\r
dhd_pub_t *pub;\r
struct timer_list scan_timeout; /* Timer for catch scan event timeout */\r
int escan_state;\r
int ioctl_ver;\r
-\r
- char ioctlbuf[WLC_IOCTL_SMLEN];\r
u8 escan_buf[ESCAN_BUF_SIZE];\r
struct wl_scan_results *bss_list;\r
- struct wl_scan_results *scan_results;\r
struct ether_addr disconnected_bssid;\r
u8 *escan_ioctl_buf;\r
- spinlock_t eq_lock; /* for event queue synchronization */\r
- struct list_head eq_list; /* used for event queue */\r
- tsk_ctl_t event_tsk; /* task of main event handler thread */\r
- ESCAN_EVENT_HANDLER evt_handler[WLC_E_LAST];\r
struct mutex usr_sync; /* maily for up/down synchronization */\r
int autochannel;\r
int best_2g_ch;\r
#if defined(BSSCACHE)\r
wl_bss_cache_ctrl_t g_bss_cache_ctrl;\r
#endif\r
- struct pmk_list pmk_list;\r
- wl_conn_info_t conn_info;\r
} wl_escan_info_t;\r
\r
-void wl_escan_event(struct net_device *dev, const wl_event_msg_t * e, void *data);\r
-\r
-int wl_escan_set_scan(\r
- struct net_device *dev,\r
- struct iw_request_info *info,\r
- union iwreq_data *wrqu,\r
- char *extra\r
-);\r
-int wl_escan_get_scan(struct net_device *dev, struct iw_request_info *info,\r
- struct iw_point *dwrq, char *extra);\r
-s32 wl_escan_autochannel(struct net_device *dev, char* command, int total_len);\r
+int wl_escan_set_scan(struct net_device *dev, dhd_pub_t *dhdp,\r
+ wlc_ssid_t *ssid, bool bcast);\r
+int wl_escan_get_scan(struct net_device *dev, dhd_pub_t *dhdp,\r
+ struct iw_request_info *info, struct iw_point *dwrq, char *extra);\r
+s32 wl_escan_handler2(struct net_device *dev, struct wl_escan_info *escan,\r
+ const wl_event_msg_t *e, void *data);\r
int wl_escan_attach(struct net_device *dev, dhd_pub_t *dhdp);\r
-void wl_escan_detach(dhd_pub_t *dhdp);\r
-int wl_escan_up(struct net_device *net, dhd_pub_t *dhdp);\r
-void wl_escan_down(dhd_pub_t *dhdp);\r
+void wl_escan_detach(struct net_device *dev, dhd_pub_t *dhdp);\r
+int wl_escan_event_attach(struct net_device *dev, dhd_pub_t *dhdp);\r
+int wl_escan_event_dettach(struct net_device *dev, dhd_pub_t *dhdp);\r
+int wl_escan_up(struct net_device *dev, dhd_pub_t *dhdp);\r
+void wl_escan_down(struct net_device *dev, dhd_pub_t *dhdp);\r
\r
#endif /* _wl_escan_ */\r
\r
--- /dev/null
+
+#if defined(WL_EXT_IAPSTA) || defined(USE_IW)
+#include <bcmendian.h>
+#include <wl_android.h>
+#include <dhd_config.h>
+
+#define EVENT_ERROR(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_ERROR_LEVEL) { \
+ printk(KERN_ERR "[dhd-%s] EVENT-ERROR) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+#define EVENT_TRACE(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_TRACE_LEVEL) { \
+ printk(KERN_ERR "[dhd-%s] EVENT-TRACE) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+#define EVENT_DBG(name, arg1, args...) \
+ do { \
+ if (android_msg_level & ANDROID_DBG_LEVEL) { \
+ printk(KERN_ERR "[dhd-%s] EVENT-DBG) %s : " arg1, name, __func__, ## args); \
+ } \
+ } while (0)
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
+_Pragma("GCC diagnostic push") \
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
+(entry) = list_first_entry((ptr), type, member); \
+_Pragma("GCC diagnostic pop") \
+
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+_Pragma("GCC diagnostic push") \
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
+entry = container_of((ptr), type, member); \
+_Pragma("GCC diagnostic pop") \
+
+#else
+#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
+(entry) = list_first_entry((ptr), type, member); \
+
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+entry = container_of((ptr), type, member); \
+
+#endif /* STRICT_GCC_WARNINGS */
+
+#ifdef DHD_MAX_IFS
+#define WL_MAX_IFS DHD_MAX_IFS
+#else
+#define WL_MAX_IFS 16
+#endif
+
+/* event queue consumed by the ext event handler work item */
+struct wl_event_q {
+ struct list_head eq_list;
+ u32 etype;
+ wl_event_msg_t emsg;
+ s8 edata[1];
+};
+
+typedef s32(*EXT_EVENT_HANDLER) (struct net_device *dev, void *cb_argu,
+ const wl_event_msg_t *e, void *data);
+
+typedef struct event_handler_list {
+ struct event_handler_list *next;
+ struct net_device *dev;
+ uint32 etype;
+ EXT_EVENT_HANDLER cb_func;
+ void *cb_argu;
+ wl_event_prio_t prio;
+} event_handler_list_t;
+
+typedef struct event_handler_head {
+ event_handler_list_t *evt_head;
+} event_handler_head_t;
+
+typedef struct wl_event_params {
+ dhd_pub_t *pub;
+ struct net_device *dev[WL_MAX_IFS];
+ struct event_handler_head evt_head;
+ struct list_head eq_list; /* used for event queue */
+ spinlock_t eq_lock; /* for event queue synchronization */
+ struct workqueue_struct *event_workq; /* workqueue for event */
+ struct work_struct event_work; /* work item for event */
+ struct mutex event_sync;
+} wl_event_params_t;
+
+static unsigned long
+wl_ext_event_lock_eq(struct wl_event_params *event_params)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&event_params->eq_lock, flags);
+ return flags;
+}
+
+static void
+wl_ext_event_unlock_eq(struct wl_event_params *event_params, unsigned long flags)
+{
+ spin_unlock_irqrestore(&event_params->eq_lock, flags);
+}
+
+static void
+wl_ext_event_init_eq_lock(struct wl_event_params *event_params)
+{
+ spin_lock_init(&event_params->eq_lock);
+}
+
+static void
+wl_ext_event_init_eq(struct wl_event_params *event_params)
+{
+ wl_ext_event_init_eq_lock(event_params);
+ INIT_LIST_HEAD(&event_params->eq_list);
+}
+
+static void
+wl_ext_event_flush_eq(struct wl_event_params *event_params)
+{
+ struct wl_event_q *e;
+ unsigned long flags;
+
+ flags = wl_ext_event_lock_eq(event_params);
+ while (!list_empty_careful(&event_params->eq_list)) {
+ BCM_SET_LIST_FIRST_ENTRY(e, &event_params->eq_list, struct wl_event_q, eq_list);
+ list_del(&e->eq_list);
+ kfree(e);
+ }
+ wl_ext_event_unlock_eq(event_params, flags);
+}
+
+/*
+ * Retrieve the first queued event from the head of the event queue.
+ */
+
+static struct wl_event_q *
+wl_ext_event_deq_event(struct wl_event_params *event_params)
+{
+ struct wl_event_q *e = NULL;
+ unsigned long flags;
+
+ flags = wl_ext_event_lock_eq(event_params);
+ if (likely(!list_empty(&event_params->eq_list))) {
+ BCM_SET_LIST_FIRST_ENTRY(e, &event_params->eq_list, struct wl_event_q, eq_list);
+ list_del(&e->eq_list);
+ }
+ wl_ext_event_unlock_eq(event_params, flags);
+
+ return e;
+}
+
+/*
+ * Push an event onto the tail of the event queue.
+ */
+
+static s32
+wl_ext_event_enq_event(struct wl_event_params *event_params, u32 event,
+ const wl_event_msg_t *msg, void *data)
+{
+ struct wl_event_q *e;
+ s32 err = 0;
+ uint32 evtq_size;
+ uint32 data_len;
+ unsigned long flags;
+ gfp_t aflags;
+
+ data_len = 0;
+ if (data)
+ data_len = ntoh32(msg->datalen);
+ evtq_size = sizeof(struct wl_event_q) + data_len;
+ aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+ e = kzalloc(evtq_size, aflags);
+ if (unlikely(!e)) {
+ EVENT_ERROR("wlan", "event alloc failed\n");
+ return -ENOMEM;
+ }
+ e->etype = event;
+ memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
+ if (data)
+ memcpy(e->edata, data, data_len);
+ flags = wl_ext_event_lock_eq(event_params);
+ list_add_tail(&e->eq_list, &event_params->eq_list);
+ wl_ext_event_unlock_eq(event_params, flags);
+
+ return err;
+}
+
+static void
+wl_ext_event_put_event(struct wl_event_q *e)
+{
+ kfree(e);
+}
+
+static void
+wl_ext_event_handler(struct work_struct *work_data)
+{
+ struct wl_event_params *event_params = NULL;
+ struct wl_event_q *e;
+ struct net_device *dev = NULL;
+ struct event_handler_list *evt_node;
+ dhd_pub_t *dhd;
+
+ BCM_SET_CONTAINER_OF(event_params, work_data, struct wl_event_params, event_work);
+ DHD_EVENT_WAKE_LOCK(event_params->pub);
+ while ((e = wl_ext_event_deq_event(event_params))) {
+ if (e->emsg.ifidx >= DHD_MAX_IFS) {
+ EVENT_ERROR("wlan", "ifidx=%d not in range\n", e->emsg.ifidx);
+ goto fail;
+ }
+ dev = event_params->dev[e->emsg.ifidx];
+ if (!dev) {
+ EVENT_DBG("wlan", "ifidx=%d dev not ready\n", e->emsg.ifidx);
+ goto fail;
+ }
+ dhd = dhd_get_pub(dev);
+ if (e->etype > WLC_E_LAST) {
+ EVENT_TRACE(dev->name, "Unknown Event (%d): ignoring\n", e->etype);
+ goto fail;
+ }
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ EVENT_ERROR(dev->name, "BUS is DOWN.\n");
+ goto fail;
+ }
+ EVENT_DBG(dev->name, "event type (%d)\n", e->etype);
+ mutex_lock(&event_params->event_sync);
+ evt_node = event_params->evt_head.evt_head;
+ for (;evt_node;) {
+ if (evt_node->dev == dev &&
+ (evt_node->etype == e->etype || evt_node->etype == WLC_E_LAST))
+ evt_node->cb_func(dev, evt_node->cb_argu, &e->emsg, e->edata);
+ evt_node = evt_node->next;
+ }
+ mutex_unlock(&event_params->event_sync);
+fail:
+ wl_ext_event_put_event(e);
+ }
+ DHD_EVENT_WAKE_UNLOCK(event_params->pub);
+}
+
+void
+wl_ext_event_send(void *params, const wl_event_msg_t * e, void *data)
+{
+ struct wl_event_params *event_params = params;
+ u32 event_type = ntoh32(e->event_type);
+
+ if (event_params == NULL) {
+ EVENT_ERROR("wlan", "Stale event %d(%s) ignored\n",
+ event_type, bcmevent_get_name(event_type));
+ return;
+ }
+
+ if (event_params->event_workq == NULL) {
+ EVENT_ERROR("wlan", "Event handler is not created %d(%s)\n",
+ event_type, bcmevent_get_name(event_type));
+ return;
+ }
+
+ if (likely(!wl_ext_event_enq_event(event_params, event_type, e, data))) {
+ queue_work(event_params->event_workq, &event_params->event_work);
+ }
+}
+
+static s32
+wl_ext_event_create_handler(struct wl_event_params *event_params)
+{
+ int ret = 0;
+ EVENT_TRACE("wlan", "Enter\n");
+
+ /* Allocate workqueue for event */
+ if (!event_params->event_workq) {
+ event_params->event_workq = alloc_workqueue("ext_eventd", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ }
+
+ if (!event_params->event_workq) {
+ EVENT_ERROR("wlan", "event_workq alloc_workqueue failed\n");
+ ret = -ENOMEM;
+ } else {
+ INIT_WORK(&event_params->event_work, wl_ext_event_handler);
+ }
+ return ret;
+}
+
+static void
+wl_ext_event_free(struct wl_event_params *event_params)
+{
+ struct event_handler_list *node, *cur, **evt_head;
+
+ evt_head = &event_params->evt_head.evt_head;
+ node = *evt_head;
+
+ for (;node;) {
+ EVENT_TRACE(node->dev->name, "Free etype=%d\n", node->etype);
+ cur = node;
+ node = cur->next;
+ kfree(cur);
+ }
+ *evt_head = NULL;
+}
+
+static void
+wl_ext_event_destroy_handler(struct wl_event_params *event_params)
+{
+ if (event_params && event_params->event_workq) {
+ cancel_work_sync(&event_params->event_work);
+ destroy_workqueue(event_params->event_workq);
+ event_params->event_workq = NULL;
+ }
+}
+
+int
+wl_ext_event_register(struct net_device *dev, dhd_pub_t *dhd, uint32 event,
+ void *cb_func, void *data, wl_event_prio_t prio)
+{
+ struct wl_event_params *event_params = dhd->event_params;
+ struct event_handler_list *node, *leaf, *node_prev, **evt_head;
+ int ret = 0;
+
+ if (event_params) {
+ mutex_lock(&event_params->event_sync);
+ evt_head = &event_params->evt_head.evt_head;
+ node = *evt_head;
+ for (;node;) {
+ if (node->dev == dev && node->etype == event && node->cb_func == cb_func) {
+ EVENT_TRACE(dev->name, "skip event %d\n", event);
+ mutex_unlock(&event_params->event_sync);
+ return 0;
+ }
+ node = node->next;
+ }
+ leaf = kmalloc(sizeof(event_handler_list_t), GFP_KERNEL);
+ if (!leaf) {
+ EVENT_ERROR(dev->name, "Memory alloc failure %d for event %d\n",
+ (int)sizeof(event_handler_list_t), event);
+ mutex_unlock(&event_params->event_sync);
+ return -ENOMEM;
+ }
+ leaf->next = NULL;
+ leaf->dev = dev;
+ leaf->etype = event;
+ leaf->cb_func = cb_func;
+ leaf->cb_argu = data;
+ leaf->prio = prio;
+ if (*evt_head == NULL) {
+ *evt_head = leaf;
+ } else {
+ node = *evt_head;
+ node_prev = NULL;
+ for (;node;) {
+ if (node->prio <= prio) {
+ leaf->next = node;
+ if (node_prev)
+ node_prev->next = leaf;
+ else
+ *evt_head = leaf;
+ break;
+ } else if (node->next == NULL) {
+ node->next = leaf;
+ break;
+ }
+ node_prev = node;
+ node = node->next;
+ }
+ }
+ EVENT_TRACE(dev->name, "event %d registered\n", event);
+ mutex_unlock(&event_params->event_sync);
+ } else {
+ EVENT_ERROR(dev->name, "event_params not ready %d\n", event);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+void
+wl_ext_event_deregister(struct net_device *dev, dhd_pub_t *dhd,
+ uint32 event, void *cb_func)
+{
+ struct wl_event_params *event_params = dhd->event_params;
+ struct event_handler_list *node, *prev, **evt_head;
+ int tmp = 0;
+
+ if (event_params) {
+ mutex_lock(&event_params->event_sync);
+ evt_head = &event_params->evt_head.evt_head;
+ node = *evt_head;
+ prev = node;
+ for (;node;) {
+ if (node->dev == dev && node->etype == event && node->cb_func == cb_func) {
+ if (node == *evt_head) {
+ tmp = 1;
+ *evt_head = node->next;
+ } else {
+ tmp = 0;
+ prev->next = node->next;
+ }
+ EVENT_TRACE(dev->name, "event %d deregistered\n", event);
+ kfree(node);
+ if (tmp == 1) {
+ node = *evt_head;
+ prev = node;
+ } else {
+ node = prev->next;
+ }
+ continue;
+ }
+ prev = node;
+ node = node->next;
+ }
+ mutex_unlock(&event_params->event_sync);
+ } else {
+ EVENT_ERROR(dev->name, "event_params not ready %d\n", event);
+ }
+}
+
+static s32
+wl_ext_event_init_priv(struct wl_event_params *event_params)
+{
+ s32 err = 0;
+
+ mutex_init(&event_params->event_sync);
+ wl_ext_event_init_eq(event_params);
+ if (wl_ext_event_create_handler(event_params))
+ return -ENOMEM;
+
+ return err;
+}
+
+static void
+wl_ext_event_deinit_priv(struct wl_event_params *event_params)
+{
+ wl_ext_event_destroy_handler(event_params);
+ wl_ext_event_flush_eq(event_params);
+ wl_ext_event_free(event_params);
+}
+
+int
+wl_ext_event_attach_netdev(struct net_device *net, int ifidx, uint8 bssidx)
+{
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ struct wl_event_params *event_params = dhd->event_params;
+
+ EVENT_TRACE(net->name, "ifidx=%d, bssidx=%d\n", ifidx, bssidx);
+ if (event_params && ifidx < WL_MAX_IFS) {
+ event_params->dev[ifidx] = net;
+ }
+
+ return 0;
+}
+
+int
+wl_ext_event_dettach_netdev(struct net_device *net, int ifidx)
+{
+ struct dhd_pub *dhd = dhd_get_pub(net);
+ struct wl_event_params *event_params = dhd->event_params;
+
+ EVENT_TRACE(net->name, "ifidx=%d\n", ifidx);
+ if (event_params && ifidx < WL_MAX_IFS) {
+ event_params->dev[ifidx] = NULL;
+ }
+
+ return 0;
+}
+
+s32
+wl_ext_event_attach(struct net_device *dev, dhd_pub_t *dhdp)
+{
+ struct wl_event_params *event_params = NULL;
+ s32 err = 0;
+
+ event_params = kmalloc(sizeof(wl_event_params_t), GFP_KERNEL);
+ if (!event_params) {
+ EVENT_ERROR(dev->name, "Failed to allocate memory (%zu)\n",
+ sizeof(wl_event_params_t));
+ return -ENOMEM;
+ }
+ dhdp->event_params = event_params;
+ memset(event_params, 0, sizeof(wl_event_params_t));
+ event_params->pub = dhdp;
+
+ err = wl_ext_event_init_priv(event_params);
+ if (err) {
+ EVENT_ERROR(dev->name, "Failed to wl_ext_event_init_priv (%d)\n", err);
+ goto ext_attach_out;
+ }
+
+ return err;
+ext_attach_out:
+ wl_ext_event_dettach(dhdp);
+ return err;
+}
+
+void
+wl_ext_event_dettach(dhd_pub_t *dhdp)
+{
+ struct wl_event_params *event_params = dhdp->event_params;
+
+ if (event_params) {
+ wl_ext_event_deinit_priv(event_params);
+ kfree(event_params);
+ dhdp->event_params = NULL;
+ }
+}
+#endif
#ifdef WL_NAN
#include <wlioctl_utils.h>
#endif
+#include <wl_iw.h>
#include <wl_android.h>
#ifdef WL_ESCAN
#include <wl_escan.h>
#endif
#include <dhd_config.h>
-/* message levels */
-#define WL_ERROR_LEVEL 0x0001
-#define WL_SCAN_LEVEL 0x0002
-#define WL_ASSOC_LEVEL 0x0004
-#define WL_INFORM_LEVEL 0x0008
-#define WL_WSEC_LEVEL 0x0010
-#define WL_PNO_LEVEL 0x0020
-#define WL_COEX_LEVEL 0x0040
-#define WL_SOFTAP_LEVEL 0x0080
-#define WL_TRACE_LEVEL 0x0100
-
uint iw_msg_level = WL_ERROR_LEVEL;
-#define WL_ERROR(x) do {if (iw_msg_level & WL_ERROR_LEVEL) printf x;} while (0)
-#define WL_SCAN(x) do {if (iw_msg_level & WL_SCAN_LEVEL) printf x;} while (0)
-#define WL_ASSOC(x) do {if (iw_msg_level & WL_ASSOC_LEVEL) printf x;} while (0)
-#define WL_INFORM(x) do {if (iw_msg_level & WL_INFORM_LEVEL) printf x;} while (0)
-#define WL_WSEC(x) do {if (iw_msg_level & WL_WSEC_LEVEL) printf x;} while (0)
-#define WL_PNO(x) do {if (iw_msg_level & WL_PNO_LEVEL) printf x;} while (0)
-#define WL_COEX(x) do {if (iw_msg_level & WL_COEX_LEVEL) printf x;} while (0)
-#define WL_SOFTAP(x) do {if (iw_msg_level & WL_SOFTAP_LEVEL) printf x;} while (0)
-#define WL_TRACE(x) do {if (iw_msg_level & WL_TRACE_LEVEL) printf x;} while (0)
+#define WL_ERROR_MSG(x, args...) \
+ do { \
+ if (iw_msg_level & WL_ERROR_LEVEL) { \
+ printk(KERN_ERR "[dhd] WEXT-ERROR) %s : " x, __func__, ## args); \
+ } \
+ } while (0)
+#define WL_TRACE_MSG(x, args...) \
+ do { \
+ if (iw_msg_level & WL_TRACE_LEVEL) { \
+ printk(KERN_ERR "[dhd] WEXT-TRACE) %s : " x, __func__, ## args); \
+ } \
+ } while (0)
+#define WL_SCAN_MSG(x, args...) \
+ do { \
+ if (iw_msg_level & WL_SCAN_LEVEL) { \
+ printk(KERN_ERR "[dhd] WEXT-SCAN) %s : " x, __func__, ## args); \
+ } \
+ } while (0)
+#define WL_WSEC_MSG(x, args...) \
+ do { \
+ if (iw_msg_level & WL_WSEC_LEVEL) { \
+ printk(KERN_ERR "[dhd] WEXT-WSEC) %s : " x, __func__, ## args); \
+ } \
+ } while (0)
+#define WL_ERROR(x) WL_ERROR_MSG x
+#define WL_TRACE(x) WL_TRACE_MSG x
+#define WL_SCAN(x) WL_SCAN_MSG x
+#define WL_WSEC(x) WL_WSEC_MSG x
+
+#ifdef BCMWAPI_WPI
+/* these items should eventually go into wireless.h of the linux system header file dir */
+#ifndef IW_ENCODE_ALG_SM4
+#define IW_ENCODE_ALG_SM4 0x20
+#endif
-#include <wl_iw.h>
+#ifndef IW_AUTH_WAPI_ENABLED
+#define IW_AUTH_WAPI_ENABLED 0x20
+#endif
+#ifndef IW_AUTH_WAPI_VERSION_1
+#define IW_AUTH_WAPI_VERSION_1 0x00000008
+#endif
+
+#ifndef IW_AUTH_CIPHER_SMS4
+#define IW_AUTH_CIPHER_SMS4 0x00000020
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_PSK
+#define IW_AUTH_KEY_MGMT_WAPI_PSK 4
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_CERT
+#define IW_AUTH_KEY_MGMT_WAPI_CERT 8
+#endif
+#endif /* BCMWAPI_WPI */
/* Broadcom extensions to WEXT, linux upstream has obsoleted WEXT */
#ifndef IW_AUTH_KEY_MGMT_FT_802_1X
char iscan_buf[WLC_IW_ISCAN_MAXLEN];
} iscan_buf_t;
-struct pmk_list {
- pmkid_list_t pmkids;
- pmkid_t foo[MAXPMKID - 1];
-};
-
typedef struct iscan_info {
struct net_device *dev;
struct timer_list timer;
struct semaphore sysioc_sem;
struct completion sysioc_exited;
char ioctlbuf[WLC_IOCTL_SMLEN];
- struct pmk_list pmk_list;
- wl_conn_info_t conn_info;
} iscan_info_t;
static void wl_iw_timerfunc(ulong data);
static void wl_iw_set_event_mask(struct net_device *dev);
static int wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action);
-#endif /* WL_ESCAN */
+#endif /* !WL_ESCAN */
+
+struct pmk_list {
+ pmkid_list_t pmkids;
+ pmkid_t foo[MAXPMKID - 1];
+};
+
+typedef struct wl_wext_info {
+ struct net_device *dev;
+ dhd_pub_t *dhd;
+ struct delayed_work pm_enable_work;
+ struct mutex pm_sync;
+ struct wl_conn_info conn_info;
+ struct pmk_list pmk_list;
+#ifndef WL_ESCAN
+ struct iscan_info iscan;
+#endif
+} wl_wext_info_t;
/* priv_link becomes netdev->priv and is the link between netdev and wlif struct */
typedef struct priv_link {
}
#endif /* WIRELESS_EXT > 12 */
+#define WL_PM_ENABLE_TIMEOUT 10000
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+_Pragma("GCC diagnostic push") \
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
+entry = container_of((ptr), type, member); \
+_Pragma("GCC diagnostic pop")
+#else
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+entry = container_of((ptr), type, member);
+#endif /* STRICT_GCC_WARNINGS */
+
+void wl_ext_pm_work_handler(struct work_struct *work)
+{
+ struct wl_wext_info *wext_info;
+ s32 pm = PM_FAST;
+ dhd_pub_t *dhd;
+ int ret = 0;
+
+ BCM_SET_CONTAINER_OF(wext_info, work, struct wl_wext_info, pm_enable_work.work);
+
+ WL_TRACE(("%s: Enter\n", __FUNCTION__));
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+
+ dhd = (dhd_pub_t *)(wext_info->dhd);
+ if (!dhd || !wext_info->dhd->up) {
+ WL_TRACE(("%s: dhd is null or not up\n", __FUNCTION__));
+ return;
+ }
+ if (dhd_conf_get_pm(dhd) >= 0)
+ pm = dhd_conf_get_pm(dhd);
+ ret = dev_wlc_ioctl(wext_info->dev, WLC_SET_PM, &pm, sizeof(pm));
+ if (ret)
+ WL_ERROR(("%s: WLC_SET_PM failed %d\n", __FUNCTION__, ret));
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+ DHD_PM_WAKE_UNLOCK(wext_info->dhd);
+
+}
+
+void wl_ext_add_remove_pm_enable_work(struct wl_wext_info *wext_info,
+ bool add)
+{
+ u16 wq_duration = 0;
+ s32 pm = PM_OFF;
+ int ret = 0;
+
+ if (wext_info == NULL || wext_info->dhd == NULL)
+ return;
+
+ mutex_lock(&wext_info->pm_sync);
+ /*
+ * Make cancel and schedule work part mutually exclusive
+ * so that while cancelling, we are sure that there is no
+ * work getting scheduled.
+ */
+
+ if (delayed_work_pending(&wext_info->pm_enable_work)) {
+ cancel_delayed_work_sync(&wext_info->pm_enable_work);
+ DHD_PM_WAKE_UNLOCK(wext_info->dhd);
+ }
+
+ if (add) {
+ wq_duration = (WL_PM_ENABLE_TIMEOUT);
+ }
+
+ /* It should schedule work item only if driver is up */
+ if (wq_duration && wext_info->dhd->up) {
+ if (dhd_conf_get_pm(wext_info->dhd) >= 0)
+ pm = dhd_conf_get_pm(wext_info->dhd);
+ ret = dev_wlc_ioctl(wext_info->dev, WLC_SET_PM, &pm, sizeof(pm));
+ if (ret)
+ WL_ERROR(("%s: WLC_SET_PM failed %d\n", __FUNCTION__, ret));
+ if (schedule_delayed_work(&wext_info->pm_enable_work,
+ msecs_to_jiffies((const unsigned int)wq_duration))) {
+ DHD_PM_WAKE_LOCK_TIMEOUT(wext_info->dhd, wq_duration);
+ } else {
+ WL_ERROR(("%s: Can't schedule pm work handler\n", __FUNCTION__));
+ }
+ }
+ mutex_unlock(&wext_info->pm_sync);
+
+}
+
static void
wl_iw_update_connect_status(struct net_device *dev, enum wl_ext_status status)
{
int cur_eapol_status = 0;
int wpa_auth = 0;
int error = -EINVAL;
- wl_conn_info_t *conn_info = NULL;
-#ifdef WL_ESCAN
- wl_escan_info_t *escan;
- if (dhd && dhd->escan) {
- escan = (wl_escan_info_t *)dhd->escan;
- conn_info = &escan->conn_info;
- }
-#else
- iscan_info_t *iscan;
- if (dhd && dhd->iscan) {
- iscan = (iscan_info_t *)dhd->iscan;
- conn_info = &iscan->conn_info;
- }
-#endif
+ wl_wext_info_t *wext_info = NULL;
- if (!dhd || !dhd->conf || !conn_info)
+ if (!dhd || !dhd->conf)
return;
-
+ wext_info = dhd->wext_info;
cur_eapol_status = dhd->conf->eapol_status;
if (status == WL_EXT_STATUS_CONNECTING) {
- wl_ext_add_remove_pm_enable_work(conn_info, TRUE);
+ wl_ext_add_remove_pm_enable_work(wext_info, TRUE);
if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &wpa_auth))) {
- WL_ERROR(("%s: wpa_auth get error %d\n", __FUNCTION__, error));
+ WL_ERROR(("wpa_auth get error %d\n", error));
return;
}
if (wpa_auth & (WPA_AUTH_PSK|WPA2_AUTH_PSK))
- dhd->conf->eapol_status = EAPOL_STATUS_WPA_START;
+ dhd->conf->eapol_status = EAPOL_STATUS_4WAY_START;
else
dhd->conf->eapol_status = EAPOL_STATUS_NONE;
} else if (status == WL_EXT_STATUS_ADD_KEY) {
- dhd->conf->eapol_status = EAPOL_STATUS_WPA_END;
+ dhd->conf->eapol_status = EAPOL_STATUS_4WAY_DONE;
} else if (status == WL_EXT_STATUS_DISCONNECTING) {
- wl_ext_add_remove_pm_enable_work(conn_info, FALSE);
- if (cur_eapol_status >= EAPOL_STATUS_WPA_START &&
- cur_eapol_status < EAPOL_STATUS_WPA_END) {
- WL_ERROR(("%s: WPA failed at %d\n", __FUNCTION__, cur_eapol_status));
+ wl_ext_add_remove_pm_enable_work(wext_info, FALSE);
+ if (cur_eapol_status >= EAPOL_STATUS_4WAY_START &&
+ cur_eapol_status < EAPOL_STATUS_4WAY_DONE) {
+ WL_ERROR(("WPA failed at %d\n", cur_eapol_status));
dhd->conf->eapol_status = EAPOL_STATUS_NONE;
- } else if (cur_eapol_status >= EAPOL_STATUS_WPS_WSC_START &&
- cur_eapol_status < EAPOL_STATUS_WPS_DONE) {
- WL_ERROR(("%s: WPS failed at %d\n", __FUNCTION__, cur_eapol_status));
+ } else if (cur_eapol_status >= EAPOL_STATUS_WSC_START &&
+ cur_eapol_status < EAPOL_STATUS_WSC_DONE) {
+ WL_ERROR(("WPS failed at %d\n", cur_eapol_status));
dhd->conf->eapol_status = EAPOL_STATUS_NONE;
}
} else if (status == WL_EXT_STATUS_DISCONNECTED) {
- if (cur_eapol_status >= EAPOL_STATUS_WPA_START &&
- cur_eapol_status < EAPOL_STATUS_WPA_END) {
- WL_ERROR(("%s: WPA failed at %d\n", __FUNCTION__, cur_eapol_status));
+ if (cur_eapol_status >= EAPOL_STATUS_4WAY_START &&
+ cur_eapol_status < EAPOL_STATUS_4WAY_DONE) {
+ WL_ERROR(("WPA failed at %d\n", cur_eapol_status));
dhd->conf->eapol_status = EAPOL_STATUS_NONE;
- } else if (cur_eapol_status >= EAPOL_STATUS_WPS_WSC_START &&
- cur_eapol_status < EAPOL_STATUS_WPS_DONE) {
- WL_ERROR(("%s: WPS failed at %d\n", __FUNCTION__, cur_eapol_status));
+ } else if (cur_eapol_status >= EAPOL_STATUS_WSC_START &&
+ cur_eapol_status < EAPOL_STATUS_WSC_DONE) {
+ WL_ERROR(("WPS failed at %d\n", cur_eapol_status));
dhd->conf->eapol_status = EAPOL_STATUS_NONE;
}
}
bzero(&bssid, sizeof(struct sockaddr));
if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETHER_ADDR_LEN))) {
- WL_ERROR(("%s: WLC_REASSOC failed (%d)\n", __FUNCTION__, error));
+ WL_ERROR(("WLC_REASSOC failed (%d)\n", error));
return error;
}
return 0;
}
+#define DHD_CHECK(dhd, dev) \
+ if (!dhd) { \
+ WL_ERROR (("[dhd-%s] %s: dhd is NULL\n", dev->name, __FUNCTION__)); \
+ return -ENODEV; \
+ } \
+
static int
wl_iw_set_freq(
struct net_device *dev,
int error, chan;
uint sf = 0;
struct dhd_pub *dhd = dhd_get_pub(dev);
- wl_conn_info_t *conn_info = NULL;
-#ifdef WL_ESCAN
- wl_escan_info_t *escan;
- if (dhd && dhd->escan) {
- escan = (wl_escan_info_t *)dhd->escan;
- conn_info = &escan->conn_info;
- }
-#else
- iscan_info_t *iscan;
- if (dhd && dhd->iscan) {
- iscan = (iscan_info_t *)dhd->iscan;
- conn_info = &iscan->conn_info;
- }
-#endif
+ wl_wext_info_t *wext_info = NULL;
WL_TRACE(("%s: SIOCSIWFREQ\n", dev->name));
+ DHD_CHECK(dhd, dev);
+ wext_info = dhd->wext_info;
/* Setting by channel number */
if (fwrq->e == 0 && fwrq->m < MAXCHANNEL) {
}
chan = wf_mhz2channel(fwrq->m, sf);
}
- if (conn_info)
- conn_info->channel = chan;
- WL_ERROR(("%s: chan=%d\n", __FUNCTION__, chan));
+ if (wext_info)
+ wext_info->conn_info.channel = chan;
+ WL_MSG(dev->name, "chan=%d\n", chan);
chan = htod32(chan);
if ((error = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &chan, sizeof(chan)))) {
- WL_ERROR(("%s: WLC_SET_CHANNEL failed (%d).\n", __FUNCTION__, error));
+ WL_ERROR(("WLC_SET_CHANNEL failed (%d).\n", error));
return error;
}
{
int infra = 0, ap = 0, error = 0;
struct dhd_pub *dhd = dhd_get_pub(dev);
- wl_conn_info_t *conn_info = NULL;
-#ifdef WL_ESCAN
- wl_escan_info_t *escan;
- if (dhd && dhd->escan) {
- escan = (wl_escan_info_t *)dhd->escan;
- conn_info = &escan->conn_info;
- }
-#else
- iscan_info_t *iscan;
- if (dhd && dhd->iscan) {
- iscan = (iscan_info_t *)dhd->iscan;
- conn_info = &iscan->conn_info;
- }
-#endif
+ wl_wext_info_t *wext_info = NULL;
WL_TRACE(("%s: SIOCSIWMODE\n", dev->name));
- if (conn_info) {
- memset(&conn_info->ssid, 0, sizeof(wlc_ssid_t));
- memset(&conn_info->bssid, 0, sizeof(struct ether_addr));
- conn_info->channel = 0;
+ DHD_CHECK(dhd, dev);
+ wext_info = dhd->wext_info;
+ if (wext_info) {
+ memset(&wext_info->conn_info.ssid, 0, sizeof(wlc_ssid_t));
+ memset(&wext_info->conn_info.bssid, 0, sizeof(struct ether_addr));
+ wext_info->conn_info.channel = 0;
}
switch (*uwrq) {
{
int error = -EINVAL;
struct dhd_pub *dhd = dhd_get_pub(dev);
- wl_conn_info_t *conn_info = NULL;
-#ifdef WL_ESCAN
- wl_escan_info_t *escan;
- if (dhd && dhd->escan) {
- escan = (wl_escan_info_t *)dhd->escan;
- conn_info = &escan->conn_info;
- }
-#else
- iscan_info_t *iscan;
- if (dhd && dhd->iscan) {
- iscan = (iscan_info_t *)dhd->iscan;
- conn_info = &iscan->conn_info;
- }
-#endif
+ wl_wext_info_t *wext_info = NULL;
WL_TRACE(("%s: SIOCSIWAP\n", dev->name));
-
+ DHD_CHECK(dhd, dev);
+ wext_info = dhd->wext_info;
if (awrq->sa_family != ARPHRD_ETHER) {
- WL_ERROR(("%s: Invalid Header...sa_family\n", __FUNCTION__));
+ WL_ERROR(("Invalid Header...sa_family\n"));
return -EINVAL;
}
if (ETHER_ISBCAST(awrq->sa_data) || ETHER_ISNULLADDR(awrq->sa_data)) {
scb_val_t scbval;
bzero(&scbval, sizeof(scb_val_t));
- WL_ERROR(("%s: WLC_DISASSOC\n", __FUNCTION__));
+ WL_MSG(dev->name, "WLC_DISASSOC\n");
if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) {
- WL_ERROR(("%s: WLC_DISASSOC failed (%d).\n", __FUNCTION__, error));
+ WL_ERROR(("WLC_DISASSOC failed (%d).\n", error));
}
wl_iw_update_connect_status(dev, WL_EXT_STATUS_DISCONNECTING);
return 0;
* eabuf)));
*/
/* Reassociate to the specified AP */
- if (conn_info)
- memcpy(&conn_info->bssid, awrq->sa_data, ETHER_ADDR_LEN);
- if (conn_info && conn_info->ssid.SSID_len) {
- if ((error = wl_ext_connect(dev, conn_info)))
+ if (wext_info)
+ memcpy(&wext_info->conn_info.bssid, awrq->sa_data, ETHER_ADDR_LEN);
+ if (wext_info && wext_info->conn_info.ssid.SSID_len) {
+ if ((error = wl_ext_connect(dev, &wext_info->conn_info)))
return error;
} else {
if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, awrq->sa_data, ETHER_ADDR_LEN))) {
- WL_ERROR(("%s: WLC_REASSOC failed (%d).\n", __FUNCTION__, error));
+ WL_ERROR(("WLC_REASSOC failed (%d).\n", error));
return error;
}
- WL_ERROR(("%s: join BSSID="MACSTR"\n", __FUNCTION__, MAC2STR((u8 *)awrq->sa_data)));
+ WL_MSG(dev->name, "join BSSID="MACSTR"\n", MAC2STR((u8 *)awrq->sa_data));
}
wl_iw_update_connect_status(dev, WL_EXT_STATUS_CONNECTING);
if (mlme->cmd == IW_MLME_DISASSOC) {
scbval.val = htod32(scbval.val);
+ WL_MSG(dev->name, "WLC_DISASSOC\n");
error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
}
else if (mlme->cmd == IW_MLME_DEAUTH) {
scbval.val = htod32(scbval.val);
+ WL_MSG(dev->name, "WLC_SCB_DEAUTHENTICATE_FOR_REASON\n");
error = dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scbval,
sizeof(scb_val_t));
}
else {
- WL_ERROR(("%s: Invalid ioctl data.\n", __FUNCTION__));
+ WL_ERROR(("Invalid ioctl data.\n"));
return error;
}
wl_iw_update_connect_status(dev, WL_EXT_STATUS_DISCONNECTING);
int i;
int16 rssi;
struct dhd_pub *dhd = dhd_get_pub(dev);
- if (dhd && dhd->iscan)
- iscan = (iscan_info_t *)dhd->iscan;
+ wl_wext_info_t *wext_info = NULL;
WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+ DHD_CHECK(dhd, dev);
+ wext_info = dhd->wext_info;
+ iscan = &wext_info->iscan;
if (!extra)
return -EINVAL;
return 0;
}
+#endif
static int
wl_iw_iscan_set_scan(
char *extra
)
{
- wlc_ssid_t ssid;
struct dhd_pub *dhd = dhd_get_pub(dev);
+ wl_wext_info_t *wext_info = NULL;
+ wlc_ssid_t ssid;
+#ifndef WL_ESCAN
iscan_info_t *iscan;
- if (dhd && dhd->iscan)
- iscan = (iscan_info_t *)dhd->iscan;
+#endif
+ DHD_CHECK(dhd, dev);
+ wext_info = dhd->wext_info;
+#ifdef WL_ESCAN
+ /* default Broadcast scan */
+ memset(&ssid, 0, sizeof(ssid));
+#if WIRELESS_EXT > 17
+ /* check for given essid */
+ if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+ if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+ struct iw_scan_req *req = (struct iw_scan_req *)extra;
+ ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
+ memcpy(ssid.SSID, req->essid, ssid.SSID_len);
+ ssid.SSID_len = htod32(ssid.SSID_len);
+ }
+ }
+#endif
+ return wl_escan_set_scan(dev, dhd, &ssid, TRUE);
+#else
+ iscan = &wext_info->iscan;
WL_TRACE(("%s: SIOCSIWSCAN iscan=%p\n", dev->name, iscan));
/* use backup if our thread is not successful */
iscan->timer_on = 1;
return 0;
+#endif
}
-#endif /* WL_ESCAN */
#if WIRELESS_EXT > 17
static bool
}
#endif /* WIRELESS_EXT > 17 */
+#ifdef BCMWAPI_WPI
+static inline int _wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data,
+ size_t len, int uppercase)
+{
+ size_t i;
+ char *pos = buf, *end = buf + buf_size;
+ int ret;
+ if (buf_size == 0)
+ return 0;
+ for (i = 0; i < len; i++) {
+ ret = snprintf(pos, end - pos, uppercase ? "%02X" : "%02x",
+ data[i]);
+ if (ret < 0 || ret >= end - pos) {
+ end[-1] = '\0';
+ return pos - buf;
+ }
+ pos += ret;
+ }
+ end[-1] = '\0';
+ return pos - buf;
+}
+
+/**
+ * wpa_snprintf_hex - Print data as a hex string into a buffer
+ * @buf: Memory area to use as the output buffer
+ * @buf_size: Maximum buffer size in bytes (should be at least 2 * len + 1)
+ * @data: Data to be printed
+ * @len: Length of data in bytes
+ * Returns: Number of bytes written
+ */
+static int
+wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data, size_t len)
+{
+ return _wpa_snprintf_hex(buf, buf_size, data, len, 0);
+}
+#endif /* BCMWAPI_WPI */
#ifndef WL_ESCAN
static
#if WIRELESS_EXT > 17
struct iw_event iwe;
char *event;
+#ifdef BCMWAPI_WPI
+ char *buf;
+ int custom_event_len;
+#endif
event = *event_p;
if (bi->ie_length) {
break;
}
}
+
+#ifdef BCMWAPI_WPI
+ ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+ ptr_len = bi->ie_length;
+
+ while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WAPI_ID))) {
+ WL_TRACE(("found a WAPI IE...\n"));
+#ifdef WAPI_IE_USE_GENIE
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = ie->len + 2;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+#else /* using CUSTOM event */
+ iwe.cmd = IWEVCUSTOM;
+ custom_event_len = strlen("wapi_ie=") + 2*(ie->len + 2);
+ iwe.u.data.length = custom_event_len;
+
+ buf = kmalloc(custom_event_len+1, GFP_KERNEL);
+ if (buf == NULL)
+ {
+ WL_ERROR(("malloc(%d) returned NULL...\n", custom_event_len));
+ break;
+ }
+ memcpy(buf, "wapi_ie=", 8);
+ wpa_snprintf_hex(buf + 8, 2+1, &(ie->id), 1);
+ wpa_snprintf_hex(buf + 10, 2+1, &(ie->len), 1);
+ wpa_snprintf_hex(buf + 12, 2*ie->len+1, ie->data, ie->len);
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, buf);
+ kfree(buf);
+#endif /* WAPI_IE_USE_GENIE */
+ break;
+ }
+#endif /* BCMWAPI_WPI */
*event_p = event;
}
int16 rssi;
int channel;
- WL_TRACE(("%s: %s SIOCGIWSCAN\n", __FUNCTION__, dev->name));
+ WL_TRACE(("%s SIOCGIWSCAN\n", dev->name));
if (!extra)
return -EINVAL;
// terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
channel = (bi->ctl_ch == 0) ? CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch;
- WL_SCAN(("%s: BSSID="MACSTR", channel=%d, RSSI=%d, SSID=\"%s\"\n",
- __FUNCTION__, MAC2STR(bi->BSSID.octet), channel, rssi, bi->SSID));
+ WL_SCAN(("BSSID="MACSTR", channel=%d, RSSI=%d, SSID=\"%s\"\n",
+ MAC2STR(bi->BSSID.octet), channel, rssi, bi->SSID));
/* First entry must be the BSSID */
iwe.cmd = SIOCGIWAP;
return 0;
}
+#endif /* WL_ESCAN */
static int
wl_iw_iscan_get_scan(
char *extra
)
{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ wl_wext_info_t *wext_info = NULL;
+#ifndef WL_ESCAN
wl_scan_results_t *list;
struct iw_event iwe;
wl_bss_info_t *bi = NULL;
iscan_buf_t * p_buf;
int16 rssi;
int channel;
- struct dhd_pub *dhd = dhd_get_pub(dev);
iscan_info_t *iscan;
- if (dhd && dhd->iscan)
- iscan = (iscan_info_t *)dhd->iscan;
+#endif
- WL_TRACE(("%s: %s SIOCGIWSCAN\n", __FUNCTION__, dev->name));
+ DHD_CHECK(dhd, dev);
+ wext_info = dhd->wext_info;
+#ifdef WL_ESCAN
+ return wl_escan_get_scan(dev, dhd, info, dwrq, extra);
+#else
+ WL_TRACE(("%s SIOCGIWSCAN\n", dev->name));
if (!extra)
return -EINVAL;
/* use backup if our thread is not successful */
+ iscan = &wext_info->iscan;
if ((!iscan) || (iscan->sysioc_pid < 0)) {
return wl_iw_get_scan(dev, info, dwrq, extra);
}
// terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
channel = (bi->ctl_ch == 0) ? CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch;
- WL_SCAN(("%s: BSSID="MACSTR", channel=%d, RSSI=%d, SSID=\"%s\"\n",
- __FUNCTION__, MAC2STR(bi->BSSID.octet), channel, rssi, bi->SSID));
+ WL_SCAN(("BSSID="MACSTR", channel=%d, RSSI=%d, SSID=\"%s\"\n",
+ MAC2STR(bi->BSSID.octet), channel, rssi, bi->SSID));
/* First entry must be the BSSID */
iwe.cmd = SIOCGIWAP;
dwrq->length = event - extra;
dwrq->flags = 0; /* todo */
- WL_SCAN(("%s: apcnt=%d\n", __FUNCTION__, apcnt));
+ WL_SCAN(("apcnt=%d\n", apcnt));
return 0;
+#endif
}
-#endif /* WL_ESCAN */
#endif /* WIRELESS_EXT > 13 */
wlc_ssid_t ssid;
int error;
struct dhd_pub *dhd = dhd_get_pub(dev);
- wl_conn_info_t *conn_info = NULL;
-#ifdef WL_ESCAN
- wl_escan_info_t *escan;
- if (dhd && dhd->escan) {
- escan = (wl_escan_info_t *)dhd->escan;
- conn_info = &escan->conn_info;
- }
-#else
- iscan_info_t *iscan;
- if (dhd && dhd->iscan) {
- iscan = (iscan_info_t *)dhd->iscan;
- conn_info = &iscan->conn_info;
- }
-#endif
+ wl_wext_info_t *wext_info = NULL;
WL_TRACE(("%s: SIOCSIWESSID\n", dev->name));
+ DHD_CHECK(dhd, dev);
+ wext_info = dhd->wext_info;
/* default Broadcast SSID */
memset(&ssid, 0, sizeof(ssid));
memcpy(ssid.SSID, extra, ssid.SSID_len);
ssid.SSID_len = htod32(ssid.SSID_len);
- if (conn_info) {
- memcpy(conn_info->ssid.SSID, ssid.SSID, ssid.SSID_len);
- conn_info->ssid.SSID_len = ssid.SSID_len;
+ if (wext_info) {
+ memcpy(wext_info->conn_info.ssid.SSID, ssid.SSID, ssid.SSID_len);
+ wext_info->conn_info.ssid.SSID_len = ssid.SSID_len;
}
- if (conn_info && memcmp(ðer_null, &conn_info->bssid, ETHER_ADDR_LEN)) {
- if ((error = wl_ext_connect(dev, conn_info)))
+ if (wext_info && memcmp(ðer_null, &wext_info->conn_info.bssid, ETHER_ADDR_LEN)) {
+ if ((error = wl_ext_connect(dev, &wext_info->conn_info)))
return error;
} else {
if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, &ssid, sizeof(ssid)))) {
- WL_ERROR(("%s: WLC_SET_SSID failed (%d).\n", __FUNCTION__, error));
+ WL_ERROR(("WLC_SET_SSID failed (%d).\n", error));
return error;
}
- WL_ERROR(("%s: join SSID=%s\n", __FUNCTION__, ssid.SSID));
+ WL_MSG(dev->name, "join SSID=\"%s\"\n", ssid.SSID);
}
wl_iw_update_connect_status(dev, WL_EXT_STATUS_CONNECTING);
}
else {
scb_val_t scbval;
bzero(&scbval, sizeof(scb_val_t));
- WL_ERROR(("%s: WLC_DISASSOC\n", __FUNCTION__));
+ WL_MSG(dev->name, "WLC_DISASSOC\n");
if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) {
- WL_ERROR(("%s: WLC_DISASSOC failed (%d).\n", __FUNCTION__, error));
+ WL_ERROR(("WLC_DISASSOC failed (%d).\n", error));
return error;
}
wl_iw_update_connect_status(dev, WL_EXT_STATUS_DISCONNECTING);
return -EINVAL;
if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) {
- WL_ERROR(("Error getting the SSID\n"));
+ WL_ERROR(("Error getting the SSID %d\n", error));
return error;
}
char *extra
)
{
+#if defined(BCMWAPI_WPI)
+ uchar buf[WLC_IOCTL_SMLEN] = {0};
+ uchar *p = buf;
+ int wapi_ie_size;
+
+ WL_TRACE(("%s: SIOCSIWGENIE\n", dev->name));
+
+ if (extra[0] == DOT11_MNG_WAPI_ID)
+ {
+ wapi_ie_size = iwp->length;
+ memcpy(p, extra, iwp->length);
+ dev_wlc_bufvar_set(dev, "wapiie", buf, wapi_ie_size);
+ }
+ else
+#endif
dev_wlc_bufvar_set(dev, "wpaie", extra, iwp->length);
return 0;
bcopy(keystring, pmk.key, len);
pmk.flags = htod16(WSEC_PASSPHRASE);
- WL_WSEC(("%s: set key %s\n", __FUNCTION__, keystring));
+ WL_WSEC(("set key %s\n", keystring));
error = dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
if (error) {
- WL_ERROR(("%s: WLC_SET_WSEC_PMK error %d\n", __FUNCTION__, error));
+ WL_ERROR(("WLC_SET_WSEC_PMK error %d\n", error));
return error;
}
}
case IW_ENCODE_ALG_CCMP:
key.algo = CRYPTO_ALGO_AES_CCM;
break;
+#ifdef BCMWAPI_WPI
+ case IW_ENCODE_ALG_SM4:
+ key.algo = CRYPTO_ALGO_SMS4;
+ if (iwe->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
+ key.flags &= ~WL_PRIMARY_KEY;
+ }
+ break;
+#endif
default:
break;
}
char eabuf[ETHER_ADDR_STR_LEN];
pmkid_t *pmkid_array = NULL;
struct dhd_pub *dhd = dhd_get_pub(dev);
-#ifdef WL_ESCAN
- wl_escan_info_t *escan;
- if (dhd && dhd->escan) {
- escan = (wl_escan_info_t *)dhd->escan;
- pmk_list = &escan->pmk_list;
- }
-#else
- iscan_info_t *iscan;
- if (dhd && dhd->iscan) {
- iscan = (iscan_info_t *)dhd->iscan;
- pmk_list = &iscan->pmk_list;
- }
-#endif
+ wl_wext_info_t *wext_info = NULL;
WL_TRACE(("%s: SIOCSIWPMKSA\n", dev->name));
+ DHD_CHECK(dhd, dev);
+ wext_info = dhd->wext_info;
+ pmk_list = &wext_info->pmk_list;
if (pmk_list)
pmkid_array = pmk_list->pmkids.pmkid;
iwpmksa = (struct iw_pmksa *)extra;
WL_TRACE(("%02x ", pmkid_array[i].PMKID[j]));
printf("\n");
}
- WL_TRACE(("\n"));
dev_wlc_bufvar_set(dev, "pmkid_info", (char *)pmk_list, sizeof(struct pmk_list));
return 0;
}
val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
else if (paramval & IW_AUTH_WPA_VERSION_WPA2)
val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
- WL_TRACE(("%s: %d: setting wpa_auth to 0x%0x\n", __FUNCTION__, __LINE__, val));
+#ifdef BCMWAPI_WPI
+ else if (paramval & IW_AUTH_WAPI_VERSION_1)
+ val = WAPI_AUTH_UNSPECIFIED;
+#endif
+ WL_TRACE(("%d: setting wpa_auth to 0x%0x\n", __LINE__, val));
if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
return error;
break;
}
if ((error = dev_wlc_intvar_get(dev, "wsec", &val))) {
- WL_ERROR(("%s: wsec error %d\n", __FUNCTION__, error));
+ WL_ERROR(("wsec error %d\n", error));
return error;
}
- WL_WSEC(("%s: get wsec=0x%x\n", __FUNCTION__, val));
+ WL_WSEC(("get wsec=0x%x\n", val));
cipher_combined = iw->gwsec | iw->pwsec;
val &= ~(WEP_ENABLED | TKIP_ENABLED | AES_ENABLED);
val |= TKIP_ENABLED;
if (cipher_combined & IW_AUTH_CIPHER_CCMP)
val |= AES_ENABLED;
+#ifdef BCMWAPI_WPI
+ val &= ~SMS4_ENABLED;
+ if (cipher_combined & IW_AUTH_CIPHER_SMS4)
+ val |= SMS4_ENABLED;
+#endif
if (iw->privacy_invoked && !val) {
- WL_WSEC(("%s: %s: 'Privacy invoked' TRUE but clearing wsec, assuming "
- "we're a WPS enrollee\n", dev->name, __FUNCTION__));
+ WL_WSEC(("%s: 'Privacy invoked' TRUE but clearing wsec, assuming "
+ "we're a WPS enrollee\n", dev->name));
if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
WL_WSEC(("Failed to set iovar is_WPS_enrollee\n"));
return error;
}
}
- WL_WSEC(("%s: set wsec=0x%x\n", __FUNCTION__, val));
+ WL_WSEC(("set wsec=0x%x\n", val));
if ((error = dev_wlc_intvar_set(dev, "wsec", val))) {
- WL_ERROR(("%s: wsec error %d\n", __FUNCTION__, error));
+ WL_ERROR(("wsec error %d\n", error));
return error;
}
* handshake.
*/
if (dev_wlc_intvar_get(dev, "fbt_cap", &fbt_cap) == 0) {
- WL_WSEC(("%s: get fbt_cap=0x%x\n", __FUNCTION__, fbt_cap));
+ WL_WSEC(("get fbt_cap=0x%x\n", fbt_cap));
if (fbt_cap == WLC_FBT_CAP_DRV_4WAY_AND_REASSOC) {
if ((paramid == IW_AUTH_CIPHER_PAIRWISE) && (val & AES_ENABLED)) {
if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 1))) {
- WL_ERROR(("%s: sup_wpa 1 error %d\n", __FUNCTION__, error));
+ WL_ERROR(("sup_wpa 1 error %d\n", error));
return error;
}
}
else if (val == 0) {
if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 0))) {
- WL_ERROR(("%s: sup_wpa 0 error %d\n", __FUNCTION__, error));
+ WL_ERROR(("sup_wpa 0 error %d\n", error));
return error;
}
}
case IW_AUTH_KEY_MGMT:
if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val))) {
- WL_ERROR(("%s: wpa_auth error %d\n", __FUNCTION__, error));
+ WL_ERROR(("wpa_auth error %d\n", error));
return error;
}
- WL_WSEC(("%s: get wpa_auth to %d\n", __FUNCTION__, val));
+ WL_WSEC(("get wpa_auth to %d\n", val));
if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
if (paramval & (IW_AUTH_KEY_MGMT_FT_PSK | IW_AUTH_KEY_MGMT_PSK))
if (paramval & (IW_AUTH_KEY_MGMT_FT_802_1X | IW_AUTH_KEY_MGMT_FT_PSK))
val |= WPA2_AUTH_FT;
}
- WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
+#ifdef BCMWAPI_WPI
+ if (paramval & (IW_AUTH_KEY_MGMT_WAPI_PSK | IW_AUTH_KEY_MGMT_WAPI_CERT))
+ val = WAPI_AUTH_UNSPECIFIED;
+#endif
+ WL_TRACE(("%d: setting wpa_auth to %d\n", __LINE__, val));
if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
return error;
break;
case IW_AUTH_80211_AUTH_ALG:
/* open shared */
- WL_ERROR(("Setting the D11auth %d\n", paramval));
+ WL_MSG(dev->name, "Setting the D11auth %d\n", paramval);
if (paramval & IW_AUTH_ALG_OPEN_SYSTEM)
val = 0;
else if (paramval & IW_AUTH_ALG_SHARED_KEY)
case IW_AUTH_WPA_ENABLED:
if (paramval == 0) {
val = 0;
- WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
+ WL_TRACE(("%d: setting wpa_auth to %d\n", __LINE__, val));
error = dev_wlc_intvar_set(dev, "wpa_auth", val);
return error;
}
#if WIRELESS_EXT > 17
case IW_AUTH_ROAMING_CONTROL:
- WL_TRACE(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+ WL_TRACE(("IW_AUTH_ROAMING_CONTROL\n"));
/* driver control or user space app control */
break;
#endif /* WIRELESS_EXT > 17 */
+#ifdef BCMWAPI_WPI
+
+ case IW_AUTH_WAPI_ENABLED:
+ if ((error = dev_wlc_intvar_get(dev, "wsec", &val)))
+ return error;
+ if (paramval) {
+ val |= SMS4_ENABLED;
+ if ((error = dev_wlc_intvar_set(dev, "wsec", val))) {
+ WL_ERROR(("setting wsec to 0x%0x returned error %d\n",
+ val, error));
+ return error;
+ }
+ if ((error = dev_wlc_intvar_set(dev, "wpa_auth", WAPI_AUTH_UNSPECIFIED))) {
+ WL_ERROR(("setting wpa_auth(%d) returned %d\n",
+ WAPI_AUTH_UNSPECIFIED,
+ error));
+ return error;
+ }
+ }
+
+ break;
+
+#endif /* BCMWAPI_WPI */
default:
break;
#if WIRELESS_EXT > 17
case IW_AUTH_ROAMING_CONTROL:
- WL_ERROR(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+ WL_ERROR(("IW_AUTH_ROAMING_CONTROL\n"));
/* driver control or user space app control */
break;
(iw_handler) wl_iw_iscan_get_aplist, /* SIOCGIWAPLIST */
#endif
#if WIRELESS_EXT > 13
-#ifdef WL_ESCAN
- (iw_handler) wl_escan_set_scan, /* SIOCSIWSCAN */
- (iw_handler) wl_escan_get_scan, /* SIOCGIWSCAN */
-#else
(iw_handler) wl_iw_iscan_set_scan, /* SIOCSIWSCAN */
(iw_handler) wl_iw_iscan_get_scan, /* SIOCGIWSCAN */
-#endif
#else /* WIRELESS_EXT > 13 */
(iw_handler) NULL, /* SIOCSIWSCAN */
(iw_handler) NULL, /* SIOCGIWSCAN */
int max_tokens = 0, ret = 0;
#ifndef WL_ESCAN
struct dhd_pub *dhd = dhd_get_pub(dev);
+ wl_wext_info_t *wext_info = NULL;
iscan_info_t *iscan;
- if (dhd && dhd->iscan)
- iscan = (iscan_info_t *)dhd->iscan;
+
+ DHD_CHECK(dhd, dev);
+ wext_info = dhd->wext_info;
+ iscan = &wext_info->iscan;
#endif
if (cmd < SIOCIWFIRST ||
#endif /* IW_CUSTOM_MAX */
void
-wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data)
+wl_iw_event(struct net_device *dev, struct wl_wext_info *wext_info,
+ wl_event_msg_t *e, void* data)
{
#if WIRELESS_EXT > 13
union iwreq_data wrqu;
uint32 datalen = ntoh32(e->datalen);
uint32 status = ntoh32(e->status);
uint32 reason = ntoh32(e->reason);
- struct dhd_pub *dhd = dhd_get_pub(dev);
- wl_conn_info_t *conn_info = NULL;
-#ifdef WL_ESCAN
- wl_escan_info_t *escan;
- if (dhd && dhd->escan) {
- escan = (wl_escan_info_t *)dhd->escan;
- conn_info = &escan->conn_info;
- }
-#else
- iscan_info_t *iscan;
- if (dhd && dhd->iscan) {
- iscan = (iscan_info_t *)dhd->iscan;
- conn_info = &iscan->conn_info;
- }
+#ifndef WL_ESCAN
+ iscan_info_t *iscan = &wext_info->iscan;
#endif
memset(&wrqu, 0, sizeof(wrqu));
case WLC_E_DEAUTH:
case WLC_E_DISASSOC:
wl_iw_update_connect_status(dev, WL_EXT_STATUS_DISCONNECTED);
- printf("%s: disconnected with "MACSTR", event %d, reason %d\n",
- __FUNCTION__, MAC2STR((u8 *)wrqu.addr.sa_data), event_type, reason);
+ WL_MSG(dev->name, "disconnected with "MACSTR", event %d, reason %d\n",
+ MAC2STR((u8 *)wrqu.addr.sa_data), event_type, reason);
break;
case WLC_E_DEAUTH_IND:
case WLC_E_DISASSOC_IND:
cmd = SIOCGIWAP;
- wrqu.data.length = strlen(extra);
+ WL_MSG(dev->name, "disconnected with "MACSTR", event %d, reason %d\n",
+ MAC2STR((u8 *)wrqu.addr.sa_data), event_type, reason);
bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
bzero(&extra, ETHER_ADDR_LEN);
wl_iw_update_connect_status(dev, WL_EXT_STATUS_DISCONNECTED);
- printf("%s: disconnected with "MACSTR", event %d, reason %d\n",
- __FUNCTION__, MAC2STR((u8 *)wrqu.addr.sa_data), event_type, reason);
break;
case WLC_E_LINK:
cmd = SIOCGIWAP;
if (!(flags & WLC_EVENT_MSG_LINK)) {
- printf("%s: Link Down with "MACSTR", reason=%d\n", __FUNCTION__,
+ WL_MSG(dev->name, "Link Down with "MACSTR", reason=%d\n",
MAC2STR((u8 *)wrqu.addr.sa_data), reason);
bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
bzero(&extra, ETHER_ADDR_LEN);
wl_iw_update_connect_status(dev, WL_EXT_STATUS_DISCONNECTED);
} else {
- printf("%s: Link UP with "MACSTR"\n", __FUNCTION__,
+ WL_MSG(dev->name, "Link UP with "MACSTR"\n",
MAC2STR((u8 *)wrqu.addr.sa_data));
}
break;
}
#endif /* WIRELESS_EXT > 17 */
-#ifdef WL_ESCAN
- case WLC_E_ESCAN_RESULT:
- WL_TRACE(("event WLC_E_ESCAN_RESULT\n"));
- wl_escan_event(dev, e, data);
- break;
-#else
-
+#ifndef WL_ESCAN
case WLC_E_SCAN_COMPLETE:
#if WIRELESS_EXT > 14
cmd = SIOCGIWSCAN;
break;
}
default:
- WL_ERROR(("%s %d: Unsupported type %d\n", __FUNCTION__, __LINE__, type));
+ WL_ERROR(("%d: Unsupported type %d\n", __LINE__, type));
break;
}
return res;
phy_noise = 0;
if ((res = dev_wlc_ioctl(dev, WLC_GET_PHY_NOISE, &phy_noise, sizeof(phy_noise)))) {
- WL_ERROR(("%s: WLC_GET_PHY_NOISE error=%d\n", __FUNCTION__, res));
+ WL_ERROR(("WLC_GET_PHY_NOISE error=%d\n", res));
goto done;
}
memset(&scb_val, 0, sizeof(scb_val));
if ((res = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t)))) {
- WL_ERROR(("%s: WLC_GET_RSSI error=%d\n", __FUNCTION__, res));
+ WL_ERROR(("WLC_GET_RSSI error=%d\n", res));
goto done;
}
memset(&revinfo, 0, sizeof(revinfo));
res = dev_wlc_ioctl(dev, WLC_GET_REVINFO, &revinfo, sizeof(revinfo));
if (res) {
- WL_ERROR(("%s: WLC_GET_REVINFO failed %d\n", __FUNCTION__, res));
+ WL_ERROR(("WLC_GET_REVINFO failed %d\n", res));
goto done;
}
corerev = dtoh32(revinfo.corerev);
#ifdef WL_NAN
res = wl_cntbuf_to_xtlv_format(NULL, cntinfo, MAX_WLIW_IOCTL_LEN, corerev);
if (res) {
- WL_ERROR(("%s: wl_cntbuf_to_xtlv_format failed %d\n", __FUNCTION__, res));
+ WL_ERROR(("wl_cntbuf_to_xtlv_format failed %d\n", res));
goto done;
}
#ifndef WL_ESCAN
static void
-wl_iw_timerfunc(
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- struct timer_list *t
-#else
- unsigned long data
-#endif
-)
+wl_iw_timerfunc(unsigned long data)
{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- iscan_info_t *iscan = from_timer(iscan, t, timer);
-#else
iscan_info_t *iscan = (iscan_info_t *)data;
-#endif
+
iscan->timer_on = 0;
if (iscan->iscan_state != ISCAN_STATE_IDLE) {
WL_TRACE(("timer trigger\n"));
uint32 status;
iscan_info_t *iscan = (iscan_info_t *)data;
- printf("%s: thread Enter\n", __FUNCTION__);
+ WL_MSG("wlan", "thread Enter\n");
DAEMONIZE("iscan_sysioc");
status = WL_SCAN_RESULTS_PARTIAL;
break;
}
}
- printf("%s: was terminated\n", __FUNCTION__);
+ WL_MSG("wlan", "was terminated\n");
complete_and_exit(&iscan->sysioc_exited, 0);
}
+#endif /* !WL_ESCAN */
-void wl_iw_down(dhd_pub_t *dhdp)
+void
+wl_iw_detach(struct net_device *dev, dhd_pub_t *dhdp)
{
- iscan_info_t *iscan = dhdp->iscan;
+ wl_wext_info_t *wext_info = dhdp->wext_info;
+#ifndef WL_ESCAN
+ iscan_buf_t *buf;
+ iscan_info_t *iscan;
+#endif
+ if (!wext_info)
+ return;
- if (iscan)
- wl_ext_add_remove_pm_enable_work(&iscan->conn_info, FALSE);
+#ifndef WL_ESCAN
+ iscan = &wext_info->iscan;
+ if (iscan->sysioc_pid >= 0) {
+ KILL_PROC(iscan->sysioc_pid, SIGTERM);
+ wait_for_completion(&iscan->sysioc_exited);
+ }
+
+ while (iscan->list_hdr) {
+ buf = iscan->list_hdr->next;
+ kfree(iscan->list_hdr);
+ iscan->list_hdr = buf;
+ }
+#endif
+ wl_ext_add_remove_pm_enable_work(wext_info, FALSE);
+ wl_ext_event_deregister(dev, dhdp, WLC_E_LAST, wl_iw_event);
+ if (wext_info) {
+ kfree(wext_info);
+ dhdp->wext_info = NULL;
+ }
}
int
wl_iw_attach(struct net_device *dev, dhd_pub_t *dhdp)
{
+ wl_wext_info_t *wext_info = NULL;
+ int ret = 0;
+#ifndef WL_ESCAN
iscan_info_t *iscan = NULL;
-
- printf("%s: Enter\n", __FUNCTION__);
+#endif
if (!dev)
return 0;
+ WL_TRACE(("Enter\n"));
- iscan = kmalloc(sizeof(iscan_info_t), GFP_KERNEL);
- if (!iscan)
+ wext_info = (void *)kzalloc(sizeof(struct wl_wext_info), GFP_KERNEL);
+ if (!wext_info)
return -ENOMEM;
- dhdp->iscan = (void *)iscan;
- memset(iscan, 0, sizeof(iscan_info_t));
+ memset(wext_info, 0, sizeof(wl_wext_info_t));
+ wext_info->dev = dev;
+ wext_info->dhd = dhdp;
+ wext_info->conn_info.bssidx = 0;
+ dhdp->wext_info = (void *)wext_info;
+
+#ifndef WL_ESCAN
+ iscan = &wext_info->iscan;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
iscan->kthread = NULL;
#endif
iscan->sysioc_pid = -1;
/* we only care about main interface so save a global here */
iscan->dev = dev;
- iscan->conn_info.dev = dev;
- iscan->conn_info.dhd = dhdp;
iscan->iscan_state = ISCAN_STATE_IDLE;
/* Set up the timer */
iscan->timer_ms = 2000;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
- timer_setup(&iscan->timer, wl_iw_timerfunc, 0);
-#else
- init_timer(&iscan->timer);
- iscan->timer.data = (ulong)iscan;
- iscan->timer.function = wl_iw_timerfunc;
-#endif
+ init_timer_compat(&iscan->scan_timeout, wl_iw_timerfunc, iscan);
sema_init(&iscan->sysioc_sem, 0);
init_completion(&iscan->sysioc_exited);
#else
iscan->sysioc_pid = kernel_thread(_iscan_sysioc_thread, iscan, 0);
#endif
- if (iscan->sysioc_pid < 0)
- return -ENOMEM;
- mutex_init(&iscan->conn_info.pm_sync);
- INIT_DELAYED_WORK(&iscan->conn_info.pm_enable_work, wl_ext_pm_work_handler);
- return 0;
+ if (iscan->sysioc_pid < 0) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+#endif
+ mutex_init(&wext_info->pm_sync);
+ INIT_DELAYED_WORK(&wext_info->pm_enable_work, wl_ext_pm_work_handler);
+ ret = wl_ext_event_register(dev, dhdp, WLC_E_LAST, wl_iw_event, dhdp->wext_info,
+ PRIO_EVENT_WEXT);
+ if (ret) {
+ WL_ERROR(("wl_ext_event_register err %d\n", ret));
+ goto exit;
+ }
+
+ return ret;
+exit:
+ wl_iw_detach(dev, dhdp);
+ return ret;
}
-void wl_iw_detach(dhd_pub_t *dhdp)
+void
+wl_iw_down(struct net_device *dev, dhd_pub_t *dhdp)
{
- iscan_buf_t *buf;
- iscan_info_t *iscan = dhdp->iscan;
- if (!iscan)
+ wl_wext_info_t *wext_info = NULL;
+
+ if (dhdp) {
+ wext_info = dhdp->wext_info;
+ } else {
+ WL_ERROR (("dhd is NULL\n"));
return;
- if (iscan->sysioc_pid >= 0) {
- KILL_PROC(iscan->sysioc_pid, SIGTERM);
- wait_for_completion(&iscan->sysioc_exited);
}
- wl_ext_add_remove_pm_enable_work(&iscan->conn_info, FALSE);
+ wl_ext_add_remove_pm_enable_work(wext_info, FALSE);
+}
- while (iscan->list_hdr) {
- buf = iscan->list_hdr->next;
- kfree(iscan->list_hdr);
- iscan->list_hdr = buf;
+int
+wl_iw_up(struct net_device *dev, dhd_pub_t *dhdp)
+{
+ wl_wext_info_t *wext_info = NULL;
+ int ret = 0;
+
+ if (dhdp) {
+ wext_info = dhdp->wext_info;
+ } else {
+ WL_ERROR (("dhd is NULL\n"));
+ return -ENODEV;
}
- kfree(iscan);
- dhdp->iscan = NULL;
+
+ return ret;
+}
+
+s32
+wl_iw_autochannel(struct net_device *dev, char* command, int total_len)
+{
+ struct dhd_pub *dhd = dhd_get_pub(dev);
+ wl_wext_info_t *wext_info = NULL;
+ int ret = 0;
+#ifdef WL_ESCAN
+ int bytes_written = -1;
+#endif
+
+ DHD_CHECK(dhd, dev);
+ wext_info = dhd->wext_info;
+#ifdef WL_ESCAN
+ sscanf(command, "%*s %d", &dhd->escan->autochannel);
+ if (dhd->escan->autochannel == 0) {
+ dhd->escan->best_2g_ch = 0;
+ dhd->escan->best_5g_ch = 0;
+ } else if (dhd->escan->autochannel == 2) {
+ bytes_written = snprintf(command, total_len, "2g=%d 5g=%d",
+ dhd->escan->best_2g_ch, dhd->escan->best_5g_ch);
+ WL_TRACE(("command result is %s\n", command));
+ ret = bytes_written;
+ }
+#endif
+
+ return ret;
}
-#endif /* !WL_ESCAN */
#endif /* USE_IW */
#endif /* WIRELESS_EXT > 12 */
extern int wl_iw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-extern void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data);
extern int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats);
int wl_iw_send_priv_event(struct net_device *dev, char *flag);
#ifdef WL_ESCAN
int wl_iw_handle_scanresults_ies(char **event_p, char *end,
struct iw_request_info *info, wl_bss_info_t *bi);
-#else
-int wl_iw_attach(struct net_device *dev, dhd_pub_t *dhdp);
-void wl_iw_detach(dhd_pub_t *dhdp);
-void wl_iw_down(dhd_pub_t *dhdp);
#endif
+int wl_iw_attach(struct net_device *dev, dhd_pub_t *dhdp);
+void wl_iw_detach(struct net_device *dev, dhd_pub_t *dhdp);
+int wl_iw_up(struct net_device *dev, dhd_pub_t *dhdp);
+void wl_iw_down(struct net_device *dev, dhd_pub_t *dhdp);
+s32 wl_iw_autochannel(struct net_device *dev, char* command, int total_len);
+
+/* message levels */
+#define WL_ERROR_LEVEL (1 << 0)
+#define WL_TRACE_LEVEL (1 << 1)
+#define WL_INFO_LEVEL (1 << 2)
+#define WL_SCAN_LEVEL (1 << 3)
+#define WL_WSEC_LEVEL (1 << 4)
#define CSCAN_COMMAND "CSCAN "
#define CSCAN_TLV_PREFIX 'S'
/*
* Broadcom Dongle Host Driver (DHD), Linux monitor network interface
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
/*
* Linux roam cache
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_roam.c 729257 2017-10-31 05:37:07Z $
+ * $Id: wl_roam.c 798173 2019-01-07 09:23:21Z $
*/
#include <typedefs.h>
#include <wl_cfg80211.h>
#endif // endif
#include <wldev_common.h>
+#include <bcmstdlib_s.h>
#ifdef ESCAN_CHANNEL_CACHE
#define MAX_ROAM_CACHE 200
WL_DBG(("CHSPEC = %s, CTL %d\n", wf_chspec_ntoa_ex(bi->chanspec, chanbuf), channel));
roam_cache[n_roam_cache].chanspec =
(channel <= CH_MAX_2G_CHANNEL ? band2G : band5G) | band_bw | channel;
- memcpy(roam_cache[n_roam_cache].ssid, bi->SSID, bi->SSID_len);
+ (void)memcpy_s(roam_cache[n_roam_cache].ssid, bi->SSID_len, bi->SSID, bi->SSID_len);
n_roam_cache++;
}
/*
* Common function shared by Linux WEXT, cfg80211 and p2p drivers
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wldev_common.c 754283 2018-03-27 02:51:46Z $
+ * $Id: wldev_common.c 786015 2018-10-24 08:21:53Z $
*/
#include <osl.h>
#include <bcmutils.h>
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
+#include <wl_cfgscan.h>
#endif /* WL_CFG80211 */
#include <dhd_config.h>
/*
* Common function shared by Linux WEXT, cfg80211 and p2p drivers
*
- * Copyright (C) 1999-2018, Broadcom.
+ * Copyright (C) 1999-2019, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wldev_common.h 695669 2017-04-21 13:04:52Z $
+ * $Id: wldev_common.h 813004 2019-04-03 07:16:21Z $
*/
#ifndef __WLDEV_COMMON_H__
#define __WLDEV_COMMON_H__
extern int net_os_set_dtim_skip(struct net_device *dev, int val);
extern int net_os_set_suspend_disable(struct net_device *dev, int val);
extern int net_os_set_suspend(struct net_device *dev, int val, int force);
+extern int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val);
extern int net_os_set_max_dtim_enable(struct net_device *dev, int val);
+#ifdef DISABLE_DTIM_IN_SUSPEND
+extern int net_os_set_disable_dtim_in_suspend(struct net_device *dev, int val);
+#endif /* DISABLE_DTIM_IN_SUSPEND */
extern int wl_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid,
int max, int *bytes_left);