From: Rongjun Chen Date: Fri, 9 Aug 2019 08:54:05 +0000 (+0800) Subject: wifi: update driver to 100.10.545.2 to support STA/AP concurrent [1/2] X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=965f77c4b3af197e85dda3dd2b52c3f22a126437;p=GitHub%2FLineageOS%2FG12%2Fandroid_hardware_amlogic_kernel-modules_dhd-driver.git wifi: update driver to 100.10.545.2 to support STA/AP concurrent [1/2] PD#SWPL-12561 Problem: STA/AP concurrent support Solution: update driver to fix it Verify: ampere Change-Id: I47f18a31e4e8d1049497b252ad8ecc04c27d7a0d Signed-off-by: Rongjun Chen --- diff --git a/bcmdhd.100.10.315.x/Makefile b/bcmdhd.100.10.315.x/Makefile index 9884970..580df9c 100644 --- a/bcmdhd.100.10.315.x/Makefile +++ b/bcmdhd.100.10.315.x/Makefile @@ -1,16 +1,24 @@ # bcmdhd -# 1. WL_IFACE_COMB_NUM_CHANNELS must be added if Android version is 4.4 with Kernel version 3.0~3.4, -# otherwise please remove it. # if not confiure pci mode, we use sdio mode as default ifeq ($(CONFIG_BCMDHD_PCIE),) $(info bcm SDIO driver configured) CONFIG_DHD_USE_STATIC_BUF := y endif + +ifeq ($(CONFIG_BCMDHD_SDIO),y) +MODULE_NAME := dhd +else +MODULE_NAME := bcmdhd +endif +#CONFIG_BCMDHD := m #CONFIG_BCMDHD_SDIO := y #CONFIG_BCMDHD_PCIE := y #CONFIG_BCMDHD_USB := y + +#CONFIG_BCMDHD_CUSB := y CONFIG_BCMDHD_PROPTXSTATUS := y +#CONFIG_BCMDHD_DEBUG := y CONFIG_MACH_PLATFORM := y #CONFIG_BCMDHD_DTS := y @@ -20,14 +28,15 @@ export CONFIG_BCMDHD_OOB = y export CONFIG_VTS_SUPPORT = y DHDCFLAGS = -Wall -Wstrict-prototypes -Dlinux -DBCMDRIVER \ - -Wno-maybe-uninitialized -Werror \ - -DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DBCMFILEIMAGE \ + -Wno-maybe-uninitialized -Werror \ + -DBCMDONGLEHOST -DBCMDMA32 -DBCMFILEIMAGE \ -DDHDTHREAD -DDHD_DEBUG -DSHOW_EVENTS -DBCMDBG -DGET_OTP_MAC_ENABLE \ -DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT -DSUPPORT_PM2_ONLY \ -DKEEP_ALIVE -DPKT_FILTER_SUPPORT -DDHDTCPACK_SUPPRESS \ -DDHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT \ -DMULTIPLE_SUPPLICANT -DTSQ_MULTIPLIER -DMFP \ - 
-DWL_EXT_IAPSTA \ + -DPOWERUP_MAX_RETRY=0 -DIFACE_HANG_FORCE_DEV_CLOSE -DWAIT_DEQUEUE \ + -DWL_EXT_IAPSTA -DWL_ESCAN \ -DENABLE_INSMOD_NO_FW_LOAD \ -I$(src) -I$(src)/include @@ -36,14 +45,16 @@ DHDOFILES = aiutils.o siutils.o sbutils.o bcmutils.o bcmwifi_channels.o \ dhd_common.o dhd_ip.o dhd_linux_wq.o dhd_custom_gpio.o \ bcmevent.o hndpmu.o linux_osl.o wldev_common.o wl_android.o \ dhd_debug_linux.o dhd_debug.o dhd_mschdbg.o dhd_dbg_ring.o \ - hnd_pktq.o hnd_pktpool.o bcmxtlv.o linux_pkt.o bcmstdlib_s.o \ - dhd_config.o wl_android_ext.o + hnd_pktq.o hnd_pktpool.o bcmxtlv.o linux_pkt.o bcmstdlib_s.o frag.o \ + dhd_linux_exportfs.o dhd_linux_pktdump.o \ + dhd_config.o wl_event.o wl_android_ext.o wl_escan.o #BCMDHD_SDIO ifneq ($(CONFIG_BCMDHD_SDIO),) DHDCFLAGS += -DBCMSDIO -DMMC_SDIO_ABORT -DBCMLXSDMMC -DUSE_SDIOFIFO_IOVAR \ -DSDTEST -DBDC -DDHD_USE_IDLECOUNT -DCUSTOM_SDIO_F2_BLKSIZE=256 \ - -DBCMSDIOH_TXGLOM -DBCMSDIOH_TXGLOM_EXT -DRXFRAME_THREAD + -DBCMSDIOH_TXGLOM -DBCMSDIOH_TXGLOM_EXT -DRXFRAME_THREAD \ + -DBCMSDIO_RXLIM_POST ifeq ($(CONFIG_BCMDHD_OOB),y) DHDCFLAGS += -DOOB_INTR_ONLY -DCUSTOMER_OOB -DHW_OOB ifeq ($(CONFIG_BCMDHD_DISABLE_WOWLAN),y) @@ -52,7 +63,6 @@ endif else DHDCFLAGS += -DSDIO_ISR_THREAD endif - DHDOFILES += bcmsdh.o bcmsdh_linux.o bcmsdh_sdmmc.o bcmsdh_sdmmc_linux.o \ dhd_sdio.o dhd_cdc.o dhd_wlfc.o endif @@ -61,12 +71,13 @@ endif ifneq ($(CONFIG_BCMDHD_PCIE),) DHDCFLAGS += -DPCIE_FULL_DONGLE -DBCMPCIE -DCUSTOM_DPC_PRIO_SETTING=-1 \ -DDONGLE_ENABLE_ISOLATION +DHDCFLAGS += -DDHD_LB -DDHD_LB_RXP -DDHD_LB_STATS -DDHD_LB_TXP +DHDCFLAGS += -DDHD_PKTID_AUDIT_ENABLED ifneq ($(CONFIG_PCI_MSI),) DHDCFLAGS += -DDHD_USE_MSI endif - DHDOFILES += dhd_pcie.o dhd_pcie_linux.o pcie_core.o dhd_flowring.o \ - dhd_msgbuf.o + dhd_msgbuf.o dhd_linux_lb.o endif #BCMDHD_USB @@ -79,7 +90,6 @@ ifneq ($(CONFIG_BCMDHD_CUSB),) DHDCFLAGS += -DBCMUSBDEV_COMPOSITE DHDCFLAGS :=$(filter-out -DENABLE_INSMOD_NO_FW_LOAD,$(DHDCFLAGS)) endif - DHDOFILES += dbus.o dbus_usb.o 
dbus_usb_linux.o dhd_cdc.o dhd_wlfc.o endif @@ -96,21 +106,40 @@ ifneq ($(CONFIG_CFG80211),) endif endif +ifeq ($(CONFIG_64BIT),y) + DHDCFLAGS := $(filter-out -DBCMDMA32,$(DHDCFLAGS)) + DHDCFLAGS += -DBCMDMA64OSL +endif + #VTS_SUPPORT ifeq ($(CONFIG_VTS_SUPPORT),y) ifneq ($(CONFIG_CFG80211),) -DHDCFLAGS += -DGSCAN_SUPPORT -DRTT_SUPPORT -DCUSTOM_FORCE_NODFS_FLAG \ - -DLINKSTAT_SUPPORT -DDEBUGABILITY -DDBG_PKT_MON -DPKT_FILTER_SUPPORT \ - -DAPF -DNDO_CONFIG_SUPPORT -DRSSI_MONITOR_SUPPORT -DDHD_WAKE_STATUS \ - -DCUSTOM_COUNTRY_CODE -DDHD_FW_COREDUMP -DEXPLICIT_DISCIF_CLEANUP - +DHDCFLAGS += -DGSCAN_SUPPORT -DRTT_SUPPORT -DLINKSTAT_SUPPORT \ + -DCUSTOM_COUNTRY_CODE -DDHD_GET_VALID_CHANNELS \ + -DDEBUGABILITY -DDBG_PKT_MON -DDHD_LOG_DUMP -DDHD_FW_COREDUMP \ + -DAPF -DNDO_CONFIG_SUPPORT -DRSSI_MONITOR_SUPPORT -DDHD_WAKE_STATUS DHDOFILES += dhd_rtt.o bcm_app_utils.o endif endif +# For Debug +ifneq ($(CONFIG_BCMDHD_DEBUG),) +DHDCFLAGS += -DDEBUGFS_CFG80211 +DHDCFLAGS += -DSHOW_LOGTRACE -DDHD_LOG_DUMP -DDHD_FW_COREDUMP \ + -DBCMASSERT_LOG -DSI_ERROR_ENFORCE +ifneq ($(CONFIG_BCMDHD_PCIE),) + DHDCFLAGS += -DEWP_EDL + DHDCFLAGS += -DDNGL_EVENT_SUPPORT + DHDCFLAGS += -DDHD_SSSR_DUMP +endif +endif + # MESH support for kernel 3.10 later ifeq ($(CONFIG_WL_MESH),y) DHDCFLAGS += -DWLMESH +ifneq ($(CONFIG_CFG80211),) + DHDCFLAGS += -DWLMESH_CFG80211 +endif ifneq ($(CONFIG_BCMDHD_PCIE),) DHDCFLAGS += -DBCM_HOST_BUF -DDMA_HOST_BUFFER_LEN=0x80000 endif @@ -119,14 +148,15 @@ endif DHDCFLAGS :=$(filter-out -DSET_RANDOM_MAC_SOFTAP,$(DHDCFLAGS)) endif -ifeq ($(CONFIG_BCMDHD_SDIO),y) -obj-$(CONFIG_BCMDHD) += dhd.o -dhd-objs += $(DHDOFILES) -else -obj-$(CONFIG_BCMDHD) += bcmdhd.o -bcmdhd-objs += $(DHDOFILES) +ifeq ($(CONFIG_WL_EASYMESH),y) + DHDCFLAGS :=$(filter-out -DDHD_FW_COREDUMP,$(DHDCFLAGS)) + DHDCFLAGS :=$(filter-out -DDHD_LOG_DUMP,$(DHDCFLAGS)) +DHDCFLAGS += -DWLEASYMESH -DWL_STATIC_IF -DWLDWDS -DFOURADDR_AUTO_BRG endif +obj-$(CONFIG_BCMDHD) += $(MODULE_NAME).o +$(MODULE_NAME)-objs += 
$(DHDOFILES) + ifeq ($(CONFIG_MACH_PLATFORM),y) DHDOFILES += dhd_gpio.o ifeq ($(CONFIG_BCMDHD_DTS),y) @@ -149,25 +179,25 @@ ifeq ($(CONFIG_DHD_USE_STATIC_BUF),y) endif ifneq ($(CONFIG_WIRELESS_EXT),) - DHDOFILES += wl_iw.o wl_escan.o - DHDCFLAGS += -DSOFTAP -DWL_WIRELESS_EXT -DUSE_IW -DWL_ESCAN + DHDOFILES += wl_iw.o + DHDCFLAGS += -DSOFTAP -DWL_WIRELESS_EXT -DUSE_IW endif ifneq ($(CONFIG_CFG80211),) - DHDOFILES += wl_cfg80211.o wl_cfgp2p.o wl_linux_mon.o wl_cfg_btcoex.o wl_cfgvendor.o + DHDOFILES += wl_cfg80211.o wl_cfgscan.o wl_cfgp2p.o + DHDOFILES += wl_linux_mon.o wl_cfg_btcoex.o wl_cfgvendor.o DHDOFILES += dhd_cfg80211.o DHDCFLAGS += -DWL_CFG80211 -DWLP2P -DWL_CFG80211_STA_EVENT DHDCFLAGS += -DWL_IFACE_COMB_NUM_CHANNELS - DHDCFLAGS += -DCUSTOM_ROAM_TRIGGER_SETTING=-65 - DHDCFLAGS += -DCUSTOM_ROAM_DELTA_SETTING=15 - DHDCFLAGS += -DCUSTOM_KEEP_ALIVE_SETTING=28000 - DHDCFLAGS += -DCUSTOM_PNO_EVENT_LOCK_xTIME=7 + DHDCFLAGS += -DCUSTOM_PNO_EVENT_LOCK_xTIME=10 DHDCFLAGS += -DWL_SUPPORT_AUTO_CHANNEL DHDCFLAGS += -DWL_SUPPORT_BACKPORTED_KPATCHES DHDCFLAGS += -DESCAN_RESULT_PATCH -DESCAN_BUF_OVERFLOW_MGMT DHDCFLAGS += -DVSDB -DWL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST DHDCFLAGS += -DWLTDLS -DMIRACAST_AMPDU_SIZE=8 DHDCFLAGS += -DWL_VIRTUAL_APSTA - DHDCFLAGS += -DPNO_SUPPORT + DHDCFLAGS += -DPNO_SUPPORT -DEXPLICIT_DISCIF_CLEANUP + DHDCFLAGS += -DDHD_USE_SCAN_WAKELOCK + DHDCFLAGS += -DWL_STATIC_IF endif EXTRA_CFLAGS = $(DHDCFLAGS) ifeq ($(CONFIG_BCMDHD),m) diff --git a/bcmdhd.100.10.315.x/aiutils.c b/bcmdhd.100.10.315.x/aiutils.c index 6952983..665b9f7 100644 --- a/bcmdhd.100.10.315.x/aiutils.c +++ b/bcmdhd.100.10.315.x/aiutils.c @@ -2,7 +2,7 @@ * Misc utility routines for accessing chip-specific features * of the SiliconBackplane-based Broadcom chips. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: aiutils.c 769534 2018-06-26 21:19:11Z $ + * $Id: aiutils.c 823201 2019-06-03 03:49:36Z $ */ #include #include @@ -174,7 +174,7 @@ ai_scan(si_t *sih, void *regs, uint devid) case PCMCIA_BUS: default: - SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n", sih->bustype)); + SI_ERROR(("Don't know how to do AXI enumeration on bus %d\n", sih->bustype)); ASSERT(0); return; } @@ -250,12 +250,16 @@ ai_scan(si_t *sih, void *regs, uint devid) asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh); if (asd != 0) { - sii->oob_router = addrl; + if ((sii->oob_router != 0) && (sii->oob_router != addrl)) { + sii->oob_router1 = addrl; + } else { + sii->oob_router = addrl; + } } } if (cid != NS_CCB_CORE_ID && cid != PMU_CORE_ID && cid != GCI_CORE_ID && cid != SR_CORE_ID && - cid != HUB_CORE_ID) + cid != HUB_CORE_ID && cid != HND_OOBR_CORE_ID) continue; } @@ -603,6 +607,7 @@ ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size) if (cc == NULL) goto error; + BCM_REFERENCE(erombase); erombase = R_REG(sii->osh, &cc->eromptr); eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE); eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32)); @@ -699,8 +704,7 @@ ai_addrspace(si_t *sih, uint spidx, uint baidx) return cores_info->coresba[cidx]; else if (baidx == CORE_BASE_ADDR_1) return cores_info->coresba2[cidx]; - } - else if (spidx == CORE_SLAVE_PORT_1) { + } else if (spidx == CORE_SLAVE_PORT_1) { if (baidx == CORE_BASE_ADDR_0) return cores_info->csp2ba[cidx]; } @@ -709,7 +713,6 @@ ai_addrspace(si_t *sih, uint spidx, uint baidx) __FUNCTION__, baidx, spidx)); return 0; - } /* Return the size of the nth address space in the current core @@ -731,8 +734,7 @@ ai_addrspacesize(si_t *sih, uint spidx, uint baidx) return cores_info->coresba_size[cidx]; else if (baidx == 
CORE_BASE_ADDR_1) return cores_info->coresba2_size[cidx]; - } - else if (spidx == CORE_SLAVE_PORT_1) { + } else if (spidx == CORE_SLAVE_PORT_1) { if (baidx == CORE_BASE_ADDR_0) return cores_info->csp2ba_size[cidx]; } @@ -1213,7 +1215,6 @@ _ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits) } #endif /* UCM_CORRUPTION_WAR */ OSL_DELAY(1); - } void diff --git a/bcmdhd.100.10.315.x/bcm_app_utils.c b/bcmdhd.100.10.315.x/bcm_app_utils.c index e88dcef..893c7b6 100644 --- a/bcmdhd.100.10.315.x/bcm_app_utils.c +++ b/bcmdhd.100.10.315.x/bcm_app_utils.c @@ -3,7 +3,7 @@ * Contents are wifi-specific, used by any kernel or app-level * software that might want wifi things as it grows. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/bcmbloom.c b/bcmdhd.100.10.315.x/bcmbloom.c index 420a09b..211df5f 100644 --- a/bcmdhd.100.10.315.x/bcmbloom.c +++ b/bcmdhd.100.10.315.x/bcmbloom.c @@ -1,7 +1,7 @@ /* * Bloom filter support * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmbloom.c 714397 2017-08-04 08:24:38Z $ + * $Id: bcmbloom.c 788740 2018-11-13 21:45:01Z $ */ #include @@ -189,7 +189,9 @@ bcm_bloom_is_member(bcm_bloom_filter_t *bp, pos = (*bp->hash[i])(bp->cb_ctx, i, tag, tag_len); /* all bits must be set for a match */ + CLANG_DIAGNOSTIC_PUSH_SUPPRESS_CAST() if (isclr(buf, pos % BLOOM_BIT_LEN(buf_len))) { + CLANG_DIAGNOSTIC_POP() err = BCME_NOTFOUND; break; } diff --git a/bcmdhd.100.10.315.x/bcmevent.c b/bcmdhd.100.10.315.x/bcmevent.c index a9fcb37..5022479 100644 --- a/bcmdhd.100.10.315.x/bcmevent.c +++ b/bcmdhd.100.10.315.x/bcmevent.c @@ -1,7 +1,7 @@ /* * bcmevent read-only data shared by kernel or app layers * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmevent.c 755881 2018-04-05 06:32:32Z $ + * $Id: bcmevent.c 807989 2019-03-05 07:57:42Z $ */ #include @@ -149,7 +149,7 @@ static const bcmevent_name_str_t bcmevent_names[] = { #ifdef WLWNM BCMEVENT_NAME(WLC_E_WNM_STA_SLEEP), #endif /* WLWNM */ -#if defined(WL_PROXDETECT) +#if defined(WL_PROXDETECT) || defined(RTT_SUPPORT) BCMEVENT_NAME(WLC_E_PROXD), #endif // endif BCMEVENT_NAME(WLC_E_CCA_CHAN_QUAL), @@ -198,6 +198,7 @@ static const bcmevent_name_str_t bcmevent_names[] = { #endif /* WL_NAN */ BCMEVENT_NAME(WLC_E_RPSNOA), BCMEVENT_NAME(WLC_E_PHY_CAL), + BCMEVENT_NAME(WLC_E_WA_LQM), }; const char *bcmevent_get_name(uint event_type) diff --git a/bcmdhd.100.10.315.x/bcmsdh.c b/bcmdhd.100.10.315.x/bcmsdh.c index 1f90472..96b62be 100644 --- a/bcmdhd.100.10.315.x/bcmsdh.c +++ b/bcmdhd.100.10.315.x/bcmsdh.c @@ -2,7 +2,7 @@ * BCMSDH interface glue * implement bcmsdh API for SDIOH driver 
* - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -528,7 +528,7 @@ bcmsdh_reg_read(void *sdh, uintptr addr, uint size) ASSERT(bcmsdh->init_success); - if (bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)) { + if (bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, bcmsdh->force_sbwad_calc)) { bcmsdh->regfail = TRUE; // terence 20130621: prevent dhd_dpc in dead lock return 0xFFFFFFFF; } diff --git a/bcmdhd.100.10.315.x/bcmsdh_linux.c b/bcmdhd.100.10.315.x/bcmsdh_linux.c index b27716a..ed61189 100644 --- a/bcmdhd.100.10.315.x/bcmsdh_linux.c +++ b/bcmdhd.100.10.315.x/bcmsdh_linux.c @@ -1,7 +1,7 @@ /* * SDIO access interface for drivers - linux specific (pci only) * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -81,7 +81,7 @@ typedef struct bcmsdh_os_info { } bcmsdh_os_info_t; /* debugging macros */ -#define SDLX_MSG(x) +#define SDLX_MSG(x) printf x /** * Checks to see if vendor and device IDs match a supported SDIO Host Controller. 
@@ -219,10 +219,13 @@ int bcmsdh_get_total_wake(bcmsdh_info_t *bcmsdh) int bcmsdh_set_get_wake(bcmsdh_info_t *bcmsdh, int flag) { +#if defined(OOB_INTR_ONLY) bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; unsigned long flags; - int ret; +#endif + int ret = 0; +#if defined(OOB_INTR_ONLY) spin_lock_irqsave(&bcmsdh_osinfo->oob_irq_spinlock, flags); ret = bcmsdh->pkt_wake; @@ -230,6 +233,7 @@ int bcmsdh_set_get_wake(bcmsdh_info_t *bcmsdh, int flag) bcmsdh->pkt_wake = flag; spin_unlock_irqrestore(&bcmsdh_osinfo->oob_irq_spinlock, flags); +#endif return ret; } #endif /* DHD_WAKE_STATUS */ @@ -385,11 +389,17 @@ int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handl SDLX_MSG(("%s: disable_irq_wake\n", __FUNCTION__)); bcmsdh_osinfo->oob_irq_wake_enabled = FALSE; #else - err = enable_irq_wake(bcmsdh_osinfo->oob_irq_num); - if (err) - SDLX_MSG(("%s: enable_irq_wake failed with %d\n", __FUNCTION__, err)); - else - bcmsdh_osinfo->oob_irq_wake_enabled = TRUE; +#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI) + if (device_may_wakeup(bcmsdh_osinfo->dev)) { +#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */ + err = enable_irq_wake(bcmsdh_osinfo->oob_irq_num); + if (err) + SDLX_MSG(("%s: enable_irq_wake failed with %d\n", __FUNCTION__, err)); + else + bcmsdh_osinfo->oob_irq_wake_enabled = TRUE; +#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI) + } +#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */ #endif return 0; @@ -406,9 +416,15 @@ void bcmsdh_oob_intr_unregister(bcmsdh_info_t *bcmsdh) return; } if (bcmsdh_osinfo->oob_irq_wake_enabled) { - err = disable_irq_wake(bcmsdh_osinfo->oob_irq_num); - if (!err) - bcmsdh_osinfo->oob_irq_wake_enabled = FALSE; +#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI) + if (device_may_wakeup(bcmsdh_osinfo->dev)) { +#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */ + err = disable_irq_wake(bcmsdh_osinfo->oob_irq_num); + if (!err) + bcmsdh_osinfo->oob_irq_wake_enabled = FALSE; +#if 
defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI) + } +#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */ } if (bcmsdh_osinfo->oob_irq_enabled) { disable_irq(bcmsdh_osinfo->oob_irq_num); diff --git a/bcmdhd.100.10.315.x/bcmsdh_sdmmc.c b/bcmdhd.100.10.315.x/bcmsdh_sdmmc.c index 338ec0e..f87dbec 100644 --- a/bcmdhd.100.10.315.x/bcmsdh_sdmmc.c +++ b/bcmdhd.100.10.315.x/bcmsdh_sdmmc.c @@ -1,7 +1,7 @@ /* * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmsdh_sdmmc.c 766268 2018-06-07 06:44:55Z $ + * $Id: bcmsdh_sdmmc.c 782528 2018-09-28 12:15:40Z $ */ #include @@ -92,11 +92,6 @@ static void IRQHandlerF2(struct sdio_func *func); static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr); #if defined(ENABLE_INSMOD_NO_FW_LOAD) && !defined(BUS_POWER_RESTORE) extern int sdio_reset_comm(struct mmc_card *card); -#else -int sdio_reset_comm(struct mmc_card *card) -{ - return 0; -} #endif #ifdef GLOBAL_SDMMC_INSTANCE extern PBCMSDH_SDMMC_INSTANCE gInstance; @@ -607,6 +602,7 @@ sdioh_iovar_op(sdioh_info_t *si, const char *name, /* Now set it */ si->client_block_size[func] = blksize; +#ifdef USE_DYNAMIC_F2_BLKSIZE if (si->func[func] == NULL) { sd_err(("%s: SDIO Device not present\n", __FUNCTION__)); bcmerror = BCME_NORESOURCE; @@ -618,6 +614,7 @@ sdioh_iovar_op(sdioh_info_t *si, const char *name, sd_err(("%s: Failed to set F%d blocksize to %d(%d)\n", __FUNCTION__, func, blksize, bcmerror)); sdio_release_host(si->func[func]); +#endif /* USE_DYNAMIC_F2_BLKSIZE */ break; } @@ -846,10 +843,10 @@ sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *by #if defined(MMC_SDIO_ABORT) int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT; #endif // endif - struct 
timespec now, before; + struct osl_timespec now, before; if (sd_msglevel & SDH_COST_VAL) - getnstimeofday(&before); + osl_do_gettimeofday(&before); sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr)); @@ -950,6 +947,26 @@ sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *by } } #endif /* MMC_SDIO_ABORT */ + /* to allow abort command through F1 */ +#if defined(SDIO_ISR_THREAD) + else if (regaddr == SDIOD_CCCR_INTR_EXTN) { + while (sdio_abort_retry--) { + if (sd->func[func]) { + sdio_claim_host(sd->func[func]); + /* + * this sdio_f0_writeb() can be replaced with + * another api depending upon MMC driver change. + * As of this time, this is temporaray one + */ + sdio_writeb(sd->func[func], + *byte, regaddr, &err_ret); + sdio_release_host(sd->func[func]); + } + if (!err_ret) + break; + } + } +#endif else if (regaddr < 0xF0) { sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr)); } else { @@ -992,7 +1009,7 @@ sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *by } if (sd_msglevel & SDH_COST_VAL) { - getnstimeofday(&now); + osl_do_gettimeofday(&now); sd_cost(("%s: rw=%d len=1 cost=%lds %luus\n", __FUNCTION__, rw, now.tv_sec-before.tv_sec, now.tv_nsec/1000-before.tv_nsec/1000)); } @@ -1020,10 +1037,10 @@ sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint add #if defined(MMC_SDIO_ABORT) int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT; #endif // endif - struct timespec now, before; + struct osl_timespec now, before; if (sd_msglevel & SDH_COST_VAL) - getnstimeofday(&before); + osl_do_gettimeofday(&before); if (func == 0) { sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__)); @@ -1086,7 +1103,7 @@ sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint add } if (sd_msglevel & SDH_COST_VAL) { - getnstimeofday(&now); + osl_do_gettimeofday(&now); sd_cost(("%s: rw=%d, len=%d cost=%lds %luus\n", __FUNCTION__, rw, nbytes, 
now.tv_sec-before.tv_sec, now.tv_nsec/1000 - before.tv_nsec/1000)); } @@ -1116,7 +1133,7 @@ sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func uint8 *localbuf = NULL; uint local_plen = 0; uint pkt_len = 0; - struct timespec now, before; + struct osl_timespec now, before; sd_trace(("%s: Enter\n", __FUNCTION__)); ASSERT(pkt); @@ -1124,7 +1141,7 @@ sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); if (sd_msglevel & SDH_COST_VAL) - getnstimeofday(&before); + osl_do_gettimeofday(&before); blk_size = sd->client_block_size[func]; max_blk_count = min(host->max_blk_count, (uint)MAX_IO_RW_EXTENDED_BLK); @@ -1165,7 +1182,7 @@ sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func if (sg_count >= ARRAYSIZE(sd->sg_list)) { sd_err(("%s: sg list entries(%u) exceed limit(%zu)," " sd blk_size=%u\n", - __FUNCTION__, sg_count, ARRAYSIZE(sd->sg_list), blk_size)); + __FUNCTION__, sg_count, (size_t)ARRAYSIZE(sd->sg_list), blk_size)); return (SDIOH_API_RC_FAIL); } pdata += pkt_offset; @@ -1301,7 +1318,7 @@ txglomfail: MFREE(sd->osh, localbuf, ttl_len); if (sd_msglevel & SDH_COST_VAL) { - getnstimeofday(&now); + osl_do_gettimeofday(&now); sd_cost(("%s: rw=%d, ttl_len=%d, cost=%lds %luus\n", __FUNCTION__, write, ttl_len, now.tv_sec-before.tv_sec, now.tv_nsec/1000-before.tv_nsec/1000)); } @@ -1317,13 +1334,13 @@ sdioh_buffer_tofrom_bus(sdioh_info_t *sd, uint fix_inc, uint write, uint func, { bool fifo = (fix_inc == SDIOH_DATA_FIX); int err_ret = 0; - struct timespec now, before; + struct osl_timespec now, before; sd_trace(("%s: Enter\n", __FUNCTION__)); ASSERT(buf); if (sd_msglevel & SDH_COST_VAL) - getnstimeofday(&before); + osl_do_gettimeofday(&before); /* NOTE: * For all writes, each packet length is aligned to 32 (or 4) @@ -1357,7 +1374,7 @@ sdioh_buffer_tofrom_bus(sdioh_info_t *sd, uint fix_inc, uint write, uint func, sd_trace(("%s: Exit\n", 
__FUNCTION__)); if (sd_msglevel & SDH_COST_VAL) { - getnstimeofday(&now); + osl_do_gettimeofday(&now); sd_cost(("%s: rw=%d, len=%d cost=%lds %luus\n", __FUNCTION__, write, len, now.tv_sec-before.tv_sec, now.tv_nsec/1000 - before.tv_nsec/1000)); } @@ -1382,14 +1399,14 @@ sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, u { SDIOH_API_RC status; void *tmppkt; - struct timespec now, before; + struct osl_timespec now, before; sd_trace(("%s: Enter\n", __FUNCTION__)); DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait); DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); if (sd_msglevel & SDH_COST_VAL) - getnstimeofday(&before); + osl_do_gettimeofday(&before); if (pkt) { #ifdef BCMSDIOH_TXGLOM @@ -1434,7 +1451,7 @@ sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, u PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE); if (sd_msglevel & SDH_COST_VAL) { - getnstimeofday(&now); + osl_do_gettimeofday(&now); sd_cost(("%s: len=%d cost=%lds %luus\n", __FUNCTION__, buf_len, now.tv_sec-before.tv_sec, now.tv_nsec/1000 - before.tv_nsec/1000)); } @@ -1596,11 +1613,13 @@ sdioh_start(sdioh_info_t *sd, int stage) 2.6.27. The implementation prior to that is buggy, and needs broadcom's patch for it */ +#if defined(ENABLE_INSMOD_NO_FW_LOAD) && !defined(BUS_POWER_RESTORE) if ((ret = sdio_reset_comm(sd->func[0]->card))) { sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret)); return ret; - } - else { + } else +#endif + { sd->num_funcs = 2; sd->sd_blockmode = TRUE; sd->use_client_ints = TRUE; diff --git a/bcmdhd.100.10.315.x/bcmsdh_sdmmc_linux.c b/bcmdhd.100.10.315.x/bcmsdh_sdmmc_linux.c index 4e407f4..0864e04 100644 --- a/bcmdhd.100.10.315.x/bcmsdh_sdmmc_linux.c +++ b/bcmdhd.100.10.315.x/bcmsdh_sdmmc_linux.c @@ -1,7 +1,7 @@ /* * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmsdh_sdmmc_linux.c 753315 2018-03-21 04:10:12Z $ + * $Id: bcmsdh_sdmmc_linux.c 825481 2019-06-14 10:06:03Z $ */ #include @@ -43,6 +43,7 @@ #include #include #include +#include #if !defined(SDIO_VENDOR_ID_BROADCOM) #define SDIO_VENDOR_ID_BROADCOM 0x02d0 @@ -50,10 +51,6 @@ #define SDIO_DEVICE_ID_BROADCOM_DEFAULT 0x0000 -#if !defined(SDIO_DEVICE_ID_BROADCOM_4362) -#define SDIO_DEVICE_ID_BROADCOM_4362 0x4362 -#endif // endif - extern void wl_cfg80211_set_parent_dev(void *dev); extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd); extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd); @@ -196,10 +193,18 @@ static void bcmsdh_sdmmc_remove(struct sdio_func *func) /* devices we support, null terminated */ static const struct sdio_device_id bcmsdh_sdmmc_ids[] = { { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_DEFAULT) }, - { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4362) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM4362_CHIP_ID) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43751_CHIP_ID) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43752_CHIP_ID) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43012_CHIP_ID) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_CHIP_ID) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_D11N_ID) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_D11N2G_ID) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_D11N5G_ID) }, + /* { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_ANY_ID) }, */ { SDIO_DEVICE_CLASS(SDIO_CLASS_NONE) }, - { 0, 0, 0, 0 /* end: all zeroes */ - }, + /* end: all zeroes */ + { 0, 0, 0, 0}, }; MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids); diff --git a/bcmdhd.100.10.315.x/bcmsdspi_linux.c b/bcmdhd.100.10.315.x/bcmsdspi_linux.c index d34dec5..768ccbd 100644 --- a/bcmdhd.100.10.315.x/bcmsdspi_linux.c +++ 
b/bcmdhd.100.10.315.x/bcmsdspi_linux.c @@ -1,7 +1,7 @@ /* * Broadcom SPI Host Controller Driver - Linux Per-port * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/bcmspibrcm.c b/bcmdhd.100.10.315.x/bcmspibrcm.c index 2c720cc..3a852b1 100644 --- a/bcmdhd.100.10.315.x/bcmspibrcm.c +++ b/bcmdhd.100.10.315.x/bcmspibrcm.c @@ -1,7 +1,7 @@ /* * Broadcom BCMSDH to gSPI Protocol Conversion Layer * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/bcmstdlib_s.c b/bcmdhd.100.10.315.x/bcmstdlib_s.c index 46ab0b9..42e74c1 100644 --- a/bcmdhd.100.10.315.x/bcmstdlib_s.c +++ b/bcmdhd.100.10.315.x/bcmstdlib_s.c @@ -1,7 +1,7 @@ /* * Broadcom Secure Standard Library. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/bcmutils.c b/bcmdhd.100.10.315.x/bcmutils.c index 59fbfd8..3cdad7b 100644 --- a/bcmdhd.100.10.315.x/bcmutils.c +++ b/bcmdhd.100.10.315.x/bcmutils.c @@ -1,7 +1,7 @@ /* * Driver O/S-independent utility routines * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmutils.c 760883 2018-05-03 22:57:07Z $ + * $Id: bcmutils.c 813798 2019-04-08 10:20:21Z $ */ #include @@ -83,7 +83,7 @@ pkttotlen(osl_t *osh, void *p) total = 0; for (; p; p = PKTNEXT(osh, p)) { len = PKTLEN(osh, p); - total += len; + total += (uint)len; #ifdef BCMLFRAG if (BCMLFRAG_ENAB()) { if (PKTISFRAG(osh, p)) { @@ -139,7 +139,7 @@ pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf) for (; p && offset; p = PKTNEXT(osh, p)) { if (offset < (uint)PKTLEN(osh, p)) break; - offset -= PKTLEN(osh, p); + offset -= (uint)PKTLEN(osh, p); } if (!p) @@ -168,7 +168,7 @@ pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf) for (; p && offset; p = PKTNEXT(osh, p)) { if (offset < (uint)PKTLEN(osh, p)) break; - offset -= PKTLEN(osh, p); + offset -= (uint)PKTLEN(osh, p); } if (!p) @@ -200,7 +200,7 @@ pktdataoffset(osl_t *osh, void *p, uint offset) for (; p; p = PKTNEXT(osh, p)) { pdata = (uint8 *) PKTDATA(osh, p); pkt_off = offset - len; - len += PKTLEN(osh, p); + len += (uint)PKTLEN(osh, p); if (len > offset) break; } @@ -218,7 +218,7 @@ pktoffset(osl_t *osh, void *p, uint offset) return NULL; for (; p; p = PKTNEXT(osh, p)) { - len += PKTLEN(osh, p); + len += (uint)PKTLEN(osh, p); if (len > offset) break; } @@ -246,7 +246,7 @@ prpkt(const char *msg, osl_t *osh, void *p0) printf("%s:\n", msg); for (p = p0; p; p = PKTNEXT(osh, p)) - prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p)); + prhex(NULL, PKTDATA(osh, p), (uint)PKTLEN(osh, p)); } #endif // endif @@ -260,8 +260,8 @@ pktsetprio(void *pkt, bool update_vtag) struct ether_header *eh; struct ethervlan_header *evh; uint8 *pktdata; - int priority = 0; - int rc = 0; + uint priority = 0; + uint rc = 0; pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt); ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16))); @@ -270,18 +270,18 @@ 
pktsetprio(void *pkt, bool update_vtag) if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) { uint16 vlan_tag; - int vlan_prio, dscp_prio = 0; + uint vlan_prio, dscp_prio = 0; evh = (struct ethervlan_header *)eh; vlan_tag = ntoh16(evh->vlan_tag); - vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK; + vlan_prio = (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK; if ((evh->ether_type == hton16(ETHER_TYPE_IP)) || (evh->ether_type == hton16(ETHER_TYPE_IPV6))) { uint8 *ip_body = pktdata + sizeof(struct ethervlan_header); - uint8 tos_tc = IP_TOS46(ip_body); - dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT); + uint8 tos_tc = (uint8)IP_TOS46(ip_body); + dscp_prio = tos_tc >> IPV4_TOS_PREC_SHIFT; } /* DSCP priority gets precedence over 802.1P (vlan tag) */ @@ -313,35 +313,44 @@ pktsetprio(void *pkt, bool update_vtag) } else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) || (eh->ether_type == hton16(ETHER_TYPE_IPV6))) { uint8 *ip_body = pktdata + sizeof(struct ether_header); - uint8 tos_tc = IP_TOS46(ip_body); + uint8 tos_tc = (uint8)IP_TOS46(ip_body); uint8 dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT; switch (dscp) { case DSCP_EF: + case DSCP_VA: priority = PRIO_8021D_VO; break; case DSCP_AF31: case DSCP_AF32: case DSCP_AF33: + case DSCP_CS3: priority = PRIO_8021D_CL; break; case DSCP_AF21: case DSCP_AF22: case DSCP_AF23: + priority = PRIO_8021D_EE; + break; case DSCP_AF11: case DSCP_AF12: case DSCP_AF13: - priority = PRIO_8021D_EE; + case DSCP_CS2: + priority = PRIO_8021D_BE; + break; + case DSCP_CS6: + case DSCP_CS7: + priority = PRIO_8021D_NC; break; default: - priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT); + priority = tos_tc >> IPV4_TOS_PREC_SHIFT; break; } rc |= PKTPRIO_DSCP; } - ASSERT(priority >= 0 && priority <= MAXPRIO); - PKTSETPRIO(pkt, priority); + ASSERT(priority <= MAXPRIO); + PKTSETPRIO(pkt, (int)priority); return (rc | priority); } @@ -376,12 +385,12 @@ pktsetprio_qms(void *pkt, uint8* up_table, bool update_vtag) uint rc = 0; pktdata = (uint8 
*)PKTDATA(OSH_NULL, pkt); - pktlen = PKTLEN(OSH_NULL, pkt); + pktlen = (uint)PKTLEN(OSH_NULL, pkt); if (pktgetdscp(pktdata, pktlen, &dscp)) { rc = PKTPRIO_DSCP; user_priority = dscp2up(up_table, dscp); - PKTSETPRIO(pkt, user_priority); + PKTSETPRIO(pkt, (int)user_priority); } return (rc | user_priority); @@ -408,7 +417,7 @@ pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp) if (eh->ether_type == HTON16(ETHER_TYPE_IP)) { ip_body = pktdata + sizeof(struct ether_header); - *dscp = IP_DSCP46(ip_body); + *dscp = (uint8)IP_DSCP46(ip_body); rc = TRUE; } else if (eh->ether_type == HTON16(ETHER_TYPE_8021Q)) { @@ -418,7 +427,7 @@ pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp) if (pktlen >= sizeof(struct ethervlan_header) + IPV4_MIN_HEADER_LEN && evh->ether_type == HTON16(ETHER_TYPE_IP)) { ip_body = pktdata + sizeof(struct ethervlan_header); - *dscp = IP_DSCP46(ip_body); + *dscp = (uint8)IP_DSCP46(ip_body); rc = TRUE; } } @@ -463,7 +472,7 @@ wl_set_up_table(uint8 *up_table, bcm_tlv_t *qos_map_ie) uint8 *except_ptr = (uint8 *)qos_map_ie->data; uint8 except_len = len - QOS_MAP_FIXED_LENGTH; uint8 *range_ptr = except_ptr + except_len; - int i; + uint8 i; /* fill in ranges */ for (i = 0; i < QOS_MAP_FIXED_LENGTH; i += 2) { @@ -503,7 +512,7 @@ const char * BCMRAMFN(bcmerrorstr)(int bcmerror) { /* check if someone added a bcmerror code but forgot to add errorstring */ - ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1)); + ASSERT((uint)ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1)); if (bcmerror > 0 || bcmerror < BCME_LAST) { snprintf(bcm_undeferrstr, sizeof(bcm_undeferrstr), "Undefined error %d", bcmerror); @@ -776,7 +785,7 @@ bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl) MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) == bcm_count_leading_zeros(bitmap)); bitix = (BCM_MWBMAP_BITS_WORD - 1) - - bcm_count_leading_zeros(bitmap); /* use asm clz */ + - (uint32)bcm_count_leading_zeros(bitmap); /* use asm clz */ wordix = BCM_MWBMAP_MULOP(wordix) + bitix; /* 
Clear bit if wd count is 0, without conditional branch */ @@ -784,14 +793,14 @@ bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl) count = bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1; #else /* ! BCM_MWBMAP_USE_CNTSETBITS */ mwbmap_p->wd_count[wordix]--; - count = mwbmap_p->wd_count[wordix]; + count = (uint32)mwbmap_p->wd_count[wordix]; MWBMAP_ASSERT(count == (bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1)); #endif /* ! BCM_MWBMAP_USE_CNTSETBITS */ MWBMAP_ASSERT(count >= 0); /* clear wd_bitmap bit if id_map count is 0 */ - bitmap = (count == 0) << bitix; + bitmap = ((uint32)(count == 0)) << BCM_MWBMAP_MODOP(bitix); MWBMAP_DBG(( "Lvl1: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x wfree %d", @@ -811,7 +820,7 @@ bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl) bcm_count_leading_zeros(bitmap)); bitix = BCM_MWBMAP_MULOP(wordix) + (BCM_MWBMAP_BITS_WORD - 1) - - bcm_count_leading_zeros(bitmap); /* use asm clz */ + - (uint32)bcm_count_leading_zeros(bitmap); /* use asm clz */ mwbmap_p->ifree--; /* decrement system wide free count */ MWBMAP_ASSERT(mwbmap_p->ifree >= 0); @@ -870,12 +879,12 @@ bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix) count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]); #else /* ! BCM_MWBMAP_USE_CNTSETBITS */ mwbmap_p->wd_count[bitix]--; - count = mwbmap_p->wd_count[bitix]; + count = (uint32)mwbmap_p->wd_count[bitix]; MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix])); #endif /* ! 
BCM_MWBMAP_USE_CNTSETBITS */ MWBMAP_ASSERT(count >= 0); - bitmap = (count == 0) << BCM_MWBMAP_MODOP(bitix); + bitmap = (uint32)(count == 0) << BCM_MWBMAP_MODOP(bitix); MWBMAP_DBG(("Lvl1: bitix<%02lu> wordix<%02u>: %08x ^ %08x = %08x wfree %d", BCM_MWBMAP_MODOP(bitix), wordix, *bitmap_p, bitmap, @@ -959,7 +968,7 @@ bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl) ASSERT(mwbmap_p->ifree >= 0); - return mwbmap_p->ifree; + return (uint32)mwbmap_p->ifree; } /* Determine whether an index is inuse or free */ @@ -1002,7 +1011,7 @@ bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl) #if defined(BCM_MWBMAP_USE_CNTSETBITS) count = bcm_cntsetbits(mwbmap_p->id_bitmap[ix]); #else /* ! BCM_MWBMAP_USE_CNTSETBITS */ - count = mwbmap_p->wd_count[ix]; + count = (uint32)mwbmap_p->wd_count[ix]; MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[ix])); #endif /* ! BCM_MWBMAP_USE_CNTSETBITS */ printf("\tIDMAP:%2u. 0x%08x %02u\t", ix, mwbmap_p->id_bitmap[ix], count); @@ -1032,7 +1041,7 @@ bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl) #if defined(BCM_MWBMAP_USE_CNTSETBITS) count = bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]); #else /* ! BCM_MWBMAP_USE_CNTSETBITS */ - count = mwbmap_p->wd_count[idmap_ix]; + count = (uint32)mwbmap_p->wd_count[idmap_ix]; ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix])); #endif /* ! 
BCM_MWBMAP_USE_CNTSETBITS */ ASSERT(count != 0U); @@ -1213,7 +1222,9 @@ id16_map_alloc(void * id16_map_hndl) id16_map_t * id16_map; ASSERT(id16_map_hndl != NULL); - + if (!id16_map_hndl) { + return ID16_INVALID; + } id16_map = (id16_map_t *)id16_map_hndl; ASSERT(id16_map->total > 0); @@ -1281,7 +1292,9 @@ id16_map_audit(void * id16_map_hndl) id16_map_t * id16_map; ASSERT(id16_map_hndl != NULL); - + if (!id16_map_hndl) { + goto done; + } id16_map = (id16_map_t *)id16_map_hndl; ASSERT(id16_map->stack_idx >= -1); @@ -1331,23 +1344,23 @@ done: void dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size) { - uint32 mem_size; - mem_size = sizeof(dll_pool_t) + (elems_max * elem_size); + uint32 memsize; + memsize = sizeof(dll_pool_t) + (elems_max * elem_size); if (pool) - MFREE(osh, pool, mem_size); + MFREE(osh, pool, memsize); } dll_pool_t * dll_pool_init(void * osh, uint16 elems_max, uint16 elem_size) { - uint32 mem_size, i; + uint32 memsize, i; dll_pool_t * dll_pool_p; dll_t * elem_p; ASSERT(elem_size > sizeof(dll_t)); - mem_size = sizeof(dll_pool_t) + (elems_max * elem_size); + memsize = sizeof(dll_pool_t) + (elems_max * elem_size); - if ((dll_pool_p = (dll_pool_t *)MALLOCZ(osh, mem_size)) == NULL) { + if ((dll_pool_p = (dll_pool_t *)MALLOCZ(osh, memsize)) == NULL) { printf("dll_pool_init: elems_max<%u> elem_size<%u> malloc failure\n", elems_max, elem_size); ASSERT(0); @@ -1447,7 +1460,7 @@ bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...) 
if ((r == -1) || (r >= (int)b->size)) { b->size = 0; } else { - b->size -= r; + b->size -= (uint)r; b->buf += r; } @@ -1568,7 +1581,7 @@ bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len) } else if (bcm_isprint((uchar)c)) { *p++ = (char)c; } else { - p += snprintf(p, (endp - p), "\\x%02X", c); + p += snprintf(p, (size_t)(endp - p), "\\x%02X", c); } } *p = '\0'; @@ -1753,7 +1766,7 @@ bcm_strtoull(const char *cp, char **endp, uint base) result = 0; while (bcm_isxdigit(*cp) && - (value = bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10) < base) { + (value = (uint64)(bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10)) < base) { result = result*base + value; /* Detected overflow */ if (result < last_result && !minus) { @@ -1805,7 +1818,7 @@ bcmstrstr(const char *haystack, const char *needle) len = (int)strlen(haystack) - nlen + 1; for (i = 0; i < len; i++) - if (memcmp(needle, &haystack[i], nlen) == 0) + if (memcmp(needle, &haystack[i], (size_t)nlen) == 0) return DISCARD_QUAL(&haystack[i], char); return (NULL); } @@ -1909,7 +1922,7 @@ bcmstrtok(char **string, const char *delimiters, char *tokdelim) for (; *str; str++) { if (map[*str >> 5] & (1 << (*str & 31))) { if (tokdelim != NULL) { - *tokdelim = *str; + *tokdelim = (char)*str; } *str++ = '\0'; @@ -2005,7 +2018,7 @@ bcm_ether_atoe(const char *p, struct ether_addr *ea) char *ep; for (;;) { - ea->octet[i++] = (char) bcm_strtoul(p, &ep, 16); + ea->octet[i++] = (uint8) bcm_strtoul(p, &ep, 16); p = ep; if (!*p++ || i == 6) break; @@ -2505,7 +2518,7 @@ bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst) */ if (datalen > 0) { - memcpy(dst_tlv->data, data, datalen); + memcpy(dst_tlv->data, data, (size_t)datalen); } /* update the output destination poitner to point past @@ -2575,7 +2588,7 @@ bcm_write_tlv_safe(int type, const void *data, int datalen, uint8 *dst, int dst_ /* if len + tlv hdr len is more than destlen, don't do anything * just return the buffer untouched */ - if ((int)(datalen + 
BCM_TLV_HDR_SIZE) <= dst_maxlen) { + if ((int)(datalen + (int)BCM_TLV_HDR_SIZE) <= dst_maxlen) { new_dst = bcm_write_tlv(type, data, datalen, dst); } @@ -2914,6 +2927,109 @@ bcm_next_tlv(const bcm_tlv_t *elt, uint *buflen) GCC_DIAGNOSTIC_POP(); } +/** + * Advance a const tlv buffer pointer and length up to the given tlv element pointer + * 'elt'. The function checks that elt is a valid tlv; the elt pointer and data + * are all in the range of the buffer/length. + * + * @param elt pointer to a valid bcm_tlv_t in the buffer + * @param buffer pointer to a tlv buffer + * @param buflen length of the buffer in bytes + * + * On return, if elt is not a tlv in the buffer bounds, the *buffer parameter + * will be set to NULL and *buflen parameter will be set to zero. Otherwise, + * *buffer will point to elt, and *buflen will have been adjusted by the the + * difference between *buffer and elt. + */ +void +bcm_tlv_buffer_advance_to(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen) +{ + uint new_buflen; + const uint8 *new_buffer; + + new_buffer = (const uint8*)elt; + + /* make sure the input buffer pointer is non-null, that (buffer + buflen) does not wrap, + * and that the elt pointer is in the range of [buffer, buffer + buflen] + */ + if ((*buffer != NULL) && + ((uintptr)*buffer < ((uintptr)*buffer + *buflen)) && + (new_buffer >= *buffer) && + (new_buffer < (*buffer + *buflen))) { + /* delta between buffer and new_buffer is <= *buflen, so truncating cast to uint + * from ptrdiff is ok + */ + uint delta = (uint)(new_buffer - *buffer); + + /* New buffer length is old len minus the delta from the buffer start to elt. + * The check just above guarantees that the subtractions does not underflow. 
+ */ + new_buflen = *buflen - delta; + + /* validate current elt */ + if (bcm_valid_tlv(elt, new_buflen)) { + /* All good, so update the input/output parameters */ + *buffer = new_buffer; + *buflen = new_buflen; + return; + } + } + + /* something did not check out, clear out the buffer info */ + *buffer = NULL; + *buflen = 0; + + return; +} + +/** + * Advance a const tlv buffer pointer and length past the given tlv element pointer + * 'elt'. The function checks that elt is a valid tlv; the elt pointer and data + * are all in the range of the buffer/length. The function also checks that the + * remaining buffer starts with a valid tlv. + * + * @param elt pointer to a valid bcm_tlv_t in the buffer + * @param buffer pointer to a tlv buffer + * @param buflen length of the buffer in bytes + * + * On return, if elt is not a tlv in the buffer bounds, or the remaining buffer + * following the elt does not begin with a tlv in the buffer bounds, the *buffer + * parameter will be set to NULL and *buflen parameter will be set to zero. + * Otherwise, *buffer will point to the first byte past elt, and *buflen will + * have the remaining buffer length. + */ +void +bcm_tlv_buffer_advance_past(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen) +{ + /* Start by advancing the buffer up to the given elt */ + bcm_tlv_buffer_advance_to(elt, buffer, buflen); + + /* if that did not work, bail out */ + if (*buflen == 0) { + return; + } + +#if defined(__COVERITY__) + /* The elt has been verified by bcm_tlv_buffer_advance_to() to be a valid element, + * so its elt->len is in the bounds of the buffer. The following check prevents + * Coverity from flagging the (elt->data + elt->len) statement below as using a + * tainted elt->len to index into array 'elt->data'. + */ + if (elt->len > *buflen) { + return; + } +#endif /* __COVERITY__ */ + + /* We know we are advanced up to a good tlv. + * Now just advance to the following tlv. 
+ */ + elt = (const bcm_tlv_t*)(elt->data + elt->len); + + bcm_tlv_buffer_advance_to(elt, buffer, buflen); + + return; +} + /* * Traverse a string of 1-byte tag/1-byte length/variable-length value * triples, returning a pointer to the substring whose first element @@ -3000,7 +3116,7 @@ bcm_tlv_t * bcm_parse_tlvs_min_bodylen(const void *buf, int buflen, uint key, int min_bodylen) { bcm_tlv_t * ret; - ret = bcm_parse_tlvs(buf, buflen, key); + ret = bcm_parse_tlvs(buf, (uint)buflen, key); if (ret == NULL || ret->len < min_bodylen) { return NULL; } @@ -3062,8 +3178,8 @@ bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 flags, char* buf, int len) bit = bd->bitfield[i].bit; if ((flags & mask) == bit) { if (len > (int)strlen(name)) { - slen = strlen(name); - strncpy(buf, name, slen+1); + slen = (int)strlen(name); + strncpy(buf, name, (size_t)slen+1); } break; } @@ -3097,7 +3213,7 @@ bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len) } else if ((flags & bit) == 0) continue; flags &= ~bit; - nlen = strlen(name); + nlen = (int)strlen(name); slen += nlen; /* count btwn flag space */ if (flags != 0) @@ -3106,7 +3222,7 @@ bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len) if (len <= slen) break; /* copy NULL char but don't count it */ - strncpy(p, name, nlen + 1); + strncpy(p, name, (size_t)nlen + 1); p += nlen; /* copy btwn flag space and NULL char */ if (flags != 0) @@ -3143,8 +3259,10 @@ bcm_format_octets(const bcm_bit_desc_t *bd, uint bdsz, for (i = 0; i < bdsz; i++) { bit = bd[i].bit; name = bd[i].name; + CLANG_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); if (isset(addr, bit)) { - nlen = strlen(name); + CLANG_DIAGNOSTIC_POP(); + nlen = (int)strlen(name); slen += nlen; /* need SPACE - for simplicity */ slen += 1; @@ -3153,7 +3271,7 @@ bcm_format_octets(const bcm_bit_desc_t *bd, uint bdsz, more = TRUE; break; } - memcpy(p, name, nlen); + memcpy(p, name, (size_t)nlen); p += nlen; p[0] = ' '; p += 1; @@ -3201,12 +3319,12 @@ 
prhex(const char *msg, const uchar *buf, uint nbytes) p = line; for (i = 0; i < nbytes; i++) { if (i % 16 == 0) { - nchar = snprintf(p, len, " %04x: ", i); /* line prefix */ + nchar = snprintf(p, (size_t)len, " %04x: ", i); /* line prefix */ p += nchar; len -= nchar; } if (len > 0) { - nchar = snprintf(p, len, "%02x ", buf[i]); + nchar = snprintf(p, (size_t)len, "%02x ", buf[i]); p += nchar; len -= nchar; } @@ -3270,7 +3388,18 @@ bcm_chipname(uint chipid, char *buf, uint len) const char *fmt; fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x"; + /* + * The following call to snprintf generates a compiler warning + * due to -Wformat-nonliteral. However, the format string is coming + * from internal callers rather than external data input, and is a + * useful debugging tool serving a variety of diagnostics. Rather + * than expand code size by replicating multiple functions with different + * argument lists, or disabling the warning globally, let's consider + * if we can just disable the warning for this one instance. + */ + CLANG_DIAGNOSTIC_PUSH_SUPPRESS_FORMAT() snprintf(buf, len, fmt, chipid); + CLANG_DIAGNOSTIC_POP() return buf; } @@ -3328,14 +3457,26 @@ bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc while (bufsize > 1) { if (cur_ptr->nameandfmt == NULL) break; + + /* + * The following call to snprintf generates a compiler warning + * due to -Wformat-nonliteral. However, the format string is coming + * from internal callers rather than external data input, and is a + * useful debugging tool serving a variety of diagnostics. Rather + * than expand code size by replicating multiple functions with different + * argument lists, or disabling the warning globally, let's consider + * if we can just disable the warning for this one instance. 
+ */ + CLANG_DIAGNOSTIC_PUSH_SUPPRESS_FORMAT() len = snprintf(buf, bufsize, cur_ptr->nameandfmt, - read_rtn(arg0, arg1, cur_ptr->offset)); + read_rtn(arg0, arg1, cur_ptr->offset)); + CLANG_DIAGNOSTIC_POP() /* check for snprintf overflow or error */ if (len < 0 || (uint32)len >= bufsize) - len = bufsize - 1; + len = (int)(bufsize - 1); buf += len; - bufsize -= len; - filled_len += len; + bufsize -= (uint32)len; + filled_len += (uint32)len; cur_ptr++; } return filled_len; @@ -3413,7 +3554,7 @@ bcm_qdbm_to_mw(uint8 qdbm) /* return the mW value scaled down to the correct factor of 10, * adding in factor/2 to get proper rounding. */ - return ((nqdBm_to_mW_map[idx] + factor/2) / factor); + return (uint16)((nqdBm_to_mW_map[idx] + factor/2) / factor); } uint8 @@ -3492,7 +3633,7 @@ process_nvram_vars(char *varbuf, unsigned int len) break; nv_ver[n-1] = varbuf[n]; } - printk("NVRAM version: %s\n", nv_ver); + printf("NVRAM version: %s\n", nv_ver); } for (n = 0; n < len; n++) { @@ -3611,13 +3752,13 @@ uint16 bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum) { while (len > 1) { - sum += (buf[0] << 8) | buf[1]; + sum += (uint32)((buf[0] << 8) | buf[1]); buf += 2; len -= 2; } if (len > 0) { - sum += (*buf) << 8; + sum += (uint32)((*buf) << 8); } while (sum >> 16) { @@ -3723,7 +3864,7 @@ ipv4_hdr_cksum(uint8 *ip, int ip_len) ptr += OFFSETOF(struct ipv4_hdr, hdr_chksum) + 2; /* return calculated chksum */ - return ip_cksum(sum, ptr, ip_len - OFFSETOF(struct ipv4_hdr, src_ip)); + return ip_cksum(sum, ptr, (uint32)((uint)ip_len - OFFSETOF(struct ipv4_hdr, src_ip))); } /* calculate TCP header checksum using partial sum */ @@ -3766,6 +3907,8 @@ ipv4_tcp_hdr_cksum(uint8 *ip, uint8 *tcp, uint16 tcp_len) ASSERT(tcp != NULL); ASSERT(tcp_len >= TCP_MIN_HEADER_LEN); + if (!ip || !tcp || !(tcp_len >= TCP_MIN_HEADER_LEN)) + return 0; /* pseudo header cksum */ memset(&tcp_ps, 0, sizeof(tcp_ps)); memcpy(&tcp_ps.dst_ip, ip_hdr->dst_ip, IPV4_ADDR_LEN); @@ -3802,6 +3945,8 @@ ipv6_tcp_hdr_cksum(uint8 
*ipv6, uint8 *tcp, uint16 tcp_len) ASSERT(tcp != NULL); ASSERT(tcp_len >= TCP_MIN_HEADER_LEN); + if (!ipv6 || !tcp || !(tcp_len >= TCP_MIN_HEADER_LEN)) + return 0; /* pseudo header cksum */ memset((char *)&ipv6_pseudo, 0, sizeof(ipv6_pseudo)); memcpy((char *)ipv6_pseudo.saddr, (char *)ipv6_hdr->saddr.addr, @@ -3873,7 +4018,7 @@ setbits(uint8 *addr, uint size, uint stbit, uint nbits, uint32 val) /* all bits are in the same byte */ if (fbyte == lbyte) { - mask = ((1 << nbits) - 1) << fbit; + mask = (uint8)(((1 << nbits) - 1) << fbit); addr[fbyte] &= ~mask; addr[fbyte] |= (uint8)(val << fbit); return; @@ -3881,7 +4026,7 @@ setbits(uint8 *addr, uint size, uint stbit, uint nbits, uint32 val) /* first partial byte */ if (fbit > 0) { - mask = (0xff << fbit); + mask = (uint8)(0xff << fbit); addr[fbyte] &= ~mask; addr[fbyte] |= (uint8)(val << fbit); val >>= (8 - fbit); @@ -3891,7 +4036,7 @@ setbits(uint8 *addr, uint size, uint stbit, uint nbits, uint32 val) /* last partial byte */ if (rbits > 0) { - mask = (1 << rbits) - 1; + mask = (uint8)((1 << rbits) - 1); addr[lbyte] &= ~mask; addr[lbyte] |= (uint8)(val >> (nbits - rbits)); lbyte --; /* last full byte */ @@ -3926,7 +4071,7 @@ getbits(const uint8 *addr, uint size, uint stbit, uint nbits) /* all bits are in the same byte */ if (fbyte == lbyte) { - mask = ((1 << nbits) - 1) << fbit; + mask = (uint8)(((1 << nbits) - 1) << fbit); val = (addr[fbyte] & mask) >> fbit; return val; } @@ -3934,21 +4079,21 @@ getbits(const uint8 *addr, uint size, uint stbit, uint nbits) /* first partial byte */ if (fbit > 0) { bits = 8 - fbit; - mask = (0xff << fbit); + mask = (uint8)(0xFFu << fbit); val |= (addr[fbyte] & mask) >> fbit; fbyte ++; /* first full byte */ } /* last partial byte */ if (rbits > 0) { - mask = (1 << rbits) - 1; - val |= (addr[lbyte] & mask) << (nbits - rbits); + mask = (uint8)((1 << rbits) - 1); + val |= (uint32)((addr[lbyte] & mask) << (nbits - rbits)); lbyte --; /* last full byte */ } /* remaining full byte(s) */ for 
(byte = fbyte; byte <= lbyte; byte ++) { - val |= (addr[byte] << (((byte - fbyte) << 3) + bits)); + val |= (uint32)((addr[byte] << (((byte - fbyte) << 3) + bits))); } return val; @@ -4183,7 +4328,7 @@ replace_nvram_variable(char *varbuf, unsigned int buflen, const char *variable, unsigned int *datalen) { char *p; - int variable_heading_len, record_len, variable_record_len = strlen(variable) + 1; + int variable_heading_len, record_len, variable_record_len = (int)strlen(variable) + 1; char *buf_end = varbuf + buflen; p = strchr(variable, '='); if (!p) { @@ -4194,9 +4339,10 @@ replace_nvram_variable(char *varbuf, unsigned int buflen, const char *variable, /* Scanning NVRAM, record by record up to trailing 0 */ for (p = varbuf; *p; p += strlen(p) + 1) { /* If given variable found - remove it */ - if (!strncmp(p, variable, variable_heading_len)) { - record_len = strlen(p) + 1; - memmove_s(p, buf_end - p, p + record_len, buf_end - (p + record_len)); + if (!strncmp(p, variable, (size_t)variable_heading_len)) { + record_len = (int)strlen(p) + 1; + memmove_s(p, buf_end - p, p + record_len, + (size_t)(buf_end - (p + record_len))); } } /* If buffer does not have space for given variable - return FALSE */ @@ -4204,7 +4350,7 @@ replace_nvram_variable(char *varbuf, unsigned int buflen, const char *variable, return FALSE; } /* Copy given variable to end of buffer */ - memmove_s(p, buf_end - p, variable, variable_record_len); + memmove_s(p, buf_end - p, variable, (size_t)variable_record_len); /* Adding trailing 0 */ p[variable_record_len] = 0; /* Setting optional output parameter - length of data in buffer */ diff --git a/bcmdhd.100.10.315.x/bcmwifi_channels.c b/bcmdhd.100.10.315.x/bcmwifi_channels.c index 531246e..0c8b1cd 100644 --- a/bcmdhd.100.10.315.x/bcmwifi_channels.c +++ b/bcmdhd.100.10.315.x/bcmwifi_channels.c @@ -3,7 +3,7 @@ * Contents are wifi-specific, used by any kernel or app-level * software that might want wifi things as it grows. 
* - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -26,7 +26,7 @@ * * <> * - * $Id: bcmwifi_channels.c 695288 2017-04-19 17:20:39Z $ + * $Id: bcmwifi_channels.c 806092 2019-02-21 08:19:13Z $ */ #include @@ -256,7 +256,7 @@ channel_to_sb(uint center_ch, uint primary_ch, uint bw) return -1; } - return sb; + return (int)sb; } /* return primary20 channel given center channel and side band */ @@ -275,7 +275,7 @@ channel_80mhz_to_id(uint ch) uint i; for (i = 0; i < WF_NUM_5G_80M_CHANS; i ++) { if (ch == wf_5g_80m_chans[i]) - return i; + return (int)i; } return -1; @@ -399,7 +399,7 @@ wf_chspec_aton(const char *a) if (!read_uint(&a, &num)) return 0; /* if we are looking at a 'g', then the first number was a band */ - c = tolower((int)a[0]); + c = tolower(a[0]); if (c == 'g') { a++; /* consume the char */ @@ -415,7 +415,7 @@ wf_chspec_aton(const char *a) if (!read_uint(&a, &pri_ch)) return 0; - c = tolower((int)a[0]); + c = tolower(a[0]); } else { /* first number is channel, use default for band */ @@ -469,7 +469,7 @@ wf_chspec_aton(const char *a) * or '+80' if bw = 80, to make '80+80' bw. 
*/ - c = tolower((int)a[0]); + c = (char)tolower((int)a[0]); /* if we have a 2g/40 channel, we should have a l/u spec now */ if (chspec_band == WL_CHANSPEC_BAND_2G && bw == 40) { @@ -573,7 +573,7 @@ done_read: sb = channel_to_sb(center_ch[i], pri_ch, bw); if (sb >= 0) { chspec_ch = center_ch[i]; - chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT; + chspec_sb = (uint)(sb << WL_CHANSPEC_CTL_SB_SHIFT); break; } } @@ -609,10 +609,10 @@ done_read: return 0; } - chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT; + chspec_sb = (uint)(sb << WL_CHANSPEC_CTL_SB_SHIFT); } - chspec = (chspec_ch | chspec_band | chspec_bw | chspec_sb); + chspec = (chanspec_t)(chspec_ch | chspec_band | chspec_bw | chspec_sb); if (wf_chspec_malformed(chspec)) return 0; @@ -796,6 +796,35 @@ wf_chspec_coexist(chanspec_t chspec1, chanspec_t chspec2) return same_primary; } +/** + * Create a 20MHz chanspec for the given band. + * + * This function returns a 20MHz chanspec in the given band. + * + * @param channel 20MHz channel number + * @param band a chanspec band (e.g. WL_CHANSPEC_BAND_2G) + * + * @return Returns a 20MHz chanspec, or IVNCHANSPEC in case of error. + */ +chanspec_t +wf_create_20MHz_chspec(uint channel, chanspec_band_t band) +{ + chanspec_t chspec; + + if (channel <= WL_CHANSPEC_CHAN_MASK && + (band == WL_CHANSPEC_BAND_2G || + band == WL_CHANSPEC_BAND_5G)) { + chspec = band | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE | channel; + if (!wf_chspec_valid(chspec)) { + chspec = INVCHANSPEC; + } + } else { + chspec = INVCHANSPEC; + } + + return chspec; +} + /** * Return the primary 20MHz channel. 
* @@ -956,7 +985,7 @@ extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec) } /* Create primary 40MHz chanspec */ - chspec40 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_40 | + chspec40 = (chanspec_t)(WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_40 | sb | center_chan); } @@ -1006,7 +1035,7 @@ wf_mhz2channel(uint freq, uint start_factor) if ((freq < base) || (freq > base + 1000)) return -1; - offset = freq - base; + offset = (int)(freq - base); ch = offset / 5; /* check that frequency is a 5MHz multiple from the base */ @@ -1047,7 +1076,7 @@ wf_channel2mhz(uint ch, uint start_factor) else if ((start_factor == WF_CHAN_FACTOR_2_4_G) && (ch == 14)) freq = 2484; else - freq = ch * 5 + start_factor / 2; + freq = (int)(ch * 5 + start_factor / 2); return freq; } @@ -1140,7 +1169,7 @@ wf_chspec_get8080_chspec(uint8 primary_20mhz, uint8 chan0, uint8 chan1) seg1 = chan0_id; } - chanspec = ((seg0 << WL_CHANSPEC_CHAN1_SHIFT) | + chanspec = (uint16)((seg0 << WL_CHANSPEC_CHAN1_SHIFT) | (seg1 << WL_CHANSPEC_CHAN2_SHIFT) | (sb << WL_CHANSPEC_CTL_SB_SHIFT) | WL_CHANSPEC_BW_8080 | @@ -1223,7 +1252,7 @@ wf_chspec_primary80_chspec(chanspec_t chspec) center_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN1(chspec)); /* Create primary 80MHz chanspec */ - chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan); + chspec80 = (chanspec_t)(WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan); } else if (CHSPEC_IS160(chspec)) { center_chan = CHSPEC_CHANNEL(chspec); @@ -1240,7 +1269,7 @@ wf_chspec_primary80_chspec(chanspec_t chspec) } /* Create primary 80MHz chanspec */ - chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan); + chspec80 = (chanspec_t)(WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan); } else { chspec80 = INVCHANSPEC; @@ -1265,7 +1294,7 @@ wf_chspec_secondary80_chspec(chanspec_t chspec) center_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN2(chspec)); /* Create secondary 80MHz chanspec */ - chspec80 = (WL_CHANSPEC_BAND_5G | + chspec80 = 
(chanspec_t)(WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | WL_CHANSPEC_CTL_SB_LL | center_chan); @@ -1283,7 +1312,7 @@ wf_chspec_secondary80_chspec(chanspec_t chspec) } /* Create secondary 80MHz chanspec */ - chspec80 = (WL_CHANSPEC_BAND_5G | + chspec80 = (chanspec_t)(WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | WL_CHANSPEC_CTL_SB_LL | center_chan); @@ -1316,7 +1345,7 @@ wf_chspec_get_80p80_channels(chanspec_t chspec, uint8 *ch) else { /* for 20, 40, and 80 Mhz */ ch[0] = CHSPEC_CHANNEL(chspec); - ch[1] = -1; + ch[1] = 0xFFu; } return; @@ -1414,11 +1443,11 @@ wf_get_all_ext(chanspec_t chspec, uint8 *pext) uint8 pri_ch = (pext)[0] = wf_chspec_primary20_chan(t); if (CHSPEC_IS20(chspec)) return; /* nothing more to do since 20MHz chspec */ /* 20MHz EXT */ - (pext)[1] = pri_ch + (IS_CTL_IN_L20(t) ? CH_20MHZ_APART : -CH_20MHZ_APART); + (pext)[1] = pri_ch + (uint8)(IS_CTL_IN_L20(t) ? CH_20MHZ_APART : -CH_20MHZ_APART); if (CHSPEC_IS40(chspec)) return; /* nothing more to do since 40MHz chspec */ /* center 40MHz EXT */ - t = wf_channel2chspec(pri_ch + (IS_CTL_IN_L40(chspec) ? - CH_40MHZ_APART : -CH_40MHZ_APART), WL_CHANSPEC_BW_40); + t = wf_channel2chspec((uint)(pri_ch + (IS_CTL_IN_L40(chspec) ? + CH_40MHZ_APART : -CH_40MHZ_APART)), WL_CHANSPEC_BW_40); GET_ALL_SB(t, &((pext)[2])); /* get the 20MHz side bands in 40MHz EXT */ if (CHSPEC_IS80(chspec)) return; /* nothing more to do since 80MHz chspec */ t = CH80MHZ_CHSPEC(wf_chspec_secondary80_channel(chspec), WL_CHANSPEC_CTL_SB_LLL); diff --git a/bcmdhd.100.10.315.x/bcmwifi_channels.h b/bcmdhd.100.10.315.x/bcmwifi_channels.h index 6376dd9..2e0d07e 100644 --- a/bcmdhd.100.10.315.x/bcmwifi_channels.h +++ b/bcmdhd.100.10.315.x/bcmwifi_channels.h @@ -3,7 +3,7 @@ * This header file housing the define and function prototype use by * both the wl driver, tools & Apps. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -26,7 +26,7 @@ * * <> * - * $Id: bcmwifi_channels.h 695288 2017-04-19 17:20:39Z $ + * $Id: bcmwifi_channels.h 806092 2019-02-21 08:19:13Z $ */ #ifndef _bcmwifi_channels_h_ @@ -34,6 +34,9 @@ /* A chanspec holds the channel number, band, bandwidth and primary 20MHz sideband */ typedef uint16 chanspec_t; +typedef uint16 chanspec_band_t; +typedef uint16 chanspec_bw_t; +typedef uint16 chanspec_subband_t; /* channel defines */ #define CH_80MHZ_APART 16 @@ -41,7 +44,11 @@ typedef uint16 chanspec_t; #define CH_20MHZ_APART 4 #define CH_10MHZ_APART 2 #define CH_5MHZ_APART 1 /* 2G band channels are 5 Mhz apart */ -#define CH_MAX_2G_CHANNEL 14 /* Max channel in 2G band */ + +#define CH_MIN_2G_CHANNEL 1u /* Min channel in 2G band */ +#define CH_MAX_2G_CHANNEL 14u /* Max channel in 2G band */ +#define CH_MIN_2G_40M_CHANNEL 3u /* Min 40MHz center channel in 2G band */ +#define CH_MAX_2G_40M_CHANNEL 11u /* Max 40MHz center channel in 2G band */ /* maximum # channels the s/w supports */ #define MAXCHANNEL 224 /* max # supported channels. 
The max channel no is above, @@ -94,24 +101,24 @@ typedef struct { #define WL_CHANSPEC_CTL_SB_UPPER WL_CHANSPEC_CTL_SB_LLU #define WL_CHANSPEC_CTL_SB_NONE WL_CHANSPEC_CTL_SB_LLL -#define WL_CHANSPEC_BW_MASK 0x3800 -#define WL_CHANSPEC_BW_SHIFT 11 -#define WL_CHANSPEC_BW_5 0x0000 -#define WL_CHANSPEC_BW_10 0x0800 -#define WL_CHANSPEC_BW_20 0x1000 -#define WL_CHANSPEC_BW_40 0x1800 -#define WL_CHANSPEC_BW_80 0x2000 -#define WL_CHANSPEC_BW_160 0x2800 -#define WL_CHANSPEC_BW_8080 0x3000 - -#define WL_CHANSPEC_BAND_MASK 0xc000 -#define WL_CHANSPEC_BAND_SHIFT 14 -#define WL_CHANSPEC_BAND_2G 0x0000 -#define WL_CHANSPEC_BAND_3G 0x4000 -#define WL_CHANSPEC_BAND_4G 0x8000 -#define WL_CHANSPEC_BAND_5G 0xc000 -#define INVCHANSPEC 255 -#define MAX_CHANSPEC 0xFFFF +#define WL_CHANSPEC_BW_MASK 0x3800u +#define WL_CHANSPEC_BW_SHIFT 11u +#define WL_CHANSPEC_BW_5 0x0000u +#define WL_CHANSPEC_BW_10 0x0800u +#define WL_CHANSPEC_BW_20 0x1000u +#define WL_CHANSPEC_BW_40 0x1800u +#define WL_CHANSPEC_BW_80 0x2000u +#define WL_CHANSPEC_BW_160 0x2800u +#define WL_CHANSPEC_BW_8080 0x3000u + +#define WL_CHANSPEC_BAND_MASK 0xc000u +#define WL_CHANSPEC_BAND_SHIFT 14u +#define WL_CHANSPEC_BAND_2G 0x0000u +#define WL_CHANSPEC_BAND_3G 0x4000u +#define WL_CHANSPEC_BAND_4G 0x8000u +#define WL_CHANSPEC_BAND_5G 0xc000u +#define INVCHANSPEC 255u +#define MAX_CHANSPEC 0xFFFFu #define WL_CHANNEL_BAND(ch) (((ch) <= CH_MAX_2G_CHANNEL) ? \ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G) @@ -595,6 +602,11 @@ extern uint8 wf_chspec_primary20_chan(chanspec_t chspec); */ extern const char *wf_chspec_to_bw_str(chanspec_t chspec); +/** + * Create a 20MHz chanspec for the given band. + */ +chanspec_t wf_create_20MHz_chspec(uint channel, chanspec_band_t band); + /** * Return the primary 20MHz chanspec. 
* diff --git a/bcmdhd.100.10.315.x/bcmwifi_rates.h b/bcmdhd.100.10.315.x/bcmwifi_rates.h index da2c15b..95b90a7 100644 --- a/bcmdhd.100.10.315.x/bcmwifi_rates.h +++ b/bcmdhd.100.10.315.x/bcmwifi_rates.h @@ -1,7 +1,7 @@ /* * Indices for 802.11 a/b/g/n/ac 1-3 chain symmetric transmit rates * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/bcmwifi_rspec.h b/bcmdhd.100.10.315.x/bcmwifi_rspec.h index 37494fd..3b7685f 100644 --- a/bcmdhd.100.10.315.x/bcmwifi_rspec.h +++ b/bcmdhd.100.10.315.x/bcmwifi_rspec.h @@ -1,7 +1,7 @@ /* * Common OS-independent driver header for rate management. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/bcmxtlv.c b/bcmdhd.100.10.315.x/bcmxtlv.c index d15a3d3..8e4a336 100644 --- a/bcmdhd.100.10.315.x/bcmxtlv.c +++ b/bcmdhd.100.10.315.x/bcmxtlv.c @@ -1,7 +1,7 @@ /* * Driver O/S-independent utility routines * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmxtlv.c 700655 2017-05-20 06:09:06Z $ + * $Id: bcmxtlv.c 788740 2018-11-13 21:45:01Z $ */ #include @@ -95,12 +95,17 @@ bcm_xtlv_len(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts) int len; lenp = (const uint8 *)&elt->len; /* nominal */ - if (opts & BCM_XTLV_OPTION_IDU8) --lenp; + if (opts & BCM_XTLV_OPTION_IDU8) { + --lenp; + } - if (opts & BCM_XTLV_OPTION_LENU8) + if (opts & BCM_XTLV_OPTION_LENU8) { len = *lenp; - else + } else if (opts & BCM_XTLV_OPTION_LENBE) { + len = (uint32)hton16(elt->len); + } else { len = ltoh16_ua(lenp); + } return len; } @@ -109,10 +114,13 @@ int bcm_xtlv_id(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts) { int id = 0; - if (opts & BCM_XTLV_OPTION_IDU8) + if (opts & BCM_XTLV_OPTION_IDU8) { id = *(const uint8 *)elt; - else + } else if (opts & BCM_XTLV_OPTION_IDBE) { + id = (uint32)hton16(elt->id); + } else { id = ltoh16_ua((const uint8 *)elt); + } return id; } @@ -217,7 +225,9 @@ bcm_xtlv_pack_xtlv(bcm_xtlv_t *xtlv, uint16 type, uint16 len, const uint8 *data, *lenp = (uint8)len; data_buf = lenp + sizeof(uint8); } else { - ASSERT(!"Unexpected xtlv option"); + bool Unexpected_xtlv_option = TRUE; + BCM_REFERENCE(Unexpected_xtlv_option); + ASSERT(!Unexpected_xtlv_option); return; } diff --git a/bcmdhd.100.10.315.x/dbus.c b/bcmdhd.100.10.315.x/dbus.c index 4777a5b..baacbfc 100644 --- a/bcmdhd.100.10.315.x/dbus.c +++ b/bcmdhd.100.10.315.x/dbus.c @@ -2687,35 +2687,6 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) return bcmerror; } -void -dhd_set_path_params(struct dhd_bus *bus) -{ - /* External conf takes precedence if specified */ - dhd_conf_preinit(bus->dhd); - - if (bus->dhd->conf_path[0] == '\0') { - dhd_conf_set_path(bus->dhd, "config.txt", bus->dhd->conf_path, bus->nv_path); - } - if (bus->dhd->clm_path[0] == '\0') { - dhd_conf_set_path(bus->dhd, "clm.blob", 
bus->dhd->clm_path, bus->fw_path); - } -#ifdef CONFIG_PATH_AUTO_SELECT - dhd_conf_set_conf_name_by_chip(bus->dhd, bus->dhd->conf_path); -#endif - - dhd_conf_read_config(bus->dhd, bus->dhd->conf_path); - - dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path); - dhd_conf_set_nv_name_by_chip(bus->dhd, bus->nv_path); - dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path); - - printf("Final fw_path=%s\n", bus->fw_path); - printf("Final nv_path=%s\n", bus->nv_path); - printf("Final clm_path=%s\n", bus->dhd->clm_path); - printf("Final conf_path=%s\n", bus->dhd->conf_path); - -} - void dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path, char *pclm_path, char *pconf_path) @@ -2732,7 +2703,7 @@ dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, bus->dhd->clm_path = pclm_path; bus->dhd->conf_path = pconf_path; - dhd_set_path_params(bus); + dhd_conf_set_path_params(bus->dhd, NULL, NULL, bus->fw_path, bus->nv_path); } @@ -2800,6 +2771,8 @@ dhd_dbus_probe_cb(void *arg, const char *desc, uint32 bustype, if (dbus_download_firmware(bus, bus->fw_path, bus->nv_path) != DBUS_OK) goto fail; #endif + } else { + goto fail; } } } else { diff --git a/bcmdhd.100.10.315.x/dhd.h b/bcmdhd.100.10.315.x/dhd.h index 425043d..c7956a6 100644 --- a/bcmdhd.100.10.315.x/dhd.h +++ b/bcmdhd.100.10.315.x/dhd.h @@ -4,7 +4,7 @@ * Provides type definitions and function prototypes used to link the * DHD OS, bus, and protocol modules. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -27,7 +27,7 @@ * * <> * - * $Id: dhd.h 771671 2018-07-11 06:58:25Z $ + * $Id: dhd.h 822756 2019-05-30 13:20:26Z $ */ /**************** @@ -50,9 +50,9 @@ #include #include #include -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK) +#if defined(CONFIG_HAS_WAKELOCK) #include -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_HAS_WAKELOCK) */ +#endif /* defined CONFIG_HAS_WAKELOCK */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) #include #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */ @@ -74,6 +74,7 @@ int get_scheduler_policy(struct task_struct *p); #endif /* PCIE_FULL_DONGLE */ #include +#include #include #include #include @@ -90,6 +91,8 @@ int get_scheduler_policy(struct task_struct *p); #include #endif /* DHD_ERPOM */ +#include + #ifdef DEBUG_DPC_THREAD_WATCHDOG #define MAX_RESCHED_CNT 600 #endif /* DEBUG_DPC_THREAD_WATCHDOG */ @@ -116,6 +119,9 @@ struct dhd_info; struct dhd_ioctl; struct dhd_dbg; struct dhd_ts; +#ifdef DNGL_AXI_ERROR_LOGGING +struct dhd_axi_error_dump; +#endif /* DNGL_AXI_ERROR_LOGGING */ /* The level of bus communication with the dongle */ enum dhd_bus_state { @@ -159,6 +165,7 @@ enum dhd_bus_devreset_type { #define DHD_BUS_BUSY_IN_MEMDUMP 0x1000 #define DHD_BUS_BUSY_IN_SSSRDUMP 0x2000 #define DHD_BUS_BUSY_IN_LOGDUMP 0x4000 +#define DHD_BUS_BUSY_IN_HALDUMP 0x8000 #define DHD_BUS_BUSY_SET_IN_TX(dhdp) \ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_TX @@ -185,11 +192,13 @@ enum dhd_bus_devreset_type { #define DHD_BUS_BUSY_SET_IN_CHECKDIED(dhdp) \ (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_CHECKDIED #define DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp) \ - (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_MEMDUMP + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_MEMDUMP #define DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp) \ - 
(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SSSRDUMP + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SSSRDUMP #define DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp) \ - (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_LOGDUMP + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_LOGDUMP +#define DHD_BUS_BUSY_SET_IN_HALDUMP(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_HALDUMP #define DHD_BUS_BUSY_CLEAR_IN_TX(dhdp) \ (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX @@ -221,6 +230,8 @@ enum dhd_bus_devreset_type { (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SSSRDUMP #define DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp) \ (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_LOGDUMP +#define DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_HALDUMP #define DHD_BUS_BUSY_CHECK_IN_TX(dhdp) \ ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_TX) @@ -254,6 +265,8 @@ enum dhd_bus_devreset_type { ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_SSSRDUMP) #define DHD_BUS_BUSY_CHECK_IN_LOGDUMP(dhdp) \ ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_LOGDUMP) +#define DHD_BUS_BUSY_CHECK_IN_HALDUMP(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_HALDUMP) #define DHD_BUS_BUSY_CHECK_IDLE(dhdp) \ ((dhdp)->dhd_bus_busy_state == 0) @@ -274,6 +287,9 @@ enum dhd_bus_devreset_type { #define DHD_BUS_CHECK_REMOVE(dhdp) \ ((dhdp)->busstate == DHD_BUS_REMOVE) +/* IOVar flags for common error checks */ +#define DHD_IOVF_PWRREQ_BYPASS (1<<0) /* flags to prevent bp access during host sleep state */ + #define MAX_MTU_SZ (1600u) /* (u64)result = (u64)dividend / (u64)divisor */ @@ -296,10 +312,12 @@ enum dhd_bus_devreset_type { }) #define SEC_USEC_FMT \ - "%015llu.%06u" + "%5llu.%06u" +/* t: time in nano second */ #define GET_SEC_USEC(t) \ - DIV_U64_BY_U32(t, USEC_PER_SEC), MOD_U64_BY_U32(t, USEC_PER_SEC) + DIV_U64_BY_U32(t, NSEC_PER_SEC), \ + ((uint32)(MOD_U64_BY_U32(t, NSEC_PER_SEC) / (uint32)NSEC_PER_USEC)) /* Download Types */ typedef enum download_type { @@ -353,9 +371,13 @@ enum 
dhd_op_flags { #define DHD_SCAN_ASSOC_ACTIVE_TIME 40 /* ms: Embedded default Active setting from DHD */ #define DHD_SCAN_UNASSOC_ACTIVE_TIME 80 /* ms: Embedded def. Unassoc Active setting from DHD */ -#define DHD_SCAN_PASSIVE_TIME 130 /* ms: Embedded default Passive setting from DHD */ #define DHD_SCAN_HOME_TIME 45 /* ms: Embedded default Home time setting from DHD */ #define DHD_SCAN_HOME_AWAY_TIME 100 /* ms: Embedded default Home Away time setting from DHD */ +#ifndef CUSTOM_SCAN_PASSIVE_TIME +#define DHD_SCAN_PASSIVE_TIME 130 /* ms: Embedded default Passive setting from DHD */ +#else +#define DHD_SCAN_PASSIVE_TIME CUSTOM_SCAN_PASSIVE_TIME /* ms: Custom Passive setting from DHD */ +#endif /* CUSTOM_SCAN_PASSIVE_TIME */ #ifndef POWERUP_MAX_RETRY #define POWERUP_MAX_RETRY 3 /* how many times we retry to power up the chip */ @@ -374,7 +396,7 @@ enum dhd_op_flags { #ifdef DHD_DEBUG #define DHD_JOIN_MAX_TIME_DEFAULT 10000 /* ms: Max time out for joining AP */ #define DHD_SCAN_DEF_TIMEOUT 10000 /* ms: Max time out for scan in progress */ -#endif // endif +#endif /* DHD_DEBUG */ #ifndef CONFIG_BCMDHD_CLM_PATH #define CONFIG_BCMDHD_CLM_PATH "/etc/wifi/bcmdhd_clm.blob" @@ -391,45 +413,42 @@ enum dhd_op_flags { extern char bus_api_revision[]; enum dhd_bus_wake_state { - WAKE_LOCK_OFF, - WAKE_LOCK_PRIV, - WAKE_LOCK_DPC, - WAKE_LOCK_IOCTL, - WAKE_LOCK_DOWNLOAD, - WAKE_LOCK_TMOUT, - WAKE_LOCK_WATCHDOG, - WAKE_LOCK_LINK_DOWN_TMOUT, - WAKE_LOCK_PNO_FIND_TMOUT, - WAKE_LOCK_SOFTAP_SET, - WAKE_LOCK_SOFTAP_STOP, - WAKE_LOCK_SOFTAP_START, - WAKE_LOCK_SOFTAP_THREAD + WAKE_LOCK_OFF = 0, + WAKE_LOCK_PRIV = 1, + WAKE_LOCK_DPC = 2, + WAKE_LOCK_IOCTL = 3, + WAKE_LOCK_DOWNLOAD = 4, + WAKE_LOCK_TMOUT = 5, + WAKE_LOCK_WATCHDOG = 6, + WAKE_LOCK_LINK_DOWN_TMOUT = 7, + WAKE_LOCK_PNO_FIND_TMOUT = 8, + WAKE_LOCK_SOFTAP_SET = 9, + WAKE_LOCK_SOFTAP_STOP = 10, + WAKE_LOCK_SOFTAP_START = 11, + WAKE_LOCK_SOFTAP_THREAD = 12 }; enum dhd_prealloc_index { - DHD_PREALLOC_PROT = 0, - DHD_PREALLOC_RXBUF, - 
DHD_PREALLOC_DATABUF, - DHD_PREALLOC_OSL_BUF, -#if defined(STATIC_WL_PRIV_STRUCT) - DHD_PREALLOC_WIPHY_ESCAN0 = 5, -#ifdef DUAL_ESCAN_RESULT_BUFFER - DHD_PREALLOC_WIPHY_ESCAN1, -#endif /* DUAL_ESCAN_RESULT_BUFFER */ -#endif /* STATIC_WL_PRIV_STRUCT */ - DHD_PREALLOC_DHD_INFO = 7, - DHD_PREALLOC_DHD_WLFC_INFO = 8, - DHD_PREALLOC_IF_FLOW_LKUP = 9, + DHD_PREALLOC_PROT = 0, + DHD_PREALLOC_RXBUF = 1, + DHD_PREALLOC_DATABUF = 2, + DHD_PREALLOC_OSL_BUF = 3, + DHD_PREALLOC_SKB_BUF = 4, + DHD_PREALLOC_WIPHY_ESCAN0 = 5, + DHD_PREALLOC_WIPHY_ESCAN1 = 6, + DHD_PREALLOC_DHD_INFO = 7, + DHD_PREALLOC_DHD_WLFC_INFO = 8, + DHD_PREALLOC_IF_FLOW_LKUP = 9, /* 10 */ - DHD_PREALLOC_MEMDUMP_RAM = 11, - DHD_PREALLOC_DHD_WLFC_HANGER = 12, - DHD_PREALLOC_PKTID_MAP = 13, - DHD_PREALLOC_PKTID_MAP_IOCTL = 14, - DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15, - DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16, - DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17, + DHD_PREALLOC_MEMDUMP_RAM = 11, + DHD_PREALLOC_DHD_WLFC_HANGER = 12, + DHD_PREALLOC_PKTID_MAP = 13, + DHD_PREALLOC_PKTID_MAP_IOCTL = 14, + DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15, + DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16, + DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17, DHD_PREALLOC_STAT_REPORT_BUF = 18, - DHD_PREALLOC_WL_ESCAN_INFO = 19, + DHD_PREALLOC_WL_ESCAN = 19, DHD_PREALLOC_FW_VERBOSE_RING = 20, DHD_PREALLOC_FW_EVENT_RING = 21, DHD_PREALLOC_DHD_EVENT_RING = 22, @@ -437,61 +456,70 @@ enum dhd_prealloc_index { }; enum dhd_dongledump_mode { - DUMP_DISABLED = 0, - DUMP_MEMONLY, - DUMP_MEMFILE, - DUMP_MEMFILE_BUGON, - DUMP_MEMFILE_MAX + DUMP_DISABLED = 0, + DUMP_MEMONLY = 1, + DUMP_MEMFILE = 2, + DUMP_MEMFILE_BUGON = 3, + DUMP_MEMFILE_MAX = 4 }; enum dhd_dongledump_type { - DUMP_TYPE_RESUMED_ON_TIMEOUT = 1, - DUMP_TYPE_D3_ACK_TIMEOUT, - DUMP_TYPE_DONGLE_TRAP, - DUMP_TYPE_MEMORY_CORRUPTION, - DUMP_TYPE_PKTID_AUDIT_FAILURE, - DUMP_TYPE_PKTID_INVALID, - DUMP_TYPE_SCAN_TIMEOUT, - DUMP_TYPE_SCAN_BUSY, - DUMP_TYPE_BY_SYSDUMP, - DUMP_TYPE_BY_LIVELOCK, - DUMP_TYPE_AP_LINKUP_FAILURE, - 
DUMP_TYPE_AP_ABNORMAL_ACCESS, - DUMP_TYPE_CFG_VENDOR_TRIGGERED, - DUMP_TYPE_RESUMED_ON_TIMEOUT_TX, - DUMP_TYPE_RESUMED_ON_TIMEOUT_RX, - DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR, - DUMP_TYPE_TRANS_ID_MISMATCH, - DUMP_TYPE_IFACE_OP_FAILURE, -#ifdef DEBUG_DNGL_INIT_FAIL - DUMP_TYPE_DONGLE_INIT_FAILURE, -#endif /* DEBUG_DNGL_INIT_FAIL */ - DUMP_TYPE_DONGLE_HOST_EVENT, - DUMP_TYPE_SMMU_FAULT, - DUMP_TYPE_RESUMED_UNKNOWN, -#ifdef DHD_ERPOM - DUMP_TYPE_DUE_TO_BT, -#endif /* DHD_ERPOM */ - DUMP_TYPE_LOGSET_BEYOND_RANGE, - DUMP_TYPE_BY_USER + DUMP_TYPE_RESUMED_ON_TIMEOUT = 1, + DUMP_TYPE_D3_ACK_TIMEOUT = 2, + DUMP_TYPE_DONGLE_TRAP = 3, + DUMP_TYPE_MEMORY_CORRUPTION = 4, + DUMP_TYPE_PKTID_AUDIT_FAILURE = 5, + DUMP_TYPE_PKTID_INVALID = 6, + DUMP_TYPE_SCAN_TIMEOUT = 7, + DUMP_TYPE_SCAN_BUSY = 8, + DUMP_TYPE_BY_SYSDUMP = 9, + DUMP_TYPE_BY_LIVELOCK = 10, + DUMP_TYPE_AP_LINKUP_FAILURE = 11, + DUMP_TYPE_AP_ABNORMAL_ACCESS = 12, + DUMP_TYPE_CFG_VENDOR_TRIGGERED = 13, + DUMP_TYPE_RESUMED_ON_TIMEOUT_TX = 14, + DUMP_TYPE_RESUMED_ON_TIMEOUT_RX = 15, + DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR = 16, + DUMP_TYPE_TRANS_ID_MISMATCH = 17, + DUMP_TYPE_IFACE_OP_FAILURE = 18, + DUMP_TYPE_DONGLE_INIT_FAILURE = 19, + DUMP_TYPE_READ_SHM_FAIL = 20, + DUMP_TYPE_DONGLE_HOST_EVENT = 21, + DUMP_TYPE_SMMU_FAULT = 22, + DUMP_TYPE_RESUMED_UNKNOWN = 23, + DUMP_TYPE_DUE_TO_BT = 24, + DUMP_TYPE_LOGSET_BEYOND_RANGE = 25, + DUMP_TYPE_BY_USER = 26, + DUMP_TYPE_CTO_RECOVERY = 27, + DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR = 28, + DUMP_TYPE_PROXD_TIMEOUT = 29, + DUMP_TYPE_PKTID_POOL_DEPLETED = 30 }; enum dhd_hang_reason { - HANG_REASON_MASK = 0x8000, - HANG_REASON_IOCTL_RESP_TIMEOUT = 0x8001, - HANG_REASON_DONGLE_TRAP = 0x8002, - HANG_REASON_D3_ACK_TIMEOUT = 0x8003, - HANG_REASON_BUS_DOWN = 0x8004, - HANG_REASON_MSGBUF_LIVELOCK = 0x8006, - HANG_REASON_IFACE_DEL_FAILURE = 0x8007, - HANG_REASON_HT_AVAIL_ERROR = 0x8008, - HANG_REASON_PCIE_RC_LINK_UP_FAIL = 0x8009, - HANG_REASON_PCIE_PKTID_ERROR = 0x800A, - 
HANG_REASON_IFACE_ADD_FAILURE = 0x800B, - HANG_REASON_PCIE_LINK_DOWN = 0x8805, - HANG_REASON_INVALID_EVENT_OR_DATA = 0x8806, - HANG_REASON_UNKNOWN = 0x8807, - HANG_REASON_MAX = 0x8808 + HANG_REASON_MASK = 0x8000, + HANG_REASON_IOCTL_RESP_TIMEOUT = 0x8001, + HANG_REASON_DONGLE_TRAP = 0x8002, + HANG_REASON_D3_ACK_TIMEOUT = 0x8003, + HANG_REASON_BUS_DOWN = 0x8004, + HANG_REASON_MSGBUF_LIVELOCK = 0x8006, + HANG_REASON_IFACE_DEL_FAILURE = 0x8007, + HANG_REASON_HT_AVAIL_ERROR = 0x8008, + HANG_REASON_PCIE_RC_LINK_UP_FAIL = 0x8009, + HANG_REASON_PCIE_PKTID_ERROR = 0x800A, + HANG_REASON_IFACE_ADD_FAILURE = 0x800B, + HANG_REASON_IOCTL_RESP_TIMEOUT_SCHED_ERROR = 0x800C, + HANG_REASON_D3_ACK_TIMEOUT_SCHED_ERROR = 0x800D, + HANG_REASON_SEQUENTIAL_PRIVCMD_ERROR = 0x800E, + HANG_REASON_SCAN_BUSY = 0x800F, + HANG_REASON_BSS_UP_FAILURE = 0x8010, + HANG_REASON_BSS_DOWN_FAILURE = 0x8011, + HANG_REASON_PCIE_LINK_DOWN_RC_DETECT = 0x8805, + HANG_REASON_INVALID_EVENT_OR_DATA = 0x8806, + HANG_REASON_UNKNOWN = 0x8807, + HANG_REASON_PCIE_LINK_DOWN_EP_DETECT = 0x8808, + HANG_REASON_PCIE_CTO_DETECT = 0x8809, + HANG_REASON_MAX = 0x880A }; #define WLC_E_DEAUTH_MAX_REASON 0x0FFF @@ -508,7 +536,13 @@ enum dhd_rsdb_scan_features { }; #define VENDOR_SEND_HANG_EXT_INFO_LEN (800 + 1) + +#ifdef DHD_EWPR_VER2 +#define VENDOR_SEND_HANG_EXT_INFO_VER 20181111 +#else #define VENDOR_SEND_HANG_EXT_INFO_VER 20170905 +#endif // endif + #define HANG_INFO_TRAP_T_NAME_MAX 6 #define HANG_INFO_TRAP_T_REASON_IDX 0 #define HANG_INFO_TRAP_T_SUBTYPE_IDX 2 @@ -527,6 +561,11 @@ enum dhd_rsdb_scan_features { #define HANG_KEY_DEL ' ' #define HANG_RAW_DEL '_' +#ifdef DHD_EWPR_VER2 +#define HANG_INFO_BIGDATA_EXTRA_KEY 4 +#define HANG_INFO_TRAP_T_EXTRA_KEY_IDX 5 +#endif // endif + /* Packet alignment for most efficient SDIO (can change based on platform) */ #ifndef DHD_SDALIGN #define DHD_SDALIGN 32 @@ -669,6 +708,27 @@ typedef struct { #endif /* defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */ #ifdef DHD_LOG_DUMP +#define 
DUMP_SSSR_ATTR_START 2 +#define DUMP_SSSR_ATTR_COUNT 6 + +typedef enum { + SSSR_C0_D11_BEFORE = 0, + SSSR_C0_D11_AFTER = 1, + SSSR_C1_D11_BEFORE = 2, + SSSR_C1_D11_AFTER = 3, + SSSR_DIG_BEFORE = 4, + SSSR_DIG_AFTER = 5 +} EWP_SSSR_DUMP; + +typedef enum { + DLD_BUF_TYPE_GENERAL = 0, + DLD_BUF_TYPE_PRESERVE = 1, + DLD_BUF_TYPE_SPECIAL = 2, + DLD_BUF_TYPE_ECNTRS = 3, + DLD_BUF_TYPE_FILTER = 4, + DLD_BUF_TYPE_ALL = 5 +} log_dump_type_t; + #define LOG_DUMP_MAGIC 0xDEB3DEB3 #define HEALTH_CHK_BUF_SIZE 256 @@ -677,6 +737,11 @@ typedef struct { #define ECNTR_RING_NAME "ewp_ecntr_ring" #endif /* EWP_ECNTRS_LOGGING */ +#ifdef EWP_RTT_LOGGING +#define RTT_RING_ID 0xADCD +#define RTT_RING_NAME "ewp_rtt_ring" +#endif /* EWP_ECNTRS_LOGGING */ + #if defined(DEBUGABILITY) && defined(EWP_ECNTRS_LOGGING) #error "Duplicate rings will be created since both the features are enabled" #endif /* DEBUGABILITY && EWP_ECNTRS_LOGGING */ @@ -690,7 +755,9 @@ typedef enum { LOG_DUMP_SECTION_HEALTH_CHK, LOG_DUMP_SECTION_PRESERVE, LOG_DUMP_SECTION_COOKIE, - LOG_DUMP_SECTION_FLOWRING + LOG_DUMP_SECTION_FLOWRING, + LOG_DUMP_SECTION_STATUS, + LOG_DUMP_SECTION_RTT } log_dump_section_type_t; /* Each section in the debug_dump log file shall begin with a header */ @@ -721,8 +788,6 @@ struct dhd_log_dump_buf extern void dhd_log_dump_write(int type, char *binary_data, int binary_len, const char *fmt, ...); -extern char *dhd_log_dump_get_timestamp(void); -bool dhd_log_dump_ecntr_enabled(void); #endif /* DHD_LOG_DUMP */ /* DEBUG_DUMP SUB COMMAND */ @@ -739,11 +804,21 @@ enum { #define DHD_DUMP_SUBSTR_UNWANTED "_unwanted" #define DHD_DUMP_SUBSTR_DISCONNECTED "_disconnected" +#ifdef DNGL_AXI_ERROR_LOGGING +#define DHD_DUMP_AXI_ERROR_FILENAME "axi_error" +#define DHD_DUMP_HAL_FILENAME_SUFFIX "_hal" +#endif /* DNGL_AXI_ERROR_LOGGING */ + extern void get_debug_dump_time(char *str); extern void clear_debug_dump_time(char *str); -#define WL_MAX_PRESERVE_BUFFER (NUM_EVENT_LOG_SETS) -#define FW_LOGSET_MASK_ALL 0xFF 
+#define FW_LOGSET_MASK_ALL 0xFFFFu + +#ifdef WL_MONITOR +#define MONPKT_EXTRA_LEN 48u +#endif /* WL_MONITOR */ + +#define DHDIF_FWDER(dhdif) FALSE #define DHD_COMMON_DUMP_PATH "/data/misc/wifi/" @@ -795,6 +870,51 @@ struct dhd_logtrace_thr_ts { }; #endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */ +/* Enable Reserve STA flowrings only for Android */ +#define DHD_LIMIT_MULTI_CLIENT_FLOWRINGS + +typedef enum dhd_induce_error_states +{ + DHD_INDUCE_ERROR_CLEAR = 0x0, + DHD_INDUCE_IOCTL_TIMEOUT = 0x1, + DHD_INDUCE_D3_ACK_TIMEOUT = 0x2, + DHD_INDUCE_LIVELOCK = 0x3, + DHD_INDUCE_DROP_OOB_IRQ = 0x4, + DHD_INDUCE_DROP_AXI_SIG = 0x5, + DHD_INDUCE_ERROR_MAX = 0x6 +} dhd_induce_error_states_t; + +#ifdef DHD_HP2P +#define MAX_TX_HIST_BIN 16 +#define MAX_RX_HIST_BIN 10 +#define MAX_HP2P_FLOWS 16 +#define HP2P_PRIO 7 +#define HP2P_PKT_THRESH 48 +#define HP2P_TIME_THRESH 200 +#define HP2P_PKT_EXPIRY 40 +#define HP2P_TIME_SCALE 32 + +typedef struct hp2p_info { + void *dhd_pub; + uint16 flowid; + bool hrtimer_init; + void *ring; + struct tasklet_hrtimer timer; + uint64 num_pkt_limit; + uint64 num_timer_limit; + uint64 num_timer_start; + uint64 tx_t0[MAX_TX_HIST_BIN]; + uint64 tx_t1[MAX_TX_HIST_BIN]; + uint64 rx_t0[MAX_RX_HIST_BIN]; +} hp2p_info_t; +#endif /* DHD_HP2P */ + +typedef enum { + FW_UNLOADED = 0, + FW_DOWNLOAD_IN_PROGRESS = 1, + FW_DOWNLOAD_DONE = 2 +} fw_download_status_t; + /** * Common structure for module and instance linkage. * Instantiated once per hardware (dongle) instance that this DHD manages. 
@@ -831,7 +951,11 @@ typedef struct dhd_pub { uint maxctl; /* Max size rxctl request from proto to bus */ uint rxsz; /* Rx buffer size bus module should use */ uint8 wme_dp; /* wme discard priority */ - +#ifdef DNGL_AXI_ERROR_LOGGING + uint32 axierror_logbuf_addr; + bool axi_error; + struct dhd_axi_error_dump *axi_err_dump; +#endif /* DNGL_AXI_ERROR_LOGGING */ /* Dongle media info */ bool iswl; /* Dongle-resident driver is wl */ ulong drv_version; /* Version of dongle-resident driver */ @@ -910,10 +1034,8 @@ typedef struct dhd_pub { */ /* #define WL_ENABLE_P2P_IF 1 */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) - struct mutex wl_start_stop_lock; /* lock/unlock for Android start/stop */ - struct mutex wl_softap_lock; /* lock/unlock for any SoftAP/STA settings */ -#endif // endif + struct mutex wl_start_stop_lock; /* lock/unlock for Android start/stop */ + struct mutex wl_softap_lock; /* lock/unlock for any SoftAP/STA settings */ #ifdef PROP_TXSTATUS bool wlfc_enabled; @@ -963,19 +1085,32 @@ typedef struct dhd_pub { #endif // endif bool dongle_isolation; bool is_pcie_watchdog_reset; + +/* Begin - Variables to track Bus Errors */ bool dongle_trap_occured; /* flag for sending HANG event to upper layer */ bool iovar_timeout_occured; /* flag to indicate iovar resumed on timeout */ + bool is_sched_error; /* flag to indicate timeout due to scheduling issue */ #ifdef PCIE_FULL_DONGLE bool d3ack_timeout_occured; /* flag to indicate d3ack resumed on timeout */ bool livelock_occured; /* flag to indicate livelock occured */ + bool pktid_audit_failed; /* flag to indicate pktid audit failure */ #endif /* PCIE_FULL_DONGLE */ + bool iface_op_failed; /* flag to indicate interface operation failed */ + bool scan_timeout_occurred; /* flag to indicate scan has timedout */ + bool scan_busy_occurred; /* flag to indicate scan busy occurred */ #ifdef BT_OVER_SDIO bool is_bt_recovery_required; #endif // endif -#ifdef DHD_MAP_LOGGING bool smmu_fault_occurred; /* flag to indicate 
SMMU Fault */ -#endif /* DHD_MAP_LOGGING */ + /* + * Add any new variables to track Bus errors above + * this line. Also ensure that the variable is + * cleared from dhd_clear_bus_errors + */ +/* End - Variables to track Bus Errors */ + int hang_was_sent; + int hang_was_pending; int rxcnt_timeout; /* counter rxcnt timeout to send HANG */ int txcnt_timeout; /* counter txcnt timeout to send HANG */ #ifdef BCMPCIE @@ -1004,6 +1139,7 @@ typedef struct dhd_pub { #endif /* DHDTCPACK_SUPPRESS */ #if defined(ARP_OFFLOAD_SUPPORT) uint32 arp_version; + bool hmac_updated; #endif // endif #if defined(BCMSUP_4WAY_HANDSHAKE) bool fw_4way_handshake; /* Whether firmware will to do the 4way handshake. */ @@ -1027,6 +1163,8 @@ typedef struct dhd_pub { void *if_flow_lkup; /* per interface flowid lkup hash table */ void *flowid_lock; /* per os lock for flowid info protection */ void *flowring_list_lock; /* per os lock for flowring list protection */ + uint8 max_multi_client_flow_rings; + uint8 multi_client_flow_rings; uint32 num_flow_rings; cumm_ctr_t cumm_ctr; /* cumm queue length placeholder */ cumm_ctr_t l2cumm_ctr; /* level 2 cumm queue length placeholder */ @@ -1038,6 +1176,9 @@ typedef struct dhd_pub { bool dma_h2d_ring_upd_support; bool dma_ring_upd_overwrite; /* host overwrites support setting */ + bool hwa_enable; + uint hwa_inited; + bool idma_enable; uint idma_inited; @@ -1054,6 +1195,7 @@ typedef struct dhd_pub { #endif /* DHD_L2_FILTER */ #ifdef DHD_SSSR_DUMP bool sssr_inited; + bool sssr_dump_collected; /* Flag to indicate sssr dump is collected */ sssr_reg_info_v1_t sssr_reg_info; uint8 *sssr_mempool; uint *sssr_d11_before[MAX_NUM_D11CORES]; @@ -1062,6 +1204,7 @@ typedef struct dhd_pub { uint *sssr_dig_buf_before; uint *sssr_dig_buf_after; uint32 sssr_dump_mode; + bool collect_sssr; /* Flag to indicate SSSR dump is required */ #endif /* DHD_SSSR_DUMP */ uint8 *soc_ram; uint32 soc_ram_length; @@ -1097,7 +1240,7 @@ typedef struct dhd_pub { bool h2d_phase_supported; bool 
force_dongletrap_on_bad_h2d_phase; uint32 dongle_trap_data; - bool fw_download_done; + fw_download_status_t fw_download_status; trap_t last_trap_info; /* trap info from the last trap */ uint8 rand_mac_oui[DOT11_OUI_LEN]; #ifdef DHD_LOSSLESS_ROAMING @@ -1132,6 +1275,9 @@ typedef struct dhd_pub { #ifdef EWP_ECNTRS_LOGGING void *ecntr_dbg_ring; #endif // endif +#ifdef EWP_RTT_LOGGING + void *rtt_dbg_ring; +#endif // endif #ifdef DNGL_EVENT_SUPPORT uint8 health_chk_event_data[HEALTH_CHK_BUF_SIZE]; #endif // endif @@ -1159,12 +1305,6 @@ typedef struct dhd_pub { uint pcie_txs_metadata_enable; uint wbtext_policy; /* wbtext policy of dongle */ bool wbtext_support; /* for product policy only */ -#ifdef SHOW_LOGTRACE -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) - struct mutex dhd_trace_lock; -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ -#endif /* SHOW_LOGTRACE */ - bool max_dtim_enable; /* use MAX bcn_li_dtim value in suspend mode */ tput_test_t tput_data; uint64 tput_start_ts; @@ -1182,6 +1322,7 @@ typedef struct dhd_pub { struct _dhd_dump_file_manage *dump_file_manage; #endif /* DHD_DUMP_MNGR */ int debug_dump_subcmd; + uint64 debug_dump_time_sec; bool hscb_enable; wait_queue_head_t tx_completion_wait; uint32 batch_tx_pkts_cmpl; @@ -1197,9 +1338,7 @@ typedef struct dhd_pub { bool dongle_edl_support; dhd_dma_buf_t edl_ring_mem; #endif /* EWP_EDL */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) struct mutex ndev_op_sync; -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ bool debug_buf_dest_support; uint32 debug_buf_dest_stat[DEBUG_BUF_DEST_MAX]; @@ -1211,22 +1350,63 @@ typedef struct dhd_pub { */ uint32 dhd_rte_time_sync_ms; #endif /* DHD_H2D_LOG_TIME_SYNC */ + int wlc_ver_major; + int wlc_ver_minor; +#ifdef DHD_STATUS_LOGGING + void *statlog; +#endif /* DHD_STATUS_LOGGING */ +#ifdef DHD_HP2P + bool hp2p_enable; + bool hp2p_infra_enable; + bool hp2p_capable; + bool hp2p_ts_capable; + uint16 pkt_thresh; + uint16 time_thresh; + 
uint16 pkt_expiry; + hp2p_info_t hp2p_info[MAX_HP2P_FLOWS]; + bool hp2p_ring_active; +#endif /* D2H_HP2P */ +#ifdef DHD_DB0TS + bool db0ts_capable; +#endif /* DHD_DB0TS */ + bool event_log_max_sets_queried; + uint32 event_log_max_sets; + uint16 dhd_induce_error; +#ifdef CONFIG_SILENT_ROAM + bool sroam_turn_on; /* Silent roam monitor enable flags */ + bool sroamed; /* Silent roam monitor check flags */ +#endif /* CONFIG_SILENT_ROAM */ + bool extdtxs_in_txcpl; + bool hostrdy_after_init; +#ifdef SUPPORT_SET_TID + uint8 tid_mode; + uint32 target_uid; + uint8 target_tid; +#endif /* SUPPORT_SET_TID */ +#ifdef DHD_PKTDUMP_ROAM + void *pktcnts; +#endif /* DHD_PKTDUMP_ROAM */ + bool disable_dtim_in_suspend; /* Disable set bcn_li_dtim in suspend */ char *clm_path; /* module_param: path to clm vars file */ char *conf_path; /* module_param: path to config vars file */ struct dhd_conf *conf; /* Bus module handle */ void *adapter; /* adapter information, interrupt, fw path etc. */ + void *event_params; #ifdef BCMDBUS bool dhd_remove; #endif /* BCMDBUS */ -#if defined(WL_WIRELESS_EXT) -#if defined(WL_ESCAN) - void *escan; -#else - void *iscan; +#ifdef WL_ESCAN + struct wl_escan_info *escan; #endif +#if defined(WL_WIRELESS_EXT) + void *wext_info; #endif #ifdef WL_EXT_IAPSTA void *iapsta_params; +#endif + int hostsleep; +#ifdef SENDPROB + bool recv_probereq; #endif } dhd_pub_t; @@ -1263,9 +1443,9 @@ typedef struct dhd_pkttag_fd { void *dmah; /* dma mapper handle */ void *secdma; /* secure dma sec_cma_info handle */ #endif /* !DHD_PCIE_PKTID */ -#ifdef TX_STATUS_LATENCY_STATS +#if defined(TX_STATUS_LATENCY_STATS) uint64 q_time_us; /* time when tx pkt queued to flowring */ -#endif /* TX_STATUS_LATENCY_STATS */ +#endif // endif } dhd_pkttag_fd_t; /* Packet Tag for DHD PCIE Full Dongle */ @@ -1295,11 +1475,11 @@ typedef struct dhd_pkttag_fd { #define DHD_PKT_SET_SECDMA(pkt, pkt_secdma) \ DHD_PKTTAG_FD(pkt)->secdma = (void *)(pkt_secdma) -#ifdef TX_STATUS_LATENCY_STATS +#if 
defined(TX_STATUS_LATENCY_STATS) #define DHD_PKT_GET_QTIME(pkt) ((DHD_PKTTAG_FD(pkt))->q_time_us) #define DHD_PKT_SET_QTIME(pkt, pkt_q_time_us) \ DHD_PKTTAG_FD(pkt)->q_time_us = (uint64)(pkt_q_time_us) -#endif /* TX_STATUS_LATENCY_STATS */ +#endif // endif #endif /* PCIE_FULL_DONGLE */ #if defined(BCMWDF) @@ -1310,7 +1490,7 @@ typedef struct { WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(dhd_workitem_context_t, dhd_get_dhd_workitem_context) #endif /* (BCMWDF) */ - #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) + #if defined(CONFIG_PM_SLEEP) #define DHD_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a); #define _DHD_PM_RESUME_WAIT(a, b) do {\ @@ -1358,7 +1538,7 @@ WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(dhd_workitem_context_t, dhd_get_dhd_workitem_ } \ } while (0) - #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ + #endif /* CONFIG_PM_SLEEP */ #ifndef OSL_SLEEP #define OSL_SLEEP(ms) OSL_DELAY(ms*1000) @@ -1399,23 +1579,17 @@ extern void dhd_os_scan_wake_unlock(dhd_pub_t *pub); inline static void MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t * dhdp) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) mutex_init(&dhdp->wl_softap_lock); -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ } inline static void MUTEX_LOCK_SOFTAP_SET(dhd_pub_t * dhdp) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) mutex_lock(&dhdp->wl_softap_lock); -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ } inline static void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) mutex_unlock(&dhdp->wl_softap_lock); -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ } #ifdef DHD_DEBUG_WAKE_LOCK @@ -1563,7 +1737,7 @@ inline static void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp) #define OOB_WAKE_LOCK_TIMEOUT 500 extern void dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val); extern void dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub); -extern int 
dhdpcie_get_oob_irq_num(struct dhd_bus *bus); + #define DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(pub, val) dhd_os_oob_irq_wake_lock_timeout(pub, val) #define DHD_OS_OOB_IRQ_WAKE_UNLOCK(pub) dhd_os_oob_irq_wake_unlock(pub) #endif /* BCMPCIE_OOB_HOST_WAKE */ @@ -1691,6 +1865,7 @@ extern dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen , void *adapter #endif ); +extern int dhd_attach_net(dhd_pub_t *dhdp, bool need_rtnl_lock); #if defined(WLP2P) && defined(WL_CFG80211) /* To allow attach/detach calls corresponding to p2p0 interface */ extern int dhd_attach_p2p(dhd_pub_t *); @@ -1726,6 +1901,10 @@ extern void dhd_sched_dpc(dhd_pub_t *dhdp); /* Notify tx completion */ extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success); +#ifdef DHD_4WAYM4_FAIL_DISCONNECT +extern void dhd_eap_txcomplete(dhd_pub_t *dhdp, void *txp, bool success, int ifidx); +extern void dhd_cleanup_m4_state_work(dhd_pub_t *dhdp, int ifidx); +#endif /* DHD_4WAYM4_FAIL_DISCONNECT */ #ifdef DHD_PCIE_NATIVE_RUNTIMEPM extern void dhd_bus_wakeup_work(dhd_pub_t *dhdp); @@ -1797,7 +1976,7 @@ void dhd_os_logdump_lock(dhd_pub_t *pub); void dhd_os_logdump_unlock(dhd_pub_t *pub); extern int dhd_os_proto_block(dhd_pub_t * pub); extern int dhd_os_proto_unblock(dhd_pub_t * pub); -extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition, bool resched); +extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition); extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub); extern unsigned int dhd_os_get_ioctl_resp_timeout(void); extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec); @@ -1813,19 +1992,19 @@ static INLINE void dhd_wakeup_ioctl_event(dhd_pub_t *pub, dhd_ioctl_recieved_sta /* Bound and delay are fine tuned after several experiments and these * are the best case values to handle bombarding of console logs. 
*/ -#define DHD_EVENT_LOGTRACE_BOUND 1 -#define DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS 200 +#define DHD_EVENT_LOGTRACE_BOUND 10 +/* since FW has event log rate health check (EVENT_LOG_RATE_HC) we can reduce + * the reschedule delay to 10ms +*/ +#define DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS 10u extern int dhd_os_read_file(void *file, char *buf, uint32 size); extern int dhd_os_seek_file(void *file, int64 offset); void dhd_sendup_info_buf(dhd_pub_t *dhdp, uint8 *msg); #endif /* SHOW_LOGTRACE */ int dhd_os_write_file_posn(void *fp, unsigned long *posn, void *buf, unsigned long buflen); - -#if defined(PCIE_FULL_DONGLE) -extern void dhd_pcie_backplane_access_lock(dhd_pub_t * pub); -extern void dhd_pcie_backplane_access_unlock(dhd_pub_t * pub); -#endif /* defined(PCIE_FULL_DONGLE) */ +int dhd_msix_message_set(dhd_pub_t *dhdp, uint table_entry, + uint message_number, bool unmask); extern void dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr); @@ -1888,7 +2067,13 @@ extern int dhd_keep_alive_onoff(dhd_pub_t *dhd); void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size); #endif /* DHD_FW_COREDUMP */ -void dhd_schedule_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode); +void dhd_write_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode); +#ifdef DNGL_AXI_ERROR_LOGGING +void dhd_schedule_axi_error_dump(dhd_pub_t *dhdp, void *type); +#endif /* DNGL_AXI_ERROR_LOGGING */ +#ifdef BCMPCIE +void dhd_schedule_cto_recovery(dhd_pub_t *dhdp); +#endif /* BCMPCIE */ #ifdef PKT_FILTER_SUPPORT #define DHD_UNICAST_FILTER_NUM 0 @@ -1917,6 +2102,7 @@ extern int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val); #define MAX_PKTFLT_FIXED_PATTERN_SIZE 32 #define MAX_PKTFLT_FIXED_BUF_SIZE \ (WL_PKT_FILTER_FIXED_LEN + MAX_PKTFLT_FIXED_PATTERN_SIZE * 2) +#define MAXPKT_ARG 16 #endif /* PKT_FILTER_SUPPORT */ #if defined(BCMPCIE) @@ -1971,30 +2157,6 @@ typedef struct { } dhd_event_log_t; #endif /* SHOW_LOGTRACE */ -#if 
defined(DHD_NON_DMA_M2M_CORRUPTION) -#define PCIE_DMAXFER_LPBK_LENGTH 4096 -typedef struct dhd_pcie_dmaxfer_lpbk { - union { - uint32 length; - uint32 status; - } u; - uint32 srcdelay; - uint32 destdelay; - uint32 lpbkmode; - uint32 wait; - uint32 core; -} dhd_pcie_dmaxfer_lpbk_t; -#endif /* DHD_NON_DMA_M2M_CORRUPTION */ -enum d11_lpbk_type { - M2M_DMA_LPBK = 0, - D11_LPBK = 1, - BMC_LPBK = 2, - M2M_NON_DMA_LPBK = 3, - D11_HOST_MEM_LPBK = 4, - BMC_HOST_MEM_LPBK = 5, - MAX_LPBK = 6 -}; - #ifdef KEEP_ALIVE extern int dhd_dev_start_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id, uint8 *ip_pkt, uint16 ip_pkt_len, uint8* src_mac_addr, uint8* dst_mac_addr, uint32 period_msec); @@ -2235,6 +2397,9 @@ extern uint dhd_roam_disable; /* Roaming mode control */ extern uint dhd_radio_up; +/* TCM verification control */ +extern uint dhd_tcm_test_enable; + /* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */ extern int dhd_idletime; #ifdef DHD_USE_IDLECOUNT @@ -2419,6 +2584,17 @@ extern char fw_path2[MOD_PARAM_PATHLEN]; #define VENDOR_PATH "" #endif /* ANDROID_PLATFORM_VERSION */ +#if defined(ANDROID_PLATFORM_VERSION) +#if (ANDROID_PLATFORM_VERSION < 9) +#ifdef WL_STATIC_IF +#undef WL_STATIC_IF +#endif /* WL_STATIC_IF */ +#ifdef WL_STATIC_IFNAME_PREFIX +#undef WL_STATIC_IFNAME_PREFIX +#endif /* WL_STATIC_IFNAME_PREFIX */ +#endif /* ANDROID_PLATFORM_VERSION < 9 */ +#endif /* ANDROID_PLATFORM_VERSION */ + #if defined(DHD_LEGACY_FILE_PATH) #define PLATFORM_PATH "/data/" #elif defined(PLATFORM_SLP) @@ -2427,6 +2603,11 @@ extern char fw_path2[MOD_PARAM_PATHLEN]; #if defined(ANDROID_PLATFORM_VERSION) #if (ANDROID_PLATFORM_VERSION >= 9) #define PLATFORM_PATH "/data/vendor/conn/" +#define DHD_MAC_ADDR_EXPORT +#define DHD_ADPS_BAM_EXPORT +#define DHD_EXPORT_CNTL_FILE +#define DHD_SOFTAP_DUAL_IF_INFO +#define DHD_SEND_HANG_PRIVCMD_ERRORS #else #define PLATFORM_PATH "/data/misc/conn/" #endif /* ANDROID_PLATFORM_VERSION >= 9 */ @@ -2435,6 +2616,10 @@ extern char 
fw_path2[MOD_PARAM_PATHLEN]; #endif /* ANDROID_PLATFORM_VERSION */ #endif /* DHD_LEGACY_FILE_PATH */ +#ifdef DHD_MAC_ADDR_EXPORT +extern struct ether_addr sysfs_mac_addr; +#endif /* DHD_MAC_ADDR_EXPORT */ + /* Flag to indicate if we should download firmware on driver load */ extern uint dhd_download_fw_on_driverload; #ifndef BCMDBUS @@ -2445,6 +2630,7 @@ extern int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost); extern int dhd_write_file(const char *filepath, char *buf, int buf_len); extern int dhd_read_file(const char *filepath, char *buf, int buf_len); extern int dhd_write_file_and_check(const char *filepath, char *buf, int buf_len); +extern int dhd_file_delete(char *path); #ifdef READ_MACADDR extern int dhd_set_macaddr_from_file(dhd_pub_t *dhdp); @@ -2457,6 +2643,10 @@ extern int dhd_write_macaddr(struct ether_addr *mac); static INLINE int dhd_write_macaddr(struct ether_addr *mac) { return 0; } #endif /* WRITE_MACADDR */ #ifdef USE_CID_CHECK +#define MAX_VNAME_LEN 64 +#ifdef DHD_EXPORT_CNTL_FILE +extern char cidinfostr[MAX_VNAME_LEN]; +#endif /* DHD_EXPORT_CNTL_FILE */ extern int dhd_check_module_cid(dhd_pub_t *dhdp); extern char *dhd_get_cid_info(unsigned char *vid, int vid_length); #else @@ -2568,7 +2758,7 @@ int dhd_common_socram_dump(dhd_pub_t *dhdp); int dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen); int dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size); - +void dhd_get_memdump_filename(struct net_device *ndev, char *memdump_path, int len, char *fname); uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail); void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size); @@ -2597,6 +2787,20 @@ enum { PARAM_LAST_VALUE }; extern int sec_get_param_wfa_cert(dhd_pub_t *dhd, int mode, uint* read_val); +#ifdef DHD_EXPORT_CNTL_FILE +#define VALUENOTSET 0xFFFFFFFFu +extern uint32 bus_txglom; +extern uint32 roam_off; +#ifdef USE_WL_FRAMEBURST +extern uint32 frameburst; +#endif /* USE_WL_FRAMEBURST 
*/ +#ifdef USE_WL_TXBF +extern uint32 txbf; +#endif /* USE_WL_TXBF */ +#ifdef PROP_TXSTATUS +extern uint32 proptx; +#endif /* PROP_TXSTATUS */ +#endif /* DHD_EXPORT_CNTL_FILE */ #endif /* USE_WFA_CERT_CONF */ #define dhd_add_flowid(pub, ifidx, ac_prio, ea, flowid) do {} while (0) @@ -2645,6 +2849,10 @@ extern void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags); #define DHD_BUS_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) #define DHD_BUS_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) +/* Enable DHD backplane spin lock/unlock */ +#define DHD_BACKPLANE_ACCESS_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_BACKPLANE_ACCESS_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) + #define DHD_BUS_INB_DW_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) #define DHD_BUS_INB_DW_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) @@ -2723,8 +2931,7 @@ int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len); void custom_rps_map_clear(struct netdev_rx_queue *queue); #define PRIMARY_INF 0 #define VIRTUAL_INF 1 -#if defined(CONFIG_MACH_UNIVERSAL5433) || defined(CONFIG_MACH_UNIVERSAL7420) || \ - defined(CONFIG_SOC_EXYNOS8890) +#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) #define RPS_CPUS_MASK "10" #define RPS_CPUS_MASK_P2P "10" #define RPS_CPUS_MASK_IBSS "10" @@ -2733,7 +2940,7 @@ void custom_rps_map_clear(struct netdev_rx_queue *queue); #define RPS_CPUS_MASK "6" #define RPS_CPUS_MASK_P2P "6" #define RPS_CPUS_MASK_IBSS "6" -#endif /* CONFIG_MACH_UNIVERSAL5433 || CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 */ +#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 */ #endif // endif int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component, @@ -2784,6 +2991,7 @@ void dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap); #if defined(DHD_LB_STATS) #include extern void dhd_lb_stats_init(dhd_pub_t *dhd); +extern void 
dhd_lb_stats_deinit(dhd_pub_t *dhd); extern void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count); extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count); @@ -2821,12 +3029,9 @@ extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp); #define DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhdp) DHD_LB_STATS_NOOP #define DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhdp) DHD_LB_STATS_NOOP #endif /* !DHD_LB_STATS */ -#ifdef DHD_LB_IRQSET -extern void dhd_irq_set_affinity(dhd_pub_t *dhdp); -#endif /* DHD_LB_IRQSET */ #ifdef DHD_SSSR_DUMP -#define DHD_SSSR_MEMPOOL_SIZE (1024 * 1024) /* 1MB size */ +#define DHD_SSSR_MEMPOOL_SIZE (2 * 1024 * 1024) /* 2MB size */ /* used in sssr_dump_mode */ #define SSSR_DUMP_MODE_SSSR 0 /* dump both *before* and *after* files */ @@ -2836,31 +3041,34 @@ extern int dhd_sssr_mempool_init(dhd_pub_t *dhd); extern void dhd_sssr_mempool_deinit(dhd_pub_t *dhd); extern int dhd_sssr_dump_init(dhd_pub_t *dhd); extern void dhd_sssr_dump_deinit(dhd_pub_t *dhd); +extern int dhdpcie_sssr_dump(dhd_pub_t *dhd); +extern void dhd_sssr_print_filepath(dhd_pub_t *dhd, char *path); + #define DHD_SSSR_MEMPOOL_INIT(dhdp) dhd_sssr_mempool_init(dhdp) #define DHD_SSSR_MEMPOOL_DEINIT(dhdp) dhd_sssr_mempool_deinit(dhdp) #define DHD_SSSR_DUMP_INIT(dhdp) dhd_sssr_dump_init(dhdp) #define DHD_SSSR_DUMP_DEINIT(dhdp) dhd_sssr_dump_deinit(dhdp) +#define DHD_SSSR_PRINT_FILEPATH(dhdp, path) dhd_sssr_print_filepath(dhdp, path) #else -#define DHD_SSSR_MEMPOOL_INIT(dhdp) do { /* noop */ } while (0) -#define DHD_SSSR_MEMPOOL_DEINIT(dhdp) do { /* noop */ } while (0) -#define DHD_SSSR_DUMP_INIT(dhdp) do { /* noop */ } while (0) -#define DHD_SSSR_DUMP_DEINIT(dhdp) do { /* noop */ } while (0) +#define DHD_SSSR_MEMPOOL_INIT(dhdp) do { /* noop */ } while (0) +#define DHD_SSSR_MEMPOOL_DEINIT(dhdp) do { /* noop */ } while (0) +#define DHD_SSSR_DUMP_INIT(dhdp) do { /* noop */ } while (0) +#define 
DHD_SSSR_DUMP_DEINIT(dhdp) do { /* noop */ } while (0) +#define DHD_SSSR_PRINT_FILEPATH(dhdp, path) do { /* noop */ } while (0) #endif /* DHD_SSSR_DUMP */ -#ifdef SHOW_LOGTRACE -void dhd_get_read_buf_ptr(dhd_pub_t *dhd_pub, trace_buf_info_t *read_buf_info); -#endif /* SHOW_LOGTRACE */ - #ifdef BCMPCIE extern int dhd_prot_debug_info_print(dhd_pub_t *dhd); extern bool dhd_bus_skip_clm(dhd_pub_t *dhdp); +extern void dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd); +extern bool dhd_pcie_dump_int_regs(dhd_pub_t *dhd); #else #define dhd_prot_debug_info_print(x) static INLINE bool dhd_bus_skip_clm(dhd_pub_t *dhd_pub) { return 0; } #endif /* BCMPCIE */ -bool dhd_fw_download_status(dhd_pub_t * dhd_pub); +fw_download_status_t dhd_fw_download_status(dhd_pub_t * dhd_pub); void dhd_show_kirqstats(dhd_pub_t *dhd); /* Bitmask used for Join Timeout */ @@ -2894,27 +3102,22 @@ extern int dhd_stop_timesync_timer(dhd_pub_t *pub); void dhd_pktid_error_handler(dhd_pub_t *dhdp); #endif /* DHD_PKTID_AUDIT_ENABLED */ +#ifdef DHD_MAP_PKTID_LOGGING +extern void dhd_pktid_logging_dump(dhd_pub_t *dhdp); +#endif /* DHD_MAP_PKTID_LOGGING */ + #define DHD_DISABLE_RUNTIME_PM(dhdp) #define DHD_ENABLE_RUNTIME_PM(dhdp) extern bool dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info); extern void dhd_prot_dump_ring_ptrs(void *prot_info); -/* - * Enable this macro if you want to track the calls to wake lock - * This records can be printed using the following command - * cat /sys/bcm-dhd/wklock_trace - * DHD_TRACE_WAKE_LOCK supports over linux 2.6.0 version - */ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) -#undef DHD_TRACE_WAKE_LOCK -#endif /* KERNEL_VER < KERNEL_VERSION(2, 6, 0) */ - #if defined(DHD_TRACE_WAKE_LOCK) void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp); #endif // endif extern bool dhd_query_bus_erros(dhd_pub_t *dhdp); +void dhd_clear_bus_errors(dhd_pub_t *dhdp); #if defined(CONFIG_64BIT) #define DHD_SUPPORT_64BIT @@ -2953,8 +3156,13 @@ int dhd_check_valid_ie(dhd_pub_t *dhdp, 
uint8 *buf, int len); uint16 dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp); #ifdef SET_PCIE_IRQ_CPU_CORE -extern void dhd_set_irq_cpucore(dhd_pub_t *dhdp, int set); -extern void set_irq_cpucore(unsigned int irq, int set); +enum { + PCIE_IRQ_AFFINITY_OFF = 0, + PCIE_IRQ_AFFINITY_BIG_CORE_ANY, + PCIE_IRQ_AFFINITY_BIG_CORE_EXYNOS, + PCIE_IRQ_AFFINITY_LAST +}; +extern void dhd_set_irq_cpucore(dhd_pub_t *dhdp, int affinity_cmd); #endif /* SET_PCIE_IRQ_CPU_CORE */ #ifdef DHD_WAKE_STATUS @@ -2966,6 +3174,7 @@ extern void dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path); #endif /* DHD_BLOB_EXISTENCE_CHECK */ /* configuration of ecounters. API's tp start/stop. currently supported only for linux */ +extern int dhd_ecounter_configure(dhd_pub_t *dhd, bool enable); extern int dhd_start_ecounters(dhd_pub_t *dhd); extern int dhd_stop_ecounters(dhd_pub_t *dhd); extern int dhd_start_event_ecounters(dhd_pub_t *dhd); @@ -2977,15 +3186,79 @@ int dhd_get_preserve_log_numbers(dhd_pub_t *dhd, uint32 *logset_mask); void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type); void dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd); int dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file, - unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr); -int dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, unsigned long *f_pos); + unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr, char *text_hdr, + uint32 sec_type); +int dhd_dump_debug_ring(dhd_pub_t *dhdp, void *ring_ptr, const void *user_buf, + log_dump_section_hdr_t *sec_hdr, char *text_hdr, int buflen, uint32 sec_type); +int dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, + const void *user_buf, unsigned long *f_pos); +int dhd_log_dump_cookie(dhd_pub_t *dhdp, const void *user_buf); +uint32 dhd_log_dump_cookie_len(dhd_pub_t *dhdp); int dhd_logdump_cookie_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size); void dhd_logdump_cookie_deinit(dhd_pub_t *dhdp); void dhd_logdump_cookie_save(dhd_pub_t *dhdp, char 
*cookie, char *type); int dhd_logdump_cookie_get(dhd_pub_t *dhdp, char *ret_cookie, uint32 buf_size); int dhd_logdump_cookie_count(dhd_pub_t *dhdp); -#endif /* DHD_LOG_DUMP */ +int dhd_get_dld_log_dump(void *dev, dhd_pub_t *dhdp, const void *user_buf, void *fp, + uint32 len, int type, void *pos); +int dhd_print_ext_trap_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos); +int dhd_print_dump_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos); +int dhd_print_cookie_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos); +int dhd_print_health_chk_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos); +int dhd_print_time_str(const void *user_buf, void *fp, uint32 len, void *pos); +#ifdef DHD_DUMP_PCIE_RINGS +int dhd_print_flowring_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos); +uint32 dhd_get_flowring_len(void *ndev, dhd_pub_t *dhdp); +#endif /* DHD_DUMP_PCIE_RINGS */ +#ifdef DHD_STATUS_LOGGING +extern int dhd_print_status_log_data(void *dev, dhd_pub_t *dhdp, + const void *user_buf, void *fp, uint32 len, void *pos); +extern uint32 dhd_get_status_log_len(void *ndev, dhd_pub_t *dhdp); +#endif /* DHD_STATUS_LOGGING */ +int dhd_print_ecntrs_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos); +int dhd_print_rtt_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos); +int dhd_get_debug_dump_file_name(void *dev, dhd_pub_t *dhdp, + char *dump_path, int size); +#if defined(BCMPCIE) +uint32 dhd_get_ext_trap_len(void *ndev, dhd_pub_t *dhdp); +#endif +uint32 dhd_get_time_str_len(void); +uint32 dhd_get_health_chk_len(void *ndev, dhd_pub_t *dhdp); +uint32 dhd_get_dhd_dump_len(void *ndev, dhd_pub_t *dhdp); +uint32 dhd_get_cookie_log_len(void *ndev, dhd_pub_t *dhdp); +uint32 dhd_get_ecntrs_len(void *ndev, dhd_pub_t *dhdp); 
+uint32 dhd_get_rtt_len(void *ndev, dhd_pub_t *dhdp); +uint32 dhd_get_dld_len(int log_type); +void dhd_init_sec_hdr(log_dump_section_hdr_t *sec_hdr); +extern char *dhd_log_dump_get_timestamp(void); +bool dhd_log_dump_ecntr_enabled(void); +bool dhd_log_dump_rtt_enabled(void); +void dhd_nla_put_sssr_dump_len(void *ndev, uint32 *arr_len); +int dhd_get_debug_dump(void *dev, const void *user_buf, uint32 len, int type); +int +dhd_sssr_dump_d11_buf_before(void *dev, const void *user_buf, uint32 len, int core); +int +dhd_sssr_dump_d11_buf_after(void *dev, const void *user_buf, uint32 len, int core); +int +dhd_sssr_dump_dig_buf_before(void *dev, const void *user_buf, uint32 len); +int +dhd_sssr_dump_dig_buf_after(void *dev, const void *user_buf, uint32 len); + +#ifdef DNGL_AXI_ERROR_LOGGING +extern int dhd_os_get_axi_error_dump(void *dev, const void *user_buf, uint32 len); +extern int dhd_os_get_axi_error_dump_size(struct net_device *dev); +extern void dhd_os_get_axi_error_filename(struct net_device *dev, char *dump_path, int len); +#endif /* DNGL_AXI_ERROR_LOGGING */ +#endif /* DHD_LOG_DUMP */ +int dhd_export_debug_data(void *mem_buf, void *fp, const void *user_buf, int buf_len, void *pos); #define DHD_PCIE_CONFIG_SAVE(bus) pci_save_state(bus->dev) #define DHD_PCIE_CONFIG_RESTORE(bus) pci_restore_state(bus->dev) @@ -2997,11 +3270,17 @@ typedef struct dhd_pkt_parse { /* ========= RING API functions : exposed to others ============= */ #define DHD_RING_TYPE_FIXED 1 +#define DHD_RING_TYPE_SINGLE_IDX 2 uint32 dhd_ring_get_hdr_size(void); -void *dhd_ring_init(uint8 *buf, uint32 buf_size, uint32 elem_size, uint32 elem_cnt); -void dhd_ring_deinit(void *_ring); +void *dhd_ring_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size, uint32 elem_size, + uint32 elem_cnt, uint32 type); +void dhd_ring_deinit(dhd_pub_t *dhdp, void *_ring); void *dhd_ring_get_first(void *_ring); void dhd_ring_free_first(void *_ring); +void dhd_ring_set_read_idx(void *_ring, uint32 read_idx); +void 
dhd_ring_set_write_idx(void *_ring, uint32 write_idx); +uint32 dhd_ring_get_read_idx(void *_ring); +uint32 dhd_ring_get_write_idx(void *_ring); void *dhd_ring_get_last(void *_ring); void *dhd_ring_get_next(void *_ring, void *cur); void *dhd_ring_get_prev(void *_ring, void *cur); @@ -3013,6 +3292,8 @@ void *dhd_ring_lock_get_first(void *_ring); void *dhd_ring_lock_get_last(void *_ring); int dhd_ring_lock_get_count(void *_ring); void dhd_ring_lock_free_first(void *ring); +void dhd_ring_whole_lock(void *ring); +void dhd_ring_whole_unlock(void *ring); #define DHD_DUMP_TYPE_NAME_SIZE 32 #define DHD_DUMP_FILE_PATH_SIZE 256 @@ -3033,8 +3314,15 @@ typedef struct _dhd_dump_file_manage { extern void dhd_dump_file_manage_enqueue(dhd_pub_t *dhd, char *dump_path, char *fname); #endif /* DHD_DUMP_MNGR */ +#ifdef PKT_FILTER_SUPPORT +extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg); +extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode); +extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id); +#endif // endif + #ifdef DHD_DUMP_PCIE_RINGS -extern int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, unsigned long *file_posn); +extern int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, const void *user_buf, + unsigned long *file_posn, bool file_write); #endif /* DHD_DUMP_PCIE_RINGS */ #ifdef EWP_EDL @@ -3066,4 +3354,57 @@ void dhd_h2d_log_time_sync(dhd_pub_t *dhdp); #endif /* DHD_H2D_LOG_TIME_SYNC */ extern void dhd_cleanup_if(struct net_device *net); +#ifdef DNGL_AXI_ERROR_LOGGING +extern void dhd_axi_error(dhd_pub_t *dhd); +#ifdef DHD_USE_WQ_FOR_DNGL_AXI_ERROR +extern void dhd_axi_error_dispatch(dhd_pub_t *dhdp); +#endif /* DHD_USE_WQ_FOR_DNGL_AXI_ERROR */ +#endif /* DNGL_AXI_ERROR_LOGGING */ + +#ifdef DHD_HP2P +extern unsigned long dhd_os_hp2plock(dhd_pub_t *pub); +extern void dhd_os_hp2punlock(dhd_pub_t *pub, unsigned long flags); +#endif /* DHD_HP2P */ +extern struct dhd_if * dhd_get_ifp(dhd_pub_t *dhdp, 
uint32 ifidx); + +#ifdef DHD_STATUS_LOGGING +#include +#else +#define ST(x) 0 +#define STDIR(x) 0 +#define DHD_STATLOG_CTRL(dhdp, stat, ifidx, reason) \ + do { /* noop */ } while (0) +#define DHD_STATLOG_DATA(dhdp, stat, ifidx, dir, cond) \ + do { BCM_REFERENCE(cond); } while (0) +#define DHD_STATLOG_DATA_RSN(dhdp, stat, ifidx, dir, reason) \ + do { /* noop */ } while (0) +#endif /* DHD_STATUS_LOGGING */ + +#ifdef CONFIG_SILENT_ROAM +extern int dhd_sroam_set_mon(dhd_pub_t *dhd, bool set); +typedef wlc_sroam_info_v1_t wlc_sroam_info_t; +#endif /* CONFIG_SILENT_ROAM */ + +#ifdef SUPPORT_SET_TID +enum dhd_set_tid_mode { + /* Disalbe changing TID */ + SET_TID_OFF = 0, + /* Change TID for all UDP frames */ + SET_TID_ALL_UDP, + /* Change TID for UDP frames based on UID */ + SET_TID_BASED_ON_UID +}; +extern void dhd_set_tid_based_on_uid(dhd_pub_t *dhdp, void *pkt); +#endif /* SUPPORT_SET_TID */ + +#ifdef DHD_DUMP_FILE_WRITE_FROM_KERNEL +#define FILE_NAME_HAL_TAG "" +#else +#define FILE_NAME_HAL_TAG "_hal" /* The tag name concatenated by HAL */ +#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */ + +#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB) +extern int dhd_control_he_enab(dhd_pub_t * dhd, uint8 he_enab); +extern uint8 control_he_enab; +#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */ #endif /* _dhd_h_ */ diff --git a/bcmdhd.100.10.315.x/dhd_bus.h b/bcmdhd.100.10.315.x/dhd_bus.h index c5f60bd..72e9d91 100644 --- a/bcmdhd.100.10.315.x/dhd_bus.h +++ b/bcmdhd.100.10.315.x/dhd_bus.h @@ -4,7 +4,7 @@ * Provides type definitions and function prototypes used to link the * DHD OS, bus, and protocol modules. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -27,7 +27,7 @@ * * <> * - * $Id: dhd_bus.h 770457 2018-07-03 08:45:49Z $ + * $Id: dhd_bus.h 814378 2019-04-11 02:21:31Z $ */ #ifndef _dhd_bus_h_ @@ -41,6 +41,9 @@ extern int dbus_recv_ctl(struct dhd_bus *pub, uint8 *buf, int len); * Exported from dhd bus module (dhd_usb, dhd_sdio) */ +/* global variable for the bus */ +extern struct dhd_bus *g_dhd_bus; + /* Indicate (dis)interest in finding dongles. */ extern int dhd_bus_register(void); extern void dhd_bus_unregister(void); @@ -71,6 +74,10 @@ extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx); extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp); #endif // endif +#ifdef BCMPCIE +extern void dhdpcie_cto_recovery_handler(dhd_pub_t *dhd); +#endif /* BCMPCIE */ + /* Send/receive a control message to/from the dongle. * Expects caller to enforce a single outstanding transaction. 
*/ @@ -138,8 +145,10 @@ extern int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_nu #if defined(DHD_FW_COREDUMP) && (defined(BCMPCIE) || defined(BCMSDIO)) extern int dhd_bus_mem_dump(dhd_pub_t *dhd); +extern int dhd_bus_get_mem_dump(dhd_pub_t *dhdp); #else #define dhd_bus_mem_dump(x) +#define dhd_bus_get_mem_dump(x) #endif /* DHD_FW_COREDUMP && (BCMPCIE || BCMSDIO) */ #ifdef BCMPCIE @@ -210,7 +219,6 @@ extern int dhd_bus_flow_ring_flush_request(struct dhd_bus *bus, void *flow_ring_ extern void dhd_bus_flow_ring_flush_response(struct dhd_bus *bus, uint16 flowid, uint32 status); extern uint32 dhd_bus_max_h2d_queues(struct dhd_bus *bus); extern int dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs); -extern void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val); #ifdef IDLE_TX_FLOW_MGMT extern void dhd_bus_flow_ring_resume_response(struct dhd_bus *bus, uint16 flowid, int32 status); @@ -226,14 +234,16 @@ extern bool dhdpcie_bus_dongle_attach(struct dhd_bus *bus); extern int dhd_bus_release_dongle(struct dhd_bus *bus); extern int dhd_bus_request_irq(struct dhd_bus *bus); extern int dhdpcie_get_pcieirq(struct dhd_bus *bus, unsigned int *irq); +extern void dhd_bus_aer_config(struct dhd_bus *bus); extern struct device * dhd_bus_to_dev(struct dhd_bus *bus); -extern void dhdpcie_cto_init(struct dhd_bus *bus, bool enable); +extern int dhdpcie_cto_init(struct dhd_bus *bus, bool enable); +extern int dhdpcie_cto_cfg_init(struct dhd_bus *bus, bool enable); + extern void dhdpcie_ssreset_dis_enum_rst(struct dhd_bus *bus); #ifdef DHD_FW_COREDUMP -extern struct dhd_bus *g_dhd_bus; extern int dhd_dongle_mem_dump(void); #endif /* DHD_FW_COREDUMP */ @@ -262,9 +272,23 @@ extern int dhd_get_idletime(dhd_pub_t *dhd); #ifdef BCMPCIE extern void dhd_bus_dump_console_buffer(struct dhd_bus *bus); extern void dhd_bus_intr_count_dump(dhd_pub_t *dhdp); +extern void dhd_bus_set_dpc_sched_time(dhd_pub_t *dhdp); +extern bool 
dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp); +extern int dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type); +extern bool dhd_bus_check_driver_up(void); +extern int dhd_bus_get_cto(dhd_pub_t *dhdp); +extern void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val); +extern int dhd_bus_get_linkdown(dhd_pub_t *dhdp); #else #define dhd_bus_dump_console_buffer(x) static INLINE void dhd_bus_intr_count_dump(dhd_pub_t *dhdp) { UNUSED_PARAMETER(dhdp); } +static INLINE void dhd_bus_set_dpc_sched_time(dhd_pub_t *dhdp) { } +static INLINE bool dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp) { return 0; } +static INLINE int dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type) { return 0; } +static INLINE bool dhd_bus_check_driver_up(void) { return FALSE; } +extern INLINE void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val) { } +extern INLINE int dhd_bus_get_linkdown(dhd_pub_t *dhdp) { return 0; } +static INLINE int dhd_bus_get_cto(dhd_pub_t *dhdp) { return 0; } #endif /* BCMPCIE */ #if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS) @@ -297,6 +321,8 @@ extern bool dhd_bus_get_flr_force_fail(struct dhd_bus *bus); extern bool dhd_bus_aspm_enable_rc_ep(struct dhd_bus *bus, bool enable); extern void dhd_bus_l1ss_enable_rc_ep(struct dhd_bus *bus, bool enable); +bool dhd_bus_is_multibp_capable(struct dhd_bus *bus); + #ifdef BCMPCIE extern void dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp); extern void dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd); @@ -305,12 +331,10 @@ extern void dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd); extern bool dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus); #ifdef DHD_SSSR_DUMP -extern int dhd_bus_sssr_dump(dhd_pub_t *dhd); - extern int dhd_bus_fis_trigger(dhd_pub_t *dhd); extern int dhd_bus_fis_dump(dhd_pub_t *dhd); - #endif /* DHD_SSSR_DUMP */ + #ifdef PCIE_FULL_DONGLE extern int dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, int32 int_val); #endif /* PCIE_FULL_DONGLE */ @@ -318,4 +342,7 @@ extern int dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, 
int32 int_val); #ifdef DHD_USE_BP_RESET extern int dhd_bus_perform_bp_reset(struct dhd_bus *bus); #endif /* DHD_USE_BP_RESET */ + +extern void dhd_bwm_bt_quiesce(struct dhd_bus *bus); +extern void dhd_bwm_bt_resume(struct dhd_bus *bus); #endif /* _dhd_bus_h_ */ diff --git a/bcmdhd.100.10.315.x/dhd_buzzz.h b/bcmdhd.100.10.315.x/dhd_buzzz.h index 67fec8e..8c03605 100644 --- a/bcmdhd.100.10.315.x/dhd_buzzz.h +++ b/bcmdhd.100.10.315.x/dhd_buzzz.h @@ -3,7 +3,7 @@ /* * Broadcom logging system - Empty implementaiton - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/dhd_cdc.c b/bcmdhd.100.10.315.x/dhd_cdc.c index 60cc35e..395bfd8 100644 --- a/bcmdhd.100.10.315.x/dhd_cdc.c +++ b/bcmdhd.100.10.315.x/dhd_cdc.c @@ -1,7 +1,7 @@ /* * DHD Protocol Module for CDC and BDC. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -121,7 +121,7 @@ dhdcdc_msg(dhd_pub_t *dhd) #endif /* BCMDBUS */ #ifdef BCMDBUS - timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed, false); + timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed); if ((!timeout) || (!prot->ctl_completed)) { DHD_ERROR(("Txctl timeout %d ctl_completed %d\n", timeout, prot->ctl_completed)); @@ -143,7 +143,7 @@ dhdcdc_msg(dhd_pub_t *dhd) /* interrupt polling is sucessfully submitted. 
Wait for dongle to send * interrupt */ - timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed, false); + timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed); if (!timeout) { DHD_ERROR(("intr poll wait timed out\n")); } @@ -177,7 +177,7 @@ dhdcdc_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len) DHD_OS_IOCTL_RESP_UNLOCK(dhd); goto done; } - timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed, false); + timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed); if ((!timeout) || (!prot->ctl_completed)) { DHD_ERROR(("Rxctl timeout %d ctl_completed %d\n", timeout, prot->ctl_completed)); diff --git a/bcmdhd.100.10.315.x/dhd_cfg80211.c b/bcmdhd.100.10.315.x/dhd_cfg80211.c index 306a4e3..9d664f1 100644 --- a/bcmdhd.100.10.315.x/dhd_cfg80211.c +++ b/bcmdhd.100.10.315.x/dhd_cfg80211.c @@ -1,7 +1,7 @@ /* * Linux cfg80211 driver - Dongle Host Driver (DHD) related * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: dhd_cfg80211.c 771186 2018-07-09 09:14:04Z $ + * $Id: dhd_cfg80211.c 807961 2019-03-05 05:47:47Z $ */ #include @@ -155,6 +155,8 @@ int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg, void wl_cfg80211_cleanup_if(struct net_device *net) { + struct bcm_cfg80211 *cfg = wl_get_cfg(net); + BCM_REFERENCE(cfg); dhd_cleanup_if(net); } diff --git a/bcmdhd.100.10.315.x/dhd_cfg80211.h b/bcmdhd.100.10.315.x/dhd_cfg80211.h index f533b59..dd0766a 100644 --- a/bcmdhd.100.10.315.x/dhd_cfg80211.h +++ b/bcmdhd.100.10.315.x/dhd_cfg80211.h @@ -1,7 +1,7 @@ /* * Linux cfg80211 driver - Dongle Host Driver (DHD) related * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/dhd_common.c b/bcmdhd.100.10.315.x/dhd_common.c index f106d78..c5f62fb 100644 --- a/bcmdhd.100.10.315.x/dhd_common.c +++ b/bcmdhd.100.10.315.x/dhd_common.c @@ -1,7 +1,7 @@ /* * Broadcom Dongle Host Driver (DHD), common DHD core. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,13 +24,14 @@ * * <> * - * $Id: dhd_common.c 771671 2018-07-11 06:58:25Z $ + * $Id: dhd_common.c 826445 2019-06-20 04:47:47Z $ */ #include #include #include #include +#include #include #include @@ -53,7 +54,6 @@ #include #include -#include #include #include #include <802.1d.h> @@ -61,13 +61,15 @@ #include #include #include +#include +#include #ifdef WL_CFG80211 #include #endif // endif -#ifdef PNO_SUPPORT +#if defined(PNO_SUPPORT) #include -#endif // endif +#endif /* OEM_ANDROID && PNO_SUPPORT */ #ifdef RTT_SUPPORT #include #endif // endif @@ -88,6 +90,10 @@ #include #endif // endif +#if defined(DHD_POST_EAPOL_M1_AFTER_ROAM_EVT) +#include +#endif // endif + #ifdef DHD_L2_FILTER #include #endif /* DHD_L2_FILTER */ @@ -104,7 +110,12 @@ #include #endif /* DHD_LOG_DUMP */ -int dhd_msg_level = DHD_ERROR_VAL | DHD_FWLOG_VAL; +#ifdef DHD_LOG_PRINT_RATE_LIMIT +int log_print_threshold = 0; +#endif /* DHD_LOG_PRINT_RATE_LIMIT */ +int dhd_msg_level = DHD_ERROR_VAL;// | DHD_FWLOG_VAL | DHD_EVENT_VAL + /* For CUSTOMER_HW4 do not enable DHD_IOVAR_MEM_VAL by default */ +// | DHD_PKT_MON_VAL; #if defined(WL_WIRELESS_EXT) #include @@ -288,6 +299,24 @@ enum { IOV_TPUT_TEST, IOV_FIS_TRIGGER, IOV_DEBUG_BUF_DEST_STAT, +#ifdef DHD_DEBUG + IOV_INDUCE_ERROR, +#endif /* DHD_DEBUG */ +#ifdef WL_IFACE_MGMT_CONF +#ifdef WL_CFG80211 +#ifdef WL_NANP2P + 
IOV_CONC_DISC, +#endif /* WL_NANP2P */ +#ifdef WL_IFACE_MGMT + IOV_IFACE_POLICY, +#endif /* WL_IFACE_MGMT */ +#endif /* WL_CFG80211 */ +#endif /* WL_IFACE_MGMT_CONF */ +#ifdef RTT_GEOFENCE_CONT +#if defined(RTT_SUPPORT) && defined(WL_NAN) + IOV_RTT_GEOFENCE_TYPE_OVRD, +#endif /* RTT_SUPPORT && WL_NAN */ +#endif /* RTT_GEOFENCE_CONT */ IOV_LAST }; @@ -374,6 +403,24 @@ const bcm_iovar_t dhd_iovars[] = { {"log_dump", IOV_LOG_DUMP, 0, 0, IOVT_UINT8, 0}, #endif /* DHD_LOG_DUMP */ {"debug_buf_dest_stat", IOV_DEBUG_BUF_DEST_STAT, 0, 0, IOVT_UINT32, 0 }, +#ifdef DHD_DEBUG + {"induce_error", IOV_INDUCE_ERROR, (0), 0, IOVT_UINT16, 0 }, +#endif /* DHD_DEBUG */ +#ifdef WL_IFACE_MGMT_CONF +#ifdef WL_CFG80211 +#ifdef WL_NANP2P + {"conc_disc", IOV_CONC_DISC, (0), 0, IOVT_UINT16, 0 }, +#endif /* WL_NANP2P */ +#ifdef WL_IFACE_MGMT + {"if_policy", IOV_IFACE_POLICY, (0), 0, IOVT_BUFFER, sizeof(iface_mgmt_data_t)}, +#endif /* WL_IFACE_MGMT */ +#endif /* WL_CFG80211 */ +#endif /* WL_IFACE_MGMT_CONF */ +#ifdef RTT_GEOFENCE_CONT +#if defined(RTT_SUPPORT) && defined(WL_NAN) + {"rtt_geofence_type_ovrd", IOV_RTT_GEOFENCE_TYPE_OVRD, (0), 0, IOVT_BOOL, 0}, +#endif /* RTT_SUPPORT && WL_NAN */ +#endif /* RTT_GEOFENCE_CONT */ {NULL, 0, 0, 0, 0, 0 } }; @@ -385,43 +432,104 @@ dhd_query_bus_erros(dhd_pub_t *dhdp) bool ret = FALSE; if (dhdp->dongle_reset) { - DHD_ERROR(("%s: Dongle Reset occurred, cannot proceed\n", + DHD_ERROR_RLMT(("%s: Dongle Reset occurred, cannot proceed\n", __FUNCTION__)); ret = TRUE; } if (dhdp->dongle_trap_occured) { - DHD_ERROR(("%s: FW TRAP has occurred, cannot proceed\n", + DHD_ERROR_RLMT(("%s: FW TRAP has occurred, cannot proceed\n", __FUNCTION__)); ret = TRUE; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) dhdp->hang_reason = HANG_REASON_DONGLE_TRAP; dhd_os_send_hang_message(dhdp); -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */ } if (dhdp->iovar_timeout_occured) { - DHD_ERROR(("%s: Resumed on timeout for previous IOVAR, cannot proceed\n", + 
DHD_ERROR_RLMT(("%s: Resumed on timeout for previous IOVAR, cannot proceed\n", __FUNCTION__)); ret = TRUE; } #ifdef PCIE_FULL_DONGLE if (dhdp->d3ack_timeout_occured) { - DHD_ERROR(("%s: Resumed on timeout for previous D3ACK, cannot proceed\n", + DHD_ERROR_RLMT(("%s: Resumed on timeout for previous D3ACK, cannot proceed\n", __FUNCTION__)); ret = TRUE; } if (dhdp->livelock_occured) { - DHD_ERROR(("%s: LIVELOCK occurred for previous msg, cannot proceed\n", + DHD_ERROR_RLMT(("%s: LIVELOCK occurred for previous msg, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; + } + + if (dhdp->pktid_audit_failed) { + DHD_ERROR_RLMT(("%s: pktid_audit_failed, cannot proceed\n", __FUNCTION__)); ret = TRUE; } #endif /* PCIE_FULL_DONGLE */ + if (dhdp->iface_op_failed) { + DHD_ERROR_RLMT(("%s: iface_op_failed, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; + } + + if (dhdp->scan_timeout_occurred) { + DHD_ERROR_RLMT(("%s: scan_timeout_occurred, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; + } + + if (dhdp->scan_busy_occurred) { + DHD_ERROR_RLMT(("%s: scan_busy_occurred, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; + } + +#ifdef DNGL_AXI_ERROR_LOGGING + if (dhdp->axi_error) { + DHD_ERROR_RLMT(("%s: AXI error occurred, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; + } +#endif /* DNGL_AXI_ERROR_LOGGING */ + + if (dhd_bus_get_linkdown(dhdp)) { + DHD_ERROR_RLMT(("%s : PCIE Link down occurred, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; + } + + if (dhd_bus_get_cto(dhdp)) { + DHD_ERROR_RLMT(("%s : CTO Recovery reported, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; + } + return ret; } +void +dhd_clear_bus_errors(dhd_pub_t *dhdp) +{ + if (!dhdp) + return; + + dhdp->dongle_reset = FALSE; + dhdp->dongle_trap_occured = FALSE; + dhdp->iovar_timeout_occured = FALSE; +#ifdef PCIE_FULL_DONGLE + dhdp->d3ack_timeout_occured = FALSE; + dhdp->livelock_occured = FALSE; + dhdp->pktid_audit_failed = FALSE; +#endif // endif + dhdp->iface_op_failed = FALSE; + 
dhdp->scan_timeout_occurred = FALSE; + dhdp->scan_busy_occurred = FALSE; +} + #ifdef DHD_SSSR_DUMP /* This can be overwritten by module parameter defined in dhd_linux.c */ @@ -595,6 +703,52 @@ dhd_sssr_dump_deinit(dhd_pub_t *dhd) return; } +void +dhd_sssr_print_filepath(dhd_pub_t *dhd, char *path) +{ + bool print_info = FALSE; + int dump_mode; + + if (!dhd || !path) { + DHD_ERROR(("%s: dhd or memdump_path is NULL\n", + __FUNCTION__)); + return; + } + + if (!dhd->sssr_dump_collected) { + /* SSSR dump is not collected */ + return; + } + + dump_mode = dhd->sssr_dump_mode; + + if (bcmstrstr(path, "core_0_before")) { + if (dhd->sssr_d11_outofreset[0] && + dump_mode == SSSR_DUMP_MODE_SSSR) { + print_info = TRUE; + } + } else if (bcmstrstr(path, "core_0_after")) { + if (dhd->sssr_d11_outofreset[0]) { + print_info = TRUE; + } + } else if (bcmstrstr(path, "core_1_before")) { + if (dhd->sssr_d11_outofreset[1] && + dump_mode == SSSR_DUMP_MODE_SSSR) { + print_info = TRUE; + } + } else if (bcmstrstr(path, "core_1_after")) { + if (dhd->sssr_d11_outofreset[1]) { + print_info = TRUE; + } + } else { + print_info = TRUE; + } + + if (print_info) { + DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__, + path, FILE_NAME_HAL_TAG)); + } +} #endif /* DHD_SSSR_DUMP */ #ifdef DHD_FW_COREDUMP @@ -705,7 +859,7 @@ dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen) dhdp->dma_stats.ioctl_rx_sz + dhdp->dma_stats.event_rx_sz + dhdp->dma_stats.tsbuf_rx_sz)); #endif /* DMAMAP_STATS */ - + bcm_bprintf(strbuf, "dhd_induce_error : %u\n", dhdp->dhd_induce_error); /* Add any prot info */ dhd_prot_dump(dhdp, strbuf); bcm_bprintf(strbuf, "\n"); @@ -855,6 +1009,8 @@ dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len) #ifdef DUMP_IOCTL_IOV_LIST dhd_iov_li_t *iov_li; #endif /* DUMP_IOCTL_IOV_LIST */ + int hostsleep_set = 0; + int hostsleep_val = 0; #ifdef DHD_PCIE_NATIVE_RUNTIMEPM DHD_OS_WAKE_LOCK(dhd_pub); @@ -907,6 +1063,7 @@ dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t 
*ioc, void *buf, int len) DHD_ERROR(("IOCTL Disconnect WiFi: %d\n", ioc->cmd)); } #endif /* HW_DISCONNECT_TRACE */ + /* logging of iovars that are send to the dongle, ./dhd msglevel +iovar */ if (ioc->set == TRUE) { char *pars = (char *)buf; // points at user buffer @@ -965,7 +1122,11 @@ dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len) } #endif /* DUMP_IOCTL_IOV_LIST */ + if (dhd_conf_check_hostsleep(dhd_pub, ioc->cmd, ioc->buf, len, + &hostsleep_set, &hostsleep_val, &ret)) + goto exit; ret = dhd_prot_ioctl(dhd_pub, ifidx, ioc, buf, len); + dhd_conf_get_hostsleep(dhd_pub, hostsleep_set, hostsleep_val, ret); #ifdef DUMP_IOCTL_IOV_LIST if (ret == -ETIMEDOUT) { @@ -1035,9 +1196,9 @@ dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len) DHD_ERROR(("%s: 'resumed on timeout' error is " "occurred before the interface does not" " bring up\n", __FUNCTION__)); - dhd_pub->busstate = DHD_BUS_DOWN; } +exit: DHD_LINUX_GENERAL_LOCK(dhd_pub, flags); DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub); dhd_os_busbusy_wake(dhd_pub); @@ -1946,16 +2107,21 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch #ifdef SHOW_LOGTRACE case IOV_GVAL(IOV_DUMP_TRACE_LOG): { - trace_buf_info_t *trace_buf_info; + trace_buf_info_t *trace_buf_info = (trace_buf_info_t *)arg; + dhd_dbg_ring_t *dbg_verbose_ring = NULL; + + dbg_verbose_ring = dhd_dbg_get_ring_from_ring_id(dhd_pub, FW_VERBOSE_RING_ID); + if (dbg_verbose_ring == NULL) { + DHD_ERROR(("dbg_verbose_ring is NULL\n")); + bcmerror = BCME_UNSUPPORTED; + break; + } - trace_buf_info = (trace_buf_info_t *)MALLOC(dhd_pub->osh, - sizeof(trace_buf_info_t)); if (trace_buf_info != NULL) { - dhd_get_read_buf_ptr(dhd_pub, trace_buf_info); - memcpy((void*)arg, (void*)trace_buf_info, sizeof(trace_buf_info_t)); - MFREE(dhd_pub->osh, trace_buf_info, sizeof(trace_buf_info_t)); + bzero(trace_buf_info, sizeof(trace_buf_info_t)); + dhd_dbg_read_ring_into_trace_buf(dbg_verbose_ring, 
trace_buf_info); } else { - DHD_ERROR(("Memory allocation Failed\n")); + DHD_ERROR(("%s: arg is NULL\n", __FUNCTION__)); bcmerror = BCME_NOMEM; } break; @@ -2051,6 +2217,65 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch } break; } + +#ifdef DHD_DEBUG + case IOV_SVAL(IOV_INDUCE_ERROR): { + if (int_val >= DHD_INDUCE_ERROR_MAX) { + DHD_ERROR(("%s: Invalid command : %u\n", __FUNCTION__, (uint16)int_val)); + } else { + dhd_pub->dhd_induce_error = (uint16)int_val; + } + break; + } +#endif /* DHD_DEBUG */ + +#ifdef WL_IFACE_MGMT_CONF +#ifdef WL_CFG80211 +#ifdef WL_NANP2P + case IOV_GVAL(IOV_CONC_DISC): { + int_val = wl_cfg80211_get_iface_conc_disc( + dhd_linux_get_primary_netdev(dhd_pub)); + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + case IOV_SVAL(IOV_CONC_DISC): { + bcmerror = wl_cfg80211_set_iface_conc_disc( + dhd_linux_get_primary_netdev(dhd_pub), (uint8)int_val); + break; + } +#endif /* WL_NANP2P */ +#ifdef WL_IFACE_MGMT + case IOV_GVAL(IOV_IFACE_POLICY): { + int_val = wl_cfg80211_get_iface_policy( + dhd_linux_get_primary_netdev(dhd_pub)); + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + case IOV_SVAL(IOV_IFACE_POLICY): { + bcmerror = wl_cfg80211_set_iface_policy( + dhd_linux_get_primary_netdev(dhd_pub), + arg, len); + break; + } +#endif /* WL_IFACE_MGMT */ +#endif /* WL_CFG80211 */ +#endif /* WL_IFACE_MGMT_CONF */ +#ifdef RTT_GEOFENCE_CONT +#if defined(RTT_SUPPORT) && defined(WL_NAN) + case IOV_GVAL(IOV_RTT_GEOFENCE_TYPE_OVRD): { + bool enable = 0; + dhd_rtt_get_geofence_cont_ind(dhd_pub, &enable); + int_val = enable ? 
1 : 0; + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_RTT_GEOFENCE_TYPE_OVRD): { + bool enable = *(bool *)arg; + dhd_rtt_set_geofence_cont_ind(dhd_pub, enable); + break; + } +#endif /* RTT_SUPPORT && WL_NAN */ +#endif /* RTT_GEOFENCE_CONT */ default: bcmerror = BCME_UNSUPPORTED; break; @@ -2313,7 +2538,7 @@ dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void *buf, uint buflen) * and should not return error for IOCTLs fired before FW * Download is done */ - if (dhd_fw_download_status(dhd_pub)) { + if (dhd_fw_download_status(dhd_pub) == FW_DOWNLOAD_DONE) { DHD_ERROR(("%s: returning as busstate=%d\n", __FUNCTION__, dhd_pub->busstate)); DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); @@ -2353,7 +2578,13 @@ dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void *buf, uint buflen) * to avoid ASSERT, clear the IOCTL busy state. "devreset" ioctl is * not used in Production platforms but only used in FC19 setups. */ - if (!bcmstricmp((char *)buf, "devreset")) { + if (!bcmstricmp((char *)buf, "devreset") || +#ifdef BCMPCIE + (dhd_bus_is_multibp_capable(dhd_pub->bus) && + !bcmstricmp((char *)buf, "dwnldstate")) || +#endif /* BCMPCIE */ + FALSE) + { DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub); } DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); @@ -2480,8 +2711,8 @@ wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, } else if (status == WLC_E_STATUS_TIMEOUT) { DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf)); } else if (status == WLC_E_STATUS_FAIL) { - DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, reason %d\n", - event_name, eabuf, (int)reason)); + DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, status %d reason %d\n", + event_name, eabuf, (int)status, (int)reason)); } else { DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n", event_name, eabuf, (int)status)); @@ -2500,6 +2731,8 @@ wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, auth_str = "Open System"; else if (auth_type == DOT11_SHARED_KEY) 
auth_str = "Shared Key"; + else if (auth_type == DOT11_SAE) + auth_str = "SAE"; else { snprintf(err_msg, sizeof(err_msg), "AUTH unknown: %d", (int)auth_type); auth_str = err_msg; @@ -2514,8 +2747,14 @@ wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n", event_name, eabuf, auth_str)); } else if (status == WLC_E_STATUS_FAIL) { - DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n", - event_name, eabuf, auth_str, (int)reason)); + DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, status %d reason %d\n", + event_name, eabuf, auth_str, (int)status, (int)reason)); + } else if (status == WLC_E_STATUS_NO_ACK) { + DHD_EVENT(("MACEVENT: %s, MAC %s, %s, NOACK\n", + event_name, eabuf, auth_str)); + } else { + DHD_EVENT(("MACEVENT: %s, MAC %s, %s, status %d reason %d\n", + event_name, eabuf, auth_str, (int)status, (int)reason)); } BCM_REFERENCE(auth_str); @@ -2528,7 +2767,7 @@ wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); } else { if (status == WLC_E_STATUS_FAIL) { - DHD_EVENT(("MACEVENT: %s, failed\n", event_name)); + DHD_EVENT(("MACEVENT: %s, failed status %d\n", event_name, status)); } else if (status == WLC_E_STATUS_NO_NETWORKS) { DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name)); } else { @@ -2637,7 +2876,36 @@ wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, case WLC_E_CCA_CHAN_QUAL: if (datalen) { cca_chan_qual_event_t *cca_event = (cca_chan_qual_event_t *)event_data; - if (cca_event->id == WL_CHAN_QUAL_FULL_CCA) { + if (cca_event->id == WL_CHAN_QUAL_FULLPM_CCA) { + cca_only_chan_qual_event_t *cca_only_event = + (cca_only_chan_qual_event_t *)cca_event; + BCM_REFERENCE(cca_only_event); + DHD_EVENT(( + "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d," + " channel 0x%02x\n", + event_name, event_type, eabuf, (int)status, + (int)reason, (int)auth_type, 
cca_event->chanspec)); + DHD_EVENT(( + "\tTOTAL (dur %dms me %dms notme %dms interf %dms" + " ts 0x%08x)\n", + cca_only_event->cca_busy_ext.duration, + cca_only_event->cca_busy_ext.congest_ibss, + cca_only_event->cca_busy_ext.congest_obss, + cca_only_event->cca_busy_ext.interference, + cca_only_event->cca_busy_ext.timestamp)); + DHD_EVENT(( + "\t !PM (dur %dms me %dms notme %dms interf %dms)\n", + cca_only_event->cca_busy_nopm.duration, + cca_only_event->cca_busy_nopm.congest_ibss, + cca_only_event->cca_busy_nopm.congest_obss, + cca_only_event->cca_busy_nopm.interference)); + DHD_EVENT(( + "\t PM (dur %dms me %dms notme %dms interf %dms)\n", + cca_only_event->cca_busy_pm.duration, + cca_only_event->cca_busy_pm.congest_ibss, + cca_only_event->cca_busy_pm.congest_obss, + cca_only_event->cca_busy_pm.interference)); + } else if (cca_event->id == WL_CHAN_QUAL_FULL_CCA) { DHD_EVENT(( "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d," " channel 0x%02x (dur %dms ibss %dms obss %dms interf %dms" @@ -2752,6 +3020,13 @@ wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, DHD_TRACE(("MACEVENT: %s, type:%d\n", event_name, reason)); break; } + case WLC_E_PROXD: + { + wl_proxd_event_t *proxd = (wl_proxd_event_t*)event_data; + DHD_LOG_MEM(("MACEVENT: %s, event:%d, status:%d\n", + event_name, proxd->type, reason)); + break; + } case WLC_E_RPSNOA: { rpsnoa_stats_t *stat = event_data; @@ -2767,6 +3042,34 @@ wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, DHD_EVENT(("MACEVENT: %s, reason:%d\n", event_name, reason)); break; } + case WLC_E_WA_LQM: + { + wl_event_wa_lqm_t *event_wa_lqm = (wl_event_wa_lqm_t *)event_data; + bcm_xtlv_t *subevent; + wl_event_wa_lqm_basic_t *elqm_basic; + + if ((event_wa_lqm->ver != WL_EVENT_WA_LQM_VER) || + (event_wa_lqm->len < sizeof(wl_event_wa_lqm_t) + BCM_XTLV_HDR_SIZE)) { + DHD_ERROR(("MACEVENT: %s invalid (ver=%d len=%d)\n", + event_name, event_wa_lqm->ver, event_wa_lqm->len)); + break; 
+ } + + subevent = (bcm_xtlv_t *)event_wa_lqm->subevent; + if ((subevent->id != WL_EVENT_WA_LQM_BASIC) || + (subevent->len < sizeof(wl_event_wa_lqm_basic_t))) { + DHD_ERROR(("MACEVENT: %s invalid sub-type (id=%d len=%d)\n", + event_name, subevent->id, subevent->len)); + break; + } + + elqm_basic = (wl_event_wa_lqm_basic_t *)subevent->data; + BCM_REFERENCE(elqm_basic); + DHD_EVENT(("MACEVENT: %s (RSSI=%d SNR=%d TxRate=%d RxRate=%d)\n", + event_name, elqm_basic->rssi, elqm_basic->snr, + elqm_basic->tx_rate, elqm_basic->rx_rate)); + break; + } default: DHD_INFO(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n", event_name, event_type, eabuf, (int)status, (int)reason, @@ -2789,7 +3092,7 @@ wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, #ifdef DNGL_EVENT_SUPPORT /* Check whether packet is a BRCM dngl event pkt. If it is, process event data. */ - int +int dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event, size_t pktlen) { bcm_dngl_event_t *pvt_data = (bcm_dngl_event_t *)pktdata; @@ -2798,6 +3101,95 @@ dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event return BCME_OK; } +#ifdef PARSE_DONGLE_HOST_EVENT +typedef struct hck_id_to_str_s { + uint32 id; + char *name; +} hck_id_to_str_t; + +hck_id_to_str_t hck_sw_id_to_str[] = { + {WL_HC_DD_PCIE, "WL_HC_DD_PCIE"}, + {WL_HC_DD_RX_DMA_STALL, "WL_HC_DD_RX_DMA_STALL"}, + {WL_HC_DD_RX_STALL, "WL_HC_DD_RX_STALL"}, + {WL_HC_DD_TX_STALL, "WL_HC_DD_TX_STALL"}, + {WL_HC_DD_SCAN_STALL, "WL_HC_DD_SCAN_STALL"}, + {WL_HC_DD_PHY, "WL_HC_DD_PHY"}, + {WL_HC_DD_REINIT, "WL_HC_DD_REINIT"}, + {WL_HC_DD_TXQ_STALL, "WL_HC_DD_TXQ_STALL"}, + {0, NULL} +}; + +hck_id_to_str_t hck_pcie_module_to_str[] = { + {HEALTH_CHECK_PCIEDEV_INDUCED_IND, "PCIEDEV_INDUCED_IND"}, + {HEALTH_CHECK_PCIEDEV_H2D_DMA_IND, "PCIEDEV_H2D_DMA_IND"}, + {HEALTH_CHECK_PCIEDEV_D2H_DMA_IND, "PCIEDEV_D2H_DMA_IND"}, + {HEALTH_CHECK_PCIEDEV_IOCTL_STALL_IND, "PCIEDEV_IOCTL_STALL_IND"}, 
+ {HEALTH_CHECK_PCIEDEV_D3ACK_STALL_IND, "PCIEDEV_D3ACK_STALL_IND"}, + {HEALTH_CHECK_PCIEDEV_NODS_IND, "PCIEDEV_NODS_IND"}, + {HEALTH_CHECK_PCIEDEV_LINKSPEED_FALLBACK_IND, "PCIEDEV_LINKSPEED_FALLBACK_IND"}, + {HEALTH_CHECK_PCIEDEV_DSACK_STALL_IND, "PCIEDEV_DSACK_STALL_IND"}, + {0, NULL} +}; + +hck_id_to_str_t hck_rx_stall_v2_to_str[] = { + {BCM_RX_HC_RESERVED, "BCM_RX_HC_RESERVED"}, + {BCM_RX_HC_UNSPECIFIED, "BCM_RX_HC_UNSPECIFIED"}, + {BCM_RX_HC_UNICAST_DECRYPT_FAIL, "BCM_RX_HC_UNICAST_DECRYPT_FAIL"}, + {BCM_RX_HC_BCMC_DECRYPT_FAIL, "BCM_RX_HC_BCMC_DECRYPT_FAIL"}, + {BCM_RX_HC_UNICAST_REPLAY, "BCM_RX_HC_UNICAST_REPLAY"}, + {BCM_RX_HC_BCMC_REPLAY, "BCM_RX_HC_BCMC_REPLAY"}, + {BCM_RX_HC_AMPDU_DUP, "BCM_RX_HC_AMPDU_DUP"}, + {0, NULL} +}; + +static void +dhd_print_dongle_hck_id(uint32 id, hck_id_to_str_t *hck) +{ + while (hck->name != NULL) { + if (hck->id == id) { + DHD_ERROR(("DONGLE_HCK_EVENT: %s\n", hck->name)); + return; + } + hck++; + } +} + +void +dhd_parse_hck_common_sw_event(bcm_xtlv_t *wl_hc) +{ + + wl_rx_hc_info_v2_t *hck_rx_stall_v2; + uint16 id; + + id = ltoh16(wl_hc->id); + + if (id == WL_HC_DD_RX_STALL_V2) { + /* map the hck_rx_stall_v2 structure to the value of the XTLV */ + hck_rx_stall_v2 = + (wl_rx_hc_info_v2_t*)wl_hc; + DHD_ERROR(("type:%d len:%d if_idx:%d ac:%d pkts:%d" + " drop:%d alert_th:%d reason:%d peer_ea:"MACF"\n", + hck_rx_stall_v2->type, + hck_rx_stall_v2->length, + hck_rx_stall_v2->if_idx, + hck_rx_stall_v2->ac, + hck_rx_stall_v2->rx_hc_pkts, + hck_rx_stall_v2->rx_hc_dropped_all, + hck_rx_stall_v2->rx_hc_alert_th, + hck_rx_stall_v2->reason, + ETHER_TO_MACF(hck_rx_stall_v2->peer_ea))); + dhd_print_dongle_hck_id( + ltoh32(hck_rx_stall_v2->reason), + hck_rx_stall_v2_to_str); + } else { + dhd_print_dongle_hck_id(ltoh16(wl_hc->id), + hck_sw_id_to_str); + } + +} + +#endif /* PARSE_DONGLE_HOST_EVENT */ + void dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event, bcm_dngl_event_msg_t *dngl_event, size_t pktlen) @@ -2844,6 +3236,9 @@ 
dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event, DHD_EVENT(("Line:%d ", *(uint32 *)p)); p += sizeof(uint32); DHD_EVENT(("Caller Addr:0x%x\n", *(uint32 *)p)); +#ifdef PARSE_DONGLE_HOST_EVENT + DHD_ERROR(("DONGLE_HCK_EVENT: SOCRAM_IND_ASSERT_TAG\n")); +#endif /* PARSE_DONGLE_HOST_EVENT */ break; } case SOCRAM_IND_TAG_HEALTH_CHECK: @@ -2883,6 +3278,11 @@ dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event, ltoh32(pcie_hc->pcie_err_ind_type), ltoh32(pcie_hc->pcie_flag), ltoh32(pcie_hc->pcie_control_reg))); +#ifdef PARSE_DONGLE_HOST_EVENT + dhd_print_dongle_hck_id( + ltoh32(pcie_hc->pcie_err_ind_type), + hck_pcie_module_to_str); +#endif /* PARSE_DONGLE_HOST_EVENT */ break; } #ifdef HCHK_COMMON_SW_EVENT @@ -2898,9 +3298,14 @@ dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event, return; } BCM_REFERENCE(wl_hc); - DHD_EVENT(("WL SW HC type %d len %d", - ltoh16(wl_hc->id), ltoh16(wl_hc->len))); + DHD_EVENT(("WL SW HC type %d len %d\n", + ltoh16(wl_hc->id), ltoh16(wl_hc->len))); + +#ifdef PARSE_DONGLE_HOST_EVENT + dhd_parse_hck_common_sw_event(wl_hc); +#endif /* PARSE_DONGLE_HOST_EVENT */ break; + } #endif /* HCHK_COMMON_SW_EVENT */ default: @@ -2914,7 +3319,7 @@ dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event, break; } default: - DHD_ERROR(("%s:Unknown TAG", __FUNCTION__)); + DHD_ERROR(("%s:Unknown TAG\n", __FUNCTION__)); if (p && DHD_EVENT_ON()) { prhex("SOCRAMIND", p, taglen); } @@ -2923,7 +3328,7 @@ dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event, break; } default: - DHD_ERROR(("%s:Unknown DNGL Event Type:%d", __FUNCTION__, type)); + DHD_ERROR(("%s:Unknown DNGL Event Type:%d\n", __FUNCTION__, type)); if (p && DHD_EVENT_ON()) { prhex("SOCRAMIND", p, datalen); } @@ -3014,6 +3419,9 @@ wl_process_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen uint evlen; int ret; uint16 usr_subtype; +#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT + dhd_if_t *ifp = NULL; +#endif /* 
DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */ ret = wl_host_event_get_data(pktdata, pktlen, &evu); if (ret != BCME_OK) { @@ -3186,7 +3594,9 @@ wl_process_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen #endif // endif #if defined(RTT_SUPPORT) case WLC_E_PROXD: +#ifndef WL_CFG80211 dhd_rtt_event_handler(dhd_pub, event, (void *)event_data); +#endif /* WL_CFG80211 */ break; #endif /* RTT_SUPPORT */ /* These are what external supplicant/authenticator wants */ @@ -3236,6 +3646,23 @@ wl_process_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen } break; #endif /* WL_NAN */ +#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT + case WLC_E_REASSOC: + ifp = dhd_get_ifp(dhd_pub, event->ifidx); + + if (!ifp) + break; + + /* Consider STA role only since roam is disabled on P2P GC. + * Drop EAPOL M1 frame only if roam is done to same BSS. + */ + if ((status == WLC_E_STATUS_SUCCESS) && + IS_STA_IFACE(ndev_to_wdev(ifp->net)) && + wl_cfg80211_is_event_from_connected_bssid(ifp->net, event, event->ifidx)) { + ifp->recv_reassoc_evt = TRUE; + } + break; +#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */ case WLC_E_LINK: #ifdef PCIE_FULL_DONGLE if (dhd_update_interface_link_status(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info, @@ -3290,8 +3717,15 @@ wl_process_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen } } #endif /* PCIE_FULL_DONGLE */ +#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT + /* fall through */ + ifp = dhd_get_ifp(dhd_pub, event->ifidx); + if (ifp) { + ifp->recv_reassoc_evt = FALSE; + ifp->post_roam_evt = FALSE; + } +#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */ /* fall through */ - default: *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname); #ifdef DHD_UPDATE_INTF_MAC @@ -3320,6 +3754,13 @@ wl_process_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen memcpy(pvt_data->event.ifname, dhd_ifname(dhd_pub, *ifidx), IFNAMSIZ); #endif // endif +#ifdef DHD_STATUS_LOGGING + if (dhd_pub->statlog) { + 
dhd_statlog_process_event(dhd_pub, type, *ifidx, + status, reason, flags); + } +#endif /* DHD_STATUS_LOGGING */ + #ifdef SHOW_EVENTS if (DHD_FWLOG_ON() || DHD_EVENT_ON()) { wl_show_host_event(dhd_pub, event, @@ -3366,7 +3807,6 @@ dhd_print_buf(void *pbuf, int len, int bytes_per_line) #define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base)) #endif // endif -#if defined(PKT_FILTER_SUPPORT) /* Convert user's input in hex pattern to byte-size mask */ int wl_pattern_atoh(char *src, char *dst) @@ -3392,6 +3832,7 @@ wl_pattern_atoh(char *src, char *dst) return i; } +#if defined(PKT_FILTER_SUPPORT) int pattern_atoh_len(char *src, char *dst, int len) { @@ -3554,7 +3995,7 @@ dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg) int rc = -1; uint32 mask_size; uint32 pattern_size; - char *argv[16], * buf = 0; + char *argv[MAXPKT_ARG] = {0}, * buf = 0; int i = 0; char *arg_save = 0, *arg_org = 0; @@ -3582,8 +4023,13 @@ dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg) } argv[i] = bcmstrtok(&arg_save, " ", 0); - while (argv[i++]) + while (argv[i++]) { + if (i >= MAXPKT_ARG) { + DHD_ERROR(("Invalid args provided\n")); + goto fail; + } argv[i] = bcmstrtok(&arg_save, " ", 0); + } i = 0; if (argv[i] == NULL) { @@ -3802,7 +4248,7 @@ dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg) rc = rc >= 0 ? 
0 : rc; if (rc) - DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n", + DHD_ERROR(("%s: failed to add pktfilter %s, retcode = %d\n", __FUNCTION__, arg, rc)); else DHD_TRACE(("%s: successfully added pktfilter %s\n", @@ -3858,7 +4304,15 @@ void dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable) { int retcode; - +#ifdef WL_CFG80211 + /* Do not enable arp offload in case of non-STA interfaces active */ + if (arp_enable && + (wl_cfg80211_check_vif_in_use(dhd_linux_get_primary_netdev(dhd)))) { + DHD_TRACE(("%s: Virtual interfaces active, ignore arp offload request \n", + __FUNCTION__)); + return; + } +#endif /* WL_CFG80211 */ retcode = dhd_wl_ioctl_set_intiovar(dhd, "arpoe", arp_enable, WLC_SET_VAR, TRUE, 0); @@ -3867,8 +4321,13 @@ dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable) DHD_ERROR(("%s: failed to enabe ARP offload to %d, retcode = %d\n", __FUNCTION__, arp_enable, retcode)); else +#ifdef DHD_LOG_DUMP + DHD_LOG_MEM(("%s: successfully enabed ARP offload to %d\n", + __FUNCTION__, arp_enable)); +#else DHD_ARPOE(("%s: successfully enabed ARP offload to %d\n", __FUNCTION__, arp_enable)); +#endif /* DHD_LOG_DUMP */ if (arp_enable) { uint32 version; retcode = dhd_wl_ioctl_get_intiovar(dhd, "arp_version", @@ -3897,6 +4356,15 @@ dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx) ret = dhd_iovar(dhd, idx, "arp_table_clear", NULL, 0, NULL, 0, TRUE); if (ret < 0) DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); + else { +#ifdef DHD_LOG_DUMP + DHD_LOG_MEM(("%s: ARP table clear\n", __FUNCTION__)); +#else + DHD_TRACE(("%s: ARP table clear\n", __FUNCTION__)); +#endif /* DHD_LOG_DUMP */ + } + /* mac address isn't cleared here but it will be cleared after dongle off */ + dhd->hmac_updated = 0; } void @@ -3911,6 +4379,13 @@ dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx) ret = dhd_iovar(dhd, idx, "arp_hostip_clear", NULL, 0, NULL, 0, TRUE); if (ret < 0) DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); + else { +#ifdef DHD_LOG_DUMP + DHD_LOG_MEM(("%s: ARP host ip 
clear\n", __FUNCTION__)); +#else + DHD_TRACE(("%s: ARP host ip clear\n", __FUNCTION__)); +#endif /* DHD_LOG_DUMP */ + } } void @@ -3924,11 +4399,17 @@ dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx) ret = dhd_iovar(dhd, idx, "arp_hostip", (char *)&ipaddr, sizeof(ipaddr), NULL, 0, TRUE); - if (ret) + if (ret < 0) DHD_ERROR(("%s: ARP ip addr add failed, ret = %d\n", __FUNCTION__, ret)); - else - DHD_ARPOE(("%s: sARP H ipaddr entry added \n", - __FUNCTION__)); + else { + /* mac address is updated in the dongle */ + dhd->hmac_updated = 1; +#ifdef DHD_LOG_DUMP + DHD_LOG_MEM(("%s: ARP ip addr entry added \n", __FUNCTION__)); +#else + DHD_ARPOE(("%s: ARP ip addr entry added \n", __FUNCTION__)); +#endif /* DHD_LOG_DUMP */ + } } int @@ -4520,6 +5001,12 @@ dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval) int ret = -1; int allowed_skip_dtim_cnt = 0; + if (dhd->disable_dtim_in_suspend) { + DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__)); + bcn_li_dtim = 0; + return bcn_li_dtim; + } + /* Check if associated */ if (dhd_is_associated(dhd, 0, NULL) == FALSE) { DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret)); @@ -4596,6 +5083,12 @@ dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd) int ap_beacon = 0; int allowed_skip_dtim_cnt = 0; + if (dhd->disable_dtim_in_suspend) { + DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__)); + bcn_li_dtim = 0; + goto exit; + } + /* Check if associated */ if (dhd_is_associated(dhd, 0, NULL) == FALSE) { DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret)); @@ -4665,6 +5158,66 @@ exit: } #endif /* OEM_ANDROID && BCMPCIE */ +#ifdef CONFIG_SILENT_ROAM +int +dhd_sroam_set_mon(dhd_pub_t *dhd, bool set) +{ + int ret = BCME_OK; + wlc_sroam_t *psroam; + wlc_sroam_info_t *sroam; + uint sroamlen = sizeof(*sroam) + SROAM_HDRLEN; + + /* Check if associated */ + if (dhd_is_associated(dhd, 0, NULL) == FALSE) { + DHD_TRACE(("%s NOT assoc\n", __FUNCTION__)); + return ret; + } + + if (set 
&& (dhd->op_mode & + (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) { + DHD_INFO((" Failed to set sroam %d, op_mode 0x%04x\n", set, dhd->op_mode)); + return ret; + } + + if (!dhd->sroam_turn_on) { + DHD_INFO((" Failed to set sroam %d, sroam turn %d\n", set, dhd->sroam_turn_on)); + return ret; + } + psroam = (wlc_sroam_t *)MALLOCZ(dhd->osh, sroamlen); + if (!psroam) { + DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__)); + return BCME_NOMEM; + } + + ret = dhd_iovar(dhd, 0, "sroam", NULL, 0, (char *)psroam, sroamlen, FALSE); + if (ret < 0) { + DHD_ERROR(("%s Failed to Get sroam %d\n", __FUNCTION__, ret)); + goto done; + } + + if (psroam->ver != WLC_SILENT_ROAM_CUR_VER) { + ret = BCME_VERSION; + goto done; + } + + sroam = (wlc_sroam_info_t *)psroam->data; + sroam->sroam_on = set; + DHD_INFO((" Silent roam monitor mode %s\n", set ? "On" : "Off")); + + ret = dhd_iovar(dhd, 0, "sroam", (char *)psroam, sroamlen, NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Failed to Set sroam %d\n", __FUNCTION__, ret)); + } + +done: + if (psroam) { + MFREE(dhd->osh, psroam, sroamlen); + } + return ret; + +} +#endif /* CONFIG_SILENT_ROAM */ + /* Check if the mode supports STA MODE */ bool dhd_support_sta_mode(dhd_pub_t *dhd) { @@ -5267,103 +5820,6 @@ void dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length) MFREE(dhd->osh, buffer, length); } -#if defined(DHD_8021X_DUMP) -#define EAP_PRINT(str) \ - DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: " str "\n", \ - ifname, direction ? 
"TX" : "RX")); -#else -#define EAP_PRINT(str) -#endif /* DHD_8021X_DUMP */ -/* Parse EAPOL 4 way handshake messages */ -void -dhd_dump_eapol_4way_message(dhd_pub_t *dhd, char *ifname, - char *dump_data, bool direction) -{ - unsigned char type; - int pair, ack, mic, kerr, req, sec, install; - unsigned short us_tmp; - - type = dump_data[15]; - if (type == 0) { - if ((dump_data[22] == 1) && (dump_data[18] == 1)) { - dhd->conf->eapol_status = EAPOL_STATUS_WPS_REQID; - EAP_PRINT("EAP Packet, Request, Identity"); - } else if ((dump_data[22] == 1) && (dump_data[18] == 2)) { - dhd->conf->eapol_status = EAPOL_STATUS_WPS_RSPID; - EAP_PRINT("EAP Packet, Response, Identity"); - } else if (dump_data[22] == 254) { - if (dump_data[30] == 1) { - dhd->conf->eapol_status = EAPOL_STATUS_WPS_WSC_START; - EAP_PRINT("EAP Packet, WSC Start"); - } else if (dump_data[30] == 4) { - if (dump_data[41] == 4) { - dhd->conf->eapol_status = EAPOL_STATUS_WPS_M1; - EAP_PRINT("EAP Packet, WPS M1"); - } else if (dump_data[41] == 5) { - dhd->conf->eapol_status = EAPOL_STATUS_WPS_M2; - EAP_PRINT("EAP Packet, WPS M2"); - } else if (dump_data[41] == 7) { - dhd->conf->eapol_status = EAPOL_STATUS_WPS_M3; - EAP_PRINT("EAP Packet, WPS M3"); - } else if (dump_data[41] == 8) { - dhd->conf->eapol_status = EAPOL_STATUS_WPS_M4; - EAP_PRINT("EAP Packet, WPS M4"); - } else if (dump_data[41] == 9) { - dhd->conf->eapol_status = EAPOL_STATUS_WPS_M5; - EAP_PRINT("EAP Packet, WPS M5"); - } else if (dump_data[41] == 10) { - dhd->conf->eapol_status = EAPOL_STATUS_WPS_M6; - EAP_PRINT("EAP Packet, WPS M6"); - } else if (dump_data[41] == 11) { - dhd->conf->eapol_status = EAPOL_STATUS_WPS_M7; - EAP_PRINT("EAP Packet, WPS M7"); - } else if (dump_data[41] == 12) { - dhd->conf->eapol_status = EAPOL_STATUS_WPS_M8; - EAP_PRINT("EAP Packet, WPS M8"); - } - } else if (dump_data[30] == 5) { - dhd->conf->eapol_status = EAPOL_STATUS_WPS_DONE; - EAP_PRINT("EAP Packet, WSC Done"); - } - } else { - DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: 
ver %d, type %d, replay %d\n", - ifname, direction ? "TX" : "RX", - dump_data[14], dump_data[15], dump_data[30])); - } - } else if (type == 3 && dump_data[18] == 2) { - us_tmp = (dump_data[19] << 8) | dump_data[20]; - pair = 0 != (us_tmp & 0x08); - ack = 0 != (us_tmp & 0x80); - mic = 0 != (us_tmp & 0x100); - kerr = 0 != (us_tmp & 0x400); - req = 0 != (us_tmp & 0x800); - sec = 0 != (us_tmp & 0x200); - install = 0 != (us_tmp & 0x40); - - if (!sec && !mic && ack && !install && pair && !kerr && !req) { - dhd->conf->eapol_status = EAPOL_STATUS_WPA_M1; - EAP_PRINT("EAPOL Packet, 4-way handshake, M1"); - } else if (pair && !install && !ack && mic && !sec && !kerr && !req) { - dhd->conf->eapol_status = EAPOL_STATUS_WPA_M2; - EAP_PRINT("EAPOL Packet, 4-way handshake, M2"); - } else if (pair && ack && mic && sec && !kerr && !req) { - dhd->conf->eapol_status = EAPOL_STATUS_WPA_M3; - EAP_PRINT("EAPOL Packet, 4-way handshake, M3"); - } else if (pair && !install && !ack && mic && sec && !req && !kerr) { - dhd->conf->eapol_status = EAPOL_STATUS_WPA_M4; - EAP_PRINT("EAPOL Packet, 4-way handshake, M4"); - } else { - DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n", - ifname, direction ? "TX" : "RX", - dump_data[14], dump_data[15], dump_data[30])); - } - } else { - DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n", - ifname, direction ? 
"TX" : "RX", - dump_data[14], dump_data[15], dump_data[30])); - } -} - #ifdef SHOW_LOGTRACE int dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size, @@ -5425,7 +5881,7 @@ dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size, goto error; } /* fwid is at the end of fw bin in string format */ - if (dhd_os_seek_file(file, file_len - sizeof(fwid_str) - 1) < 0) { + if (dhd_os_seek_file(file, file_len - (sizeof(fwid_str) - 1)) < 0) { DHD_ERROR(("%s: can't seek file \n", __FUNCTION__)); goto error; } @@ -5457,7 +5913,9 @@ dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size, /* check if fw id in logstrs.bin matches the fw one */ if (hdr->trailer.fw_id != fwid) { - DHD_ERROR(("%s: logstr id does not match FW!\n", __FUNCTION__)); + DHD_ERROR(("%s: logstr id does not match FW!" + "logstrs_fwid:0x%x, rtecdc_fwid:0x%x\n", + __FUNCTION__, hdr->trailer.fw_id, fwid)); goto error; } @@ -5855,7 +6313,7 @@ static ecounters_cfg_t ecounters_cfg_tbl[] = { static event_ecounters_cfg_t event_ecounters_cfg_tbl[] = { /* Interface specific event ecounters */ - {WLC_E_LINK, ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_EVENT_STATS}, + {WLC_E_DEAUTH_IND, ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_EVENT_STATS}, }; /* Accepts an argument to -s, -g or -f and creates an XTLV */ @@ -5945,7 +6403,7 @@ dhd_get_preserve_log_numbers(dhd_pub_t *dhd, uint32 *logset_mask) memset(&logset_op, 0, sizeof(logset_op)); logset_type.version = htod16(EVENT_LOG_SET_TYPE_CURRENT_VERSION); logset_type.len = htod16(sizeof(wl_el_set_type_t)); - for (i = 0; i < WL_MAX_PRESERVE_BUFFER; i++) { + for (i = 0; i < dhd->event_log_max_sets; i++) { logset_type.set = i; err = dhd_iovar(dhd, 0, "event_log_set_type", (char *)&logset_type, sizeof(logset_type), (char *)&logset_op, sizeof(logset_op), FALSE); @@ -5963,12 +6421,53 @@ dhd_get_preserve_log_numbers(dhd_pub_t *dhd, uint32 *logset_mask) return ret; } +static int +dhd_ecounter_autoconfig(dhd_pub_t *dhd) 
+{ + int rc = BCME_OK; + uint32 buf; + rc = dhd_iovar(dhd, 0, "ecounters_autoconfig", NULL, 0, (char *)&buf, sizeof(buf), FALSE); + + if (rc != BCME_OK) { + + if (rc != BCME_UNSUPPORTED) { + rc = BCME_OK; + DHD_ERROR(("%s Ecounter autoconfig in fw failed : %d\n", __FUNCTION__, rc)); + } else { + DHD_ERROR(("%s Ecounter autoconfig in FW not supported\n", __FUNCTION__)); + } + } + + return rc; +} + +int +dhd_ecounter_configure(dhd_pub_t *dhd, bool enable) +{ + int rc = BCME_OK; + if (enable) { + if (dhd_ecounter_autoconfig(dhd) != BCME_OK) { + if ((rc = dhd_start_ecounters(dhd)) != BCME_OK) { + DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__)); + } else if ((rc = dhd_start_event_ecounters(dhd)) != BCME_OK) { + DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__)); + } + } + } else { + if ((rc = dhd_stop_ecounters(dhd)) != BCME_OK) { + DHD_ERROR(("%s Ecounters stop failed\n", __FUNCTION__)); + } else if ((rc = dhd_stop_event_ecounters(dhd)) != BCME_OK) { + DHD_ERROR(("%s Event_Ecounters stop failed\n", __FUNCTION__)); + } + } + return rc; +} + int dhd_start_ecounters(dhd_pub_t *dhd) { uint8 i = 0; uint8 *start_ptr; - uint32 buf; int rc = BCME_OK; bcm_xtlv_t *elt; ecounters_config_request_v2_t *req = NULL; @@ -5976,13 +6475,6 @@ dhd_start_ecounters(dhd_pub_t *dhd) ecountersv2_processed_xtlv_list_elt *processed_containers_list = NULL; uint16 total_processed_containers_len = 0; - rc = dhd_iovar(dhd, 0, "ecounters_autoconfig", NULL, 0, (char *)&buf, sizeof(buf), FALSE); - - if (rc != BCME_UNSUPPORTED) - return rc; - - rc = BCME_OK; - for (i = 0; i < ARRAYSIZE(ecounters_cfg_tbl); i++) { ecounters_cfg_t *ecounter_stat = &ecounters_cfg_tbl[i]; @@ -6271,9 +6763,78 @@ fail: } #ifdef DHD_LOG_DUMP +int +dhd_dump_debug_ring(dhd_pub_t *dhdp, void *ring_ptr, const void *user_buf, + log_dump_section_hdr_t *sec_hdr, + char *text_hdr, int buflen, uint32 sec_type) +{ + uint32 rlen = 0; + uint32 data_len = 0; + void *data = NULL; + unsigned long flags = 0; + int ret = 0; 
+ dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr; + int pos = 0; + int fpos_sechdr = 0; + + if (!dhdp || !ring || !user_buf || !sec_hdr || !text_hdr) { + return BCME_BADARG; + } + /* do not allow further writes to the ring + * till we flush it + */ + DHD_DBG_RING_LOCK(ring->lock, flags); + ring->state = RING_SUSPEND; + DHD_DBG_RING_UNLOCK(ring->lock, flags); + + if (dhdp->concise_dbg_buf) { + /* re-use concise debug buffer temporarily + * to pull ring data, to write + * record by record to file + */ + data_len = CONCISE_DUMP_BUFLEN; + data = dhdp->concise_dbg_buf; + ret = dhd_export_debug_data(text_hdr, NULL, user_buf, strlen(text_hdr), &pos); + /* write the section header now with zero length, + * once the correct length is found out, update + * it later + */ + fpos_sechdr = pos; + sec_hdr->type = sec_type; + sec_hdr->length = 0; + ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf, + sizeof(*sec_hdr), &pos); + do { + rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE); + if (rlen > 0) { + /* write the log */ + ret = dhd_export_debug_data(data, NULL, user_buf, rlen, &pos); + } + DHD_DBGIF(("%s: rlen : %d\n", __FUNCTION__, rlen)); + } while ((rlen > 0)); + /* now update the section header length in the file */ + /* Complete ring size is dumped by HAL, hence updating length to ring size */ + sec_hdr->length = ring->ring_size; + ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf, + sizeof(*sec_hdr), &fpos_sechdr); + } else { + DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__)); + } + DHD_DBG_RING_LOCK(ring->lock, flags); + ring->state = RING_ACTIVE; + /* Resetting both read and write pointer, + * since all items are read. 
+ */ + ring->rp = ring->wp = 0; + DHD_DBG_RING_UNLOCK(ring->lock, flags); + + return ret; +} + int dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file, - unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr) + unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr, + char *text_hdr, uint32 sec_type) { uint32 rlen = 0; uint32 data_len = 0, total_len = 0; @@ -6283,7 +6844,8 @@ dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file, int ret = 0; dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr; - if (!dhdp || !ring || !file || !sec_hdr || !file_posn) + if (!dhdp || !ring || !file || !sec_hdr || + !file_posn || !text_hdr) return BCME_BADARG; /* do not allow further writes to the ring @@ -6300,14 +6862,15 @@ dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file, */ data_len = CONCISE_DUMP_BUFLEN; data = dhdp->concise_dbg_buf; - dhd_os_write_file_posn(file, file_posn, ECNTRS_LOG_HDR, - strlen(ECNTRS_LOG_HDR)); + dhd_os_write_file_posn(file, file_posn, text_hdr, + strlen(text_hdr)); /* write the section header now with zero length, * once the correct length is found out, update * it later */ + dhd_init_sec_hdr(sec_hdr); fpos_sechdr = *file_posn; - sec_hdr->type = LOG_DUMP_SECTION_ECNTRS; + sec_hdr->type = sec_type; sec_hdr->length = 0; dhd_os_write_file_posn(file, file_posn, (char *)sec_hdr, sizeof(*sec_hdr)); @@ -6335,8 +6898,11 @@ dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file, DHD_DBG_RING_LOCK(ring->lock, flags); ring->state = RING_ACTIVE; + /* Resetting both read and write pointer, + * since all items are read. 
+ */ + ring->rp = ring->wp = 0; DHD_DBG_RING_UNLOCK(ring->lock, flags); - return BCME_OK; } @@ -6360,8 +6926,9 @@ dhd_logdump_cookie_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size) return BCME_ERROR; } - dhdp->logdump_cookie = dhd_ring_init(buf, buf_size, - LOGDUMP_COOKIE_STR_LEN, MAX_LOGUDMP_COOKIE_CNT); + dhdp->logdump_cookie = dhd_ring_init(dhdp, buf, buf_size, + LOGDUMP_COOKIE_STR_LEN, MAX_LOGUDMP_COOKIE_CNT, + DHD_RING_TYPE_FIXED); if (!dhdp->logdump_cookie) { DHD_ERROR(("FAIL TO INIT COOKIE RING\n")); return BCME_ERROR; @@ -6377,7 +6944,7 @@ dhd_logdump_cookie_deinit(dhd_pub_t *dhdp) return; } if (dhdp->logdump_cookie) { - dhd_ring_deinit(dhdp->logdump_cookie); + dhd_ring_deinit(dhdp, dhdp->logdump_cookie); } return; @@ -6437,13 +7004,19 @@ dhd_logdump_cookie_count(dhd_pub_t *dhdp) static inline int __dhd_log_dump_cookie_to_file( - dhd_pub_t *dhdp, void *fp, unsigned long *f_pos, char *buf, uint32 buf_size) + dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos, + char *buf, uint32 buf_size) { uint32 remain = buf_size; int ret = BCME_ERROR; char tmp_buf[LOGDUMP_COOKIE_STR_LEN]; log_dump_section_hdr_t sec_hdr; + uint32 read_idx; + uint32 write_idx; + + read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie); + write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie); while (dhd_logdump_cookie_count(dhdp) > 0) { memset(tmp_buf, 0, sizeof(tmp_buf)); ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN); @@ -6452,7 +7025,10 @@ __dhd_log_dump_cookie_to_file( } remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf); } - ret = dhd_os_write_file_posn(fp, f_pos, COOKIE_LOG_HDR, strlen(COOKIE_LOG_HDR)); + dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx); + dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx); + + ret = dhd_export_debug_data(COOKIE_LOG_HDR, fp, user_buf, strlen(COOKIE_LOG_HDR), f_pos); if (ret < 0) { DHD_ERROR(("%s : Write file Error for cookie hdr\n", __FUNCTION__)); return ret; @@ -6461,13 +7037,14 
@@ __dhd_log_dump_cookie_to_file( sec_hdr.timestamp = local_clock(); sec_hdr.type = LOG_DUMP_SECTION_COOKIE; sec_hdr.length = buf_size - remain; - ret = dhd_os_write_file_posn(fp, f_pos, (char *)&sec_hdr, sizeof(sec_hdr)); + + ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), f_pos); if (ret < 0) { DHD_ERROR(("%s : Write file Error for section hdr\n", __FUNCTION__)); return ret; } - ret = dhd_os_write_file_posn(fp, f_pos, buf, sec_hdr.length); + ret = dhd_export_debug_data(buf, fp, user_buf, sec_hdr.length, f_pos); if (ret < 0) { DHD_ERROR(("%s : Write file Error for cookie data\n", __FUNCTION__)); } @@ -6475,14 +7052,116 @@ __dhd_log_dump_cookie_to_file( return ret; } +uint32 +dhd_log_dump_cookie_len(dhd_pub_t *dhdp) +{ + int len = 0; + char tmp_buf[LOGDUMP_COOKIE_STR_LEN]; + log_dump_section_hdr_t sec_hdr; + char *buf = NULL; + int ret = BCME_ERROR; + uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN; + uint32 read_idx; + uint32 write_idx; + uint32 remain; + + remain = buf_size; + + if (!dhdp || !dhdp->logdump_cookie) { + DHD_ERROR(("%s At least one ptr is NULL " + "dhdp = %p cookie %p\n", + __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL)); + goto exit; + } + + buf = (char *)MALLOCZ(dhdp->osh, buf_size); + if (!buf) { + DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__)); + goto exit; + } + + read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie); + write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie); + while (dhd_logdump_cookie_count(dhdp) > 0) { + memset(tmp_buf, 0, sizeof(tmp_buf)); + ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN); + if (ret != BCME_OK) { + goto exit; + } + remain -= (uint32)strlen(tmp_buf); + } + dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx); + dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx); + len += strlen(COOKIE_LOG_HDR); + len += sizeof(sec_hdr); + len += (buf_size - remain); +exit: + if (buf) + MFREE(dhdp->osh, buf, buf_size); + return len; +} + 
+int +dhd_log_dump_cookie(dhd_pub_t *dhdp, const void *user_buf) +{ + int ret = BCME_ERROR; + char tmp_buf[LOGDUMP_COOKIE_STR_LEN]; + log_dump_section_hdr_t sec_hdr; + char *buf = NULL; + uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN; + int pos = 0; + uint32 read_idx; + uint32 write_idx; + uint32 remain; + + remain = buf_size; + + if (!dhdp || !dhdp->logdump_cookie) { + DHD_ERROR(("%s At least one ptr is NULL " + "dhdp = %p cookie %p\n", + __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL)); + goto exit; + } + + buf = (char *)MALLOCZ(dhdp->osh, buf_size); + if (!buf) { + DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__)); + goto exit; + } + + read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie); + write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie); + while (dhd_logdump_cookie_count(dhdp) > 0) { + memset(tmp_buf, 0, sizeof(tmp_buf)); + ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN); + if (ret != BCME_OK) { + goto exit; + } + remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf); + } + dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx); + dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx); + ret = dhd_export_debug_data(COOKIE_LOG_HDR, NULL, user_buf, strlen(COOKIE_LOG_HDR), &pos); + sec_hdr.magic = LOG_DUMP_MAGIC; + sec_hdr.timestamp = local_clock(); + sec_hdr.type = LOG_DUMP_SECTION_COOKIE; + sec_hdr.length = buf_size - remain; + ret = dhd_export_debug_data((char *)&sec_hdr, NULL, user_buf, sizeof(sec_hdr), &pos); + ret = dhd_export_debug_data(buf, NULL, user_buf, sec_hdr.length, &pos); +exit: + if (buf) + MFREE(dhdp->osh, buf, buf_size); + return ret; +} + int -dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, unsigned long *f_pos) +dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos) { char *buf; int ret = BCME_ERROR; uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN; - if (!dhdp || !dhdp->logdump_cookie ||!fp || !f_pos) { 
+ if (!dhdp || !dhdp->logdump_cookie || (!fp && !user_buf) || !f_pos) { DHD_ERROR(("%s At least one ptr is NULL " "dhdp = %p cookie %p fp = %p f_pos = %p\n", __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL, fp, f_pos)); @@ -6494,7 +7173,7 @@ dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, unsigned long *f_pos) DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__)); return ret; } - ret = __dhd_log_dump_cookie_to_file(dhdp, fp, f_pos, buf, buf_size); + ret = __dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, f_pos, buf, buf_size); MFREE(dhdp->osh, buf, buf_size); return ret; @@ -6503,10 +7182,14 @@ dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, unsigned long *f_pos) #endif /* DHD_LOG_DUMP */ #ifdef DHD_LOG_DUMP +#define DEBUG_DUMP_TRIGGER_INTERVAL_SEC 4 void dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd) { +#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) log_dump_type_t *flush_type; +#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */ + uint64 current_time_sec; if (!dhdp) { DHD_ERROR(("dhdp is NULL !\n")); @@ -6518,23 +7201,45 @@ dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd) return; } - flush_type = MALLOCZ(dhdp->osh, sizeof(log_dump_type_t)); - if (!flush_type) { - DHD_ERROR(("%s Fail to malloc flush_type\n", __FUNCTION__)); + current_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC); + + DHD_ERROR(("%s: current_time_sec=%lld debug_dump_time_sec=%lld interval=%d\n", + __FUNCTION__, current_time_sec, dhdp->debug_dump_time_sec, + DEBUG_DUMP_TRIGGER_INTERVAL_SEC)); + + if ((current_time_sec - dhdp->debug_dump_time_sec) < DEBUG_DUMP_TRIGGER_INTERVAL_SEC) { + DHD_ERROR(("%s : Last debug dump triggered(%lld) within %d seconds, so SKIP\n", + __FUNCTION__, dhdp->debug_dump_time_sec, DEBUG_DUMP_TRIGGER_INTERVAL_SEC)); return; } + clear_debug_dump_time(dhdp->debug_dump_time_str); /* */ dhdp->debug_dump_subcmd = subcmd; + dhdp->debug_dump_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC); + +#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) + /* 
flush_type is freed at do_dhd_log_dump function */ + flush_type = MALLOCZ(dhdp->osh, sizeof(log_dump_type_t)); if (flush_type) { *flush_type = DLD_BUF_TYPE_ALL; dhd_schedule_log_dump(dhdp, flush_type); + } else { + DHD_ERROR(("%s Fail to malloc flush_type\n", __FUNCTION__)); + return; } +#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */ + + /* Inside dhd_mem_dump, event notification will be sent to HAL and + * from other context DHD pushes memdump, debug_dump and pktlog dump + * to HAL and HAL will write into file + */ #if (defined(BCMPCIE) || defined(BCMSDIO)) && defined(DHD_FW_COREDUMP) dhdp->memdump_type = DUMP_TYPE_BY_SYSDUMP; dhd_bus_mem_dump(dhdp); #endif /* BCMPCIE && DHD_FW_COREDUMP */ + } #endif /* DHD_LOG_DUMP */ @@ -6730,3 +7435,34 @@ void dhd_h2d_log_time_sync(dhd_pub_t *dhd) } } #endif /* DHD_H2D_LOG_TIME_SYNC */ + +#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB) +int +dhd_control_he_enab(dhd_pub_t * dhd, uint8 he_enab) +{ + int ret = BCME_OK; + bcm_xtlv_t *pxtlv = NULL; + uint8 mybuf[DHD_IOVAR_BUF_SIZE]; + uint16 mybuf_len = sizeof(mybuf); + pxtlv = (bcm_xtlv_t *)mybuf; + + ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &mybuf_len, WL_HE_CMD_ENAB, sizeof(he_enab), + &he_enab, BCM_XTLV_OPTION_ALIGN32); + + if (ret != BCME_OK) { + ret = -EINVAL; + DHD_ERROR(("%s failed to pack he enab, err: %s\n", __FUNCTION__, bcmerrorstr(ret))); + return ret; + } + + ret = dhd_iovar(dhd, 0, "he", (char *)&mybuf, sizeof(mybuf), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s he_enab (%d) set failed, err: %s\n", + __FUNCTION__, he_enab, bcmerrorstr(ret))); + } else { + DHD_ERROR(("%s he_enab (%d) set successed\n", __FUNCTION__, he_enab)); + } + + return ret; +} +#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */ diff --git a/bcmdhd.100.10.315.x/dhd_config.c b/bcmdhd.100.10.315.x/dhd_config.c index a3eb9be..3d33ea4 100644 --- a/bcmdhd.100.10.315.x/dhd_config.c +++ b/bcmdhd.100.10.315.x/dhd_config.c @@ -2,6 +2,7 @@ #include #include +#include #include 
#include #include @@ -11,40 +12,57 @@ #include #include #endif +#ifdef WL_CFG80211 +#include +#endif #include #include +#include /* message levels */ -#define CONFIG_ERROR_LEVEL 0x0001 -#define CONFIG_TRACE_LEVEL 0x0002 +#define CONFIG_ERROR_LEVEL (1 << 0) +#define CONFIG_TRACE_LEVEL (1 << 1) +#define CONFIG_MSG_LEVEL (1 << 15) -uint config_msg_level = CONFIG_ERROR_LEVEL; +uint config_msg_level = CONFIG_ERROR_LEVEL | CONFIG_MSG_LEVEL; -#define CONFIG_ERROR(x) \ +#define CONFIG_MSG(x, args...) \ + do { \ + if (config_msg_level & CONFIG_MSG_LEVEL) { \ + printk(KERN_ERR "[dhd] %s : " x, __func__, ## args); \ + } \ + } while (0) +#define CONFIG_ERROR(x, args...) \ do { \ if (config_msg_level & CONFIG_ERROR_LEVEL) { \ - printk(KERN_ERR "CONFIG-ERROR) "); \ - printk x; \ + printk(KERN_ERR "[dhd] CONFIG-ERROR) %s : " x, __func__, ## args); \ } \ } while (0) -#define CONFIG_TRACE(x) \ +#define CONFIG_TRACE(x, args...) \ do { \ if (config_msg_level & CONFIG_TRACE_LEVEL) { \ - printk(KERN_ERR "CONFIG-TRACE) "); \ - printk x; \ + printk(KERN_ERR "[dhd] CONFIG-TRACE) %s : " x, __func__, ## args); \ } \ } while (0) #define MAXSZ_BUF 1000 #define MAXSZ_CONFIG 4096 +#ifndef WL_CFG80211 #define htod32(i) i #define htod16(i) i #define dtoh32(i) i #define dtoh16(i) i #define htodchanspec(i) i #define dtohchanspec(i) i +#endif + +#if defined(SUSPEND_EVENT) && defined(PROP_TXSTATUS) +#if defined(BCMSDIO) || defined(BCMDBUS) +#include +#endif /* BCMSDIO || BCMDBUS */ +#endif /* SUSPEND_EVENT && PROP_TXSTATUS */ #define MAX_EVENT_BUF_NUM 16 typedef struct eventmsg_buf { @@ -59,53 +77,59 @@ typedef struct cihp_name_map_t { uint chip; uint chiprev; uint ag_type; - bool clm; char *chip_name; char *module_name; } cihp_name_map_t; /* Map of WLC_E events to connection failure strings */ #define DONT_CARE 9999 -const cihp_name_map_t chip_name_map [] = { - /* ChipID Chiprev AG CLM ChipName ModuleName */ +const cihp_name_map_t chip_name_map[] = { + /* ChipID Chiprev AG ChipName ModuleName */ 
#ifdef BCMSDIO - {BCM43362_CHIP_ID, 0, DONT_CARE, FALSE, "bcm40181a0", ""}, - {BCM43362_CHIP_ID, 1, DONT_CARE, FALSE, "bcm40181a2", ""}, - {BCM4330_CHIP_ID, 4, FW_TYPE_G, FALSE, "bcm40183b2", ""}, - {BCM4330_CHIP_ID, 4, FW_TYPE_AG, FALSE, "bcm40183b2_ag", ""}, - {BCM43430_CHIP_ID, 0, DONT_CARE, FALSE, "bcm43438a0", "ap6212"}, - {BCM43430_CHIP_ID, 1, DONT_CARE, FALSE, "bcm43438a1", "ap6212a"}, - {BCM43430_CHIP_ID, 2, DONT_CARE, FALSE, "bcm43436b0", "ap6236"}, - {BCM43012_CHIP_ID, 1, DONT_CARE, TRUE, "bcm43013b0", ""}, - {BCM4334_CHIP_ID, 3, DONT_CARE, FALSE, "bcm4334b1_ag", ""}, - {BCM43340_CHIP_ID, 2, DONT_CARE, FALSE, "bcm43341b0_ag", ""}, - {BCM43341_CHIP_ID, 2, DONT_CARE, FALSE, "bcm43341b0_ag", ""}, - {BCM4324_CHIP_ID, 5, DONT_CARE, FALSE, "bcm43241b4_ag", ""}, - {BCM4335_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4339a0_ag", ""}, - {BCM4339_CHIP_ID, 1, DONT_CARE, FALSE, "bcm4339a0_ag", "ap6335"}, - {BCM4345_CHIP_ID, 6, DONT_CARE, FALSE, "bcm43455c0_ag", "ap6255"}, - {BCM43454_CHIP_ID, 6, DONT_CARE, FALSE, "bcm43455c0_ag", ""}, - {BCM4345_CHIP_ID, 9, DONT_CARE, FALSE, "bcm43456c5_ag", "ap6256"}, - {BCM43454_CHIP_ID, 9, DONT_CARE, FALSE, "bcm43456c5_ag", ""}, - {BCM4354_CHIP_ID, 1, DONT_CARE, FALSE, "bcm4354a1_ag", ""}, - {BCM4354_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4356a2_ag", "ap6356"}, - {BCM4356_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4356a2_ag", ""}, - {BCM4371_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4356a2_ag", ""}, - {BCM43569_CHIP_ID, 3, DONT_CARE, FALSE, "bcm4358a3_ag", ""}, - {BCM4359_CHIP_ID, 5, DONT_CARE, FALSE, "bcm4359b1_ag", ""}, - {BCM4359_CHIP_ID, 9, DONT_CARE, FALSE, "bcm4359c0_ag", "ap6398s"}, - {BCM43751_CHIP_ID, 1, DONT_CARE, TRUE, "bcm43751a1_ag", "ap6271s"}, + {BCM43362_CHIP_ID, 0, DONT_CARE, "bcm40181a0", ""}, + {BCM43362_CHIP_ID, 1, DONT_CARE, "bcm40181a2", ""}, + {BCM4330_CHIP_ID, 4, FW_TYPE_G, "bcm40183b2", ""}, + {BCM4330_CHIP_ID, 4, FW_TYPE_AG, "bcm40183b2_ag", ""}, + {BCM43430_CHIP_ID, 0, DONT_CARE, "bcm43438a0", "ap6212"}, + {BCM43430_CHIP_ID, 1, 
DONT_CARE, "bcm43438a1", "ap6212a"}, + {BCM43430_CHIP_ID, 2, DONT_CARE, "bcm43436b0", "ap6236"}, + {BCM43012_CHIP_ID, 1, FW_TYPE_G, "bcm43013b0", ""}, + {BCM43012_CHIP_ID, 1, FW_TYPE_AG, "bcm43013c0_ag", ""}, + {BCM4334_CHIP_ID, 3, DONT_CARE, "bcm4334b1_ag", ""}, + {BCM43340_CHIP_ID, 2, DONT_CARE, "bcm43341b0_ag", ""}, + {BCM43341_CHIP_ID, 2, DONT_CARE, "bcm43341b0_ag", ""}, + {BCM4324_CHIP_ID, 5, DONT_CARE, "bcm43241b4_ag", ""}, + {BCM4335_CHIP_ID, 2, DONT_CARE, "bcm4339a0_ag", ""}, + {BCM4339_CHIP_ID, 1, DONT_CARE, "bcm4339a0_ag", "ap6335"}, + {BCM4345_CHIP_ID, 6, DONT_CARE, "bcm43455c0_ag", "ap6255"}, + {BCM43454_CHIP_ID, 6, DONT_CARE, "bcm43455c0_ag", ""}, + {BCM4345_CHIP_ID, 9, DONT_CARE, "bcm43456c5_ag", "ap6256"}, + {BCM43454_CHIP_ID, 9, DONT_CARE, "bcm43456c5_ag", ""}, + {BCM4354_CHIP_ID, 1, DONT_CARE, "bcm4354a1_ag", ""}, + {BCM4354_CHIP_ID, 2, DONT_CARE, "bcm4356a2_ag", "ap6356"}, + {BCM4356_CHIP_ID, 2, DONT_CARE, "bcm4356a2_ag", ""}, + {BCM4371_CHIP_ID, 2, DONT_CARE, "bcm4356a2_ag", ""}, + {BCM43569_CHIP_ID, 3, DONT_CARE, "bcm4358a3_ag", ""}, + {BCM4359_CHIP_ID, 5, DONT_CARE, "bcm4359b1_ag", ""}, + {BCM4359_CHIP_ID, 9, DONT_CARE, "bcm4359c0_ag", "ap6398s"}, + {BCM43751_CHIP_ID, 1, DONT_CARE, "bcm43751a1_ag", ""}, + {BCM43751_CHIP_ID, 2, DONT_CARE, "bcm43751a2_ag", ""}, + {BCM43752_CHIP_ID, 1, DONT_CARE, "bcm43752a1_ag", ""}, + {BCM43752_CHIP_ID, 2, DONT_CARE, "bcm43752a2_ag", ""}, #endif #ifdef BCMPCIE - {BCM4354_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4356a2_pcie_ag", ""}, - {BCM4356_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4356a2_pcie_ag", ""}, - {BCM4359_CHIP_ID, 9, DONT_CARE, FALSE, "bcm4359c0_pcie_ag", ""}, - {BCM4362_CHIP_ID, 0, DONT_CARE, TRUE, "bcm43752a0_pcie_ag", ""}, + {BCM4354_CHIP_ID, 2, DONT_CARE, "bcm4356a2_pcie_ag", ""}, + {BCM4356_CHIP_ID, 2, DONT_CARE, "bcm4356a2_pcie_ag", ""}, + {BCM4359_CHIP_ID, 9, DONT_CARE, "bcm4359c0_pcie_ag", ""}, + {BCM43751_CHIP_ID, 1, DONT_CARE, "bcm43751a1_pcie_ag", ""}, + {BCM43751_CHIP_ID, 2, DONT_CARE, 
"bcm43751a2_pcie_ag", ""}, + {BCM43752_CHIP_ID, 1, DONT_CARE, "bcm43752a1_pcie_ag", ""}, + {BCM43752_CHIP_ID, 2, DONT_CARE, "bcm43752a2_pcie_ag", ""}, #endif #ifdef BCMDBUS - {BCM43143_CHIP_ID, 2, DONT_CARE, FALSE, "bcm43143b0", ""}, - {BCM43242_CHIP_ID, 1, DONT_CARE, FALSE, "bcm43242a1_ag", ""}, - {BCM43569_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4358u_ag", "ap62x8"}, + {BCM43143_CHIP_ID, 2, DONT_CARE, "bcm43143b0", ""}, + {BCM43242_CHIP_ID, 1, DONT_CARE, "bcm43242a1_ag", ""}, + {BCM43569_CHIP_ID, 2, DONT_CARE, "bcm4358u_ag", "ap62x8"}, #endif }; @@ -115,15 +139,15 @@ dhd_conf_free_mac_list(wl_mac_list_ctrl_t *mac_list) { int i; - CONFIG_TRACE(("%s called\n", __FUNCTION__)); + CONFIG_TRACE("called\n"); if (mac_list->m_mac_list_head) { for (i=0; icount; i++) { if (mac_list->m_mac_list_head[i].mac) { - CONFIG_TRACE(("%s Free mac %p\n", __FUNCTION__, mac_list->m_mac_list_head[i].mac)); + CONFIG_TRACE("Free mac %p\n", mac_list->m_mac_list_head[i].mac); kfree(mac_list->m_mac_list_head[i].mac); } } - CONFIG_TRACE(("%s Free m_mac_list_head %p\n", __FUNCTION__, mac_list->m_mac_list_head)); + CONFIG_TRACE("Free m_mac_list_head %p\n", mac_list->m_mac_list_head); kfree(mac_list->m_mac_list_head); } mac_list->count = 0; @@ -132,10 +156,10 @@ dhd_conf_free_mac_list(wl_mac_list_ctrl_t *mac_list) void dhd_conf_free_chip_nv_path_list(wl_chip_nv_path_list_ctrl_t *chip_nv_list) { - CONFIG_TRACE(("%s called\n", __FUNCTION__)); + CONFIG_TRACE("called\n"); if (chip_nv_list->m_chip_nv_path_head) { - CONFIG_TRACE(("%s Free %p\n", __FUNCTION__, chip_nv_list->m_chip_nv_path_head)); + CONFIG_TRACE("Free %p\n", chip_nv_list->m_chip_nv_path_head); kfree(chip_nv_list->m_chip_nv_path_head); } chip_nv_list->count = 0; @@ -148,7 +172,7 @@ dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, struct si_pub *sih) uint32 gpiocontrol, addr; if (CHIPID(sih->chip) == BCM43362_CHIP_ID) { - printf("%s: Enable HW OOB for 43362\n", __FUNCTION__); + CONFIG_MSG("Enable HW OOB for 43362\n"); addr = SI_ENUM_BASE(sih) + 
OFFSETOF(chipcregs_t, gpiocontrol); gpiocontrol = bcmsdh_reg_read(sdh, addr, 4); gpiocontrol |= 0x2; @@ -189,12 +213,12 @@ dhd_conf_set_blksize(bcmsdh_info_t *sdh) bcmsdh_iovar_op(sdh, "sd_blocksize", &fn, sizeof(int32), &cur_blksize, sizeof(int32), FALSE); if (cur_blksize != blksize) { - printf("%s: fn=%d, blksize=%d, cur_blksize=%d\n", __FUNCTION__, + CONFIG_MSG("fn=%d, blksize=%d, cur_blksize=%d\n", fn, blksize, cur_blksize); blksize |= (fn<<16); if (bcmsdh_iovar_op(sdh, "sd_blocksize", NULL, 0, &blksize, sizeof(blksize), TRUE) != BCME_OK) { - DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize")); + CONFIG_ERROR("fail on get sd_blocksize"); err = -1; } } @@ -213,13 +237,13 @@ dhd_conf_get_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih, uint8 *mac) uint8 *cis; if (!(cis = MALLOC(dhd->osh, SBSDIO_CIS_SIZE_LIMIT))) { - CONFIG_ERROR(("%s: cis malloc failed\n", __FUNCTION__)); + CONFIG_ERROR("cis malloc failed\n"); return err; } bzero(cis, SBSDIO_CIS_SIZE_LIMIT); if ((err = bcmsdh_cis_read(sdh, 0, cis, SBSDIO_CIS_SIZE_LIMIT))) { - CONFIG_ERROR(("%s: cis read err %d\n", __FUNCTION__, err)); + CONFIG_ERROR("cis read err %d\n", err); MFREE(dhd->osh, cis, SBSDIO_CIS_SIZE_LIMIT); return err; } @@ -242,8 +266,8 @@ dhd_conf_get_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih, uint8 *mac) if (tpl_link == 0xff) break; if (config_msg_level & CONFIG_TRACE_LEVEL) { - printf("%s: tpl_code=0x%02x, tpl_link=0x%02x, tag=0x%02x\n", - __FUNCTION__, tpl_code, tpl_link, *ptr); + CONFIG_MSG("tpl_code=0x%02x, tpl_link=0x%02x, tag=0x%02x\n", + tpl_code, tpl_link, *ptr); printk("%s: value:", __FUNCTION__); for (i=0; i= mac_range[j].nic_start && nic <= mac_range[j].nic_end) { strcpy(name_ptr, mac_list[i].name); - printf("%s: matched oui=0x%06X, nic=0x%06X\n", - __FUNCTION__, oui, nic); - printf("%s: fw_path=%s\n", __FUNCTION__, fw_path); + CONFIG_MSG("matched oui=0x%06X, nic=0x%06X\n", oui, nic); + CONFIG_MSG("fw_path=%s\n", fw_path); return; } } @@ -366,7 +390,8 @@ 
dhd_conf_set_fw_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih, char } void -dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih, char *nv_path) +dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, + si_t *sih, char *nv_path) { int i, j; uint8 mac[6]={0}; @@ -382,7 +407,7 @@ dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih, char return; if (dhd_conf_get_mac(dhd, sdh, sih, mac)) { - CONFIG_ERROR(("%s: Can not read MAC address\n", __FUNCTION__)); + CONFIG_ERROR("Can not read MAC address\n"); return; } oui = (mac[0] << 16) | (mac[1] << 8) | (mac[2]); @@ -403,9 +428,8 @@ dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih, char if (oui == mac_range[j].oui) { if (nic >= mac_range[j].nic_start && nic <= mac_range[j].nic_end) { strcpy(pnv_name, mac_list[i].name); - printf("%s: matched oui=0x%06X, nic=0x%06X\n", - __FUNCTION__, oui, nic); - printf("%s: nv_path=%s\n", __FUNCTION__, nv_path); + CONFIG_MSG("matched oui=0x%06X, nic=0x%06X\n", oui, nic); + CONFIG_MSG("nv_path=%s\n", nv_path); return; } } @@ -419,17 +443,17 @@ dhd_conf_free_country_list(conf_country_list_t *country_list) { int i; - CONFIG_TRACE(("%s called\n", __FUNCTION__)); - for (i=0; icount; i++) { + CONFIG_TRACE("called\n"); + for (i=0; icspec[i]) { - CONFIG_TRACE(("%s Free cspec %p\n", __FUNCTION__, country_list->cspec[i])); + CONFIG_TRACE("Free cspec %p\n", country_list->cspec[i]); kfree(country_list->cspec[i]); + country_list->cspec[i] = NULL; } } - country_list->count = 0; } -void +int dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path) { int fw_type, ag_type; @@ -446,8 +470,8 @@ dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path) if (fw_path[0] == '\0') #endif { - printf("firmware path is null\n"); - return; + CONFIG_MSG("firmware path is null\n"); + return 0; } } #ifndef FW_PATH_AUTO_SELECT @@ -482,10 +506,10 @@ dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path) else fw_type = 
FW_TYPE_STA; - for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) { + for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) { const cihp_name_map_t* row = &chip_name_map[i]; if (row->chip == chip && row->chiprev == chiprev && - (row->ag_type == ag_type || row->ag_type == DONT_CARE)) { + (row->ag_type == ag_type || row->ag_type == DONT_CARE)) { strcpy(name_ptr, "fw_"); strcat(fw_path, row->chip_name); #ifdef BCMUSBDEV_COMPOSITE @@ -508,11 +532,12 @@ dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path) dhd->conf->fw_type = fw_type; - CONFIG_TRACE(("%s: firmware_path=%s\n", __FUNCTION__, fw_path)); + CONFIG_TRACE("firmware_path=%s\n", fw_path); + return ag_type; } void -dhd_conf_set_clm_name_by_chip(dhd_pub_t *dhd, char *clm_path) +dhd_conf_set_clm_name_by_chip(dhd_pub_t *dhd, char *clm_path, int ag_type) { uint chip, chiprev; int i; @@ -522,7 +547,7 @@ dhd_conf_set_clm_name_by_chip(dhd_pub_t *dhd, char *clm_path) chiprev = dhd->conf->chiprev; if (clm_path[0] == '\0') { - printf("clm path is null\n"); + CONFIG_MSG("clm path is null\n"); return; } @@ -537,20 +562,21 @@ dhd_conf_set_clm_name_by_chip(dhd_pub_t *dhd, char *clm_path) } name_ptr = &clm_path[i]; - for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) { + for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) { const cihp_name_map_t* row = &chip_name_map[i]; - if (row->chip == chip && row->chiprev == chiprev && row->clm) { + if (row->chip == chip && row->chiprev == chiprev && + (row->ag_type == ag_type || row->ag_type == DONT_CARE)) { strcpy(name_ptr, "clm_"); strcat(clm_path, row->chip_name); strcat(clm_path, ".blob"); } } - CONFIG_TRACE(("%s: clm_path=%s\n", __FUNCTION__, clm_path)); + CONFIG_TRACE("clm_path=%s\n", clm_path); } void -dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path) +dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path, int ag_type) { uint chip, chiprev; int i; @@ -565,7 +591,7 @@ 
dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path) if (nv_path[0] == '\0') #endif { - printf("nvram path is null\n"); + CONFIG_MSG("nvram path is null\n"); return; } } @@ -581,9 +607,11 @@ dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path) } name_ptr = &nv_path[i]; - for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) { + for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) { const cihp_name_map_t* row = &chip_name_map[i]; - if (row->chip == chip && row->chiprev == chiprev && strlen(row->module_name)) { + if (row->chip == chip && row->chiprev == chiprev && + (row->ag_type == ag_type || row->ag_type == DONT_CARE) && + strlen(row->module_name)) { strcpy(name_ptr, "nvram_"); strcat(name_ptr, row->module_name); #ifdef BCMUSBDEV_COMPOSITE @@ -601,16 +629,16 @@ dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path) } } - CONFIG_TRACE(("%s: nvram_path=%s\n", __FUNCTION__, nv_path)); + CONFIG_TRACE("nvram_path=%s\n", nv_path); } void -dhd_conf_set_path(dhd_pub_t *dhd, char *dst_name, char *dst_path, char *src_path) +dhd_conf_copy_path(dhd_pub_t *dhd, char *dst_name, char *dst_path, char *src_path) { int i; if (src_path[0] == '\0') { - printf("src_path is null\n"); + CONFIG_MSG("src_path is null\n"); return; } else strcpy(dst_path, src_path); @@ -626,7 +654,7 @@ dhd_conf_set_path(dhd_pub_t *dhd, char *dst_name, char *dst_path, char *src_path } strcpy(&dst_path[i], dst_name); - CONFIG_TRACE(("%s: dst_path=%s\n", __FUNCTION__, dst_path)); + CONFIG_TRACE("dst_path=%s\n", dst_path); } #ifdef CONFIG_PATH_AUTO_SELECT @@ -641,7 +669,7 @@ dhd_conf_set_conf_name_by_chip(dhd_pub_t *dhd, char *conf_path) chiprev = dhd->conf->chiprev; if (conf_path[0] == '\0') { - printf("config path is null\n"); + CONFIG_MSG("config path is null\n"); return; } @@ -665,10 +693,45 @@ dhd_conf_set_conf_name_by_chip(dhd_pub_t *dhd, char *conf_path) } } - CONFIG_TRACE(("%s: config_path=%s\n", __FUNCTION__, conf_path)); + 
CONFIG_TRACE("config_path=%s\n", conf_path); } #endif +void +dhd_conf_set_path_params(dhd_pub_t *dhd, void *sdh, void *sih, + char *fw_path, char *nv_path) +{ + int ag_type; + + /* External conf takes precedence if specified */ + dhd_conf_preinit(dhd); + + if (dhd->conf_path[0] == '\0') { + dhd_conf_copy_path(dhd, "config.txt", dhd->conf_path, nv_path); + } + if (dhd->clm_path[0] == '\0') { + dhd_conf_copy_path(dhd, "clm.blob", dhd->clm_path, fw_path); + } +#ifdef CONFIG_PATH_AUTO_SELECT + dhd_conf_set_conf_name_by_chip(dhd, dhd->conf_path); +#endif + + dhd_conf_read_config(dhd, dhd->conf_path); + + ag_type = dhd_conf_set_fw_name_by_chip(dhd, fw_path); + dhd_conf_set_nv_name_by_chip(dhd, nv_path, ag_type); + dhd_conf_set_clm_name_by_chip(dhd, dhd->clm_path, ag_type); +#ifdef BCMSDIO + dhd_conf_set_fw_name_by_mac(dhd, (bcmsdh_info_t *)sdh, (si_t *)sih, fw_path); + dhd_conf_set_nv_name_by_mac(dhd, (bcmsdh_info_t *)sdh, (si_t *)sih, nv_path); +#endif + + CONFIG_MSG("Final fw_path=%s\n", fw_path); + CONFIG_MSG("Final nv_path=%s\n", nv_path); + CONFIG_MSG("Final clm_path=%s\n", dhd->clm_path); + CONFIG_MSG("Final conf_path=%s\n", dhd->conf_path); +} + int dhd_conf_set_intiovar(dhd_pub_t *dhd, uint cmd, char *name, int val, int def, bool down) @@ -679,17 +742,17 @@ dhd_conf_set_intiovar(dhd_pub_t *dhd, uint cmd, char *name, int val, if (val >= def) { if (down) { if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0) - CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, ret)); + CONFIG_ERROR("WLC_DOWN setting failed %d\n", ret); } if (cmd == WLC_SET_VAR) { - CONFIG_TRACE(("%s: set %s %d\n", __FUNCTION__, name, val)); + CONFIG_TRACE("set %s %d\n", name, val); bcm_mkiovar(name, (char *)&val, sizeof(val), iovbuf, sizeof(iovbuf)); if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) - CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret)); + CONFIG_ERROR("%s setting failed %d\n", name, ret); } else { - 
CONFIG_TRACE(("%s: set %s %d %d\n", __FUNCTION__, name, cmd, val)); + CONFIG_TRACE("set %s %d %d\n", name, cmd, val); if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, &val, sizeof(val), TRUE, 0)) < 0) - CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret)); + CONFIG_ERROR("%s setting failed %d\n", name, ret); } } @@ -697,31 +760,37 @@ dhd_conf_set_intiovar(dhd_pub_t *dhd, uint cmd, char *name, int val, } int -dhd_conf_set_bufiovar(dhd_pub_t *dhd, uint cmd, char *name, char *buf, - int len, bool down) +dhd_conf_set_bufiovar(dhd_pub_t *dhd, int ifidx, uint cmd, char *name, + char *buf, int len, bool down) { char iovbuf[WLC_IOCTL_SMLEN]; + s32 iovar_len; int ret = -1; if (down) { - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0) - CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, ret)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, ifidx)) < 0) + CONFIG_ERROR("WLC_DOWN setting failed %d\n", ret); } if (cmd == WLC_SET_VAR) { - bcm_mkiovar(name, buf, len, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) - CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret)); + iovar_len = bcm_mkiovar(name, buf, len, iovbuf, sizeof(iovbuf)); + if (iovar_len > 0) + ret = dhd_wl_ioctl_cmd(dhd, cmd, iovbuf, iovar_len, TRUE, ifidx); + else + ret = BCME_BUFTOOSHORT; + if (ret < 0) + CONFIG_ERROR("%s setting failed %d, len=%d\n", name, ret, len); } else { - if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, buf, len, TRUE, 0)) < 0) - CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret)); + if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, buf, len, TRUE, ifidx)) < 0) + CONFIG_ERROR("%s setting failed %d\n", name, ret); } return ret; } int -dhd_conf_get_iovar(dhd_pub_t *dhd, int cmd, char *name, char *buf, int len, int ifidx) +dhd_conf_get_iovar(dhd_pub_t *dhd, int ifidx, int cmd, char *name, + char *buf, int len) { char iovbuf[WLC_IOCTL_SMLEN]; int ret = -1; @@ -732,21 +801,21 @@ 
dhd_conf_get_iovar(dhd_pub_t *dhd, int cmd, char *name, char *buf, int len, int if (!ret) { memcpy(buf, iovbuf, len); } else { - CONFIG_ERROR(("%s: get iovar %s failed %d\n", __FUNCTION__, name, ret)); + CONFIG_ERROR("get iovar %s failed %d\n", name, ret); } } else { - CONFIG_ERROR(("%s: mkiovar %s failed\n", __FUNCTION__, name)); + CONFIG_ERROR("mkiovar %s failed\n", name); } } else { ret = dhd_wl_ioctl_cmd(dhd, cmd, buf, len, FALSE, 0); if (ret < 0) - CONFIG_ERROR(("%s: get iovar %s failed %d\n", __FUNCTION__, name, ret)); + CONFIG_ERROR("get iovar %s failed %d\n", name, ret); } return ret; } -uint +int dhd_conf_get_band(dhd_pub_t *dhd) { int band = -1; @@ -754,7 +823,7 @@ dhd_conf_get_band(dhd_pub_t *dhd) if (dhd && dhd->conf) band = dhd->conf->band; else - CONFIG_ERROR(("%s: dhd or conf is NULL\n", __FUNCTION__)); + CONFIG_ERROR("dhd or conf is NULL\n"); return band; } @@ -766,8 +835,9 @@ dhd_conf_get_country(dhd_pub_t *dhd, wl_country_t *cspec) memset(cspec, 0, sizeof(wl_country_t)); bcm_mkiovar("country", NULL, 0, (char*)cspec, sizeof(wl_country_t)); - if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, cspec, sizeof(wl_country_t), FALSE, 0)) < 0) - CONFIG_ERROR(("%s: country code getting failed %d\n", __FUNCTION__, bcmerror)); + if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, cspec, sizeof(wl_country_t), + FALSE, 0)) < 0) + CONFIG_ERROR("country code getting failed %d\n", bcmerror); return bcmerror; } @@ -779,16 +849,25 @@ dhd_conf_map_country_list(dhd_pub_t *dhd, wl_country_t *cspec) struct dhd_conf *conf = dhd->conf; conf_country_list_t *country_list = &conf->country_list; - for (i = 0; i < country_list->count; i++) { - if (!strncmp(cspec->country_abbrev, country_list->cspec[i]->country_abbrev, 2)) { - memcpy(cspec->ccode, country_list->cspec[i]->ccode, WLC_CNTRY_BUF_SZ); - cspec->rev = country_list->cspec[i]->rev; - bcmerror = 0; + for (i=0; icspec[i] != NULL) { + if (!strncmp("**", country_list->cspec[i]->country_abbrev, 2)) { + memcpy(cspec->ccode, 
country_list->cspec[i]->ccode, WLC_CNTRY_BUF_SZ); + cspec->rev = country_list->cspec[i]->rev; + bcmerror = 0; + break; + } else if (!strncmp(cspec->country_abbrev, + country_list->cspec[i]->country_abbrev, 2)) { + memcpy(cspec->ccode, country_list->cspec[i]->ccode, WLC_CNTRY_BUF_SZ); + cspec->rev = country_list->cspec[i]->rev; + bcmerror = 0; + break; + } } } if (!bcmerror) - printf("%s: %s/%d\n", __FUNCTION__, cspec->ccode, cspec->rev); + CONFIG_MSG("%s/%d\n", cspec->ccode, cspec->rev); return bcmerror; } @@ -800,11 +879,12 @@ dhd_conf_set_country(dhd_pub_t *dhd, wl_country_t *cspec) memset(&dhd->dhd_cspec, 0, sizeof(wl_country_t)); - printf("%s: set country %s, revision %d\n", __FUNCTION__, cspec->ccode, cspec->rev); - bcmerror = dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "country", (char *)cspec, + CONFIG_MSG("set country %s, revision %d\n", cspec->ccode, cspec->rev); + bcmerror = dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "country", (char *)cspec, sizeof(wl_country_t), FALSE); dhd_conf_get_country(dhd, cspec); - printf("Country code: %s (%s/%d)\n", cspec->country_abbrev, cspec->ccode, cspec->rev); + CONFIG_MSG("Country code: %s (%s/%d)\n", + cspec->country_abbrev, cspec->ccode, cspec->rev); return bcmerror; } @@ -813,7 +893,7 @@ int dhd_conf_fix_country(dhd_pub_t *dhd) { int bcmerror = -1; - uint band; + int band; wl_uint32_list_t *list; u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)]; wl_country_t cspec; @@ -825,16 +905,17 @@ dhd_conf_fix_country(dhd_pub_t *dhd) memset(valid_chan_list, 0, sizeof(valid_chan_list)); list = (wl_uint32_list_t *)(void *) valid_chan_list; list->count = htod32(WL_NUMCHANNELS); - if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, valid_chan_list, sizeof(valid_chan_list), FALSE, 0)) < 0) { - CONFIG_ERROR(("%s: get channels failed with %d\n", __FUNCTION__, bcmerror)); + if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, valid_chan_list, + sizeof(valid_chan_list), FALSE, 0)) < 0) { + CONFIG_ERROR("get channels 
failed with %d\n", bcmerror); } band = dhd_conf_get_band(dhd); - if (bcmerror || ((band==WLC_BAND_AUTO || band==WLC_BAND_2G) && + if (bcmerror || ((band==WLC_BAND_AUTO || band==WLC_BAND_2G || band==-1) && dtoh32(list->count)<11)) { - CONFIG_ERROR(("%s: bcmerror=%d, # of channels %d\n", - __FUNCTION__, bcmerror, dtoh32(list->count))); + CONFIG_ERROR("bcmerror=%d, # of channels %d\n", + bcmerror, dtoh32(list->count)); dhd_conf_map_country_list(dhd, &dhd->conf->cspec); if ((bcmerror = dhd_conf_set_country(dhd, &dhd->conf->cspec)) < 0) { strcpy(cspec.country_abbrev, "US"); @@ -863,7 +944,7 @@ dhd_conf_match_channel(dhd_pub_t *dhd, uint32 channel) } } else { match = true; - CONFIG_ERROR(("%s: dhd or conf is NULL\n", __FUNCTION__)); + CONFIG_ERROR("dhd or conf is NULL\n"); } return match; @@ -879,19 +960,20 @@ dhd_conf_set_roam(dhd_pub_t *dhd) dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "roam_off", dhd->conf->roam_off, 0, FALSE); if (!conf->roam_off || !conf->roam_off_suspend) { - printf("%s: set roam_trigger %d\n", __FUNCTION__, conf->roam_trigger[0]); - dhd_conf_set_bufiovar(dhd, WLC_SET_ROAM_TRIGGER, "WLC_SET_ROAM_TRIGGER", - (char *)conf->roam_trigger, sizeof(conf->roam_trigger), FALSE); + CONFIG_MSG("set roam_trigger %d\n", conf->roam_trigger[0]); + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_ROAM_TRIGGER, "WLC_SET_ROAM_TRIGGER", + (char *)conf->roam_trigger, sizeof(conf->roam_trigger), FALSE); - printf("%s: set roam_scan_period %d\n", __FUNCTION__, conf->roam_scan_period[0]); - dhd_conf_set_bufiovar(dhd, WLC_SET_ROAM_SCAN_PERIOD, "WLC_SET_ROAM_SCAN_PERIOD", - (char *)conf->roam_scan_period, sizeof(conf->roam_scan_period), FALSE); + CONFIG_MSG("set roam_scan_period %d\n", conf->roam_scan_period[0]); + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_ROAM_SCAN_PERIOD, "WLC_SET_ROAM_SCAN_PERIOD", + (char *)conf->roam_scan_period, sizeof(conf->roam_scan_period), FALSE); - printf("%s: set roam_delta %d\n", __FUNCTION__, conf->roam_delta[0]); - dhd_conf_set_bufiovar(dhd, WLC_SET_ROAM_DELTA, 
"WLC_SET_ROAM_DELTA", - (char *)conf->roam_delta, sizeof(conf->roam_delta), FALSE); + CONFIG_MSG("set roam_delta %d\n", conf->roam_delta[0]); + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_ROAM_DELTA, "WLC_SET_ROAM_DELTA", + (char *)conf->roam_delta, sizeof(conf->roam_delta), FALSE); - dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "fullroamperiod", dhd->conf->fullroamperiod, 1, FALSE); + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "fullroamperiod", + dhd->conf->fullroamperiod, 1, FALSE); } return bcmerror; @@ -908,8 +990,8 @@ dhd_conf_add_to_eventbuffer(struct eventmsg_buf *ev, u16 event, bool set) ev->event[ev->num].set = set; ev->num++; } else { - CONFIG_ERROR(("evenbuffer doesn't support > %u events. Update" - " the define MAX_EVENT_BUF_NUM \n", MAX_EVENT_BUF_NUM)); + CONFIG_ERROR("evenbuffer doesn't support > %u events. Update" + " the define MAX_EVENT_BUF_NUM \n", MAX_EVENT_BUF_NUM); ASSERT(0); } } @@ -924,9 +1006,10 @@ dhd_conf_apply_eventbuffer(dhd_pub_t *dhd, eventmsg_buf_t *ev) return -EINVAL; /* Read event_msgs mask */ - ret = dhd_conf_get_iovar(dhd, WLC_GET_VAR, "event_msgs", eventmask, sizeof(eventmask), 0); + ret = dhd_conf_get_iovar(dhd, 0, WLC_GET_VAR, "event_msgs", eventmask, + sizeof(eventmask)); if (unlikely(ret)) { - CONFIG_ERROR(("Get event_msgs error (%d)\n", ret)); + CONFIG_ERROR("Get event_msgs error (%d)\n", ret); goto exit; } @@ -939,10 +1022,10 @@ dhd_conf_apply_eventbuffer(dhd_pub_t *dhd, eventmsg_buf_t *ev) } /* Write updated Event mask */ - ret = dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "event_msgs", eventmask, + ret = dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "event_msgs", eventmask, sizeof(eventmask), FALSE); if (unlikely(ret)) { - CONFIG_ERROR(("Set event_msgs error (%d)\n", ret)); + CONFIG_ERROR("Set event_msgs error (%d)\n", ret); } exit: @@ -971,7 +1054,7 @@ dhd_conf_enable_roam_offload(dhd_pub_t *dhd, int enable) dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_ROAM, !enable); err = dhd_conf_apply_eventbuffer(dhd, &ev_buf); - CONFIG_TRACE(("%s: 
roam_offload %d\n", __FUNCTION__, enable)); + CONFIG_TRACE("roam_offload %d\n", enable); return err; } @@ -988,21 +1071,23 @@ dhd_conf_set_bw_cap(dhd_pub_t *dhd) memset(¶m, 0, sizeof(param)); param.band = WLC_BAND_2G; param.bw_cap = (uint)dhd->conf->bw_cap[0]; - printf("%s: set bw_cap 2g 0x%x\n", __FUNCTION__, param.bw_cap); - dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "bw_cap", (char *)¶m, sizeof(param), TRUE); + CONFIG_MSG("set bw_cap 2g 0x%x\n", param.bw_cap); + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "bw_cap", (char *)¶m, + sizeof(param), FALSE); } if (dhd->conf->bw_cap[1] >= 0) { memset(¶m, 0, sizeof(param)); param.band = WLC_BAND_5G; param.bw_cap = (uint)dhd->conf->bw_cap[1]; - printf("%s: set bw_cap 5g 0x%x\n", __FUNCTION__, param.bw_cap); - dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "bw_cap", (char *)¶m, sizeof(param), TRUE); + CONFIG_MSG("set bw_cap 5g 0x%x\n", param.bw_cap); + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "bw_cap", (char *)¶m, + sizeof(param), FALSE); } } void -dhd_conf_get_wme(dhd_pub_t *dhd, int mode, edcf_acparam_t *acp) +dhd_conf_get_wme(dhd_pub_t *dhd, int ifidx, int mode, edcf_acparam_t *acp) { int bcmerror = -1; char iovbuf[WLC_IOCTL_SMLEN]; @@ -1018,42 +1103,40 @@ dhd_conf_get_wme(dhd_pub_t *dhd, int mode, edcf_acparam_t *acp) bcm_mkiovar("wme_ac_sta", NULL, 0, iovbuf, sizeof(iovbuf)); else bcm_mkiovar("wme_ac_ap", NULL, 0, iovbuf, sizeof(iovbuf)); - if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) { - CONFIG_ERROR(("%s: wme_ac_sta getting failed %d\n", __FUNCTION__, bcmerror)); + if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), + FALSE, ifidx)) < 0) { + CONFIG_ERROR("wme_ac_sta getting failed %d\n", bcmerror); return; } memcpy((char*)acp, iovbuf, sizeof(edcf_acparam_t)*AC_COUNT); acparam = &acp[AC_BK]; - CONFIG_TRACE(("%s: BK: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", - __FUNCTION__, + CONFIG_TRACE("BK: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", 
acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK, acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, - acparam->TXOP)); + acparam->TXOP); acparam = &acp[AC_BE]; - CONFIG_TRACE(("%s: BE: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", - __FUNCTION__, + CONFIG_TRACE("BE: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK, acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, - acparam->TXOP)); + acparam->TXOP); acparam = &acp[AC_VI]; - CONFIG_TRACE(("%s: VI: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", - __FUNCTION__, + CONFIG_TRACE("VI: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK, acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, - acparam->TXOP)); + acparam->TXOP); acparam = &acp[AC_VO]; - CONFIG_TRACE(("%s: VO: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", - __FUNCTION__, + CONFIG_TRACE("VO: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK, acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, - acparam->TXOP)); + acparam->TXOP); return; } void -dhd_conf_update_wme(dhd_pub_t *dhd, int mode, edcf_acparam_t *acparam_cur, int aci) +dhd_conf_update_wme(dhd_pub_t *dhd, int ifidx, int mode, + edcf_acparam_t *acparam_cur, int aci) { int aifsn, ecwmin, ecwmax, txop; edcf_acparam_t *acp; @@ -1092,9 +1175,8 @@ dhd_conf_update_wme(dhd_pub_t *dhd, int mode, edcf_acparam_t *acparam_cur, int a acp->ECW = ((acp->ECW & EDCF_ECWMAX_MASK) | (ecwmin & EDCF_ECWMIN_MASK)); acp->TXOP = txop; - printf("%s: wme_ac %s aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", - __FUNCTION__, mode?"ap":"sta", - acp->ACI, acp->ACI&EDCF_AIFSN_MASK, + CONFIG_MSG("wme_ac %s aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", + mode?"ap":"sta", acp->ACI, acp->ACI&EDCF_AIFSN_MASK, acp->ECW&EDCF_ECWMIN_MASK, 
(acp->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, acp->TXOP); @@ -1104,36 +1186,38 @@ dhd_conf_update_wme(dhd_pub_t *dhd, int mode, edcf_acparam_t *acparam_cur, int a * NOTE: only one of the four ACs can be set at a time. */ if (mode == 0) - dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "wme_ac_sta", (char *)acp, sizeof(edcf_acparam_t), FALSE); + dhd_conf_set_bufiovar(dhd, ifidx, WLC_SET_VAR, "wme_ac_sta", (char *)acp, + sizeof(edcf_acparam_t), FALSE); else - dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "wme_ac_ap", (char *)acp, sizeof(edcf_acparam_t), FALSE); + dhd_conf_set_bufiovar(dhd, ifidx, WLC_SET_VAR, "wme_ac_ap", (char *)acp, + sizeof(edcf_acparam_t), FALSE); } void -dhd_conf_set_wme(dhd_pub_t *dhd, int mode) +dhd_conf_set_wme(dhd_pub_t *dhd, int ifidx, int mode) { edcf_acparam_t acparam_cur[AC_COUNT]; if (dhd && dhd->conf) { if (!dhd->conf->force_wme_ac) { - CONFIG_TRACE(("%s: force_wme_ac is not enabled %d\n", - __FUNCTION__, dhd->conf->force_wme_ac)); + CONFIG_TRACE("force_wme_ac is not enabled %d\n", + dhd->conf->force_wme_ac); return; } - CONFIG_TRACE(("%s: Before change:\n", __FUNCTION__)); - dhd_conf_get_wme(dhd, mode, acparam_cur); + CONFIG_TRACE("Before change:\n"); + dhd_conf_get_wme(dhd, ifidx, mode, acparam_cur); - dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_BK], AC_BK); - dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_BE], AC_BE); - dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_VI], AC_VI); - dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_VO], AC_VO); + dhd_conf_update_wme(dhd, ifidx, mode, &acparam_cur[AC_BK], AC_BK); + dhd_conf_update_wme(dhd, ifidx, mode, &acparam_cur[AC_BE], AC_BE); + dhd_conf_update_wme(dhd, ifidx, mode, &acparam_cur[AC_VI], AC_VI); + dhd_conf_update_wme(dhd, ifidx, mode, &acparam_cur[AC_VO], AC_VO); - CONFIG_TRACE(("%s: After change:\n", __FUNCTION__)); - dhd_conf_get_wme(dhd, mode, acparam_cur); + CONFIG_TRACE("After change:\n"); + dhd_conf_get_wme(dhd, ifidx, mode, acparam_cur); } else { - CONFIG_ERROR(("%s: dhd or conf is 
NULL\n", __FUNCTION__)); + CONFIG_ERROR("dhd or conf is NULL\n"); } return; @@ -1170,13 +1254,13 @@ dhd_conf_add_pkt_filter(dhd_pub_t *dhd) /* * Filter in less pkt: ARP(0x0806, ID is 105), BRCM(0x886C), 802.1X(0x888E) * 1) dhd_master_mode=1 - * 2) pkt_filter_del=100, 102, 103, 104, 105 + * 2) pkt_filter_delete=100, 102, 103, 104, 105, 106, 107 * 3) pkt_filter_add=131 0 0 12 0xFFFF 0x886C, 132 0 0 12 0xFFFF 0x888E * 4) magic_pkt_filter_add=141 0 1 12 */ for(i=0; iconf->pkt_filter_add.count; i++) { dhd->pktfilter[i+dhd->pktfilter_count] = dhd->conf->pkt_filter_add.filter[i]; - printf("%s: %s\n", __FUNCTION__, dhd->pktfilter[i+dhd->pktfilter_count]); + CONFIG_MSG("%s\n", dhd->pktfilter[i+dhd->pktfilter_count]); } dhd->pktfilter_count += i; @@ -1203,7 +1287,7 @@ dhd_conf_del_pkt_filter(dhd_pub_t *dhd, uint32 id) if (dhd && dhd->conf) { for (i=0; iconf->pkt_filter_del.count; i++) { if (id == dhd->conf->pkt_filter_del.id[i]) { - printf("%s: %d\n", __FUNCTION__, dhd->conf->pkt_filter_del.id[i]); + CONFIG_MSG("%d\n", dhd->conf->pkt_filter_del.id[i]); return true; } } @@ -1249,83 +1333,535 @@ dhd_conf_get_pm(dhd_pub_t *dhd) return -1; } +int +dhd_conf_check_hostsleep(dhd_pub_t *dhd, int cmd, void *buf, int len, + int *hostsleep_set, int *hostsleep_val, int *ret) +{ + if (dhd->conf->insuspend & (NO_TXCTL_IN_SUSPEND | WOWL_IN_SUSPEND)) { + if (cmd == WLC_SET_VAR) { + char *psleep = NULL; + psleep = strstr(buf, "hostsleep"); + if (psleep) { + *hostsleep_set = 1; + memcpy(hostsleep_val, psleep+strlen("hostsleep")+1, sizeof(int)); + } + } + if (dhd->hostsleep && (!*hostsleep_set || *hostsleep_val)) { + CONFIG_TRACE("block all none hostsleep clr cmd\n"); + *ret = BCME_EPERM; + goto exit; + } else if (*hostsleep_set && *hostsleep_val) { + CONFIG_TRACE("hostsleep %d => %d\n", dhd->hostsleep, *hostsleep_val); + dhd->hostsleep = *hostsleep_val; + if (dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) { + dhd_txflowcontrol(dhd, ALL_INTERFACES, ON); + } + if (dhd->hostsleep == 2) { + *ret = 
0; + goto exit; + } + } else if (dhd->hostsleep == 2 && !*hostsleep_val) { + CONFIG_TRACE("hostsleep %d => %d\n", dhd->hostsleep, *hostsleep_val); + dhd->hostsleep = *hostsleep_val; + if (dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) { + dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF); + } + *ret = 0; + goto exit; + } + } + + return 0; +exit: + return -1; +} + +void +dhd_conf_get_hostsleep(dhd_pub_t *dhd, + int hostsleep_set, int hostsleep_val, int ret) +{ + if (dhd->conf->insuspend & (NO_TXCTL_IN_SUSPEND | WOWL_IN_SUSPEND)) { + if (hostsleep_set) { + if (hostsleep_val && ret) { + CONFIG_TRACE("reset hostsleep %d => 0\n", dhd->hostsleep); + dhd->hostsleep = 0; + if (dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) { + dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF); + } + } else if (!hostsleep_val && !ret) { + CONFIG_TRACE("set hostsleep %d => 0\n", dhd->hostsleep); + dhd->hostsleep = 0; + if (dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) { + dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF); + } + } + } + } +} + +#ifdef WL_EXT_WOWL +#define WL_WOWL_TCPFIN (1 << 26) +typedef struct wl_wowl_pattern2 { + char cmd[4]; + wl_wowl_pattern_t wowl_pattern; +} wl_wowl_pattern2_t; +static int +dhd_conf_wowl_pattern(dhd_pub_t *dhd, bool add, char *data) +{ + uint buf_len = 0; + int id, type, polarity, offset; + char cmd[4]="\0", mask[128]="\0", pattern[128]="\0", mask_tmp[128]="\0", *pmask_tmp; + uint32 masksize, patternsize, pad_len = 0; + wl_wowl_pattern2_t *wowl_pattern2 = NULL; + char *mask_and_pattern; + int ret = 0, i, j, v; + + if (data) { + if (add) + strcpy(cmd, "add"); + else + strcpy(cmd, "clr"); + if (!strcmp(cmd, "clr")) { + CONFIG_TRACE("wowl_pattern clr\n"); + ret = dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "wowl_pattern", cmd, + sizeof(cmd), FALSE); + goto exit; + } + sscanf(data, "%d %d %d %d %s %s", &id, &type, &polarity, &offset, + mask_tmp, pattern); + masksize = strlen(mask_tmp) -2; + CONFIG_TRACE("0 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize); + + // add pading + if 
(masksize % 16) + pad_len = (16 - masksize % 16); + for (i=0; icmd, cmd, sizeof(cmd)); + wowl_pattern2->wowl_pattern.id = id; + wowl_pattern2->wowl_pattern.type = 0; + wowl_pattern2->wowl_pattern.offset = offset; + mask_and_pattern = (char*)wowl_pattern2 + sizeof(wl_wowl_pattern2_t); + + wowl_pattern2->wowl_pattern.masksize = masksize; + ret = wl_pattern_atoh(mask, mask_and_pattern); + if (ret == -1) { + CONFIG_ERROR("rejecting mask=%s\n", mask); + goto exit; + } + + mask_and_pattern += wowl_pattern2->wowl_pattern.masksize; + wowl_pattern2->wowl_pattern.patternoffset = sizeof(wl_wowl_pattern_t) + + wowl_pattern2->wowl_pattern.masksize; + + wowl_pattern2->wowl_pattern.patternsize = patternsize; + ret = wl_pattern_atoh(pattern, mask_and_pattern); + if (ret == -1) { + CONFIG_ERROR("rejecting pattern=%s\n", pattern); + goto exit; + } + + CONFIG_TRACE("%s %d %s %s\n", cmd, offset, mask, pattern); + + ret = dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "wowl_pattern", + (char *)wowl_pattern2, buf_len, FALSE); + } + +exit: + if (wowl_pattern2) + kfree(wowl_pattern2); + return ret; +} + +static int +dhd_conf_wowl_wakeind(dhd_pub_t *dhd, bool clear) +{ + s8 iovar_buf[WLC_IOCTL_SMLEN]; + wl_wowl_wakeind_t *wake = NULL; + int ret = -1; + char clr[6]="clear", wakeind_str[32]="\0"; + + if (clear) { + CONFIG_TRACE("wowl_wakeind clear\n"); + ret = dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "wowl_wakeind", + clr, sizeof(clr), 0); + } else { + ret = dhd_conf_get_iovar(dhd, 0, WLC_GET_VAR, "wowl_wakeind", + iovar_buf, sizeof(iovar_buf)); + if (!ret) { + wake = (wl_wowl_wakeind_t *) iovar_buf; + if (wake->ucode_wakeind & WL_WOWL_MAGIC) + strcpy(wakeind_str, "(MAGIC packet)"); + if (wake->ucode_wakeind & WL_WOWL_NET) + strcpy(wakeind_str, "(Netpattern)"); + if (wake->ucode_wakeind & WL_WOWL_DIS) + strcpy(wakeind_str, "(Disassoc/Deauth)"); + if (wake->ucode_wakeind & WL_WOWL_BCN) + strcpy(wakeind_str, "(Loss of beacon)"); + if (wake->ucode_wakeind & WL_WOWL_TCPKEEP_TIME) + 
strcpy(wakeind_str, "(TCPKA timeout)"); + if (wake->ucode_wakeind & WL_WOWL_TCPKEEP_DATA) + strcpy(wakeind_str, "(TCPKA data)"); + if (wake->ucode_wakeind & WL_WOWL_TCPFIN) + strcpy(wakeind_str, "(TCP FIN)"); + CONFIG_MSG("wakeind=0x%x %s\n", wake->ucode_wakeind, wakeind_str); + } + } + + return ret; +} +#endif + +int +dhd_conf_mkeep_alive(dhd_pub_t *dhd, int ifidx, int id, int period, + char *packet, bool bcast) +{ + wl_mkeep_alive_pkt_t *mkeep_alive_pktp; + int ret = 0, len_bytes=0, buf_len=0; + char *buf = NULL, *iovar_buf = NULL; + struct ether_addr bssid; + uint8 *pdata; + + CONFIG_TRACE("id=%d, period=%d, packet=%s\n", id, period, packet); + if (period >= 0) { + buf = kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL); + if (buf == NULL) { + CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", WLC_IOCTL_SMLEN); + goto exit; + } + iovar_buf = kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL); + if (iovar_buf == NULL) { + CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", WLC_IOCTL_SMLEN); + goto exit; + } + mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *)buf; + mkeep_alive_pktp->version = htod16(WL_MKEEP_ALIVE_VERSION); + mkeep_alive_pktp->length = htod16(WL_MKEEP_ALIVE_FIXED_LEN); + mkeep_alive_pktp->keep_alive_id = id; + buf_len += WL_MKEEP_ALIVE_FIXED_LEN; + mkeep_alive_pktp->period_msec = period; + if (strlen(packet)) { + len_bytes = wl_pattern_atoh(packet, (char *)mkeep_alive_pktp->data); + buf_len += len_bytes; + if (bcast) { + memcpy(mkeep_alive_pktp->data, ðer_bcast, ETHER_ADDR_LEN); + } else { + memset(&bssid, 0, ETHER_ADDR_LEN); + ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, + FALSE, ifidx); + if (ret != BCME_NOTASSOCIATED && memcmp(ðer_null, &bssid, ETHER_ADDR_LEN)) + memcpy(mkeep_alive_pktp->data, &bssid, ETHER_ADDR_LEN); + } + ret = dhd_conf_get_iovar(dhd, ifidx, WLC_GET_VAR, "cur_etheraddr", + iovar_buf, WLC_IOCTL_SMLEN); + if (!ret) { + pdata = mkeep_alive_pktp->data; + memcpy(pdata+6, iovar_buf, ETHER_ADDR_LEN); + } + } + 
mkeep_alive_pktp->len_bytes = htod16(len_bytes); + ret = dhd_conf_set_bufiovar(dhd, ifidx, WLC_SET_VAR, "mkeep_alive", + buf, buf_len, FALSE); + } + +exit: + if (buf) + kfree(buf); + if (iovar_buf) + kfree(iovar_buf); + return ret; +} + +#ifdef ARP_OFFLOAD_SUPPORT +void +dhd_conf_set_garp(dhd_pub_t *dhd, int ifidx, uint32 ipa, bool enable) +{ + int i, len = 0, total_len = WLC_IOCTL_SMLEN; + char *iovar_buf = NULL, *packet = NULL; + + if (!dhd->conf->garp || ifidx != 0 || !(dhd->op_mode & DHD_FLAG_STA_MODE)) + return; + + CONFIG_TRACE("enable=%d\n", enable); + + if (enable) { + iovar_buf = kmalloc(total_len, GFP_KERNEL); + if (iovar_buf == NULL) { + CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", total_len); + goto exit; + } + packet = kmalloc(total_len, GFP_KERNEL); + if (packet == NULL) { + CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", total_len); + goto exit; + } + dhd_conf_get_iovar(dhd, ifidx, WLC_GET_VAR, "cur_etheraddr", iovar_buf, total_len); + + len += snprintf(packet+len, total_len, "0xffffffffffff"); + for (i=0; i>8)&0xff, (ipa>>16)&0xff, (ipa>>24)&0xff); + len += snprintf(packet+len, total_len, "ffffffffffffc0a80101000000000000000000000000000000000000"); + } + + dhd_conf_mkeep_alive(dhd, ifidx, 0, dhd->conf->keep_alive_period, packet, TRUE); + +exit: + if (iovar_buf) + kfree(iovar_buf); + if (packet) + kfree(packet); + return; +} +#endif + uint -dhd_conf_get_insuspend(dhd_pub_t *dhd) +dhd_conf_get_insuspend(dhd_pub_t *dhd, uint mask) { - uint mode = 0; + uint insuspend = 0; if (dhd->op_mode & DHD_FLAG_STA_MODE) { - mode = dhd->conf->insuspend & - (NO_EVENT_IN_SUSPEND | NO_TXDATA_IN_SUSPEND | ROAM_OFFLOAD_IN_SUSPEND); + insuspend = dhd->conf->insuspend & + (NO_EVENT_IN_SUSPEND | NO_TXDATA_IN_SUSPEND | NO_TXCTL_IN_SUSPEND | + ROAM_OFFLOAD_IN_SUSPEND | WOWL_IN_SUSPEND); } else if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { - mode = dhd->conf->insuspend & - (NO_EVENT_IN_SUSPEND | NO_TXDATA_IN_SUSPEND | AP_DOWN_IN_SUSPEND); + insuspend = 
dhd->conf->insuspend & + (NO_EVENT_IN_SUSPEND | NO_TXDATA_IN_SUSPEND | NO_TXCTL_IN_SUSPEND | + AP_DOWN_IN_SUSPEND | AP_FILTER_IN_SUSPEND); } - return mode; + return (insuspend & mask); } +#ifdef SUSPEND_EVENT +void +dhd_conf_set_suspend_event(dhd_pub_t *dhd, int suspend) +{ + struct dhd_conf *conf = dhd->conf; + struct ether_addr bssid; + char suspend_eventmask[WL_EVENTING_MASK_LEN]; + wl_event_msg_t msg; + int pm; +#ifdef WL_CFG80211 + struct net_device *net; +#endif /* defined(WL_CFG80211) */ + + if (suspend) { +#ifdef PROP_TXSTATUS +#if defined(BCMSDIO) || defined(BCMDBUS) + if (dhd->wlfc_enabled) { + dhd_wlfc_deinit(dhd); + conf->wlfc = TRUE; + } else { + conf->wlfc = FALSE; + } +#endif /* BCMSDIO || BCMDBUS */ +#endif /* PROP_TXSTATUS */ + dhd_conf_get_iovar(dhd, 0, WLC_GET_VAR, "event_msgs", + conf->resume_eventmask, sizeof(conf->resume_eventmask)); + memset(suspend_eventmask, 0, sizeof(suspend_eventmask)); + setbit(suspend_eventmask, WLC_E_ESCAN_RESULT); + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "event_msgs", + suspend_eventmask, sizeof(suspend_eventmask), FALSE); + if (dhd->op_mode & DHD_FLAG_STA_MODE) { + memset(&bssid, 0, ETHER_ADDR_LEN); + dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, FALSE, 0); + if (memcmp(ðer_null, &bssid, ETHER_ADDR_LEN)) + memcpy(&conf->bssid_insuspend, &bssid, ETHER_ADDR_LEN); + else + memset(&conf->bssid_insuspend, 0, ETHER_ADDR_LEN); + } + } + else { + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "event_msgs", + conf->resume_eventmask, sizeof(conf->resume_eventmask), FALSE); + if (dhd->op_mode & DHD_FLAG_STA_MODE) { + if (memcmp(ðer_null, &conf->bssid_insuspend, ETHER_ADDR_LEN)) { + memset(&bssid, 0, ETHER_ADDR_LEN); + dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, + FALSE, 0); + if (memcmp(ðer_null, &bssid, ETHER_ADDR_LEN)) { + dhd_conf_set_intiovar(dhd, WLC_SET_PM, "WLC_SET_PM", 0, 0, FALSE); + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "send_nulldata", + (char *)&bssid, ETHER_ADDR_LEN, FALSE); + 
OSL_SLEEP(100); + if (conf->pm >= 0) + pm = conf->pm; + else + pm = PM_FAST; + dhd_conf_set_intiovar(dhd, WLC_SET_PM, "WLC_SET_PM", pm, 0, FALSE); + } else { + CONFIG_TRACE("send WLC_E_DEAUTH_IND event\n"); + bzero(&msg, sizeof(wl_event_msg_t)); + memcpy(&msg.addr, &conf->bssid_insuspend, ETHER_ADDR_LEN); + msg.event_type = hton32(WLC_E_DEAUTH_IND); + msg.status = 0; + msg.reason = hton32(DOT11_RC_DEAUTH_LEAVING); +#if defined(WL_EXT_IAPSTA) || defined(USE_IW) + wl_ext_event_send(dhd->event_params, &msg, NULL); +#endif +#ifdef WL_CFG80211 + net = dhd_idx2net(dhd, 0); + if (net) { + wl_cfg80211_event(net, &msg, NULL); + } +#endif /* defined(WL_CFG80211) */ + } + } +#ifdef PROP_TXSTATUS +#if defined(BCMSDIO) || defined(BCMDBUS) + if (conf->wlfc) { + dhd_wlfc_init(dhd); + dhd_conf_set_intiovar(dhd, WLC_UP, "WLC_UP", 0, 0, FALSE); + } +#endif +#endif /* PROP_TXSTATUS */ + } + } + +} +#endif + int dhd_conf_set_suspend_resume(dhd_pub_t *dhd, int suspend) { - uint mode = 0, wl_down = 1; + uint insuspend = 0; struct dhd_conf *conf = dhd->conf; +#ifdef WL_EXT_WOWL + int i; +#endif + + insuspend = dhd_conf_get_insuspend(dhd, ALL_IN_SUSPEND); + if (insuspend) + CONFIG_MSG("op_mode %d, suspend %d, suspended %d, insuspend 0x%x\n", + dhd->op_mode, suspend, conf->suspended, insuspend); - mode = dhd_conf_get_insuspend(dhd); - if (mode) - printf("%s: op_mode %d, suspend %d, mode 0x%x\n", __FUNCTION__, - dhd->op_mode, suspend, mode); + if (conf->suspended == suspend) { + return 0; + } if (suspend) { if (dhd->op_mode & DHD_FLAG_STA_MODE) { - if (mode & ROAM_OFFLOAD_IN_SUSPEND) - dhd_conf_enable_roam_offload(dhd, 1); + if (insuspend & ROAM_OFFLOAD_IN_SUSPEND) + dhd_conf_enable_roam_offload(dhd, 2); } else if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { - if (mode & AP_DOWN_IN_SUSPEND) { - dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, - sizeof(wl_down), TRUE, 0); + if (insuspend & AP_DOWN_IN_SUSPEND) { + dhd_conf_set_intiovar(dhd, WLC_DOWN, "WLC_DOWN", 1, 0, FALSE); } } #ifdef 
SUSPEND_EVENT - if (mode & NO_EVENT_IN_SUSPEND) { - char suspend_eventmask[WL_EVENTING_MASK_LEN]; - if (!conf->suspended) { - dhd_conf_get_iovar(dhd, WLC_GET_VAR, "event_msgs", - conf->resume_eventmask, sizeof(conf->resume_eventmask), 0); - } - memset(suspend_eventmask, 0, sizeof(suspend_eventmask)); - dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "event_msgs", - suspend_eventmask, sizeof(suspend_eventmask), FALSE); + if (insuspend & NO_EVENT_IN_SUSPEND) { + dhd_conf_set_suspend_event(dhd, suspend); } #endif + if (insuspend & NO_TXDATA_IN_SUSPEND) { + dhd_txflowcontrol(dhd, ALL_INTERFACES, ON); + } +#ifdef WL_EXT_WOWL + if ((insuspend & WOWL_IN_SUSPEND) && dhd_master_mode) { + dhd_conf_wowl_pattern(dhd, FALSE, "clr"); + for(i=0; ipkt_filter_add.count; i++) { + dhd_conf_wowl_pattern(dhd, TRUE, conf->pkt_filter_add.filter[i]); + } + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "wowl", conf->wowl, 0, FALSE); + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "wowl_activate", 1, 0, FALSE); + dhd_conf_wowl_wakeind(dhd, TRUE); + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "hostsleep", 1, 0, FALSE); + } else +#endif + if (insuspend & NO_TXCTL_IN_SUSPEND) { + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "hostsleep", 2, 0, FALSE); + } conf->suspended = TRUE; } else { + if (insuspend & (WOWL_IN_SUSPEND | NO_TXCTL_IN_SUSPEND)) { + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "hostsleep", 0, 0, FALSE); + } +#ifdef WL_EXT_WOWL + if (insuspend & WOWL_IN_SUSPEND) { + dhd_conf_wowl_wakeind(dhd, FALSE); + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "wowl_activate", 0, 0, FALSE); + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "wowl", 0, 0, FALSE); + dhd_conf_wowl_pattern(dhd, FALSE, "clr"); + } +#endif #ifdef SUSPEND_EVENT - if (mode & NO_EVENT_IN_SUSPEND) { - dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "event_msgs", - conf->resume_eventmask, sizeof(conf->resume_eventmask), FALSE); - if (dhd->op_mode & DHD_FLAG_STA_MODE) { - struct ether_addr bssid; - int ret = 0; - ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, &bssid, - 
sizeof(struct ether_addr), FALSE, 0); - if (ret != BCME_NOTASSOCIATED && !memcmp(ðer_null, &bssid, ETHER_ADDR_LEN)) { - CONFIG_TRACE(("%s: send disassoc\n", __FUNCTION__)); - dhd_conf_set_intiovar(dhd, WLC_DISASSOC, "WLC_DISASSOC", 0, 0, FALSE); - } - } + if (insuspend & NO_EVENT_IN_SUSPEND) { + dhd_conf_set_suspend_event(dhd, suspend); } #endif if (dhd->op_mode & DHD_FLAG_STA_MODE) { - if (mode & ROAM_OFFLOAD_IN_SUSPEND) + if (insuspend & ROAM_OFFLOAD_IN_SUSPEND) dhd_conf_enable_roam_offload(dhd, 0); } else if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { - if (mode & AP_DOWN_IN_SUSPEND) { - wl_down = 0; - dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&wl_down, - sizeof(wl_down), TRUE, 0); + if (insuspend & AP_DOWN_IN_SUSPEND) { + dhd_conf_set_intiovar(dhd, WLC_UP, "WLC_UP", 0, 0, FALSE); } } + if (insuspend & NO_TXDATA_IN_SUSPEND) { + dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF); + } conf->suspended = FALSE; } @@ -1376,7 +1912,7 @@ dhd_conf_get_disable_proptx(dhd_pub_t *dhd) disable_proptx = 1; } - printf("%s: fw_proptx=%d, disable_proptx=%d\n", __FUNCTION__, fw_proptx, disable_proptx); + CONFIG_MSG("fw_proptx=%d, disable_proptx=%d\n", fw_proptx, disable_proptx); return disable_proptx; } @@ -1393,7 +1929,7 @@ pick_config_vars(char *varbuf, uint len, uint start_pos, char *pickbuf) column = 0; if (start_pos >= len) { - CONFIG_ERROR(("%s: wrong start pos\n", __FUNCTION__)); + CONFIG_ERROR("wrong start pos\n"); return 0; } @@ -1444,6 +1980,34 @@ pick_config_vars(char *varbuf, uint len, uint start_pos, char *pickbuf) return n; // return current position } +bool +dhd_conf_read_chiprev(dhd_pub_t *dhd, int *chip_match, + char *full_param, uint len_param) +{ + char *data = full_param+len_param, *pick_tmp, *pch; + uint chip = 0, rev = 0; + + /* Process chip, regrev: + * chip=[chipid], rev==[rev] + * Ex: chip=0x4359, rev=9 + */ + if (!strncmp("chip=", full_param, len_param)) { + chip = (int)simple_strtol(data, NULL, 0); + pick_tmp = data; + pch = bcmstrstr(pick_tmp, "rev="); + if (pch) { 
+ rev = (int)simple_strtol(pch+strlen("rev="), NULL, 0); + } + if (chip == dhd->conf->chip && rev == dhd->conf->chiprev) + *chip_match = 1; + else + *chip_match = 0; + CONFIG_MSG("chip=0x%x, rev=%d, chip_match=%d\n", chip, rev, *chip_match); + } + + return TRUE; +} + bool dhd_conf_read_log_level(dhd_pub_t *dhd, char *full_param, uint len_param) { @@ -1451,44 +2015,44 @@ dhd_conf_read_log_level(dhd_pub_t *dhd, char *full_param, uint len_param) if (!strncmp("dhd_msg_level=", full_param, len_param)) { dhd_msg_level = (int)simple_strtol(data, NULL, 0); - printf("%s: dhd_msg_level = 0x%X\n", __FUNCTION__, dhd_msg_level); + CONFIG_MSG("dhd_msg_level = 0x%X\n", dhd_msg_level); } #ifdef BCMSDIO else if (!strncmp("sd_msglevel=", full_param, len_param)) { sd_msglevel = (int)simple_strtol(data, NULL, 0); - printf("%s: sd_msglevel = 0x%X\n", __FUNCTION__, sd_msglevel); + CONFIG_MSG("sd_msglevel = 0x%X\n", sd_msglevel); } #endif #ifdef BCMDBUS else if (!strncmp("dbus_msglevel=", full_param, len_param)) { dbus_msglevel = (int)simple_strtol(data, NULL, 0); - printf("%s: dbus_msglevel = 0x%X\n", __FUNCTION__, dbus_msglevel); + CONFIG_MSG("dbus_msglevel = 0x%X\n", dbus_msglevel); } #endif else if (!strncmp("android_msg_level=", full_param, len_param)) { android_msg_level = (int)simple_strtol(data, NULL, 0); - printf("%s: android_msg_level = 0x%X\n", __FUNCTION__, android_msg_level); + CONFIG_MSG("android_msg_level = 0x%X\n", android_msg_level); } else if (!strncmp("config_msg_level=", full_param, len_param)) { config_msg_level = (int)simple_strtol(data, NULL, 0); - printf("%s: config_msg_level = 0x%X\n", __FUNCTION__, config_msg_level); + CONFIG_MSG("config_msg_level = 0x%X\n", config_msg_level); } #ifdef WL_CFG80211 else if (!strncmp("wl_dbg_level=", full_param, len_param)) { wl_dbg_level = (int)simple_strtol(data, NULL, 0); - printf("%s: wl_dbg_level = 0x%X\n", __FUNCTION__, wl_dbg_level); + CONFIG_MSG("wl_dbg_level = 0x%X\n", wl_dbg_level); } #endif #if defined(WL_WIRELESS_EXT) 
else if (!strncmp("iw_msg_level=", full_param, len_param)) { iw_msg_level = (int)simple_strtol(data, NULL, 0); - printf("%s: iw_msg_level = 0x%X\n", __FUNCTION__, iw_msg_level); + CONFIG_MSG("iw_msg_level = 0x%X\n", iw_msg_level); } #endif #if defined(DHD_DEBUG) else if (!strncmp("dhd_console_ms=", full_param, len_param)) { dhd->dhd_console_ms = (int)simple_strtol(data, NULL, 0); - printf("%s: dhd_console_ms = 0x%X\n", __FUNCTION__, dhd->dhd_console_ms); + CONFIG_MSG("dhd_console_ms = 0x%X\n", dhd->dhd_console_ms); } #endif else @@ -1506,25 +2070,25 @@ dhd_conf_read_wme_ac_value(wme_param_t *wme, char *pick, int ac_val) pch = bcmstrstr(pick_tmp, "aifsn "); if (pch) { wme->aifsn[ac_val] = (int)simple_strtol(pch+strlen("aifsn "), NULL, 0); - printf("%s: ac_val=%d, aifsn=%d\n", __FUNCTION__, ac_val, wme->aifsn[ac_val]); + CONFIG_MSG("ac_val=%d, aifsn=%d\n", ac_val, wme->aifsn[ac_val]); } pick_tmp = pick; pch = bcmstrstr(pick_tmp, "ecwmin "); if (pch) { wme->ecwmin[ac_val] = (int)simple_strtol(pch+strlen("ecwmin "), NULL, 0); - printf("%s: ac_val=%d, ecwmin=%d\n", __FUNCTION__, ac_val, wme->ecwmin[ac_val]); + CONFIG_MSG("ac_val=%d, ecwmin=%d\n", ac_val, wme->ecwmin[ac_val]); } pick_tmp = pick; pch = bcmstrstr(pick_tmp, "ecwmax "); if (pch) { wme->ecwmax[ac_val] = (int)simple_strtol(pch+strlen("ecwmax "), NULL, 0); - printf("%s: ac_val=%d, ecwmax=%d\n", __FUNCTION__, ac_val, wme->ecwmax[ac_val]); + CONFIG_MSG("ac_val=%d, ecwmax=%d\n", ac_val, wme->ecwmax[ac_val]); } pick_tmp = pick; pch = bcmstrstr(pick_tmp, "txop "); if (pch) { wme->txop[ac_val] = (int)simple_strtol(pch+strlen("txop "), NULL, 0); - printf("%s: ac_val=%d, txop=0x%x\n", __FUNCTION__, ac_val, wme->txop[ac_val]); + CONFIG_MSG("ac_val=%d, txop=0x%x\n", ac_val, wme->txop[ac_val]); } } @@ -1540,7 +2104,7 @@ dhd_conf_read_wme_ac_params(dhd_pub_t *dhd, char *full_param, uint len_param) if (!strncmp("force_wme_ac=", full_param, len_param)) { conf->force_wme_ac = (int)simple_strtol(data, NULL, 10); - printf("%s: 
force_wme_ac = %d\n", __FUNCTION__, conf->force_wme_ac); + CONFIG_MSG("force_wme_ac = %d\n", conf->force_wme_ac); } else if (!strncmp("wme_ac_sta_be=", full_param, len_param)) { dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_BE); @@ -1572,6 +2136,7 @@ dhd_conf_read_wme_ac_params(dhd_pub_t *dhd, char *full_param, uint len_param) return true; } +#ifdef BCMSDIO bool dhd_conf_read_fw_by_mac(dhd_pub_t *dhd, char *full_param, uint len_param) { @@ -1600,22 +2165,24 @@ dhd_conf_read_fw_by_mac(dhd_pub_t *dhd, char *full_param, uint len_param) pick_tmp = data; pch = bcmstrtok(&pick_tmp, " ", 0); conf->fw_by_mac.count = (uint32)simple_strtol(pch, NULL, 0); - if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->fw_by_mac.count, GFP_KERNEL))) { + if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->fw_by_mac.count, + GFP_KERNEL))) { conf->fw_by_mac.count = 0; - CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + CONFIG_ERROR("kmalloc failed\n"); } - printf("%s: fw_count=%d\n", __FUNCTION__, conf->fw_by_mac.count); + CONFIG_MSG("fw_count=%d\n", conf->fw_by_mac.count); conf->fw_by_mac.m_mac_list_head = mac_list; for (i=0; ifw_by_mac.count; i++) { pch = bcmstrtok(&pick_tmp, " ", 0); strcpy(mac_list[i].name, pch); pch = bcmstrtok(&pick_tmp, " ", 0); mac_list[i].count = (uint32)simple_strtol(pch, NULL, 0); - printf("%s: name=%s, mac_count=%d\n", __FUNCTION__, + CONFIG_MSG("name=%s, mac_count=%d\n", mac_list[i].name, mac_list[i].count); - if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count, GFP_KERNEL))) { + if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count, + GFP_KERNEL))) { mac_list[i].count = 0; - CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + CONFIG_ERROR("kmalloc failed\n"); break; } mac_list[i].mac = mac_range; @@ -1626,9 +2193,8 @@ dhd_conf_read_fw_by_mac(dhd_pub_t *dhd, char *full_param, uint len_param) mac_range[j].nic_start = (uint32)simple_strtol(pch, NULL, 0); pch = bcmstrtok(&pick_tmp, " ", 0); mac_range[j].nic_end = 
(uint32)simple_strtol(pch, NULL, 0); - printf("%s: oui=0x%06X, nic_start=0x%06X, nic_end=0x%06X\n", - __FUNCTION__, mac_range[j].oui, - mac_range[j].nic_start, mac_range[j].nic_end); + CONFIG_MSG("oui=0x%06X, nic_start=0x%06X, nic_end=0x%06X\n", + mac_range[j].oui, mac_range[j].nic_start, mac_range[j].nic_end); } } } @@ -1655,22 +2221,24 @@ dhd_conf_read_nv_by_mac(dhd_pub_t *dhd, char *full_param, uint len_param) pick_tmp = data; pch = bcmstrtok(&pick_tmp, " ", 0); conf->nv_by_mac.count = (uint32)simple_strtol(pch, NULL, 0); - if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->nv_by_mac.count, GFP_KERNEL))) { + if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->nv_by_mac.count, + GFP_KERNEL))) { conf->nv_by_mac.count = 0; - CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + CONFIG_ERROR("kmalloc failed\n"); } - printf("%s: nv_count=%d\n", __FUNCTION__, conf->nv_by_mac.count); + CONFIG_MSG("nv_count=%d\n", conf->nv_by_mac.count); conf->nv_by_mac.m_mac_list_head = mac_list; for (i=0; inv_by_mac.count; i++) { pch = bcmstrtok(&pick_tmp, " ", 0); strcpy(mac_list[i].name, pch); pch = bcmstrtok(&pick_tmp, " ", 0); mac_list[i].count = (uint32)simple_strtol(pch, NULL, 0); - printf("%s: name=%s, mac_count=%d\n", __FUNCTION__, + CONFIG_MSG("name=%s, mac_count=%d\n", mac_list[i].name, mac_list[i].count); - if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count, GFP_KERNEL))) { + if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count, + GFP_KERNEL))) { mac_list[i].count = 0; - CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + CONFIG_ERROR("kmalloc failed\n"); break; } mac_list[i].mac = mac_range; @@ -1681,9 +2249,8 @@ dhd_conf_read_nv_by_mac(dhd_pub_t *dhd, char *full_param, uint len_param) mac_range[j].nic_start = (uint32)simple_strtol(pch, NULL, 0); pch = bcmstrtok(&pick_tmp, " ", 0); mac_range[j].nic_end = (uint32)simple_strtol(pch, NULL, 0); - printf("%s: oui=0x%06X, nic_start=0x%06X, nic_end=0x%06X\n", - __FUNCTION__, 
mac_range[j].oui, - mac_range[j].nic_start, mac_range[j].nic_end); + CONFIG_MSG("oui=0x%06X, nic_start=0x%06X, nic_end=0x%06X\n", + mac_range[j].oui, mac_range[j].nic_start, mac_range[j].nic_end); } } } @@ -1692,6 +2259,7 @@ dhd_conf_read_nv_by_mac(dhd_pub_t *dhd, char *full_param, uint len_param) return true; } +#endif bool dhd_conf_read_nv_by_chip(dhd_pub_t *dhd, char *full_param, uint len_param) @@ -1712,11 +2280,12 @@ dhd_conf_read_nv_by_chip(dhd_pub_t *dhd, char *full_param, uint len_param) pick_tmp = data; pch = bcmstrtok(&pick_tmp, " ", 0); conf->nv_by_chip.count = (uint32)simple_strtol(pch, NULL, 0); - if (!(chip_nv_path = kmalloc(sizeof(wl_mac_list_t)*conf->nv_by_chip.count, GFP_KERNEL))) { + if (!(chip_nv_path = kmalloc(sizeof(wl_mac_list_t)*conf->nv_by_chip.count, + GFP_KERNEL))) { conf->nv_by_chip.count = 0; - CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + CONFIG_ERROR("kmalloc failed\n"); } - printf("%s: nv_by_chip_count=%d\n", __FUNCTION__, conf->nv_by_chip.count); + CONFIG_MSG("nv_by_chip_count=%d\n", conf->nv_by_chip.count); conf->nv_by_chip.m_chip_nv_path_head = chip_nv_path; for (i=0; inv_by_chip.count; i++) { pch = bcmstrtok(&pick_tmp, " ", 0); @@ -1725,7 +2294,7 @@ dhd_conf_read_nv_by_chip(dhd_pub_t *dhd, char *full_param, uint len_param) chip_nv_path[i].chiprev = (uint32)simple_strtol(pch, NULL, 0); pch = bcmstrtok(&pick_tmp, " ", 0); strcpy(chip_nv_path[i].name, pch); - printf("%s: chip=0x%x, chiprev=%d, name=%s\n", __FUNCTION__, + CONFIG_MSG("chip=0x%x, chiprev=%d, name=%s\n", chip_nv_path[i].chip, chip_nv_path[i].chiprev, chip_nv_path[i].name); } } @@ -1746,33 +2315,30 @@ dhd_conf_read_roam_params(dhd_pub_t *dhd, char *full_param, uint len_param) conf->roam_off = 0; else conf->roam_off = 1; - printf("%s: roam_off = %d\n", __FUNCTION__, conf->roam_off); + CONFIG_MSG("roam_off = %d\n", conf->roam_off); } else if (!strncmp("roam_off_suspend=", full_param, len_param)) { if (!strncmp(data, "0", 1)) conf->roam_off_suspend = 0; else 
conf->roam_off_suspend = 1; - printf("%s: roam_off_suspend = %d\n", __FUNCTION__, conf->roam_off_suspend); + CONFIG_MSG("roam_off_suspend = %d\n", conf->roam_off_suspend); } else if (!strncmp("roam_trigger=", full_param, len_param)) { conf->roam_trigger[0] = (int)simple_strtol(data, NULL, 10); - printf("%s: roam_trigger = %d\n", __FUNCTION__, - conf->roam_trigger[0]); + CONFIG_MSG("roam_trigger = %d\n", conf->roam_trigger[0]); } else if (!strncmp("roam_scan_period=", full_param, len_param)) { conf->roam_scan_period[0] = (int)simple_strtol(data, NULL, 10); - printf("%s: roam_scan_period = %d\n", __FUNCTION__, - conf->roam_scan_period[0]); + CONFIG_MSG("roam_scan_period = %d\n", conf->roam_scan_period[0]); } else if (!strncmp("roam_delta=", full_param, len_param)) { conf->roam_delta[0] = (int)simple_strtol(data, NULL, 10); - printf("%s: roam_delta = %d\n", __FUNCTION__, conf->roam_delta[0]); + CONFIG_MSG("roam_delta = %d\n", conf->roam_delta[0]); } else if (!strncmp("fullroamperiod=", full_param, len_param)) { conf->fullroamperiod = (int)simple_strtol(data, NULL, 10); - printf("%s: fullroamperiod = %d\n", __FUNCTION__, - conf->fullroamperiod); + CONFIG_MSG("fullroamperiod = %d\n", conf->fullroamperiod); } else return false; @@ -1782,22 +2348,31 @@ dhd_conf_read_roam_params(dhd_pub_t *dhd, char *full_param, uint len_param) bool dhd_conf_read_country_list(dhd_pub_t *dhd, char *full_param, uint len_param) { - int i; - char *pch, *pick_tmp, *pick_tmp2; struct dhd_conf *conf = dhd->conf; + conf_country_list_t *country_list = &conf->country_list; + int i, count = 0; + char *pch, *pick_tmp, *pick_tmp2; char *data = full_param+len_param; wl_country_t *cspec; - conf_country_list_t *country_list = NULL; + uint len_data = strlen(data); /* Process country_list: * country_list=[country1]:[ccode1]/[regrev1], * [country2]:[ccode2]/[regrev2] \ * Ex: country_list=US:US/0, TW:TW/1 */ - if (!strncmp("country_list=", full_param, len_param)) { - country_list = &dhd->conf->country_list; + 
if (!strncmp("ccode=", full_param, len_param)) { + len_data = min((uint)WLC_CNTRY_BUF_SZ, len_data); + memset(&conf->cspec, 0, sizeof(wl_country_t)); + memcpy(conf->cspec.country_abbrev, data, len_data); + memcpy(conf->cspec.ccode, data, len_data); + CONFIG_MSG("ccode = %s\n", conf->cspec.ccode); } - if (country_list) { + else if (!strncmp("regrev=", full_param, len_param)) { + conf->cspec.rev = (int32)simple_strtol(data, NULL, 10); + CONFIG_MSG("regrev = %d\n", conf->cspec.rev); + } + else if (!strncmp("country_list=", full_param, len_param)) { pick_tmp = data; for (i=0; irev = (int32)simple_strtol(pch, NULL, 10); - country_list->count++; + count++; country_list->cspec[i] = cspec; - CONFIG_TRACE(("%s: country_list abbrev=%s, ccode=%s, regrev=%d\n", __FUNCTION__, - cspec->country_abbrev, cspec->ccode, cspec->rev)); - } - if (!strncmp("country_list=", full_param, len_param)) { - printf("%s: %d country in list\n", __FUNCTION__, conf->country_list.count); + CONFIG_TRACE("country_list abbrev=%s, ccode=%s, regrev=%d\n", + cspec->country_abbrev, cspec->ccode, cspec->rev); } + CONFIG_MSG("%d country in list\n", count); } else return false; @@ -1865,7 +2438,7 @@ dhd_conf_read_mchan_params(dhd_pub_t *dhd, char *full_param, uint len_param) } else { conf->mchan[i].bw = (int)simple_strtol(pch, NULL, 0); if (conf->mchan[i].bw < 0 || conf->mchan[i].bw > 100) { - CONFIG_ERROR(("%s: wrong bw %d\n", __FUNCTION__, conf->mchan[i].bw)); + CONFIG_ERROR("wrong bw %d\n", conf->mchan[i].bw); conf->mchan[i].bw = 0; break; } @@ -1897,8 +2470,8 @@ dhd_conf_read_mchan_params(dhd_pub_t *dhd, char *full_param, uint len_param) } for (i=0; imchan[i].bw >= 0) - printf("%s: mchan_bw=%d/%d/%d\n", __FUNCTION__, - conf->mchan[i].bw, conf->mchan[i].p2p_mode, conf->mchan[i].miracast_mode); + CONFIG_MSG("mchan_bw=%d/%d/%d\n", conf->mchan[i].bw, + conf->mchan[i].p2p_mode, conf->mchan[i].miracast_mode); } } else @@ -1918,7 +2491,7 @@ dhd_conf_read_pkt_filter(dhd_pub_t *dhd, char *full_param, uint 
len_param) /* Process pkt filter: * 1) pkt_filter_add=99 0 0 0 0x000000000000 0x000000000000 - * 2) pkt_filter_del=100, 102, 103, 104, 105 + * 2) pkt_filter_delete=100, 102, 103, 104, 105 * 3) magic_pkt_filter_add=141 0 1 12 */ if (!strncmp("dhd_master_mode=", full_param, len_param)) { @@ -1926,7 +2499,7 @@ dhd_conf_read_pkt_filter(dhd_pub_t *dhd, char *full_param, uint len_param) dhd_master_mode = FALSE; else dhd_master_mode = TRUE; - printf("%s: dhd_master_mode = %d\n", __FUNCTION__, dhd_master_mode); + CONFIG_MSG("dhd_master_mode = %d\n", dhd_master_mode); } else if (!strncmp("pkt_filter_add=", full_param, len_param)) { pick_tmp = data; @@ -1934,13 +2507,15 @@ dhd_conf_read_pkt_filter(dhd_pub_t *dhd, char *full_param, uint len_param) i=0; while (pch != NULL && ipkt_filter_add.filter[i][0], pch); - printf("%s: pkt_filter_add[%d][] = %s\n", __FUNCTION__, i, &conf->pkt_filter_add.filter[i][0]); + CONFIG_MSG("pkt_filter_add[%d][] = %s\n", + i, &conf->pkt_filter_add.filter[i][0]); pch = bcmstrtok(&pick_tmp, ",.-", 0); i++; } conf->pkt_filter_add.count = i; } - else if (!strncmp("pkt_filter_del=", full_param, len_param)) { + else if (!strncmp("pkt_filter_delete=", full_param, len_param) || + !strncmp("pkt_filter_del=", full_param, len_param)) { pick_tmp = data; pch = bcmstrtok(&pick_tmp, " ,.-", 0); i=0; @@ -1950,18 +2525,18 @@ dhd_conf_read_pkt_filter(dhd_pub_t *dhd, char *full_param, uint len_param) i++; } conf->pkt_filter_del.count = i; - printf("%s: pkt_filter_del id = ", __FUNCTION__); + CONFIG_MSG("pkt_filter_del id = "); for (i=0; ipkt_filter_del.count; i++) printf("%d ", conf->pkt_filter_del.id[i]); printf("\n"); } else if (!strncmp("magic_pkt_filter_add=", full_param, len_param)) { if (!(conf->magic_pkt_filter_add = kmalloc(MAGIC_PKT_FILTER_LEN, GFP_KERNEL))) { - CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + CONFIG_ERROR("kmalloc failed\n"); } else { memset(conf->magic_pkt_filter_add, 0, MAGIC_PKT_FILTER_LEN); strcpy(conf->magic_pkt_filter_add, 
data); - printf("%s: magic_pkt_filter_add = %s\n", __FUNCTION__, conf->magic_pkt_filter_add); + CONFIG_MSG("magic_pkt_filter_add = %s\n", conf->magic_pkt_filter_add); } } else @@ -1972,6 +2547,9 @@ dhd_conf_read_pkt_filter(dhd_pub_t *dhd, char *full_param, uint len_param) #endif #ifdef ISAM_PREINIT +#if !defined(WL_EXT_IAPSTA) +#error "WL_EXT_IAPSTA should be defined to enable ISAM_PREINIT" +#endif /* !WL_EXT_IAPSTA */ /* * isam_init=mode [sta|ap|apsta|dualap] vifname [wlan1] * isam_config=ifname [wlan0|wlan1] ssid [xxx] chan [x] @@ -1989,15 +2567,15 @@ dhd_conf_read_isam(dhd_pub_t *dhd, char *full_param, uint len_param) if (!strncmp("isam_init=", full_param, len_param)) { sprintf(conf->isam_init, "isam_init %s", data); - printf("%s: isam_init=%s\n", __FUNCTION__, conf->isam_init); + CONFIG_MSG("isam_init=%s\n", conf->isam_init); } else if (!strncmp("isam_config=", full_param, len_param)) { sprintf(conf->isam_config, "isam_config %s", data); - printf("%s: isam_config=%s\n", __FUNCTION__, conf->isam_config); + CONFIG_MSG("isam_config=%s\n", conf->isam_config); } else if (!strncmp("isam_enable=", full_param, len_param)) { sprintf(conf->isam_enable, "isam_enable %s", data); - printf("%s: isam_enable=%s\n", __FUNCTION__, conf->isam_enable); + CONFIG_MSG("isam_enable=%s\n", conf->isam_enable); } else return false; @@ -2016,35 +2594,43 @@ dhd_conf_read_dhcp_params(dhd_pub_t *dhd, char *full_param, uint len_param) if (!strncmp("dhcpc_enable=", full_param, len_param)) { conf->dhcpc_enable = (int)simple_strtol(data, NULL, 10); - printf("%s: dhcpc_enable = %d\n", __FUNCTION__, conf->dhcpc_enable); + CONFIG_MSG("dhcpc_enable = %d\n", conf->dhcpc_enable); } else if (!strncmp("dhcpd_enable=", full_param, len_param)) { conf->dhcpd_enable = (int)simple_strtol(data, NULL, 10); - printf("%s: dhcpd_enable = %d\n", __FUNCTION__, conf->dhcpd_enable); + CONFIG_MSG("dhcpd_enable = %d\n", conf->dhcpd_enable); } else if (!strncmp("dhcpd_ip_addr=", full_param, len_param)) { - if 
(!bcm_atoipv4(data, &ipa_set)) - printf("%s : dhcpd_ip_addr adress setting failed.\n", __FUNCTION__); - conf->dhcpd_ip_addr = ipa_set; - printf("%s: dhcpd_ip_addr = %s\n",__FUNCTION__, data); + if (!bcm_atoipv4(data, &ipa_set)) { + CONFIG_ERROR("dhcpd_ip_addr adress setting failed.n"); + return false; + } + memcpy(&conf->dhcpd_ip_addr, &ipa_set, sizeof(struct ipv4_addr)); + CONFIG_MSG("dhcpd_ip_addr = %s\n", data); } else if (!strncmp("dhcpd_ip_mask=", full_param, len_param)) { - if (!bcm_atoipv4(data, &ipa_set)) - printf("%s : dhcpd_ip_mask adress setting failed.\n", __FUNCTION__); - conf->dhcpd_ip_mask = ipa_set; - printf("%s: dhcpd_ip_mask = %s\n",__FUNCTION__, data); + if (!bcm_atoipv4(data, &ipa_set)) { + CONFIG_ERROR("dhcpd_ip_mask adress setting failed\n"); + return false; + } + memcpy(&conf->dhcpd_ip_mask, &ipa_set, sizeof(struct ipv4_addr)); + CONFIG_MSG("dhcpd_ip_mask = %s\n", data); } else if (!strncmp("dhcpd_ip_start=", full_param, len_param)) { - if (!bcm_atoipv4(data, &ipa_set)) - printf("%s : dhcpd_ip_start adress setting failed.\n", __FUNCTION__); - conf->dhcpd_ip_start = ipa_set; - printf("%s: dhcpd_ip_start = %s\n",__FUNCTION__, data); + if (!bcm_atoipv4(data, &ipa_set)) { + CONFIG_ERROR("dhcpd_ip_start adress setting failed\n"); + return false; + } + memcpy(&conf->dhcpd_ip_start, &ipa_set, sizeof(struct ipv4_addr)); + CONFIG_MSG("dhcpd_ip_start = %s\n", data); } else if (!strncmp("dhcpd_ip_end=", full_param, len_param)) { - if (!bcm_atoipv4(data, &ipa_set)) - printf("%s : dhcpd_ip_end adress setting failed.\n", __FUNCTION__); - conf->dhcpd_ip_end = ipa_set; - printf("%s: dhcpd_ip_end = %s\n",__FUNCTION__, data); + if (!bcm_atoipv4(data, &ipa_set)) { + CONFIG_ERROR("dhcpd_ip_end adress setting failed\n"); + return false; + } + memcpy(&conf->dhcpd_ip_end, &ipa_set, sizeof(struct ipv4_addr)); + CONFIG_MSG("dhcpd_ip_end = %s\n", data); } else return false; @@ -2065,7 +2651,7 @@ dhd_conf_read_sdio_params(dhd_pub_t *dhd, char *full_param, uint 
len_param) dhd_doflow = FALSE; else dhd_doflow = TRUE; - printf("%s: dhd_doflow = %d\n", __FUNCTION__, dhd_doflow); + CONFIG_MSG("dhd_doflow = %d\n", dhd_doflow); } else if (!strncmp("dhd_slpauto=", full_param, len_param) || !strncmp("kso_enable=", full_param, len_param)) { @@ -2073,23 +2659,19 @@ dhd_conf_read_sdio_params(dhd_pub_t *dhd, char *full_param, uint len_param) dhd_slpauto = FALSE; else dhd_slpauto = TRUE; - printf("%s: dhd_slpauto = %d\n", __FUNCTION__, dhd_slpauto); + CONFIG_MSG("dhd_slpauto = %d\n", dhd_slpauto); } else if (!strncmp("use_rxchain=", full_param, len_param)) { conf->use_rxchain = (int)simple_strtol(data, NULL, 10); - printf("%s: use_rxchain = %d\n", __FUNCTION__, conf->use_rxchain); + CONFIG_MSG("use_rxchain = %d\n", conf->use_rxchain); } else if (!strncmp("dhd_txminmax=", full_param, len_param)) { conf->dhd_txminmax = (uint)simple_strtol(data, NULL, 10); - printf("%s: dhd_txminmax = %d\n", __FUNCTION__, conf->dhd_txminmax); + CONFIG_MSG("dhd_txminmax = %d\n", conf->dhd_txminmax); } else if (!strncmp("txinrx_thres=", full_param, len_param)) { conf->txinrx_thres = (int)simple_strtol(data, NULL, 10); - printf("%s: txinrx_thres = %d\n", __FUNCTION__, conf->txinrx_thres); - } - else if (!strncmp("sd_f2_blocksize=", full_param, len_param)) { - conf->sd_f2_blocksize = (int)simple_strtol(data, NULL, 10); - printf("%s: sd_f2_blocksize = %d\n", __FUNCTION__, conf->sd_f2_blocksize); + CONFIG_MSG("txinrx_thres = %d\n", conf->txinrx_thres); } #if defined(HW_OOB) else if (!strncmp("oob_enabled_later=", full_param, len_param)) { @@ -2097,34 +2679,30 @@ dhd_conf_read_sdio_params(dhd_pub_t *dhd, char *full_param, uint len_param) conf->oob_enabled_later = FALSE; else conf->oob_enabled_later = TRUE; - printf("%s: oob_enabled_later = %d\n", __FUNCTION__, conf->oob_enabled_later); + CONFIG_MSG("oob_enabled_later = %d\n", conf->oob_enabled_later); } #endif else if (!strncmp("dpc_cpucore=", full_param, len_param)) { conf->dpc_cpucore = 
(int)simple_strtol(data, NULL, 10); - printf("%s: dpc_cpucore = %d\n", __FUNCTION__, conf->dpc_cpucore); + CONFIG_MSG("dpc_cpucore = %d\n", conf->dpc_cpucore); } else if (!strncmp("rxf_cpucore=", full_param, len_param)) { conf->rxf_cpucore = (int)simple_strtol(data, NULL, 10); - printf("%s: rxf_cpucore = %d\n", __FUNCTION__, conf->rxf_cpucore); - } - else if (!strncmp("orphan_move=", full_param, len_param)) { - conf->orphan_move = (int)simple_strtol(data, NULL, 10); - printf("%s: orphan_move = %d\n", __FUNCTION__, conf->orphan_move); + CONFIG_MSG("rxf_cpucore = %d\n", conf->rxf_cpucore); } #if defined(BCMSDIOH_TXGLOM) else if (!strncmp("txglomsize=", full_param, len_param)) { conf->txglomsize = (uint)simple_strtol(data, NULL, 10); if (conf->txglomsize > SDPCM_MAXGLOM_SIZE) conf->txglomsize = SDPCM_MAXGLOM_SIZE; - printf("%s: txglomsize = %d\n", __FUNCTION__, conf->txglomsize); + CONFIG_MSG("txglomsize = %d\n", conf->txglomsize); } else if (!strncmp("txglom_ext=", full_param, len_param)) { if (!strncmp(data, "0", 1)) conf->txglom_ext = FALSE; else conf->txglom_ext = TRUE; - printf("%s: txglom_ext = %d\n", __FUNCTION__, conf->txglom_ext); + CONFIG_MSG("txglom_ext = %d\n", conf->txglom_ext); if (conf->txglom_ext) { if ((conf->chip == BCM43362_CHIP_ID) || (conf->chip == BCM4330_CHIP_ID)) conf->txglom_bucket_size = 1680; @@ -2132,34 +2710,52 @@ dhd_conf_read_sdio_params(dhd_pub_t *dhd, char *full_param, uint len_param) conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) conf->txglom_bucket_size = 1684; } - printf("%s: txglom_bucket_size = %d\n", __FUNCTION__, conf->txglom_bucket_size); + CONFIG_MSG("txglom_bucket_size = %d\n", conf->txglom_bucket_size); } else if (!strncmp("bus:rxglom=", full_param, len_param)) { if (!strncmp(data, "0", 1)) conf->bus_rxglom = FALSE; else conf->bus_rxglom = TRUE; - printf("%s: bus:rxglom = %d\n", __FUNCTION__, conf->bus_rxglom); + CONFIG_MSG("bus:rxglom = %d\n", conf->bus_rxglom); } else if (!strncmp("deferred_tx_len=", 
full_param, len_param)) { conf->deferred_tx_len = (int)simple_strtol(data, NULL, 10); - printf("%s: deferred_tx_len = %d\n", __FUNCTION__, conf->deferred_tx_len); + CONFIG_MSG("deferred_tx_len = %d\n", conf->deferred_tx_len); } else if (!strncmp("txctl_tmo_fix=", full_param, len_param)) { conf->txctl_tmo_fix = (int)simple_strtol(data, NULL, 0); - printf("%s: txctl_tmo_fix = %d\n", __FUNCTION__, conf->txctl_tmo_fix); + CONFIG_MSG("txctl_tmo_fix = %d\n", conf->txctl_tmo_fix); } else if (!strncmp("tx_max_offset=", full_param, len_param)) { conf->tx_max_offset = (int)simple_strtol(data, NULL, 10); - printf("%s: tx_max_offset = %d\n", __FUNCTION__, conf->tx_max_offset); + CONFIG_MSG("tx_max_offset = %d\n", conf->tx_max_offset); } else if (!strncmp("txglom_mode=", full_param, len_param)) { if (!strncmp(data, "0", 1)) conf->txglom_mode = FALSE; else conf->txglom_mode = TRUE; - printf("%s: txglom_mode = %d\n", __FUNCTION__, conf->txglom_mode); + CONFIG_MSG("txglom_mode = %d\n", conf->txglom_mode); + } +#if defined(SDIO_ISR_THREAD) + else if (!strncmp("intr_extn=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->intr_extn = FALSE; + else + conf->intr_extn = TRUE; + CONFIG_MSG("intr_extn = %d\n", conf->intr_extn); + } +#endif +#ifdef BCMSDIO_RXLIM_POST + else if (!strncmp("rxlim_en=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->rxlim_en = FALSE; + else + conf->rxlim_en = TRUE; + CONFIG_MSG("rxlim_en = %d\n", conf->rxlim_en); } +#endif #endif else return false; @@ -2180,7 +2776,7 @@ dhd_conf_read_pcie_params(dhd_pub_t *dhd, char *full_param, uint len_param) conf->bus_deepsleep_disable = 0; else conf->bus_deepsleep_disable = 1; - printf("%s: bus:deepsleep_disable = %d\n", __FUNCTION__, conf->bus_deepsleep_disable); + CONFIG_MSG("bus:deepsleep_disable = %d\n", conf->bus_deepsleep_disable); } else return false; @@ -2200,56 +2796,83 @@ dhd_conf_read_pm_params(dhd_pub_t *dhd, char *full_param, uint len_param) conf->deepsleep = TRUE; else 
conf->deepsleep = FALSE; - printf("%s: deepsleep = %d\n", __FUNCTION__, conf->deepsleep); + CONFIG_MSG("deepsleep = %d\n", conf->deepsleep); } else if (!strncmp("PM=", full_param, len_param)) { conf->pm = (int)simple_strtol(data, NULL, 10); - printf("%s: PM = %d\n", __FUNCTION__, conf->pm); + CONFIG_MSG("PM = %d\n", conf->pm); } else if (!strncmp("pm_in_suspend=", full_param, len_param)) { conf->pm_in_suspend = (int)simple_strtol(data, NULL, 10); - printf("%s: pm_in_suspend = %d\n", __FUNCTION__, conf->pm_in_suspend); + CONFIG_MSG("pm_in_suspend = %d\n", conf->pm_in_suspend); } else if (!strncmp("suspend_bcn_li_dtim=", full_param, len_param)) { conf->suspend_bcn_li_dtim = (int)simple_strtol(data, NULL, 10); - printf("%s: suspend_bcn_li_dtim = %d\n", __FUNCTION__, conf->suspend_bcn_li_dtim); + CONFIG_MSG("suspend_bcn_li_dtim = %d\n", conf->suspend_bcn_li_dtim); } else if (!strncmp("xmit_in_suspend=", full_param, len_param)) { if (!strncmp(data, "1", 1)) conf->insuspend &= ~NO_TXDATA_IN_SUSPEND; else conf->insuspend |= NO_TXDATA_IN_SUSPEND; - printf("%s: insuspend = 0x%x\n", __FUNCTION__, conf->insuspend); + CONFIG_MSG("insuspend = 0x%x\n", conf->insuspend); } else if (!strncmp("insuspend=", full_param, len_param)) { conf->insuspend = (int)simple_strtol(data, NULL, 0); - printf("%s: insuspend = 0x%x\n", __FUNCTION__, conf->insuspend); + CONFIG_MSG("insuspend = 0x%x\n", conf->insuspend); + } +#ifdef WL_EXT_WOWL + else if (!strncmp("wowl=", full_param, len_param)) { + conf->wowl = (int)simple_strtol(data, NULL, 0); + CONFIG_MSG("wowl = 0x%x\n", conf->wowl); } +#endif else return false; return true; } +#ifdef GET_CUSTOM_MAC_FROM_CONFIG +int +bcm_str2hex(const char *p, char *ea, int size) +{ + int i = 0; + char *ep; + + for (;;) { + ea[i++] = (char) bcm_strtoul(p, &ep, 16); + p = ep; + if (!*p++ || i == size) + break; + } + + return (i == size); +} +#endif + bool dhd_conf_read_others(dhd_pub_t *dhd, char *full_param, uint len_param) { struct dhd_conf *conf = dhd->conf; 
char *data = full_param+len_param; - uint len_data = strlen(data); char *pch, *pick_tmp; int i; +#ifdef GET_CUSTOM_MAC_FROM_CONFIG + struct ether_addr ea_addr; + char macpad[56]; +#endif if (!strncmp("dhd_poll=", full_param, len_param)) { if (!strncmp(data, "0", 1)) conf->dhd_poll = 0; else conf->dhd_poll = 1; - printf("%s: dhd_poll = %d\n", __FUNCTION__, conf->dhd_poll); + CONFIG_MSG("dhd_poll = %d\n", conf->dhd_poll); } else if (!strncmp("dhd_watchdog_ms=", full_param, len_param)) { dhd_watchdog_ms = (int)simple_strtol(data, NULL, 10); - printf("%s: dhd_watchdog_ms = %d\n", __FUNCTION__, dhd_watchdog_ms); + CONFIG_MSG("dhd_watchdog_ms = %d\n", dhd_watchdog_ms); } else if (!strncmp("band=", full_param, len_param)) { /* Process band: @@ -2261,39 +2884,29 @@ dhd_conf_read_others(dhd_pub_t *dhd, char *full_param, uint len_param) conf->band = WLC_BAND_5G; else conf->band = WLC_BAND_AUTO; - printf("%s: band = %d\n", __FUNCTION__, conf->band); + CONFIG_MSG("band = %d\n", conf->band); } else if (!strncmp("bw_cap_2g=", full_param, len_param)) { conf->bw_cap[0] = (uint)simple_strtol(data, NULL, 0); - printf("%s: bw_cap_2g = %d\n", __FUNCTION__, conf->bw_cap[0]); + CONFIG_MSG("bw_cap_2g = %d\n", conf->bw_cap[0]); } else if (!strncmp("bw_cap_5g=", full_param, len_param)) { conf->bw_cap[1] = (uint)simple_strtol(data, NULL, 0); - printf("%s: bw_cap_5g = %d\n", __FUNCTION__, conf->bw_cap[1]); + CONFIG_MSG("bw_cap_5g = %d\n", conf->bw_cap[1]); } else if (!strncmp("bw_cap=", full_param, len_param)) { pick_tmp = data; pch = bcmstrtok(&pick_tmp, " ,.-", 0); if (pch != NULL) { conf->bw_cap[0] = (uint32)simple_strtol(pch, NULL, 0); - printf("%s: bw_cap 2g = %d\n", __FUNCTION__, conf->bw_cap[0]); + CONFIG_MSG("bw_cap 2g = %d\n", conf->bw_cap[0]); } pch = bcmstrtok(&pick_tmp, " ,.-", 0); if (pch != NULL) { conf->bw_cap[1] = (uint32)simple_strtol(pch, NULL, 0); - printf("%s: bw_cap 5g = %d\n", __FUNCTION__, conf->bw_cap[1]); + CONFIG_MSG("bw_cap 5g = %d\n", conf->bw_cap[1]); } } - else 
if (!strncmp("ccode=", full_param, len_param)) { - memset(&conf->cspec, 0, sizeof(wl_country_t)); - memcpy(conf->cspec.country_abbrev, data, len_data); - memcpy(conf->cspec.ccode, data, len_data); - printf("%s: ccode = %s\n", __FUNCTION__, conf->cspec.ccode); - } - else if (!strncmp("regrev=", full_param, len_param)) { - conf->cspec.rev = (int32)simple_strtol(data, NULL, 10); - printf("%s: regrev = %d\n", __FUNCTION__, conf->cspec.rev); - } else if (!strncmp("channels=", full_param, len_param)) { pick_tmp = data; pch = bcmstrtok(&pick_tmp, " ,.-", 0); @@ -2304,93 +2917,114 @@ dhd_conf_read_others(dhd_pub_t *dhd, char *full_param, uint len_param) i++; } conf->channels.count = i; - printf("%s: channels = ", __FUNCTION__); + CONFIG_MSG("channels = "); for (i=0; ichannels.count; i++) printf("%d ", conf->channels.channel[i]); printf("\n"); } else if (!strncmp("keep_alive_period=", full_param, len_param)) { conf->keep_alive_period = (uint)simple_strtol(data, NULL, 10); - printf("%s: keep_alive_period = %d\n", __FUNCTION__, - conf->keep_alive_period); + CONFIG_MSG("keep_alive_period = %d\n", conf->keep_alive_period); } - else if (!strncmp("phy_oclscdenable=", full_param, len_param)) { - conf->phy_oclscdenable = (int)simple_strtol(data, NULL, 10); - printf("%s: phy_oclscdenable = %d\n", __FUNCTION__, conf->phy_oclscdenable); +#ifdef ARP_OFFLOAD_SUPPORT + else if (!strncmp("garp=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->garp = FALSE; + else + conf->garp = TRUE; + CONFIG_MSG("garp = %d\n", conf->garp); } +#endif else if (!strncmp("srl=", full_param, len_param)) { conf->srl = (int)simple_strtol(data, NULL, 10); - printf("%s: srl = %d\n", __FUNCTION__, conf->srl); + CONFIG_MSG("srl = %d\n", conf->srl); } else if (!strncmp("lrl=", full_param, len_param)) { conf->lrl = (int)simple_strtol(data, NULL, 10); - printf("%s: lrl = %d\n", __FUNCTION__, conf->lrl); + CONFIG_MSG("lrl = %d\n", conf->lrl); } else if (!strncmp("bcn_timeout=", full_param, len_param)) 
{ conf->bcn_timeout= (uint)simple_strtol(data, NULL, 10); - printf("%s: bcn_timeout = %d\n", __FUNCTION__, conf->bcn_timeout); - } - else if (!strncmp("txbf=", full_param, len_param)) { - conf->txbf = (int)simple_strtol(data, NULL, 10); - printf("%s: txbf = %d\n", __FUNCTION__, conf->txbf); + CONFIG_MSG("bcn_timeout = %d\n", conf->bcn_timeout); } else if (!strncmp("frameburst=", full_param, len_param)) { conf->frameburst = (int)simple_strtol(data, NULL, 10); - printf("%s: frameburst = %d\n", __FUNCTION__, conf->frameburst); + CONFIG_MSG("frameburst = %d\n", conf->frameburst); } else if (!strncmp("disable_proptx=", full_param, len_param)) { conf->disable_proptx = (int)simple_strtol(data, NULL, 10); - printf("%s: disable_proptx = %d\n", __FUNCTION__, conf->disable_proptx); + CONFIG_MSG("disable_proptx = %d\n", conf->disable_proptx); } #ifdef DHDTCPACK_SUPPRESS else if (!strncmp("tcpack_sup_mode=", full_param, len_param)) { conf->tcpack_sup_mode = (uint)simple_strtol(data, NULL, 10); - printf("%s: tcpack_sup_mode = %d\n", __FUNCTION__, conf->tcpack_sup_mode); + CONFIG_MSG("tcpack_sup_mode = %d\n", conf->tcpack_sup_mode); } #endif else if (!strncmp("pktprio8021x=", full_param, len_param)) { conf->pktprio8021x = (int)simple_strtol(data, NULL, 10); - printf("%s: pktprio8021x = %d\n", __FUNCTION__, conf->pktprio8021x); + CONFIG_MSG("pktprio8021x = %d\n", conf->pktprio8021x); } #if defined(BCMSDIO) || defined(BCMPCIE) else if (!strncmp("dhd_txbound=", full_param, len_param)) { dhd_txbound = (uint)simple_strtol(data, NULL, 10); - printf("%s: dhd_txbound = %d\n", __FUNCTION__, dhd_txbound); + CONFIG_MSG("dhd_txbound = %d\n", dhd_txbound); } else if (!strncmp("dhd_rxbound=", full_param, len_param)) { dhd_rxbound = (uint)simple_strtol(data, NULL, 10); - printf("%s: dhd_rxbound = %d\n", __FUNCTION__, dhd_rxbound); + CONFIG_MSG("dhd_rxbound = %d\n", dhd_rxbound); } #endif + else if (!strncmp("orphan_move=", full_param, len_param)) { + conf->orphan_move = (int)simple_strtol(data, 
NULL, 10); + CONFIG_MSG("orphan_move = %d\n", conf->orphan_move); + } else if (!strncmp("tsq=", full_param, len_param)) { conf->tsq = (int)simple_strtol(data, NULL, 10); - printf("%s: tsq = %d\n", __FUNCTION__, conf->tsq); + CONFIG_MSG("tsq = %d\n", conf->tsq); } else if (!strncmp("ctrl_resched=", full_param, len_param)) { conf->ctrl_resched = (int)simple_strtol(data, NULL, 10); - printf("%s: ctrl_resched = %d\n", __FUNCTION__, conf->ctrl_resched); - } - else if (!strncmp("dhd_ioctl_timeout_msec=", full_param, len_param)) { - conf->dhd_ioctl_timeout_msec = (int)simple_strtol(data, NULL, 10); - printf("%s: dhd_ioctl_timeout_msec = %d\n", __FUNCTION__, conf->dhd_ioctl_timeout_msec); + CONFIG_MSG("ctrl_resched = %d\n", conf->ctrl_resched); } else if (!strncmp("in4way=", full_param, len_param)) { conf->in4way = (int)simple_strtol(data, NULL, 0); - printf("%s: in4way = 0x%x\n", __FUNCTION__, conf->in4way); - } - else if (!strncmp("max_wait_gc_time=", full_param, len_param)) { - conf->max_wait_gc_time = (int)simple_strtol(data, NULL, 0); - printf("%s: max_wait_gc_time = %d\n", __FUNCTION__, conf->max_wait_gc_time); + CONFIG_MSG("in4way = 0x%x\n", conf->in4way); } else if (!strncmp("wl_preinit=", full_param, len_param)) { if (!(conf->wl_preinit = kmalloc(len_param+1, GFP_KERNEL))) { - CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + CONFIG_ERROR("kmalloc failed\n"); } else { memset(conf->wl_preinit, 0, len_param+1); strcpy(conf->wl_preinit, data); - printf("%s: wl_preinit = %s\n", __FUNCTION__, conf->wl_preinit); + CONFIG_MSG("wl_preinit = %s\n", conf->wl_preinit); + } + } +#ifdef GET_CUSTOM_MAC_FROM_CONFIG + else if (!strncmp("mac=", full_param, len_param)) { + if (!bcm_ether_atoe(data, &ea_addr)) { + CONFIG_ERROR("mac adress read error"); + return false; + } + memcpy(&conf->hw_ether, &ea_addr, ETHER_ADDR_LEN); + CONFIG_MSG("mac = %s\n", data); + } + else if (!strncmp("macpad=", full_param, len_param)) { + if (!bcm_str2hex(data, macpad, sizeof(macpad))) { + 
CONFIG_ERROR("macpad adress read error"); + return false; + } + memcpy(&conf->hw_ether[ETHER_ADDR_LEN], macpad, sizeof(macpad)); + if (config_msg_level & CONFIG_TRACE_LEVEL) { + printf("macpad =\n"); + for (i=0; ihw_ether[ETHER_ADDR_LEN+i]); + if ((i+1)%8 == 0) + printf("\n"); + } } } +#endif else return false; @@ -2400,7 +3034,7 @@ dhd_conf_read_others(dhd_pub_t *dhd, char *full_param, uint len_param) int dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path) { - int bcmerror = -1; + int bcmerror = -1, chip_match = -1; uint len = 0, start_pos=0; void * image = NULL; char * memblock = NULL; @@ -2417,22 +3051,20 @@ dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path) if (conf_file_exists) { image = dhd_os_open_image1(dhd, conf_path); if (image == NULL) { - printf("%s: Ignore config file %s\n", __FUNCTION__, conf_path); + CONFIG_MSG("Ignore config file %s\n", conf_path); goto err; } } memblock = MALLOC(dhd->osh, MAXSZ_CONFIG); if (memblock == NULL) { - CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n", - __FUNCTION__, MAXSZ_CONFIG)); + CONFIG_ERROR("Failed to allocate memory %d bytes\n", MAXSZ_CONFIG); goto err; } pick = MALLOC(dhd->osh, MAXSZ_BUF); if (!pick) { - CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n", - __FUNCTION__, MAXSZ_BUF)); + CONFIG_ERROR("Failed to allocate memory %d bytes\n", MAXSZ_BUF); goto err; } @@ -2451,24 +3083,30 @@ dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path) if (pch != NULL) { len_param = pch-pick+1; if (len_param == strlen(pick)) { - CONFIG_ERROR(("%s: not a right parameter %s\n", __FUNCTION__, pick)); + CONFIG_ERROR("not a right parameter %s\n", pick); continue; } } else { - CONFIG_ERROR(("%s: not a right parameter %s\n", __FUNCTION__, pick)); + CONFIG_ERROR("not a right parameter %s\n", pick); continue; } + dhd_conf_read_chiprev(dhd, &chip_match, pick, len_param); + if (!chip_match) + continue; + if (dhd_conf_read_log_level(dhd, pick, len_param)) continue; else if (dhd_conf_read_roam_params(dhd, pick, 
len_param)) continue; else if (dhd_conf_read_wme_ac_params(dhd, pick, len_param)) continue; +#ifdef BCMSDIO else if (dhd_conf_read_fw_by_mac(dhd, pick, len_param)) continue; else if (dhd_conf_read_nv_by_mac(dhd, pick, len_param)) continue; +#endif else if (dhd_conf_read_nv_by_chip(dhd, pick, len_param)) continue; else if (dhd_conf_read_country_list(dhd, pick, len_param)) @@ -2505,7 +3143,7 @@ dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path) bcmerror = 0; } else { - CONFIG_ERROR(("%s: error reading config file: %d\n", __FUNCTION__, len)); + CONFIG_ERROR("error reading config file: %d\n", len); bcmerror = BCME_SDIO_ERROR; } @@ -2525,7 +3163,7 @@ err: int dhd_conf_set_chiprev(dhd_pub_t *dhd, uint chip, uint chiprev) { - printf("%s: chip=0x%x, chiprev=%d\n", __FUNCTION__, chip, chiprev); + CONFIG_MSG("chip=0x%x, chiprev=%d\n", chip, chiprev); dhd->conf->chip = chip; dhd->conf->chiprev = chiprev; return 0; @@ -2566,6 +3204,17 @@ dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable) } #endif // other parameters set in preinit or config.txt + if (conf->txglom_ext) + CONFIG_MSG("txglom_ext=%d, txglom_bucket_size=%d\n", + conf->txglom_ext, conf->txglom_bucket_size); + CONFIG_MSG("txglom_mode=%s\n", + conf->txglom_mode==SDPCM_TXGLOM_MDESC?"multi-desc":"copy"); + CONFIG_MSG("txglomsize=%d, deferred_tx_len=%d\n", + conf->txglomsize, conf->deferred_tx_len); + CONFIG_MSG("txinrx_thres=%d, dhd_txminmax=%d\n", + conf->txinrx_thres, conf->dhd_txminmax); + CONFIG_MSG("tx_max_offset=%d, txctl_tmo_fix=%d\n", + conf->tx_max_offset, conf->txctl_tmo_fix); } else { // clear txglom parameters conf->txglom_ext = FALSE; @@ -2573,17 +3222,6 @@ dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable) conf->txglomsize = 0; conf->deferred_tx_len = 0; } - if (conf->txglom_ext) - printf("%s: txglom_ext=%d, txglom_bucket_size=%d\n", __FUNCTION__, - conf->txglom_ext, conf->txglom_bucket_size); - printf("%s: txglom_mode=%s\n", __FUNCTION__, - 
conf->txglom_mode==SDPCM_TXGLOM_MDESC?"multi-desc":"copy"); - printf("%s: txglomsize=%d, deferred_tx_len=%d\n", __FUNCTION__, - conf->txglomsize, conf->deferred_tx_len); - printf("%s: txinrx_thres=%d, dhd_txminmax=%d\n", __FUNCTION__, - conf->txinrx_thres, conf->dhd_txminmax); - printf("%s: tx_max_offset=%d, txctl_tmo_fix=%d\n", __FUNCTION__, - conf->tx_max_offset, conf->txctl_tmo_fix); } #endif @@ -2598,9 +3236,9 @@ dhd_conf_rsdb_mode(dhd_pub_t *dhd, char *buf) rsdb_mode_cfg.config = (int)simple_strtol(pch, NULL, 0); if (pch) { - dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "rsdb_mode", (char *)&rsdb_mode_cfg, + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "rsdb_mode", (char *)&rsdb_mode_cfg, sizeof(rsdb_mode_cfg), TRUE); - printf("%s: rsdb_mode %d\n", __FUNCTION__, rsdb_mode_cfg.config); + CONFIG_MSG("rsdb_mode %d\n", rsdb_mode_cfg.config); } return 0; @@ -2682,7 +3320,7 @@ dhd_conf_postinit_ioctls(dhd_pub_t *dhd) struct dhd_conf *conf = dhd->conf; char wl_preinit[] = "assoc_retry_max=30"; - dhd_conf_set_intiovar(dhd, WLC_UP, "up", 0, 0, FALSE); + dhd_conf_set_intiovar(dhd, WLC_UP, "WLC_UP", 0, 0, FALSE); dhd_conf_map_country_list(dhd, &conf->cspec); dhd_conf_set_country(dhd, &conf->cspec); dhd_conf_fix_country(dhd); @@ -2690,7 +3328,7 @@ dhd_conf_postinit_ioctls(dhd_pub_t *dhd) dhd_conf_set_intiovar(dhd, WLC_SET_BAND, "WLC_SET_BAND", conf->band, 0, FALSE); dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "bcn_timeout", conf->bcn_timeout, 0, FALSE); - dhd_conf_set_intiovar(dhd, WLC_SET_PM, "PM", conf->pm, 0, FALSE); + dhd_conf_set_intiovar(dhd, WLC_SET_PM, "WLC_SET_PM", conf->pm, 0, FALSE); dhd_conf_set_intiovar(dhd, WLC_SET_SRL, "WLC_SET_SRL", conf->srl, 0, FALSE); dhd_conf_set_intiovar(dhd, WLC_SET_LRL, "WLC_SET_LRL", conf->lrl, 0, FALSE); dhd_conf_set_bw_cap(dhd); @@ -2702,27 +3340,46 @@ dhd_conf_postinit_ioctls(dhd_pub_t *dhd) #endif /* defined(BCMPCIE) */ #ifdef IDHCP - dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "dhcpc_enable", conf->dhcpc_enable, 0, FALSE); - if 
(dhd->conf->dhcpd_enable >= 0) { - dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_addr", + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "dhcpc_enable", conf->dhcpc_enable, + 0, FALSE); + if (conf->dhcpd_enable >= 0) { + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "dhcpd_ip_addr", (char *)&conf->dhcpd_ip_addr, sizeof(conf->dhcpd_ip_addr), FALSE); - dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_mask", + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "dhcpd_ip_mask", (char *)&conf->dhcpd_ip_mask, sizeof(conf->dhcpd_ip_mask), FALSE); - dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_start", + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "dhcpd_ip_start", (char *)&conf->dhcpd_ip_start, sizeof(conf->dhcpd_ip_start), FALSE); - dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_end", + dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "dhcpd_ip_end", (char *)&conf->dhcpd_ip_end, sizeof(conf->dhcpd_ip_end), FALSE); dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "dhcpd_enable", conf->dhcpd_enable, 0, FALSE); } #endif - dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "txbf", conf->txbf, 0, FALSE); - dhd_conf_set_intiovar(dhd, WLC_SET_FAKEFRAG, "WLC_SET_FAKEFRAG", conf->frameburst, 0, FALSE); + dhd_conf_set_intiovar(dhd, WLC_SET_FAKEFRAG, "WLC_SET_FAKEFRAG", + conf->frameburst, 0, FALSE); + dhd_conf_set_wl_preinit(dhd, wl_preinit); +#if defined(BCMSDIO) + { + char ampdu_mpdu[] = "ampdu_mpdu=16"; + dhd_conf_set_wl_preinit(dhd, ampdu_mpdu); + } +#endif + if (conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID || + conf->chip == BCM4371_CHIP_ID || conf->chip == BCM4359_CHIP_ID || + conf->chip == BCM43569_CHIP_ID) { + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "txbf", 1, 0, FALSE); + } +#if defined(WLEASYMESH) + { + char ezmesh[] = "mbss=1, rsdb_mode=0"; + dhd_conf_set_wl_preinit(dhd, ezmesh); + } +#endif dhd_conf_set_wl_preinit(dhd, conf->wl_preinit); #ifndef WL_CFG80211 - dhd_conf_set_intiovar(dhd, WLC_UP, "up", 0, 0, FALSE); + dhd_conf_set_intiovar(dhd, WLC_UP, "WLC_UP", 0, 0, FALSE); #endif } 
@@ -2733,7 +3390,7 @@ dhd_conf_preinit(dhd_pub_t *dhd) struct dhd_conf *conf = dhd->conf; int i; - CONFIG_TRACE(("%s: Enter\n", __FUNCTION__)); + CONFIG_TRACE("Enter\n"); #ifdef BCMSDIO dhd_conf_free_mac_list(&conf->fw_by_mac); @@ -2772,34 +3429,20 @@ dhd_conf_preinit(dhd_pub_t *dhd) memset(&conf->channels, 0, sizeof(wl_channel_list_t)); conf->roam_off = 1; conf->roam_off_suspend = 1; -#ifdef CUSTOM_ROAM_TRIGGER_SETTING - conf->roam_trigger[0] = CUSTOM_ROAM_TRIGGER_SETTING; -#else conf->roam_trigger[0] = -65; -#endif conf->roam_trigger[1] = WLC_BAND_ALL; conf->roam_scan_period[0] = 10; conf->roam_scan_period[1] = WLC_BAND_ALL; -#ifdef CUSTOM_ROAM_DELTA_SETTING - conf->roam_delta[0] = CUSTOM_ROAM_DELTA_SETTING; -#else - conf->roam_delta[0] = 15; -#endif + conf->roam_delta[0] = 10; conf->roam_delta[1] = WLC_BAND_ALL; -#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC - conf->fullroamperiod = 60; -#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */ - conf->fullroamperiod = 120; -#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */ -#ifdef CUSTOM_KEEP_ALIVE_SETTING - conf->keep_alive_period = CUSTOM_KEEP_ALIVE_SETTING; -#else - conf->keep_alive_period = 28000; + conf->fullroamperiod = 20; + conf->keep_alive_period = 30000; +#ifdef ARP_OFFLOAD_SUPPORT + conf->garp = FALSE; #endif conf->force_wme_ac = 0; memset(&conf->wme_sta, 0, sizeof(wme_param_t)); memset(&conf->wme_ap, 0, sizeof(wme_param_t)); - conf->phy_oclscdenable = -1; #ifdef PKT_FILTER_SUPPORT memset(&conf->pkt_filter_add, 0, sizeof(conf_pkt_filter_add_t)); memset(&conf->pkt_filter_del, 0, sizeof(conf_pkt_filter_del_t)); @@ -2807,7 +3450,6 @@ dhd_conf_preinit(dhd_pub_t *dhd) conf->srl = -1; conf->lrl = -1; conf->bcn_timeout = 16; - conf->txbf = -1; conf->disable_proptx = -1; conf->dhd_poll = -1; #ifdef BCMSDIO @@ -2817,15 +3459,19 @@ dhd_conf_preinit(dhd_pub_t *dhd) conf->tx_max_offset = 0; conf->txglomsize = SDPCM_DEFGLOM_SIZE; conf->txctl_tmo_fix = 300; - conf->txglom_mode = SDPCM_TXGLOM_CPY; + conf->txglom_mode = SDPCM_TXGLOM_MDESC; 
conf->deferred_tx_len = 0; conf->dhd_txminmax = 1; conf->txinrx_thres = -1; - conf->sd_f2_blocksize = 0; +#if defined(SDIO_ISR_THREAD) + conf->intr_extn = FALSE; +#endif +#ifdef BCMSDIO_RXLIM_POST + conf->rxlim_en = TRUE; +#endif #if defined(HW_OOB) conf->oob_enabled_later = FALSE; #endif - conf->orphan_move = 0; #endif #ifdef BCMPCIE conf->bus_deepsleep_disable = 1; @@ -2837,14 +3483,27 @@ dhd_conf_preinit(dhd_pub_t *dhd) conf->pm = -1; conf->pm_in_suspend = -1; conf->suspend_bcn_li_dtim = -1; +#ifdef WL_EXT_WOWL + dhd_master_mode = TRUE; + conf->wowl = WL_WOWL_NET|WL_WOWL_DIS|WL_WOWL_BCN; + conf->insuspend = WOWL_IN_SUSPEND | NO_TXDATA_IN_SUSPEND; +#else conf->insuspend = 0; +#endif + conf->suspended = FALSE; #ifdef SUSPEND_EVENT memset(&conf->resume_eventmask, 0, sizeof(conf->resume_eventmask)); + memset(&conf->bssid_insuspend, 0, ETHER_ADDR_LEN); + conf->wlfc = FALSE; +#endif +#ifdef GET_CUSTOM_MAC_FROM_CONFIG + memset(&conf->hw_ether, 0, sizeof(conf->hw_ether)); #endif #ifdef IDHCP conf->dhcpc_enable = -1; conf->dhcpd_enable = -1; #endif + conf->orphan_move = 0; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) conf->tsq = 10; #else @@ -2859,9 +3518,7 @@ dhd_conf_preinit(dhd_pub_t *dhd) #endif conf->pktprio8021x = -1; conf->ctrl_resched = 2; - conf->dhd_ioctl_timeout_msec = 0; conf->in4way = NO_SCAN_IN4WAY | WAIT_DISCONNECTED; - conf->max_wait_gc_time = 300; #ifdef ISAM_PREINIT memset(conf->isam_init, 0, sizeof(conf->isam_init)); memset(conf->isam_config, 0, sizeof(conf->isam_config)); @@ -2872,12 +3529,29 @@ dhd_conf_preinit(dhd_pub_t *dhd) } #ifdef CUSTOMER_HW_AMLOGIC dhd_slpauto = FALSE; +#ifdef BCMSDIO + conf->txglom_mode = SDPCM_TXGLOM_CPY; #endif - if (conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID || +#endif +#if defined(SDIO_ISR_THREAD) + if (conf->chip == BCM43012_CHIP_ID || + conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID || + conf->chip == BCM43454_CHIP_ID || conf->chip == BCM4345_CHIP_ID || + conf->chip == 
BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID || + conf->chip == BCM4345_CHIP_ID || conf->chip == BCM4371_CHIP_ID || + conf->chip == BCM4359_CHIP_ID || conf->chip == BCM43751_CHIP_ID || + conf->chip == BCM43752_CHIP_ID) { + conf->intr_extn = TRUE; + } +#endif + if ((conf->chip == BCM43430_CHIP_ID && conf->chiprev == 2) || + conf->chip == BCM43012_CHIP_ID || + conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID || + conf->chip == BCM43454_CHIP_ID || conf->chip == BCM4345_CHIP_ID || conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID || conf->chip == BCM4345_CHIP_ID || conf->chip == BCM4371_CHIP_ID || conf->chip == BCM43569_CHIP_ID || conf->chip == BCM4359_CHIP_ID || - conf->chip == BCM4362_CHIP_ID || conf->chip == BCM43751_CHIP_ID) { + conf->chip == BCM43751_CHIP_ID || conf->chip == BCM43752_CHIP_ID) { #ifdef DHDTCPACK_SUPPRESS #ifdef BCMSDIO conf->tcpack_sup_mode = TCPACK_SUP_REPLACE; @@ -2887,17 +3561,15 @@ dhd_conf_preinit(dhd_pub_t *dhd) dhd_rxbound = 128; dhd_txbound = 64; #endif - conf->txbf = 1; conf->frameburst = 1; #ifdef BCMSDIO conf->dhd_txminmax = -1; conf->txinrx_thres = 128; - conf->sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE; +#endif #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) conf->orphan_move = 1; #else conf->orphan_move = 0; -#endif #endif } @@ -2953,15 +3625,15 @@ dhd_conf_attach(dhd_pub_t *dhd) { dhd_conf_t *conf; - CONFIG_TRACE(("%s: Enter\n", __FUNCTION__)); + CONFIG_TRACE("Enter\n"); if (dhd->conf != NULL) { - printf("%s: config is attached before!\n", __FUNCTION__); + CONFIG_MSG("config is attached before!\n"); return 0; } /* Allocate private bus interface state */ if (!(conf = MALLOC(dhd->osh, sizeof(dhd_conf_t)))) { - CONFIG_ERROR(("%s: MALLOC failed\n", __FUNCTION__)); + CONFIG_ERROR("MALLOC failed\n"); goto fail; } memset(conf, 0, sizeof(dhd_conf_t)); @@ -2979,7 +3651,7 @@ fail: void dhd_conf_detach(dhd_pub_t *dhd) { - CONFIG_TRACE(("%s: Enter\n", __FUNCTION__)); + CONFIG_TRACE("Enter\n"); if (dhd->conf) { 
#ifdef BCMSDIO diff --git a/bcmdhd.100.10.315.x/dhd_config.h b/bcmdhd.100.10.315.x/dhd_config.h index 1830c5f..7902171 100644 --- a/bcmdhd.100.10.315.x/dhd_config.h +++ b/bcmdhd.100.10.315.x/dhd_config.h @@ -88,7 +88,6 @@ typedef struct conf_pkt_filter_del { #define CONFIG_COUNTRY_LIST_SIZE 100 typedef struct conf_country_list { - uint32 count; wl_country_t *cspec[CONFIG_COUNTRY_LIST_SIZE]; } conf_country_list_t; @@ -110,40 +109,48 @@ enum in4way_flags { }; enum in_suspend_flags { - NO_EVENT_IN_SUSPEND = (1 << (0)), + NO_EVENT_IN_SUSPEND = (1 << (0)), NO_TXDATA_IN_SUSPEND = (1 << (1)), - AP_DOWN_IN_SUSPEND = (1 << (2)), - ROAM_OFFLOAD_IN_SUSPEND = (1 << (3)), + NO_TXCTL_IN_SUSPEND = (1 << (2)), + AP_DOWN_IN_SUSPEND = (1 << (3)), + ROAM_OFFLOAD_IN_SUSPEND = (1 << (4)), + AP_FILTER_IN_SUSPEND = (1 << (5)), + WOWL_IN_SUSPEND = (1 << (6)), + ALL_IN_SUSPEND = 0xFFFFFFFF, }; enum eapol_status { EAPOL_STATUS_NONE = 0, - EAPOL_STATUS_WPS_REQID, - EAPOL_STATUS_WPS_RSPID, - EAPOL_STATUS_WPS_WSC_START, - EAPOL_STATUS_WPS_M1, - EAPOL_STATUS_WPS_M2, - EAPOL_STATUS_WPS_M3, - EAPOL_STATUS_WPS_M4, - EAPOL_STATUS_WPS_M5, - EAPOL_STATUS_WPS_M6, - EAPOL_STATUS_WPS_M7, - EAPOL_STATUS_WPS_M8, - EAPOL_STATUS_WPS_DONE, - EAPOL_STATUS_WPA_START, - EAPOL_STATUS_WPA_M1, - EAPOL_STATUS_WPA_M2, - EAPOL_STATUS_WPA_M3, - EAPOL_STATUS_WPA_M4, - EAPOL_STATUS_WPA_END + EAPOL_STATUS_REQID = 1, + EAPOL_STATUS_RSPID = 2, + EAPOL_STATUS_WSC_START = 3, + EAPOL_STATUS_WPS_M1 = 4, + EAPOL_STATUS_WPS_M2 = 5, + EAPOL_STATUS_WPS_M3 = 6, + EAPOL_STATUS_WPS_M4 = 7, + EAPOL_STATUS_WPS_M5 = 8, + EAPOL_STATUS_WPS_M6 = 9, + EAPOL_STATUS_WPS_M7 = 10, + EAPOL_STATUS_WPS_M8 = 11, + EAPOL_STATUS_WSC_DONE = 12, + EAPOL_STATUS_4WAY_START = 13, + EAPOL_STATUS_4WAY_M1 = 14, + EAPOL_STATUS_4WAY_M2 = 15, + EAPOL_STATUS_4WAY_M3 = 16, + EAPOL_STATUS_4WAY_M4 = 17, + EAPOL_STATUS_GROUPKEY_M1 = 18, + EAPOL_STATUS_GROUPKEY_M2 = 19, + EAPOL_STATUS_4WAY_DONE = 20 }; typedef struct dhd_conf { uint chip; uint chiprev; int fw_type; 
+#ifdef BCMSDIO wl_mac_list_ctrl_t fw_by_mac; wl_mac_list_ctrl_t nv_by_mac; +#endif wl_chip_nv_path_list_ctrl_t nv_by_chip; conf_country_list_t country_list; int band; @@ -157,10 +164,12 @@ typedef struct dhd_conf { int roam_delta[2]; int fullroamperiod; uint keep_alive_period; +#ifdef ARP_OFFLOAD_SUPPORT + bool garp; +#endif int force_wme_ac; wme_param_t wme_sta; wme_param_t wme_ap; - int phy_oclscdenable; #ifdef PKT_FILTER_SUPPORT conf_pkt_filter_add_t pkt_filter_add; conf_pkt_filter_del_t pkt_filter_del; @@ -169,7 +178,6 @@ typedef struct dhd_conf { int srl; int lrl; uint bcn_timeout; - int txbf; int disable_proptx; int dhd_poll; #ifdef BCMSDIO @@ -193,9 +201,13 @@ typedef struct dhd_conf { int txglom_bucket_size; int txinrx_thres; int dhd_txminmax; // -1=DATABUFCNT(bus) - uint sd_f2_blocksize; bool oob_enabled_later; - int orphan_move; +#if defined(SDIO_ISR_THREAD) + bool intr_extn; +#endif +#ifdef BCMSDIO_RXLIM_POST + bool rxlim_en; +#endif #endif #ifdef BCMPCIE int bus_deepsleep_disable; @@ -215,6 +227,8 @@ typedef struct dhd_conf { bool suspended; #ifdef SUSPEND_EVENT char resume_eventmask[WL_EVENTING_MASK_LEN]; + struct ether_addr bssid_insuspend; + bool wlfc; #endif #ifdef IDHCP int dhcpc_enable; @@ -230,41 +244,39 @@ typedef struct dhd_conf { char isam_enable[50]; #endif int ctrl_resched; - int dhd_ioctl_timeout_msec; struct mchan_params mchan[MCHAN_MAX_NUM]; char *wl_preinit; int tsq; + int orphan_move; uint eapol_status; uint in4way; - uint max_wait_gc_time; +#ifdef WL_EXT_WOWL + uint wowl; +#endif +#ifdef GET_CUSTOM_MAC_FROM_CONFIG + char hw_ether[62]; +#endif } dhd_conf_t; #ifdef BCMSDIO int dhd_conf_get_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih, uint8 *mac); -void dhd_conf_set_fw_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih, char *fw_path); -void dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih, char *nv_path); #if defined(HW_OOB) || defined(FORCE_WOWLAN) void dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, 
struct si_pub *sih); #endif void dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable); int dhd_conf_set_blksize(bcmsdh_info_t *sdh); #endif -void dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path); -void dhd_conf_set_clm_name_by_chip(dhd_pub_t *dhd, char *clm_path); -void dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path); -void dhd_conf_set_path(dhd_pub_t *dhd, char *dst_name, char *dst_path, char *src_path); -#ifdef CONFIG_PATH_AUTO_SELECT -void dhd_conf_set_conf_name_by_chip(dhd_pub_t *dhd, char *conf_path); -#endif -int dhd_conf_set_intiovar(dhd_pub_t *dhd, uint cmd, char *name, int val, int def, bool down); -int dhd_conf_set_bufiovar(dhd_pub_t *dhd, uint cmd, char *name, char *buf, int len, bool down); -uint dhd_conf_get_band(dhd_pub_t *dhd); +void dhd_conf_set_path_params(dhd_pub_t *dhd, void *sdh, void *sih, + char *fw_path, char *nv_path); +int dhd_conf_set_intiovar(dhd_pub_t *dhd, uint cmd, char *name, int val, + int def, bool down); +int dhd_conf_get_band(dhd_pub_t *dhd); int dhd_conf_set_country(dhd_pub_t *dhd, wl_country_t *cspec); int dhd_conf_get_country(dhd_pub_t *dhd, wl_country_t *cspec); int dhd_conf_map_country_list(dhd_pub_t *dhd, wl_country_t *cspec); int dhd_conf_fix_country(dhd_pub_t *dhd); bool dhd_conf_match_channel(dhd_pub_t *dhd, uint32 channel); -void dhd_conf_set_wme(dhd_pub_t *dhd, int mode); +void dhd_conf_set_wme(dhd_pub_t *dhd, int ifidx, int mode); void dhd_conf_set_mchan_bw(dhd_pub_t *dhd, int go, int source); void dhd_conf_add_pkt_filter(dhd_pub_t *dhd); bool dhd_conf_del_pkt_filter(dhd_pub_t *dhd, uint32 id); @@ -274,11 +286,19 @@ int dhd_conf_set_chiprev(dhd_pub_t *dhd, uint chip, uint chiprev); uint dhd_conf_get_chip(void *context); uint dhd_conf_get_chiprev(void *context); int dhd_conf_get_pm(dhd_pub_t *dhd); - +int dhd_conf_check_hostsleep(dhd_pub_t *dhd, int cmd, void *buf, int len, + int *hostsleep_set, int *hostsleep_val, int *ret); +void dhd_conf_get_hostsleep(dhd_pub_t *dhd, + int hostsleep_set, int 
hostsleep_val, int ret); +int dhd_conf_mkeep_alive(dhd_pub_t *dhd, int ifidx, int id, int period, + char *packet, bool bcast); +#ifdef ARP_OFFLOAD_SUPPORT +void dhd_conf_set_garp(dhd_pub_t *dhd, int ifidx, uint32 ipa, bool enable); +#endif #ifdef PROP_TXSTATUS int dhd_conf_get_disable_proptx(dhd_pub_t *dhd); #endif -uint dhd_conf_get_insuspend(dhd_pub_t *dhd); +uint dhd_conf_get_insuspend(dhd_pub_t *dhd, uint mask); int dhd_conf_set_suspend_resume(dhd_pub_t *dhd, int suspend); void dhd_conf_postinit_ioctls(dhd_pub_t *dhd); int dhd_conf_preinit(dhd_pub_t *dhd); @@ -286,5 +306,5 @@ int dhd_conf_reset(dhd_pub_t *dhd); int dhd_conf_attach(dhd_pub_t *dhd); void dhd_conf_detach(dhd_pub_t *dhd); void *dhd_get_pub(struct net_device *dev); -void *dhd_get_conf(struct net_device *dev); +int wl_pattern_atoh(char *src, char *dst); #endif /* _dhd_config_ */ diff --git a/bcmdhd.100.10.315.x/dhd_custom_gpio.c b/bcmdhd.100.10.315.x/dhd_custom_gpio.c index aa60f57..136d2a2 100644 --- a/bcmdhd.100.10.315.x/dhd_custom_gpio.c +++ b/bcmdhd.100.10.315.x/dhd_custom_gpio.c @@ -1,7 +1,7 @@ /* * Customer code to add GPIO control during WLAN start/stop * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/dhd_custom_hikey.c b/bcmdhd.100.10.315.x/dhd_custom_hikey.c index b2e7781..38673ae 100644 --- a/bcmdhd.100.10.315.x/dhd_custom_hikey.c +++ b/bcmdhd.100.10.315.x/dhd_custom_hikey.c @@ -1,7 +1,7 @@ /* * Platform Dependent file for Hikey * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -217,6 +217,16 @@ static int dhd_wlan_get_wake_irq(void) } #endif /* BCMSDIO */ +#if defined(CONFIG_BCMDHD_OOB_HOST_WAKE) && defined(CONFIG_BCMDHD_GET_OOB_STATE) +int +dhd_get_wlan_oob_gpio(void) +{ + return gpio_is_valid(wlan_host_wake_up) ? + gpio_get_value(wlan_host_wake_up) : -1; +} +EXPORT_SYMBOL(dhd_get_wlan_oob_gpio); +#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE && CONFIG_BCMDHD_GET_OOB_STATE */ + struct resource dhd_wlan_resources = { .name = "bcmdhd_wlan_irq", .start = 0, /* Dummy */ diff --git a/bcmdhd.100.10.315.x/dhd_custom_memprealloc.c b/bcmdhd.100.10.315.x/dhd_custom_memprealloc.c index 7887406..ef9f910 100644 --- a/bcmdhd.100.10.315.x/dhd_custom_memprealloc.c +++ b/bcmdhd.100.10.315.x/dhd_custom_memprealloc.c @@ -1,7 +1,7 @@ /* * Platform Dependent file for usage of Preallocted Memory * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -23,7 +23,7 @@ * * <> * - * $Id: dhd_custom_memprealloc.c 744015 2018-01-31 05:51:10Z $ + * $Id: dhd_custom_memprealloc.c 805764 2019-02-20 08:46:57Z $ */ #include @@ -290,12 +290,14 @@ dhd_init_wlan_mem(void) int i; int j; +#if !defined(CONFIG_BCMDHD_PCIE) for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) { wlan_static_skb[i] = __dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE, GFP_KERNEL); if (!wlan_static_skb[i]) { goto err_skb_alloc; } } +#endif /* !CONFIG_BCMDHD_PCIE */ for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) { wlan_static_skb[i] = __dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE, GFP_KERNEL); diff --git a/bcmdhd.100.10.315.x/dhd_dbg.h b/bcmdhd.100.10.315.x/dhd_dbg.h index c43848b..5c84497 100644 --- a/bcmdhd.100.10.315.x/dhd_dbg.h +++ b/bcmdhd.100.10.315.x/dhd_dbg.h @@ -1,7 +1,7 @@ /* * Debug/trace/assert driver definitions for Dongle Host Driver. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,21 +24,13 @@ * * <> * - * $Id: dhd_dbg.h 759128 2018-04-24 03:48:17Z $ + * $Id: dhd_dbg.h 798329 2019-01-08 05:40:39Z $ */ #ifndef _dhd_dbg_ #define _dhd_dbg_ #ifdef DHD_LOG_DUMP -typedef enum { - DLD_BUF_TYPE_GENERAL = 0, - DLD_BUF_TYPE_PRESERVE, - DLD_BUF_TYPE_SPECIAL, - DLD_BUF_TYPE_ECNTRS, - DLD_BUF_TYPE_FILTER, - DLD_BUF_TYPE_ALL -} log_dump_type_t; extern char *dhd_log_dump_get_timestamp(void); extern void dhd_log_dump_write(int type, char *binary_data, int binary_len, const char *fmt, ...); @@ -62,6 +54,10 @@ extern void dhd_log_dump_write(int type, char *binary_data, #endif /* !_DHD_LOG_DUMP_DEFINITIONS_ */ #define CONCISE_DUMP_BUFLEN 16 * 1024 #define ECNTRS_LOG_HDR "\n-------------------- Ecounters log --------------------------\n" +#ifdef DHD_STATUS_LOGGING +#define STATUS_LOG_HDR "\n-------------------- Status log -----------------------\n" +#endif /* DHD_STATUS_LOGGING */ +#define RTT_LOG_HDR "\n-------------------- RTT log --------------------------\n" #define COOKIE_LOG_HDR "\n-------------------- Cookie List ----------------------------\n" #endif /* DHD_LOG_DUMP */ @@ -74,7 +70,7 @@ extern void dhd_log_dump_write(int type, char *binary_data, do { \ if (dhd_msg_level & DHD_ERROR_VAL) { \ printf args; \ - DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ + DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \ DHD_LOG_DUMP_WRITE args; \ } \ } while (0) @@ -198,6 +194,16 @@ do { \ #define DHD_PKT_MON(args) do {if (dhd_msg_level & DHD_PKT_MON_VAL) printf args;} while (0) #if defined(DHD_LOG_DUMP) +#if defined(DHD_LOG_PRINT_RATE_LIMIT) +#define DHD_FWLOG(args) \ + do { \ + if (dhd_msg_level & DHD_FWLOG_VAL) { \ + if (!log_print_threshold) \ + printf args; \ + DHD_LOG_DUMP_WRITE args; \ + } \ + } while (0) +#else #define DHD_FWLOG(args) \ do { \ if (dhd_msg_level & 
DHD_FWLOG_VAL) { \ @@ -205,6 +211,7 @@ do { \ DHD_LOG_DUMP_WRITE args; \ } \ } while (0) +#endif // endif #else /* DHD_LOG_DUMP */ #define DHD_FWLOG(args) do {if (dhd_msg_level & DHD_FWLOG_VAL) printf args;} while (0) #endif /* DHD_LOG_DUMP */ @@ -346,6 +353,12 @@ do { \ #define DHD_NONE(args) extern int dhd_msg_level; +#ifdef DHD_LOG_PRINT_RATE_LIMIT +extern int log_print_threshold; +#endif /* DHD_LOG_PRINT_RATE_LIMIT */ + +#define DHD_RTT_MEM(args) DHD_LOG_MEM(args) +#define DHD_RTT_ERR(args) DHD_ERROR(args) /* Defines msg bits */ #include diff --git a/bcmdhd.100.10.315.x/dhd_dbg_ring.c b/bcmdhd.100.10.315.x/dhd_dbg_ring.c index 6d0553f..d1e84e1 100644 --- a/bcmdhd.100.10.315.x/dhd_dbg_ring.c +++ b/bcmdhd.100.10.315.x/dhd_dbg_ring.c @@ -3,7 +3,7 @@ * * <> * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -23,7 +23,7 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: dhd_dbg_ring.c 769272 2018-06-25 09:23:27Z $ + * $Id: dhd_dbg_ring.c 792099 2018-12-03 15:45:56Z $ */ #include #include @@ -36,7 +36,7 @@ int dhd_dbg_ring_init(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring, uint16 id, uint8 *name, - uint32 ring_sz, void *allocd_buf) + uint32 ring_sz, void *allocd_buf, bool pull_inactive) { void *buf; unsigned long flags = 0; @@ -62,6 +62,7 @@ dhd_dbg_ring_init(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring, uint16 id, uint8 *name, ring->state = RING_SUSPEND; ring->rem_len = 0; ring->sched_pull = TRUE; + ring->pull_inactive = pull_inactive; DHD_DBG_RING_UNLOCK(ring->lock, flags); return BCME_OK; @@ -198,7 +199,7 @@ dhd_dbg_ring_push(dhd_dbg_ring_t *ring, dhd_dbg_ring_entry_t *hdr, void *data) ring->rp); /* check bounds before incrementing read ptr */ if (ring->rp + ENTRY_LENGTH(r_entry) >= ring->ring_size) { - DHD_ERROR(("%s: RING%d[%s] rp points out of boundary," + DHD_ERROR(("%s: RING%d[%s] rp points out of boundary, " "ring->wp=%u, ring->rp=%u, ring->ring_size=%d\n", __FUNCTION__, ring->id, ring->name, ring->wp, ring->rp, ring->ring_size)); @@ -259,32 +260,36 @@ dhd_dbg_ring_push(dhd_dbg_ring_t *ring, dhd_dbg_ring_entry_t *hdr, void *data) return BCME_OK; } +/* + * This function folds ring->lock, so callers of this function + * should not hold ring->lock. + */ int -dhd_dbg_ring_pull_single(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, - bool strip_header) +dhd_dbg_ring_pull_single(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, bool strip_header) { dhd_dbg_ring_entry_t *r_entry = NULL; uint32 rlen = 0; char *buf = NULL; + unsigned long flags; if (!ring || !data || buf_len <= 0) { return 0; } + DHD_DBG_RING_LOCK(ring->lock, flags); + /* pull from ring is allowed for inactive (suspended) ring * in case of ecounters only, this is because, for ecounters * when a trap occurs the ring is suspended and data is then * pulled to dump it to a file. 
For other rings if ring is * not in active state return without processing (as before) */ -#ifndef EWP_ECNTRS_LOGGING - if (ring->state != RING_ACTIVE) { - return 0; + if (!ring->pull_inactive && (ring->state != RING_ACTIVE)) { + goto exit; } -#endif /* EWP_ECNTRS_LOGGING */ if (ring->rp == ring->wp) { - return 0; + goto exit; } DHD_DBGIF(("%s: RING%d[%s] buf_len=%u, wp=%d, rp=%d, ring_start=0x%p; ring_size=%u\n", @@ -299,7 +304,8 @@ dhd_dbg_ring_pull_single(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, DHD_ERROR(("%s: entry len %d is out of boundary of ring size %d," " current ring %d[%s] - rp=%d\n", __FUNCTION__, rlen, ring->ring_size, ring->id, ring->name, ring->rp)); - return 0; + rlen = 0; + goto exit; } if (strip_header) { @@ -316,7 +322,8 @@ dhd_dbg_ring_pull_single(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, __FUNCTION__, ring->id, ring->name, ring->ring_size, ring->wp, ring->rp)); ASSERT(0); - return 0; + rlen = 0; + goto exit; } memcpy(data, buf, rlen); @@ -324,7 +331,7 @@ dhd_dbg_ring_pull_single(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, ring->rp += ENTRY_LENGTH(r_entry); /* don't pass wp but skip padding if there is one */ if (ring->rp != ring->wp && - ring->tail_padded && ((ring->rp + ring->rem_len) == ring->ring_size)) { + ring->tail_padded && ((ring->rp + ring->rem_len) >= ring->ring_size)) { DHD_DBGIF(("%s: RING%d[%s] Found padding, rp=%d, wp=%d\n", __FUNCTION__, ring->id, ring->name, ring->rp, ring->wp)); ring->rp = 0; @@ -336,12 +343,16 @@ dhd_dbg_ring_pull_single(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, " rp=%d, ring_size=%d\n", __FUNCTION__, ring->id, ring->name, ring->rp, ring->ring_size)); ASSERT(0); - return 0; + rlen = 0; + goto exit; } ring->stat.read_bytes += ENTRY_LENGTH(r_entry); DHD_DBGIF(("%s RING%d[%s]read_bytes %d, wp=%d, rp=%d\n", __FUNCTION__, ring->id, ring->name, ring->stat.read_bytes, ring->wp, ring->rp)); +exit: + DHD_DBG_RING_UNLOCK(ring->lock, flags); + return rlen; } @@ -349,12 +360,17 @@ int 
dhd_dbg_ring_pull(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, bool strip_hdr) { int32 r_len, total_r_len = 0; + unsigned long flags; if (!ring || !data) return 0; - if (ring->state != RING_ACTIVE) + DHD_DBG_RING_LOCK(ring->lock, flags); + if (!ring->pull_inactive && (ring->state != RING_ACTIVE)) { + DHD_DBG_RING_UNLOCK(ring->lock, flags); return 0; + } + DHD_DBG_RING_UNLOCK(ring->lock, flags); while (buf_len > 0) { r_len = dhd_dbg_ring_pull_single(ring, data, buf_len, strip_hdr); diff --git a/bcmdhd.100.10.315.x/dhd_dbg_ring.h b/bcmdhd.100.10.315.x/dhd_dbg_ring.h index c1d3041..ed83466 100644 --- a/bcmdhd.100.10.315.x/dhd_dbg_ring.h +++ b/bcmdhd.100.10.315.x/dhd_dbg_ring.h @@ -3,7 +3,7 @@ * * <> * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -23,7 +23,7 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: dhd_dbg_ring.h 754183 2018-03-26 11:15:16Z $ + * $Id: dhd_dbg_ring.h 795094 2018-12-17 08:56:58Z $ */ #ifndef __DHD_DBG_RING_H__ @@ -81,6 +81,7 @@ typedef struct dhd_dbg_ring { uint32 ring_size; /* numbers of item in ring */ uint32 wp; /* write pointer */ uint32 rp; /* read pointer */ + uint32 rp_tmp; /* tmp read pointer */ uint32 log_level; /* log_level */ uint32 threshold; /* threshold bytes */ void * ring_buf; /* pointer of actually ring buffer */ @@ -90,6 +91,7 @@ typedef struct dhd_dbg_ring { bool tail_padded; /* writer does not have enough space */ uint32 rem_len; /* number of bytes from wp_pad to end */ bool sched_pull; /* schedule reader immediately */ + bool pull_inactive; /* pull contents from ring even if it is inactive */ } dhd_dbg_ring_t; #define DBGRING_FLUSH_THRESHOLD(ring) (ring->ring_size / 3) @@ -109,6 +111,7 @@ typedef struct dhd_dbg_ring { #define ENTRY_LENGTH(hdr) ((hdr)->len + DBG_RING_ENTRY_SIZE) #define PAYLOAD_MAX_LEN 65535 #define PAYLOAD_ECNTR_MAX_LEN 1648u +#define PAYLOAD_RTT_MAX_LEN 1648u #define PENDING_LEN_MAX 0xFFFFFFFF #define DBG_RING_STATUS_SIZE (sizeof(dhd_dbg_ring_status_t)) @@ -122,7 +125,7 @@ typedef struct dhd_dbg_ring { typedef void (*os_pullreq_t)(void *os_priv, const int ring_id); int dhd_dbg_ring_init(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring, uint16 id, uint8 *name, - uint32 ring_sz, void *allocd_buf); + uint32 ring_sz, void *allocd_buf, bool pull_inactive); void dhd_dbg_ring_deinit(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring); int dhd_dbg_ring_push(dhd_dbg_ring_t *ring, dhd_dbg_ring_entry_t *hdr, void *data); int dhd_dbg_ring_pull(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, diff --git a/bcmdhd.100.10.315.x/dhd_debug.c b/bcmdhd.100.10.315.x/dhd_debug.c index 8ea1ead..bd3398b 100644 --- a/bcmdhd.100.10.315.x/dhd_debug.c +++ b/bcmdhd.100.10.315.x/dhd_debug.c @@ -3,7 +3,7 @@ * * <> * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -23,7 +23,7 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * - * $Id: dhd_debug.c 771435 2018-07-10 05:35:24Z $ + * $Id: dhd_debug.c 823976 2019-06-06 11:39:07Z $ */ #include @@ -141,6 +141,7 @@ struct tracelog_header { }; #define TRACE_LOG_MAGIC_NUMBER 0xEAE47C06 +void print_roam_enhanced_log(prcd_event_log_hdr_t *plog_hdr); int dhd_dbg_push_to_ring(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_entry_t *hdr, void *data) { @@ -170,6 +171,21 @@ dhd_dbg_push_to_ring(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_entry_t *hdr, vo return ret; } +dhd_dbg_ring_t * +dhd_dbg_get_ring_from_ring_id(dhd_pub_t *dhdp, int ring_id) +{ + if (!dhdp || !dhdp->dbg) { + return NULL; + } + + if (!VALID_RING(ring_id)) { + DHD_ERROR(("%s : invalid ring_id : %d\n", __FUNCTION__, ring_id)); + return NULL; + } + + return &dhdp->dbg->dbg_rings[ring_id]; +} + int dhd_dbg_pull_single_from_ring(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len, bool strip_header) @@ -355,13 +371,54 @@ check_valid_string_format(char *curr_ptr) } } +/* To identify format of non string format types */ +bool +check_valid_non_string_format(char *curr_ptr) +{ + char *next_ptr; + char *next_fmt_stptr; + char valid_fmt_types[17] = {'d', 'i', 'x', 'X', 'c', 'p', 'u', + 'f', 'F', 'e', 'E', 'g', 'G', 'o', + 'a', 'A', 'n'}; + int i; + bool valid = FALSE; + + /* Check for next % in the fmt str */ + next_fmt_stptr = bcmstrstr(curr_ptr, "%"); + + for (next_ptr = curr_ptr; *next_ptr != '\0'; next_ptr++) { + for (i = 0; i < (int)((sizeof(valid_fmt_types))/sizeof(valid_fmt_types[0])); i++) { + if (*next_ptr == valid_fmt_types[i]) { + /* Check whether format type found corresponds to current % + * and not the next one, if exists. 
+ */ + if ((next_fmt_stptr == NULL) || + (next_fmt_stptr && (next_ptr < next_fmt_stptr))) { + /* Not validating for length/width fields in + * format specifier. + */ + valid = TRUE; + } + goto done; + } + } + } + +done: + return valid; +} + #define MAX_NO_OF_ARG 16 -#define FMTSTR_SIZE 132 -#define ROMSTR_SIZE 200 +#define FMTSTR_SIZE 200 +#define ROMSTR_SIZE 268 #define SIZE_LOC_STR 50 #define LOG_PRINT_CNT_MAX 16u #define EL_PARSE_VER "V02" #define EL_MSEC_PER_SEC 1000 +#ifdef DHD_LOG_PRINT_RATE_LIMIT +#define MAX_LOG_PRINT_COUNT 100u +#define LOG_PRINT_THRESH (1u * USEC_PER_SEC) +#endif // endif bool dhd_dbg_process_event_log_hdr(event_log_hdr_t *log_hdr, prcd_event_log_hdr_t *prcd_log_hdr) @@ -427,8 +484,8 @@ dhd_dbg_process_event_log_hdr(event_log_hdr_t *log_hdr, prcd_event_log_hdr_t *pr * */ prcd_log_hdr->armcycle = prcd_log_hdr->ext_event_log_hdr ? - *(uint32 *)log_hdr - EVENT_TAG_TIMESTAMP_EXT_OFFSET : - *(uint32 *)log_hdr - EVENT_TAG_TIMESTAMP_OFFSET; + *(uint32 *)(log_hdr - EVENT_TAG_TIMESTAMP_EXT_OFFSET) : + *(uint32 *)(log_hdr - EVENT_TAG_TIMESTAMP_OFFSET); /* update event log data pointer address */ prcd_log_hdr->log_ptr = @@ -520,10 +577,34 @@ dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr, u_arg arg[MAX_NO_OF_ARG] = {{0}}; char *c_ptr = NULL; struct bcmstrbuf b; +#ifdef DHD_LOG_PRINT_RATE_LIMIT + static int log_print_count = 0; + static uint64 ts0 = 0; + uint64 ts1 = 0; +#endif /* DHD_LOG_PRINT_RATE_LIMIT */ BCM_REFERENCE(arg); - /* print the message out in a logprint */ +#ifdef DHD_LOG_PRINT_RATE_LIMIT + if (!ts0) + ts0 = OSL_SYSUPTIME_US(); + + ts1 = OSL_SYSUPTIME_US(); + + if (((ts1 - ts0) <= LOG_PRINT_THRESH) && (log_print_count >= MAX_LOG_PRINT_COUNT)) { + log_print_threshold = 1; + ts0 = 0; + log_print_count = 0; + DHD_ERROR(("%s: Log print water mark is reached," + " console logs are dumped only to debug_dump file\n", __FUNCTION__)); + } else if ((ts1 - ts0) > LOG_PRINT_THRESH) { + log_print_threshold = 0; + ts0 = 0; 
+ log_print_count = 0; + } + +#endif /* DHD_LOG_PRINT_RATE_LIMIT */ + /* print the message out in a logprint. Logprint expects raw format number */ if (!(raw_event->fmts)) { if (dhdp->dbg) { log_level = dhdp->dbg->dbg_rings[FW_VERBOSE_RING_ID].log_level; @@ -540,7 +621,7 @@ dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr, (uint32)(log_ptr[plog_hdr->count - 1] % EL_MSEC_PER_SEC), plog_hdr->tag, plog_hdr->count, - plog_hdr->fmt_num)); + plog_hdr->fmt_num_raw)); for (count = 0; count < (plog_hdr->count - 1); count++) { if (count && (count % LOG_PRINT_CNT_MAX == 0)) { @@ -559,17 +640,20 @@ dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr, EL_PARSE_VER, logset, block, plog_hdr->tag, plog_hdr->count, - plog_hdr->fmt_num); + plog_hdr->fmt_num_raw); for (count = 0; count < (plog_hdr->count - 1); count++) { bcm_bprintf(&b, " %x", log_ptr[count]); } /* ensure preserve fw logs go to debug_dump only in case of customer4 */ - if (logset < WL_MAX_PRESERVE_BUFFER && + if (logset < dhdp->event_log_max_sets && ((0x01u << logset) & dhdp->logset_prsrv_mask)) { DHD_PRSRV_MEM(("%s\n", b.origbuf)); } else { - DHD_EVENT(("%s\n", b.origbuf)); + DHD_FWLOG(("%s\n", b.origbuf)); +#ifdef DHD_LOG_PRINT_RATE_LIMIT + log_print_count++; +#endif /* DHD_LOG_PRINT_RATE_LIMIT */ } } return; @@ -659,23 +743,29 @@ dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr, "(s)0x%x", log_ptr[count]); arg[count].addr = str_buf[count]; } - } else { - /* Other than string */ + } else if (check_valid_non_string_format(c_ptr)) { + /* Other than string format */ arg[count].val = log_ptr[count]; + } else { + *(c_ptr - 1) = '\0'; + break; } } } /* ensure preserve fw logs go to debug_dump only in case of customer4 */ - if (logset < WL_MAX_PRESERVE_BUFFER && + if (logset < dhdp->event_log_max_sets && ((0x01u << logset) & dhdp->logset_prsrv_mask)) { DHD_PRSRV_MEM((fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3], arg[4], arg[5], arg[6], arg[7], 
arg[8], arg[9], arg[10], arg[11], arg[12], arg[13], arg[14], arg[15])); } else { - DHD_EVENT((fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3], + DHD_FWLOG((fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3], arg[4], arg[5], arg[6], arg[7], arg[8], arg[9], arg[10], arg[11], arg[12], arg[13], arg[14], arg[15])); +#ifdef DHD_LOG_PRINT_RATE_LIMIT + log_print_count++; +#endif /* DHD_LOG_PRINT_RATE_LIMIT */ } exit: @@ -703,14 +793,28 @@ dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp, void *event_data, struct tracelog_header *logentry_header; uint ring_data_len = 0; bool ecntr_pushed = FALSE; + bool rtt_pushed = FALSE; + bool dll_inited = FALSE; uint32 logset = 0; uint16 block = 0; + bool event_log_max_sets_queried; + uint32 event_log_max_sets; uint min_expected_len = 0; -#if defined(EWP_ECNTRS_LOGGING) && defined(DHD_LOG_DUMP) uint16 len_chk = 0; -#endif /* EWP_ECNTRS_LOGGING && DHD_LOG_DUMP */ BCM_REFERENCE(ecntr_pushed); + BCM_REFERENCE(rtt_pushed); + BCM_REFERENCE(len_chk); + + /* store event_logset_queried and event_log_max_sets in local variables + * to avoid race conditions as they were set from different contexts(preinit) + */ + event_log_max_sets_queried = dhdp->event_log_max_sets_queried; + /* Make sure queried is read first with wmb and then max_sets, + * as it is done in reverse order during preinit ioctls. 
+ */ + OSL_SMP_WMB(); + event_log_max_sets = dhdp->event_log_max_sets; if (msgtrace_hdr_present) min_expected_len = (MSGTRACE_HDRLEN + EVENT_LOG_BLOCK_LEN); @@ -744,12 +848,12 @@ dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp, void *event_data, hdr = (msgtrace_hdr_t *)event_data; data = (char *)event_data + MSGTRACE_HDRLEN; datalen -= MSGTRACE_HDRLEN; - msgtrace_seqnum = hdr->seqnum; + msgtrace_seqnum = ntoh32(hdr->seqnum); } else { data = (char *)event_data; } - if (dhd_dbg_msgtrace_seqchk(&seqnum_prev, ntoh32(msgtrace_seqnum))) + if (dhd_dbg_msgtrace_seqchk(&seqnum_prev, msgtrace_seqnum)) return; /* Save the whole message to event log ring */ @@ -786,12 +890,16 @@ dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp, void *event_data, logset = ltoh32(*((uint32 *)(data + 4))); - if (logset >= NUM_EVENT_LOG_SETS) { - DHD_ERROR(("%s logset: %d max: %d out of range, collect socram\n", - __FUNCTION__, logset, NUM_EVENT_LOG_SETS)); + if (logset >= event_log_max_sets) { + DHD_ERROR(("%s logset: %d max: %d out of range queried: %d\n", + __FUNCTION__, logset, event_log_max_sets, event_log_max_sets_queried)); #ifdef DHD_FW_COREDUMP - dhdp->memdump_type = DUMP_TYPE_LOGSET_BEYOND_RANGE; - dhd_bus_mem_dump(dhdp); + if (event_log_max_sets_queried) { + DHD_ERROR(("%s: collect socram for DUMP_TYPE_LOGSET_BEYOND_RANGE\n", + __FUNCTION__)); + dhdp->memdump_type = DUMP_TYPE_LOGSET_BEYOND_RANGE; + dhd_bus_mem_dump(dhdp); + } #endif /* DHD_FW_COREDUMP */ } @@ -812,6 +920,7 @@ dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp, void *event_data, * data log_hdr */ dll_init(&list_head); + dll_inited = TRUE; while (datalen > log_hdr_len) { log_hdr = (event_log_hdr_t *)(data + datalen - log_hdr_len); @@ -835,7 +944,12 @@ dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp, void *event_data, if (prcd_log_hdr.count == 0) { break; } + /* Both tag_stats and proxd are binary payloads so skip + * argument count check for these. 
+ */ if ((prcd_log_hdr.tag != EVENT_LOG_TAG_STATS) && + (prcd_log_hdr.tag != EVENT_LOG_TAG_PROXD_SAMPLE_COLLECT) && + (prcd_log_hdr.tag != EVENT_LOG_TAG_ROAM_ENHANCED_LOG) && (prcd_log_hdr.count > MAX_NO_OF_ARG)) { break; } @@ -860,6 +974,7 @@ dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp, void *event_data, log_item->prcd_log_hdr.tag = prcd_log_hdr.tag; log_item->prcd_log_hdr.count = prcd_log_hdr.count; log_item->prcd_log_hdr.fmt_num = prcd_log_hdr.fmt_num; + log_item->prcd_log_hdr.fmt_num_raw = prcd_log_hdr.fmt_num_raw; log_item->prcd_log_hdr.armcycle = prcd_log_hdr.armcycle; log_item->prcd_log_hdr.log_ptr = prcd_log_hdr.log_ptr; log_item->prcd_log_hdr.payload_len = prcd_log_hdr.payload_len; @@ -885,6 +1000,7 @@ dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp, void *event_data, plog_hdr = &log_item->prcd_log_hdr; #if defined(EWP_ECNTRS_LOGGING) && defined(DHD_LOG_DUMP) + /* Ecounter tag can be time_data or log_stats+binary paloaod */ if ((plog_hdr->tag == EVENT_LOG_TAG_ECOUNTERS_TIME_DATA) || ((plog_hdr->tag == EVENT_LOG_TAG_STATS) && (plog_hdr->binary_payload))) { @@ -912,6 +1028,37 @@ dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp, void *event_data, } #endif /* EWP_ECNTRS_LOGGING && DHD_LOG_DUMP */ + if (plog_hdr->tag == EVENT_LOG_TAG_ROAM_ENHANCED_LOG) { + print_roam_enhanced_log(plog_hdr); + msg_processed = TRUE; + } +#if defined(EWP_RTT_LOGGING) && defined(DHD_LOG_DUMP) + if ((plog_hdr->tag == EVENT_LOG_TAG_PROXD_SAMPLE_COLLECT) && + plog_hdr->binary_payload) { + if (!rtt_pushed && dhd_log_dump_rtt_enabled()) { + /* + * check msg hdr len before pushing. + * FW msg_hdr.len includes length of event log hdr, + * logentry header and payload. 
+ */ + len_chk = (sizeof(*logentry_header) + sizeof(*log_hdr) + + PAYLOAD_RTT_MAX_LEN); + /* account extended event log header(extended_event_log_hdr) */ + if (plog_hdr->ext_event_log_hdr) { + len_chk += sizeof(*log_hdr); + } + if (msg_hdr.len > len_chk) { + DHD_ERROR(("%s: EVENT_LOG_VALIDATION_FAILS: " + "msg_hdr.len=%u, max allowed for ecntrs=%u\n", + __FUNCTION__, msg_hdr.len, len_chk)); + goto exit; + } + dhd_dbg_ring_push(dhdp->rtt_dbg_ring, &msg_hdr, logbuf); + rtt_pushed = TRUE; + } + } +#endif /* EWP_RTT_LOGGING && DHD_LOG_DUMP */ + if (!msg_processed) { dhd_dbg_verboselog_handler(dhdp, plog_hdr, raw_event_ptr, logset, block, (uint32 *)data); @@ -923,7 +1070,7 @@ dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp, void *event_data, BCM_REFERENCE(log_hdr); exit: - while (!dll_empty(&list_head)) { + while (dll_inited && (!dll_empty(&list_head))) { cur = dll_head_p(&list_head); #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) #pragma GCC diagnostic push @@ -1040,6 +1187,23 @@ dhd_dbg_set_configuration(dhd_pub_t *dhdp, int ring_id, int log_level, int flags return BCME_OK; } +int +__dhd_dbg_get_ring_status(dhd_dbg_ring_t *ring, dhd_dbg_ring_status_t *get_ring_status) +{ + dhd_dbg_ring_status_t ring_status; + int ret = BCME_OK; + + if (ring == NULL) { + return BCME_BADADDR; + } + + bzero(&ring_status, sizeof(dhd_dbg_ring_status_t)); + RING_STAT_TO_STATUS(ring, ring_status); + *get_ring_status = ring_status; + + return ret; +} + /* * dhd_dbg_get_ring_status : get the ring status from the coresponding ring buffer * Return: An error code or 0 on success. 
@@ -1052,17 +1216,14 @@ dhd_dbg_get_ring_status(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_status_t *dbg int id = 0; dhd_dbg_t *dbg; dhd_dbg_ring_t *dbg_ring; - dhd_dbg_ring_status_t ring_status; if (!dhdp || !dhdp->dbg) return BCME_BADADDR; dbg = dhdp->dbg; - memset(&ring_status, 0, sizeof(dhd_dbg_ring_status_t)); for (id = DEBUG_RING_ID_INVALID + 1; id < DEBUG_RING_ID_MAX; id++) { dbg_ring = &dbg->dbg_rings[id]; if (VALID_RING(dbg_ring->id) && (dbg_ring->id == ring_id)) { - RING_STAT_TO_STATUS(dbg_ring, ring_status); - *dbg_ring_status = ring_status; + __dhd_dbg_get_ring_status(dbg_ring, dbg_ring_status); break; } } @@ -1073,6 +1234,30 @@ dhd_dbg_get_ring_status(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_status_t *dbg return ret; } +#ifdef SHOW_LOGTRACE +void +dhd_dbg_read_ring_into_trace_buf(dhd_dbg_ring_t *ring, trace_buf_info_t *trace_buf_info) +{ + dhd_dbg_ring_status_t ring_status; + uint32 rlen = 0; + + rlen = dhd_dbg_ring_pull_single(ring, trace_buf_info->buf, TRACE_LOG_BUF_MAX_SIZE, TRUE); + + trace_buf_info->size = rlen; + trace_buf_info->availability = NEXT_BUF_NOT_AVAIL; + if (rlen == 0) { + trace_buf_info->availability = BUF_NOT_AVAILABLE; + return; + } + + __dhd_dbg_get_ring_status(ring, &ring_status); + + if (ring_status.written_bytes != ring_status.read_bytes) { + trace_buf_info->availability = NEXT_BUF_AVAIL; + } +} +#endif /* SHOW_LOGTRACE */ + /* * dhd_dbg_find_ring_id : return ring_id based on ring_name * Return: An invalid ring id for failure or valid ring id on success. 
@@ -1176,9 +1361,9 @@ __dhd_dbg_pkt_hash(uintptr_t pkt, uint32 pktid) uint32 __dhd_dbg_driver_ts_usec(void) { - struct timespec ts; + struct osl_timespec ts; - get_monotonic_boottime(&ts); + osl_get_monotonic_boottime(&ts); return ((uint32)(__TIMESPEC_TO_US(ts))); } @@ -1971,6 +2156,9 @@ dhd_dbg_detach_pkt_monitor(dhd_pub_t *dhdp) DHD_PKT_MON(("%s(): packet monitor detach succeeded\n", __FUNCTION__)); return BCME_OK; } +#endif /* DBG_PKT_MON */ + +#if defined(DBG_PKT_MON) bool dhd_dbg_process_tx_status(dhd_pub_t *dhdp, void *pkt, uint32 pktid, uint16 status) @@ -1982,17 +2170,147 @@ dhd_dbg_process_tx_status(dhd_pub_t *dhdp, void *pkt, uint32 pktid, } return pkt_fate; } - -#else /* DBG_PKT_MON */ - +#else /* DBG_PKT_MON || DHD_PKT_LOGGING */ bool dhd_dbg_process_tx_status(dhd_pub_t *dhdp, void *pkt, uint32 pktid, uint16 status) { return TRUE; } +#endif // endif -#endif /* DBG_PKT_MON */ +#define EL_LOG_STR_LEN 512 + +void +print_roam_enhanced_log(prcd_event_log_hdr_t *plog_hdr) +{ + prsv_periodic_log_hdr_t *hdr = (prsv_periodic_log_hdr_t *)plog_hdr->log_ptr; + char chanspec_buf[CHANSPEC_STR_LEN]; + + if (hdr->version != ROAM_LOG_VER_1) { + DHD_ERROR(("ROAM_LOG ENHANCE: version is not matched\n")); + goto default_print; + return; + } + + switch (hdr->id) { + case ROAM_LOG_SCANSTART: + { + roam_log_trig_v1_t *log = (roam_log_trig_v1_t *)plog_hdr->log_ptr; + DHD_ERROR(("ROAM_LOG_SCANSTART time: %d," + " version:%d reason: %d rssi:%d cu:%d result:%d\n", + plog_hdr->armcycle, log->hdr.version, log->reason, + log->rssi, log->current_cu, log->result)); + if (log->reason == WLC_E_REASON_DEAUTH || + log->reason == WLC_E_REASON_DISASSOC) { + DHD_ERROR((" ROAM_LOG_PRT_ROAM: RCVD reason:%d\n", + log->prt_roam.rcvd_reason)); + } else if (log->reason == WLC_E_REASON_BSSTRANS_REQ) { + DHD_ERROR((" ROAM_LOG_BSS_REQ: mode:%d candidate:%d token:%d " + "duration disassoc:%d valid:%d term:%d\n", + log->bss_trans.req_mode, log->bss_trans.nbrlist_size, + log->bss_trans.token, 
log->bss_trans.disassoc_dur, + log->bss_trans.validity_dur, log->bss_trans.bss_term_dur)); + } + break; + } + case ROAM_LOG_SCAN_CMPLT: + { + int i; + roam_log_scan_cmplt_v1_t *log = + (roam_log_scan_cmplt_v1_t *)plog_hdr->log_ptr; + + DHD_ERROR(("ROAM_LOG_SCAN_CMPL: time:%d version:%d" + "is_full:%d scan_count:%d score_delta:%d", + plog_hdr->armcycle, log->hdr.version, log->full_scan, + log->scan_count, log->score_delta)); + DHD_ERROR((" ROAM_LOG_CUR_AP: " MACDBG "rssi:%d score:%d channel:%s\n", + MAC2STRDBG((uint8 *)&log->cur_info.addr), + log->cur_info.rssi, + log->cur_info.score, + wf_chspec_ntoa_ex(log->cur_info.chanspec, chanspec_buf))); + for (i = 0; i < log->scan_list_size; i++) { + DHD_ERROR((" ROAM_LOG_CANDIDATE %d: " MACDBG + "rssi:%d score:%d channel:%s TPUT:%dkbps\n", + i, MAC2STRDBG((uint8 *)&log->scan_list[i].addr), + log->scan_list[i].rssi, log->scan_list[i].score, + wf_chspec_ntoa_ex(log->scan_list[i].chanspec, + chanspec_buf), + log->scan_list[i].estm_tput != ROAM_LOG_INVALID_TPUT? 
+ log->scan_list[i].estm_tput:0)); + } + break; + } + case ROAM_LOG_ROAM_CMPLT: + { + roam_log_cmplt_v1_t *log = (roam_log_cmplt_v1_t *)plog_hdr->log_ptr; + DHD_ERROR(("ROAM_LOG_ROAM_CMPL: time: %d, version:%d" + "status: %d reason: %d channel:%s retry:%d " MACDBG "\n", + plog_hdr->armcycle, log->hdr.version, log->status, log->reason, + wf_chspec_ntoa_ex(log->chanspec, chanspec_buf), + log->retry, MAC2STRDBG((uint8 *)&log->addr))); + break; + } + case ROAM_LOG_NBR_REQ: + { + roam_log_nbrreq_v1_t *log = (roam_log_nbrreq_v1_t *)plog_hdr->log_ptr; + DHD_ERROR(("ROAM_LOG_NBR_REQ: time: %d, version:%d token:%d\n", + plog_hdr->armcycle, log->hdr.version, log->token)); + break; + } + case ROAM_LOG_NBR_REP: + { + roam_log_nbrrep_v1_t *log = (roam_log_nbrrep_v1_t *)plog_hdr->log_ptr; + DHD_ERROR(("ROAM_LOG_NBR_REP: time:%d, veresion:%d chan_num:%d\n", + plog_hdr->armcycle, log->hdr.version, log->channel_num)); + break; + } + case ROAM_LOG_BCN_REQ: + { + roam_log_bcnrpt_req_v1_t *log = + (roam_log_bcnrpt_req_v1_t *)plog_hdr->log_ptr; + DHD_ERROR(("ROAM_LOG_BCN_REQ: time:%d, version:%d ret:%d" + "class:%d num_chan:%d ", + plog_hdr->armcycle, log->hdr.version, + log->result, log->reg, log->channel)); + DHD_ERROR(("ROAM_LOG_BCN_REQ: mode:%d is_wild:%d duration:%d" + "ssid_len:%d\n", log->mode, log->bssid_wild, + log->duration, log->ssid_len)); + break; + } + case ROAM_LOG_BCN_REP: + { + roam_log_bcnrpt_rep_v1_t *log = + (roam_log_bcnrpt_rep_v1_t *)plog_hdr->log_ptr; + DHD_ERROR(("ROAM_LOG_BCN_REP: time:%d, verseion:%d count:%d\n", + plog_hdr->armcycle, log->hdr.version, + log->count)); + break; + } + default: + goto default_print; + } + + return; + +default_print: + { + uint32 *ptr = (uint32 *)plog_hdr->log_ptr; + int i; + int loop_cnt = hdr->length / sizeof(uint32); + struct bcmstrbuf b; + char pr_buf[EL_LOG_STR_LEN] = { 0 }; + + bcm_binit(&b, pr_buf, EL_LOG_STR_LEN); + bcm_bprintf(&b, "ROAM_LOG_UNKNOWN ID:%d ver:%d armcycle:%d", + hdr->id, hdr->version, plog_hdr->armcycle); + 
for (i = 0; i < loop_cnt && b.size > 0; i++) { + bcm_bprintf(&b, " %x", *ptr); + ptr++; + } + DHD_ERROR(("%s\n", b.origbuf)); + } +} /* * dhd_dbg_attach: initialziation of dhd dbugability module @@ -2020,7 +2338,7 @@ dhd_dbg_attach(dhd_pub_t *dhdp, dbg_pullreq_t os_pullreq, if (!buf) goto error; ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[FW_VERBOSE_RING_ID], FW_VERBOSE_RING_ID, - (uint8 *)FW_VERBOSE_RING_NAME, FW_VERBOSE_RING_SIZE, buf); + (uint8 *)FW_VERBOSE_RING_NAME, FW_VERBOSE_RING_SIZE, buf, FALSE); if (ret) goto error; @@ -2032,7 +2350,7 @@ dhd_dbg_attach(dhd_pub_t *dhdp, dbg_pullreq_t os_pullreq, if (!buf) goto error; ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[DHD_EVENT_RING_ID], DHD_EVENT_RING_ID, - (uint8 *)DHD_EVENT_RING_NAME, DHD_EVENT_RING_SIZE, buf); + (uint8 *)DHD_EVENT_RING_NAME, DHD_EVENT_RING_SIZE, buf, FALSE); if (ret) goto error; diff --git a/bcmdhd.100.10.315.x/dhd_debug.h b/bcmdhd.100.10.315.x/dhd_debug.h index f850efc..22c0ada 100644 --- a/bcmdhd.100.10.315.x/dhd_debug.h +++ b/bcmdhd.100.10.315.x/dhd_debug.h @@ -3,7 +3,7 @@ * * <> * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -23,7 +23,7 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: dhd_debug.h 771435 2018-07-10 05:35:24Z $ + * $Id: dhd_debug.h 783721 2018-10-08 13:05:26Z $ */ #ifndef _dhd_debug_h_ @@ -710,6 +710,8 @@ typedef struct { } dhd_dbg_mwli_t; #endif /* DHD_DEBUG */ +#define DHD_OW_BI_RAW_EVENT_LOG_FMT 0xFFFF + /* LSB 2 bits of format number to identify the type of event log */ #define DHD_EVENT_LOG_HDR_MASK 0x3 @@ -764,6 +766,7 @@ extern int dhd_dbg_start(dhd_pub_t *dhdp, bool start); extern int dhd_dbg_set_configuration(dhd_pub_t *dhdp, int ring_id, int log_level, int flags, uint32 threshold); extern int dhd_dbg_find_ring_id(dhd_pub_t *dhdp, char *ring_name); +extern dhd_dbg_ring_t *dhd_dbg_get_ring_from_ring_id(dhd_pub_t *dhdp, int ring_id); extern void *dhd_dbg_get_priv(dhd_pub_t *dhdp); extern int dhd_dbg_send_urgent_evt(dhd_pub_t *dhdp, const void *data, const uint32 len); extern void dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr, @@ -773,8 +776,12 @@ int dhd_dbg_pull_single_from_ring(dhd_pub_t *dhdp, int ring_id, void *data, uint bool strip_header); int dhd_dbg_push_to_ring(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_entry_t *hdr, void *data); +int __dhd_dbg_get_ring_status(dhd_dbg_ring_t *ring, dhd_dbg_ring_status_t *ring_status); int dhd_dbg_get_ring_status(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_status_t *dbg_ring_status); +#ifdef SHOW_LOGTRACE +void dhd_dbg_read_ring_into_trace_buf(dhd_dbg_ring_t *ring, trace_buf_info_t *trace_buf_info); +#endif /* SHOW_LOGTRACE */ #ifdef DBG_PKT_MON extern int dhd_dbg_attach_pkt_monitor(dhd_pub_t *dhdp, diff --git a/bcmdhd.100.10.315.x/dhd_debug_linux.c b/bcmdhd.100.10.315.x/dhd_debug_linux.c index 386fe8a..4493445 100644 --- a/bcmdhd.100.10.315.x/dhd_debug_linux.c +++ b/bcmdhd.100.10.315.x/dhd_debug_linux.c @@ -3,7 +3,7 @@ * * <> * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -143,7 +143,9 @@ dbg_ring_poll_worker(struct work_struct *work) goto exit; } + DHD_DBG_RING_UNLOCK(ring->lock, flags); rlen = dhd_dbg_pull_from_ring(dhdp, ringid, buf, buflen); + DHD_DBG_RING_LOCK(ring->lock, flags); if (!ring->sched_pull) { ring->sched_pull = TRUE; diff --git a/bcmdhd.100.10.315.x/dhd_flowring.c b/bcmdhd.100.10.315.x/dhd_flowring.c index ce9a1cf..55a0315 100644 --- a/bcmdhd.100.10.315.x/dhd_flowring.c +++ b/bcmdhd.100.10.315.x/dhd_flowring.c @@ -4,7 +4,7 @@ * Flow rings are transmit traffic (=propagating towards antenna) related entities * * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -27,7 +27,7 @@ * * <> * - * $Id: dhd_flowring.c 765578 2018-06-04 17:10:24Z $ + * $Id: dhd_flowring.c 808473 2019-03-07 07:35:30Z $ */ #include @@ -65,11 +65,7 @@ int BCMFASTPATH dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt); #define FLOW_QUEUE_PKT_NEXT(p) PKTLINK(p) #define FLOW_QUEUE_PKT_SETNEXT(p, x) PKTSETLINK((p), (x)) -#if defined(EAPOL_PKT_PRIO) || defined(DHD_LOSSLESS_ROAMING) -const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 7 }; -#else const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 }; -#endif /* EAPOL_PKT_PRIO || DHD_LOSSLESS_ROAMING */ const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; /** Queue overflow throttle. 
Return value: TRUE if throttle needs to be applied */ @@ -270,6 +266,48 @@ dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid, } } +uint8 +dhd_num_prio_supported_per_flow_ring(dhd_pub_t *dhdp) +{ + uint8 prio_count = 0; + int i; + // Pick all elements one by one + for (i = 0; i < NUMPRIO; i++) + { + // Check if the picked element is already counted + int j; + for (j = 0; j < i; j++) { + if (dhdp->flow_prio_map[i] == dhdp->flow_prio_map[j]) { + break; + } + } + // If not counted earlier, then count it + if (i == j) { + prio_count++; + } + } + +#ifdef DHD_LOSSLESS_ROAMING + /* For LLR, we are using flowring with prio 7 which is not considered + * in prio2ac array. But in __dhd_sendpkt, it is hardcoded hardcoded + * prio to PRIO_8021D_NC and send to dhd_flowid_update. + * So add 1 to prio_count. + */ + prio_count++; +#endif /* DHD_LOSSLESS_ROAMING */ + + return prio_count; +} + +uint8 +dhd_get_max_multi_client_flow_rings(dhd_pub_t *dhdp) +{ + uint8 reserved_infra_sta_flow_rings = dhd_num_prio_supported_per_flow_ring(dhdp); + uint8 total_tx_flow_rings = dhdp->num_flow_rings - dhdp->bus->max_cmn_rings; + uint8 max_multi_client_flow_rings = total_tx_flow_rings - reserved_infra_sta_flow_rings; + return max_multi_client_flow_rings; +} + /** Initializes data structures of multiple flow rings */ int dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings) @@ -353,6 +391,10 @@ dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings) dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP; bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO); + + dhdp->max_multi_client_flow_rings = dhd_get_max_multi_client_flow_rings(dhdp); + dhdp->multi_client_flow_rings = 0U; + #ifdef DHD_LOSSLESS_ROAMING dhdp->dequeue_prec_map = ALLPRIO; #endif // endif @@ -452,6 +494,9 @@ void dhd_flow_rings_deinit(dhd_pub_t *dhdp) dhdp->num_flow_rings = 0U; bzero(dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO); + dhdp->max_multi_client_flow_rings = 0U; + dhdp->multi_client_flow_rings = 0U; 
+ lock = dhdp->flowid_lock; dhdp->flowid_lock = NULL; @@ -518,8 +563,7 @@ dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da) ASSERT(if_flow_lkup); - if ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) || - (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_WDS)) { + if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) { #ifdef WLTDLS if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da)) && is_tdls_destination(dhdp, da)) { @@ -593,7 +637,7 @@ dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da) if (flowid == FLOWID_INVALID) { MFREE(dhdp->osh, fl_hash_node, sizeof(flow_hash_info_t)); - DHD_ERROR(("%s: cannot get free flowid \n", __FUNCTION__)); + DHD_ERROR_RLMT(("%s: cannot get free flowid \n", __FUNCTION__)); return FLOWID_INVALID; } @@ -605,9 +649,8 @@ dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da) DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; - if ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) || - (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_WDS)) { - /* For STA non TDLS dest and WDS dest we allocate entry based on prio only */ + if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) { + /* For STA/GC non TDLS dest and WDS dest we allocate entry based on prio only */ #ifdef WLTDLS if (dhdp->peer_tbl.tdls_peer_count && (is_tdls_destination(dhdp, da))) { @@ -663,7 +706,6 @@ dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex, flow_ring_table_t *flow_ring_table; unsigned long flags; int ret; - bool is_sta_assoc; DHD_TRACE(("%s\n", __FUNCTION__)); @@ -680,40 +722,50 @@ dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex, id = dhd_flowid_find(dhdp, ifindex, prio, sa, da); if (id == FLOWID_INVALID) { - + bool if_role_multi_client; if_flow_lkup_t *if_flow_lkup; if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; if (!if_flow_lkup[ifindex].status) return BCME_ERROR; - BCM_REFERENCE(is_sta_assoc); + + /* check role for multi client case */ + 
if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex); + + /* Abort Flowring creation if multi client flowrings crossed the threshold */ +#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS + if (if_role_multi_client && + (dhdp->multi_client_flow_rings >= dhdp->max_multi_client_flow_rings)) { + DHD_ERROR_RLMT(("%s: Max multi client flow rings reached: %d:%d\n", + __FUNCTION__, dhdp->multi_client_flow_rings, + dhdp->max_multi_client_flow_rings)); + return BCME_ERROR; + } +#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */ + + /* Do not create Flowring if peer is not associated */ #if defined(PCIE_FULL_DONGLE) - is_sta_assoc = dhd_sta_associated(dhdp, ifindex, (uint8 *)da); - DHD_ERROR_RLMT(("%s: multi %x ifindex %d role %x assoc %d\n", __FUNCTION__, - ETHER_ISMULTI(da), ifindex, if_flow_lkup[ifindex].role, - is_sta_assoc)); - if (!ETHER_ISMULTI(da) && - ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_AP) || - (FALSE) || -#ifdef WL_NAN - (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_NAN) || -#endif /* WL_NAN */ - (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_P2P_GO)) && - (!is_sta_assoc)) { - DHD_ERROR_RLMT(("Attempt to send pkt with out peer/scb addition\n")); + if (if_role_multi_client && !ETHER_ISMULTI(da) && + !dhd_sta_associated(dhdp, ifindex, (uint8 *)da)) { + DHD_ERROR_RLMT(("%s: Skip send pkt without peer addition\n", __FUNCTION__)); return BCME_ERROR; } #endif /* (linux || LINUX) && PCIE_FULL_DONGLE */ id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da); if (id == FLOWID_INVALID) { - DHD_ERROR(("%s: alloc flowid ifindex %u status %u\n", + DHD_ERROR_RLMT(("%s: alloc flowid ifindex %u status %u\n", __FUNCTION__, ifindex, if_flow_lkup[ifindex].status)); return BCME_ERROR; } ASSERT(id < dhdp->num_flow_rings); + /* Only after flowid alloc, increment multi_client_flow_rings */ + if (if_role_multi_client) { + dhdp->multi_client_flow_rings++; + } + /* register this flowid in dhd_pub */ dhd_add_flowid(dhdp, ifindex, prio, da, id); @@ -751,7 +803,14 @@ dhd_flowid_lookup(dhd_pub_t 
*dhdp, uint8 ifindex, return BCME_OK; } else { /* if the Flow id was found in the hash */ - ASSERT(id < dhdp->num_flow_rings); + + if (id >= dhdp->num_flow_rings) { + DHD_ERROR(("%s: Invalid flow id : %u, num_flow_rings : %u\n", + __FUNCTION__, id, dhdp->num_flow_rings)); + *flowid = FLOWID_INVALID; + ASSERT(0); + return BCME_ERROR; + } flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id]; DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); @@ -883,6 +942,7 @@ dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid) flow_hash_info_t *cur, *prev; if_flow_lkup_t *if_flow_lkup; unsigned long flags; + bool if_role_multi_client; ASSERT(ifindex < DHD_MAX_IFS); if (ifindex >= DHD_MAX_IFS) @@ -891,6 +951,8 @@ dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid) DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; + if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex); + for (hashix = 0; hashix < DHD_FLOWRING_HASH_SIZE; hashix++) { cur = if_flow_lkup[ifindex].fl_hash[hashix]; @@ -916,6 +978,11 @@ dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid) prev->next = cur->next; } + /* Decrement multi_client_flow_rings */ + if (if_role_multi_client) { + dhdp->multi_client_flow_rings--; + } + /* deregister flowid from dhd_pub. */ dhd_del_flowid(dhdp, ifindex, flowid); @@ -1007,10 +1074,20 @@ dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr) flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table; for (id = 0; id < dhdp->num_flow_rings; id++) { + /* + * Send flowring delete request even if flowring status is + * FLOW_RING_STATUS_CREATE_PENDING, to handle cases where DISASSOC_IND + * event comes ahead of flowring create response. + * Otherwise the flowring will not be deleted later as there will not be any + * DISASSOC_IND event. 
With this change, when create response event comes to DHD, + * it will change the status to FLOW_RING_STATUS_OPEN and soon delete response + * event will come, upon which DHD will delete the flowring. + */ if (flow_ring_table[id].active && (flow_ring_table[id].flow_info.ifindex == ifindex) && (!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) && - (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) { + ((flow_ring_table[id].status == FLOW_RING_STATUS_OPEN) || + (flow_ring_table[id].status == FLOW_RING_STATUS_CREATE_PENDING))) { DHD_ERROR(("%s: deleting flowid %d\n", __FUNCTION__, flow_ring_table[id].flowid)); dhd_bus_flow_ring_delete_request(dhdp->bus, @@ -1123,6 +1200,8 @@ int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map) else bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO); + dhdp->max_multi_client_flow_rings = dhd_get_max_multi_client_flow_rings(dhdp); + return BCME_OK; } diff --git a/bcmdhd.100.10.315.x/dhd_flowring.h b/bcmdhd.100.10.315.x/dhd_flowring.h index a2eb4b7..b205d9f 100644 --- a/bcmdhd.100.10.315.x/dhd_flowring.h +++ b/bcmdhd.100.10.315.x/dhd_flowring.h @@ -6,7 +6,7 @@ * Provides type definitions and function prototypes used to create, delete and manage flow rings at * high level. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -29,7 +29,7 @@ * * <> * - * $Id: dhd_flowring.h 761412 2018-05-08 05:34:36Z $ + * $Id: dhd_flowring.h 786596 2018-10-26 22:54:51Z $ */ /**************** @@ -61,10 +61,7 @@ #define FLOW_RING_STATUS_STA_FREEING 7 #define DHD_FLOWRING_RX_BUFPOST_PKTSZ 2048 -/* Maximum Mu MIMO frame size */ -#ifdef WL_MONITOR -#define DHD_MAX_MON_FLOWRING_RX_BUFPOST_PKTSZ 4096 -#endif /* WL_MONITOR */ +#define DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX 4096 #define DHD_FLOW_PRIO_AC_MAP 0 #define DHD_FLOW_PRIO_TID_MAP 1 @@ -80,8 +77,24 @@ #define DHD_IF_ROLE(pub, idx) (((if_flow_lkup_t *)(pub)->if_flow_lkup)[idx].role) #define DHD_IF_ROLE_AP(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_AP) #define DHD_IF_ROLE_STA(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_STA) +#define DHD_IF_ROLE_P2PGC(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_CLIENT) #define DHD_IF_ROLE_P2PGO(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_GO) #define DHD_IF_ROLE_WDS(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_WDS) +#define DHD_IF_ROLE_IBSS(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_IBSS) +#ifdef WL_NAN +#define DHD_IF_ROLE_NAN(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_NAN) +#else +#define DHD_IF_ROLE_NAN(pub, idx) (FALSE) +#endif /* WL_NAN */ +#define DHD_IF_ROLE_AWDL(pub, idx) (FALSE) + +#define DHD_IF_ROLE_GENERIC_STA(pub, idx) \ + (DHD_IF_ROLE_STA(pub, idx) || DHD_IF_ROLE_P2PGC(pub, idx) || DHD_IF_ROLE_WDS(pub, idx)) + +#define DHD_IF_ROLE_MULTI_CLIENT(pub, idx) \ + (DHD_IF_ROLE_AP(pub, idx) || DHD_IF_ROLE_P2PGO(pub, idx) || DHD_IF_ROLE_AWDL(pub, idx) ||\ + DHD_IF_ROLE_NAN(pub, idx)) + #define DHD_FLOW_RING(dhdp, flowid) \ (flow_ring_node_t *)&(((flow_ring_node_t *)((dhdp)->flow_ring_table))[flowid]) @@ -202,6 +215,9 @@ typedef struct flow_ring_node { #ifdef IDLE_TX_FLOW_MGMT uint64 last_active_ts; /* contains last active 
timestamp */ #endif /* IDLE_TX_FLOW_MGMT */ +#ifdef DHD_HP2P + bool hp2p_ring; +#endif /* DHD_HP2P */ } flow_ring_node_t; typedef flow_ring_node_t flow_ring_table_t; diff --git a/bcmdhd.100.10.315.x/dhd_gpio.c b/bcmdhd.100.10.315.x/dhd_gpio.c index 3bd8c59..7b0e65d 100644 --- a/bcmdhd.100.10.315.x/dhd_gpio.c +++ b/bcmdhd.100.10.315.x/dhd_gpio.c @@ -3,11 +3,6 @@ #include #include -#ifdef CUSTOMER_HW_PLATFORM -#include -#define sdmmc_channel sdmmc_device_mmc0 -#endif /* CUSTOMER_HW_PLATFORM */ - #if defined(BUS_POWER_RESTORE) && defined(BCMSDIO) #include #include @@ -73,14 +68,15 @@ dhd_wlan_set_power(int on #endif #endif #if defined(BUS_POWER_RESTORE) -#if defined(BCMSDIO) +#if defined(BCMSDIO) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) if (adapter->sdio_func && adapter->sdio_func->card && adapter->sdio_func->card->host) { + mdelay(100); printf("======== mmc_power_restore_host! ========\n"); mmc_power_restore_host(adapter->sdio_func->card->host); } #elif defined(BCMPCIE) - OSL_SLEEP(50); /* delay needed to be able to restore PCIe configuration registers */ if (adapter->pci_dev) { + mdelay(100); printf("======== pci_set_power_state PCI_D0! ========\n"); pci_set_power_state(adapter->pci_dev, PCI_D0); if (adapter->pci_saved_state) @@ -97,7 +93,7 @@ dhd_wlan_set_power(int on mdelay(100); } else { #if defined(BUS_POWER_RESTORE) -#if defined(BCMSDIO) +#if defined(BCMSDIO) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) if (adapter->sdio_func && adapter->sdio_func->card && adapter->sdio_func->card->host) { printf("======== mmc_power_save_host! ========\n"); mmc_power_save_host(adapter->sdio_func->card->host); @@ -155,6 +151,8 @@ static int dhd_wlan_set_carddetect(int present) #ifdef CUSTOMER_HW_AMLOGIC sdio_reinit(); #endif +#elif defined(BCMPCIE) + printf("======== Card detection to detect PCIE card! 
========\n"); #endif } else { #if defined(BCMSDIO) @@ -168,8 +166,10 @@ static int dhd_wlan_set_carddetect(int present) #endif #elif defined(BCMPCIE) printf("======== Card detection to remove PCIE card! ========\n"); +#ifdef CUSTOMER_HW_AMLOGIC extern_wifi_set_enable(0); mdelay(200); +#endif #endif } #endif /* BUS_POWER_RESTORE */ diff --git a/bcmdhd.100.10.315.x/dhd_ip.c b/bcmdhd.100.10.315.x/dhd_ip.c index 403ad9b..d200987 100644 --- a/bcmdhd.100.10.315.x/dhd_ip.c +++ b/bcmdhd.100.10.315.x/dhd_ip.c @@ -1,7 +1,7 @@ /* * IP Packet Parser Module. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: dhd_ip.c 729083 2017-10-30 10:33:47Z $ + * $Id: dhd_ip.c 813282 2019-04-04 09:42:28Z $ */ #include #include @@ -39,11 +39,11 @@ #include -#ifdef DHDTCPACK_SUPPRESS +#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK) #include #include #include -#endif /* DHDTCPACK_SUPPRESS */ +#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */ /* special values */ /* 802.3 llc/snap header */ @@ -128,7 +128,7 @@ typedef struct { uint8 supp_cnt; dhd_pub_t *dhdp; #ifndef TCPACK_SUPPRESS_HOLD_HRT - struct timer_list timer; + timer_list_compat_t timer; #else struct tasklet_hrtimer timer; #endif /* TCPACK_SUPPRESS_HOLD_HRT */ @@ -295,13 +295,7 @@ static void _tdata_psh_info_pool_deinit(dhd_pub_t *dhdp, #ifdef BCMPCIE #ifndef TCPACK_SUPPRESS_HOLD_HRT -static void dhd_tcpack_send( -#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) - struct timer_list *t -#else - ulong data -#endif -) +static void dhd_tcpack_send(ulong data) #else static enum hrtimer_restart dhd_tcpack_send(struct hrtimer *timer) #endif /* TCPACK_SUPPRESS_HOLD_HRT */ @@ -314,11 +308,7 @@ static enum hrtimer_restart dhd_tcpack_send(struct hrtimer *timer) unsigned long flags; #ifndef 
TCPACK_SUPPRESS_HOLD_HRT -#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) - cur_tbl = from_timer(cur_tbl, t, timer); -#else cur_tbl = (tcpack_info_t *)data; -#endif #else cur_tbl = container_of(timer, tcpack_info_t, timer.timer); #endif /* TCPACK_SUPPRESS_HOLD_HRT */ @@ -513,13 +503,8 @@ int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 mode) &tcpack_sup_module->tcpack_info_tbl[i]; tcpack_info_tbl->dhdp = dhdp; #ifndef TCPACK_SUPPRESS_HOLD_HRT -#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) - timer_setup(&tcpack_info_tbl->timer, dhd_tcpack_send, 0); -#else - init_timer(&tcpack_info_tbl->timer); - tcpack_info_tbl->timer.data = (ulong)tcpack_info_tbl; - tcpack_info_tbl->timer.function = dhd_tcpack_send; -#endif + init_timer_compat(&tcpack_info_tbl->timer, + dhd_tcpack_send, tcpack_info_tbl); #else tasklet_hrtimer_init(&tcpack_info_tbl->timer, dhd_tcpack_send, CLOCK_MONOTONIC, HRTIMER_MODE_REL); @@ -1389,3 +1374,54 @@ exit: return hold; } #endif /* DHDTCPACK_SUPPRESS */ + +#ifdef DHDTCPSYNC_FLOOD_BLK +tcp_hdr_flag_t +dhd_tcpdata_get_flag(dhd_pub_t *dhdp, void *pkt) +{ + uint8 *ether_hdr; /* Ethernet header of the new packet */ + uint16 ether_type; /* Ethernet type of the new packet */ + uint8 *ip_hdr; /* IP header of the new packet */ + uint8 *tcp_hdr; /* TCP header of the new packet */ + uint32 ip_hdr_len; /* IP header length of the new packet */ + uint32 cur_framelen; + uint8 flags; + + ether_hdr = PKTDATA(dhdp->osh, pkt); + cur_framelen = PKTLEN(dhdp->osh, pkt); + + ether_type = ether_hdr[12] << 8 | ether_hdr[13]; + + if (ether_type != ETHER_TYPE_IP) { + DHD_TRACE(("%s %d: Not a IP packet 0x%x\n", + __FUNCTION__, __LINE__, ether_type)); + return FLAG_OTHERS; + } + + ip_hdr = ether_hdr + ETHER_HDR_LEN; + cur_framelen -= ETHER_HDR_LEN; + + if (cur_framelen < IPV4_MIN_HEADER_LEN) { + return FLAG_OTHERS; + } + + ip_hdr_len = IPV4_HLEN(ip_hdr); + if (IP_VER(ip_hdr) != IP_VER_4 || IPV4_PROT(ip_hdr) != IP_PROT_TCP) { + DHD_TRACE(("%s %d: Not IPv4 nor TCP! 
ip ver %d, prot %d\n", + __FUNCTION__, __LINE__, IP_VER(ip_hdr), IPV4_PROT(ip_hdr))); + return FLAG_OTHERS; + } + + tcp_hdr = ip_hdr + ip_hdr_len; + + flags = (uint8)tcp_hdr[TCP_FLAGS_OFFSET]; + + if (flags & TCP_FLAG_SYN) { + if (flags & TCP_FLAG_ACK) { + return FLAG_SYNCACK; + } + return FLAG_SYNC; + } + return FLAG_OTHERS; +} +#endif /* DHDTCPSYNC_FLOOD_BLK */ diff --git a/bcmdhd.100.10.315.x/dhd_ip.h b/bcmdhd.100.10.315.x/dhd_ip.h index 846eb4f..6bda814 100644 --- a/bcmdhd.100.10.315.x/dhd_ip.h +++ b/bcmdhd.100.10.315.x/dhd_ip.h @@ -3,7 +3,7 @@ * * Provides type definitions and function prototypes used to parse ip packet. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -26,17 +26,17 @@ * * <> * - * $Id: dhd_ip.h 536854 2015-02-24 13:17:29Z $ + * $Id: dhd_ip.h 790572 2018-11-26 11:03:46Z $ */ #ifndef _dhd_ip_h_ #define _dhd_ip_h_ -#ifdef DHDTCPACK_SUPPRESS +#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK) #include #include #include -#endif /* DHDTCPACK_SUPPRESS */ +#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */ typedef enum pkt_frag { @@ -48,6 +48,17 @@ typedef enum pkt_frag extern pkt_frag_t pkt_frag_info(osl_t *osh, void *p); +#ifdef DHDTCPSYNC_FLOOD_BLK +typedef enum tcp_hdr_flags { + FLAG_SYNC, + FLAG_SYNCACK, + FLAG_RST, + FLAG_OTHERS +} tcp_hdr_flag_t; + +extern tcp_hdr_flag_t dhd_tcpdata_get_flag(dhd_pub_t *dhdp, void *pkt); +#endif /* DHDTCPSYNC_FLOOD_BLK */ + #ifdef DHDTCPACK_SUPPRESS #define TCPACKSZMIN (ETHER_HDR_LEN + IPV4_MIN_HEADER_LEN + TCP_MIN_HEADER_LEN) /* Size of MAX possible TCP ACK packet. 
Extra bytes for IP/TCP option fields */ diff --git a/bcmdhd.100.10.315.x/dhd_linux.c b/bcmdhd.100.10.315.x/dhd_linux.c index cc81943..53952ce 100644 --- a/bcmdhd.100.10.315.x/dhd_linux.c +++ b/bcmdhd.100.10.315.x/dhd_linux.c @@ -2,7 +2,7 @@ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface * Basically selected code segments from usb-cdc.c and usb-rndis.c * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: dhd_linux.c 771880 2018-07-12 07:25:59Z $ + * $Id: dhd_linux.c 822756 2019-05-30 13:20:26Z $ */ #include @@ -54,7 +54,6 @@ #include #include #include -#include #include #include #include @@ -64,11 +63,10 @@ #include #endif /* ENABLE_ADAPTIVE_SCHED */ #include -#ifdef DHD_DUMP_MNGR #include -#endif /* DHD_DUMP_MNGR */ #include #include +#include #include #include @@ -81,10 +79,10 @@ #include #include <802.3.h> -#include #include #include #include +#include #ifdef DHD_WET #include #endif /* DHD_WET */ @@ -94,6 +92,9 @@ #include #include #include +#ifdef WL_ESCAN +#include +#endif #include #include #include @@ -133,10 +134,13 @@ #include <802.1d.h> #endif /* AMPDU_VO_ENABLE */ -#ifdef DHDTCPACK_SUPPRESS +#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK) #include -#endif /* DHDTCPACK_SUPPRESS */ +#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */ #include +#ifdef DHD_4WAYM4_FAIL_DISCONNECT +#include +#endif /* DHD_4WAYM4_FAIL_DISCONNECT */ #ifdef DHD_DEBUG_PAGEALLOC typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len); void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len); @@ -145,92 +149,23 @@ extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle); #define IP_PROT_RESERVED 0xFF -#if defined(DHD_LB) -#if !defined(PCIE_FULL_DONGLE) -#error "DHD Loadbalancing 
only supported on PCIE_FULL_DONGLE" -#endif /* !PCIE_FULL_DONGLE */ -#endif /* DHD_LB */ - -#if defined(DHD_LB_RXP) || defined(DHD_LB_RXC) || defined(DHD_LB_TXC) || \ - defined(DHD_LB_STATS) -#if !defined(DHD_LB) -#error "DHD loadbalance derivatives are supported only if DHD_LB is defined" -#endif /* !DHD_LB */ -#endif /* DHD_LB_RXP || DHD_LB_RXC || DHD_LB_TXC || DHD_LB_STATS */ - -#if defined(DHD_LB) -/* Dynamic CPU selection for load balancing */ -#include -#include -#include -#include -#include - -#if !defined(DHD_LB_PRIMARY_CPUS) -#define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */ -#endif // endif -#if !defined(DHD_LB_SECONDARY_CPUS) -#define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */ -#endif // endif - -#define HIST_BIN_SIZE 9 - -static void dhd_rx_napi_dispatcher_fn(struct work_struct * work); - -#if defined(DHD_LB_TXP) -static void dhd_lb_tx_handler(unsigned long data); -static void dhd_tx_dispatcher_work(struct work_struct * work); -static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp); -static void dhd_lb_tx_dispatch(dhd_pub_t *dhdp); - -/* Pkttag not compatible with PROP_TXSTATUS or WLFC */ -typedef struct dhd_tx_lb_pkttag_fr { - struct net_device *net; - int ifidx; -} dhd_tx_lb_pkttag_fr_t; +#ifdef DHD_4WAYM4_FAIL_DISCONNECT +static void dhd_m4_state_handler(struct work_struct * work); +#endif /* DHD_4WAYM4_FAIL_DISCONNECT */ -#define DHD_LB_TX_PKTTAG_SET_NETDEV(tag, netdevp) ((tag)->net = netdevp) -#define DHD_LB_TX_PKTTAG_NETDEV(tag) ((tag)->net) - -#define DHD_LB_TX_PKTTAG_SET_IFIDX(tag, ifidx) ((tag)->ifidx = ifidx) -#define DHD_LB_TX_PKTTAG_IFIDX(tag) ((tag)->ifidx) -#endif /* DHD_LB_TXP */ -#endif /* DHD_LB */ +#ifdef DHDTCPSYNC_FLOOD_BLK +static void dhd_blk_tsfl_handler(struct work_struct * work); +#endif /* DHDTCPSYNC_FLOOD_BLK */ #ifdef WL_NATOE #include #endif /* WL_NATOE */ -#ifdef WL_MONITOR -#include -#define MAX_RADIOTAP_SIZE 256 /* Maximum size to hold HE Radiotap header format */ -#define MAX_MON_PKT_SIZE (4096 + 
MAX_RADIOTAP_SIZE) -#endif /* WL_MONITOR */ - -#define htod32(i) (i) -#define htod16(i) (i) -#define dtoh32(i) (i) -#define dtoh16(i) (i) -#define htodchanspec(i) (i) -#define dtohchanspec(i) (i) - -#ifdef BLOCK_IPV6_PACKET -#define HEX_PREF_STR "0x" -#define UNI_FILTER_STR "010000000000" -#define ZERO_ADDR_STR "000000000000" -#define ETHER_TYPE_STR "0000" -#define IPV6_FILTER_STR "20" -#define ZERO_TYPE_STR "00" -#endif /* BLOCK_IPV6_PACKET */ - #if defined(SOFTAP) extern bool ap_cfg_running; extern bool ap_fw_loaded; #endif // endif -extern void dhd_dump_eapol_4way_message(dhd_pub_t *dhd, char *ifname, - char *dump_data, bool direction); - #ifdef FIX_CPU_MIN_CLOCK #include #endif /* FIX_CPU_MIN_CLOCK */ @@ -258,9 +193,6 @@ static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL; #endif // endif #include -#ifdef WL_ESCAN -#include -#endif /* Maximum STA per radio */ #define DHD_MAX_STA 32 @@ -298,27 +230,23 @@ static struct notifier_block dhd_inet6addr_notifier = { static bool dhd_inet6addr_notifier_registered = FALSE; #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) +#if defined(CONFIG_PM_SLEEP) #include volatile bool dhd_mmc_suspend = FALSE; DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait); -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ +#endif /* defined(CONFIG_PM_SLEEP) */ #if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(FORCE_WOWLAN) extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable); #endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) -static void dhd_hang_process(void *dhd_info, void *event_data, u8 event); -#endif // endif -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +static void dhd_hang_process(struct work_struct *work_data); MODULE_LICENSE("GPL and additional rights"); -#endif /* LinuxVer */ #if defined(MULTIPLE_SUPPLICANT) #if 
(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) DEFINE_MUTEX(_dhd_mutex_lock_); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ -#endif +#endif #ifdef CONFIG_BCM_DETECT_CONSECUTIVE_HANG #define MAX_CONSECUTIVE_HANG_COUNTS 5 @@ -341,16 +269,10 @@ extern bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx); extern void dhd_wlfc_plat_init(void *dhd); extern void dhd_wlfc_plat_deinit(void *dhd); #endif /* PROP_TXSTATUS */ +#ifdef USE_DYNAMIC_F2_BLKSIZE extern uint sd_f2_blocksize; extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size); - -#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) -const char * -print_tainted() -{ - return ""; -} -#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */ +#endif /* USE_DYNAMIC_F2_BLKSIZE */ /* Linux wireless extension support */ #if defined(WL_WIRELESS_EXT) @@ -376,18 +298,6 @@ extern wl_iw_extra_params_t g_wl_iw_params; #include #endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */ -#if defined(BCMPCIE) -extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval); -#else -extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd); -#endif /* OEM_ANDROID && BCMPCIE */ - -#ifdef PKT_FILTER_SUPPORT -extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg); -extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode); -extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id); -#endif // endif - #if defined(PKT_FILTER_SUPPORT) && defined(APF) static int __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id, u8* program, uint32 program_len); @@ -396,80 +306,24 @@ static int __dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id, static int __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id); #endif /* PKT_FILTER_SUPPORT && APF */ -#if defined(ARGOS_NOTIFY_CB) -int argos_register_notifier_init(struct net_device *net); -int 
argos_register_notifier_deinit(void); - -extern int sec_argos_register_notifier(struct notifier_block *n, char *label); -extern int sec_argos_unregister_notifier(struct notifier_block *n, char *label); - -static int argos_status_notifier_wifi_cb(struct notifier_block *notifier, - unsigned long speed, void *v); -static int argos_status_notifier_p2p_cb(struct notifier_block *notifier, - unsigned long speed, void *v); - -/* PCIe interrupt affinity threshold (Mbps) */ -#define PCIE_IRQ_AFFINITY_THRESHOLD 300 +#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) && defined(DHD_FW_COREDUMP) +static int dhd_wait_for_file_dump(dhd_pub_t *dhdp); +#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT && DHD_FW_COREDUMP */ +#if defined(ARGOS_NOTIFY_CB) /* ARGOS notifer data */ static struct notifier_block argos_wifi; /* STA */ static struct notifier_block argos_p2p; /* P2P */ - -typedef struct { - struct net_device *wlan_primary_netdev; - int argos_rps_cpus_enabled; -} argos_rps_ctrl; - argos_rps_ctrl argos_rps_ctrl_data; -#define RPS_TPUT_THRESHOLD 300 -#define DELAY_TO_CLEAR_RPS_CPUS 300 #endif // endif -#if defined(BT_OVER_SDIO) -extern void wl_android_set_wifi_on_flag(bool enable); -#endif /* BT_OVER_SDIO */ - #ifdef DHD_FW_COREDUMP -static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event); +static int dhd_mem_dump(void *dhd_info, void *event_info, u8 event); #endif /* DHD_FW_COREDUMP */ #ifdef DHD_LOG_DUMP -/* 0: DLD_BUF_TYPE_GENERAL, 1: DLD_BUF_TYPE_PRESERVE -* 2: DLD_BUF_TYPE_SPECIAL -*/ -#define DLD_BUFFER_NUM 3 - -#ifndef CUSTOM_LOG_DUMP_BUFSIZE_MB -#define CUSTOM_LOG_DUMP_BUFSIZE_MB 4 /* DHD_LOG_DUMP_BUF_SIZE 4 MB static memory in kernel */ -#endif /* CUSTOM_LOG_DUMP_BUFSIZE_MB */ - -#define LOG_DUMP_TOTAL_BUFSIZE (1024 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB) -#define LOG_DUMP_GENERAL_MAX_BUFSIZE (384 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB) -#define LOG_DUMP_PRESERVE_MAX_BUFSIZE (128 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB) -#define LOG_DUMP_SPECIAL_MAX_BUFSIZE (8 * 
1024) -#define LOG_DUMP_ECNTRS_MAX_BUFSIZE (384 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB) -#define LOG_DUMP_FILTER_MAX_BUFSIZE (128 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB) -#define LOG_DUMP_MAX_FILESIZE (8 *1024 * 1024) /* 8 MB default */ -#ifdef CONFIG_LOG_BUF_SHIFT -/* 15% of kernel log buf size, if for example klog buf size is 512KB -* 15% of 512KB ~= 80KB -*/ -#define LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE \ - (15 * ((1 << CONFIG_LOG_BUF_SHIFT)/100)) -#endif /* CONFIG_LOG_BUF_SHIFT */ -#define LOG_DUMP_COOKIE_BUFSIZE 1024u struct dhd_log_dump_buf g_dld_buf[DLD_BUFFER_NUM]; -static int dld_buf_size[DLD_BUFFER_NUM] = { - LOG_DUMP_GENERAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_GENERAL */ - LOG_DUMP_PRESERVE_MAX_BUFSIZE, /* DLD_BUF_TYPE_PRESERVE */ - LOG_DUMP_SPECIAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_SPECIAL */ -}; - -typedef struct { - char *hdr_str; - log_dump_section_type_t sec_type; -} dld_hdr_t; /* Only header for log dump buffers is stored in array * header for sections like 'dhd dump', 'ext trap' @@ -482,13 +336,22 @@ dld_hdr_t dld_hdrs[DLD_BUFFER_NUM] = { {SPECIAL_LOG_HDR, LOG_DUMP_SECTION_SPECIAL} }; +static int dld_buf_size[DLD_BUFFER_NUM] = { + LOG_DUMP_GENERAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_GENERAL */ + LOG_DUMP_PRESERVE_MAX_BUFSIZE, /* DLD_BUF_TYPE_PRESERVE */ + LOG_DUMP_SPECIAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_SPECIAL */ +}; + static void dhd_log_dump_init(dhd_pub_t *dhd); static void dhd_log_dump_deinit(dhd_pub_t *dhd); static void dhd_log_dump(void *handle, void *event_info, u8 event); static int do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type); - -#define DHD_PRINT_BUF_NAME_LEN 30 +static int dhd_log_flush(dhd_pub_t *dhdp, log_dump_type_t *type); +static void dhd_get_time_str(dhd_pub_t *dhdp, char *time_str, int size); +void dhd_get_debug_dump_len(void *handle, struct sk_buff *skb, void *event_info, u8 event); +void cfgvendor_log_dump_len(dhd_pub_t *dhdp, log_dump_type_t *type, struct sk_buff *skb); static void dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, 
unsigned int size); +static void dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type); #endif /* DHD_LOG_DUMP */ #ifdef DHD_PCIE_NATIVE_RUNTIMEPM @@ -519,77 +382,12 @@ dhd_pub_t *g_dhd_pub = NULL; #include "dhd_bt_interface.h" #endif /* defined (BT_OVER_SDIO) */ -#ifdef SHOW_LOGTRACE -static int dhd_trace_open_proc(struct inode *inode, struct file *file); -ssize_t dhd_trace_read_proc(struct file *file, char *buffer, size_t tt, loff_t *loff); - -static const struct file_operations proc_file_fops = { - .read = dhd_trace_read_proc, - .open = dhd_trace_open_proc, - .release = seq_release, -}; -#endif // endif - #ifdef WL_STATIC_IF bool dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev); #endif /* WL_STATIC_IF */ atomic_t exit_in_progress = ATOMIC_INIT(0); -typedef struct dhd_if_event { - struct list_head list; - wl_event_data_if_t event; - char name[IFNAMSIZ+1]; - uint8 mac[ETHER_ADDR_LEN]; -} dhd_if_event_t; - -/* Interface control information */ -typedef struct dhd_if { - struct dhd_info *info; /* back pointer to dhd_info */ - /* OS/stack specifics */ - struct net_device *net; - int idx; /* iface idx in dongle */ - uint subunit; /* subunit */ - uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */ - bool set_macaddress; - bool set_multicast; - uint8 bssidx; /* bsscfg index for the interface */ - bool attached; /* Delayed attachment when unset */ - bool txflowcontrol; /* Per interface flow control indicator */ - char name[IFNAMSIZ+1]; /* linux interface name */ - char dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */ - struct net_device_stats stats; -#ifdef PCIE_FULL_DONGLE - struct list_head sta_list; /* sll of associated stations */ - spinlock_t sta_list_lock; /* lock for manipulating sll */ -#endif /* PCIE_FULL_DONGLE */ - uint32 ap_isolate; /* ap-isolation settings */ -#ifdef DHD_L2_FILTER - bool parp_enable; - bool parp_discard; - bool parp_allnode; - arp_table_t *phnd_arp_table; - /* for Per BSS modification */ - bool 
dhcp_unicast; - bool block_ping; - bool grat_arp; - bool block_tdls; -#endif /* DHD_L2_FILTER */ -#ifdef DHD_MCAST_REGEN - bool mcast_regen_bss_enable; -#endif // endif - bool rx_pkt_chainable; /* set all rx packet to chainable config by default */ - cumm_ctr_t cumm_ctr; /* cummulative queue length of child flowrings */ - uint8 tx_paths_active; - bool del_in_progress; - bool static_if; /* used to avoid some operations on static_if */ -} dhd_if_t; - -struct ipv6_work_info_t { - uint8 if_idx; - char ipv6_addr[IPV6_ADDR_LEN]; - unsigned long event; -}; static void dhd_process_daemon_msg(struct sk_buff *skb); static void dhd_destroy_to_notifier_skt(void); static int dhd_create_to_notifier_skt(void); @@ -601,350 +399,7 @@ struct netlink_kernel_cfg dhd_netlink_cfg = { .groups = 1, .input = dhd_process_daemon_msg, }; -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */ - -typedef struct dhd_dump { - uint8 *buf; - int bufsize; - uint8 *hscb_buf; - int hscb_bufsize; -} dhd_dump_t; - -#ifdef DHD_PCIE_NATIVE_RUNTIMEPM -struct dhd_rx_tx_work { - struct work_struct work; - struct sk_buff *skb; - struct net_device *net; - struct dhd_pub *pub; -}; -#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ - -/* When Perimeter locks are deployed, any blocking calls must be preceeded - * with a PERIM UNLOCK and followed by a PERIM LOCK. - * Examples of blocking calls are: schedule_timeout(), down_interruptible(), - * wait_event_timeout(). - */ - -/* Local private structure (extension of pub) */ -typedef struct dhd_info { -#if defined(WL_WIRELESS_EXT) - wl_iw_t iw; /* wireless extensions state (must be first) */ -#endif /* defined(WL_WIRELESS_EXT) */ - dhd_pub_t pub; - /* for supporting multiple interfaces. - * static_ifs hold the net ifaces without valid FW IF - */ - dhd_if_t *iflist[DHD_MAX_IFS + DHD_MAX_STATIC_IFS]; - - wifi_adapter_info_t *adapter; /* adapter information, interrupt, fw path etc. 
*/ - char fw_path[PATH_MAX]; /* path to firmware image */ - char nv_path[PATH_MAX]; /* path to nvram vars file */ - char clm_path[PATH_MAX]; /* path to clm vars file */ - char conf_path[PATH_MAX]; /* path to config vars file */ -#ifdef DHD_UCODE_DOWNLOAD - char uc_path[PATH_MAX]; /* path to ucode image */ -#endif /* DHD_UCODE_DOWNLOAD */ - - /* serialize dhd iovars */ - struct mutex dhd_iovar_mutex; - - struct semaphore proto_sem; -#ifdef PROP_TXSTATUS - spinlock_t wlfc_spinlock; - -#ifdef BCMDBUS - ulong wlfc_lock_flags; - ulong wlfc_pub_lock_flags; -#endif /* BCMDBUS */ -#endif /* PROP_TXSTATUS */ - wait_queue_head_t ioctl_resp_wait; - wait_queue_head_t d3ack_wait; - wait_queue_head_t dhd_bus_busy_state_wait; - wait_queue_head_t dmaxfer_wait; - uint32 default_wd_interval; - - struct timer_list timer; - bool wd_timer_valid; - struct tasklet_struct tasklet; - spinlock_t sdlock; - spinlock_t txqlock; - spinlock_t dhd_lock; -#ifdef BCMDBUS - ulong txqlock_flags; -#else - - struct semaphore sdsem; - tsk_ctl_t thr_dpc_ctl; - tsk_ctl_t thr_wdt_ctl; -#endif /* BCMDBUS */ - - tsk_ctl_t thr_rxf_ctl; - spinlock_t rxf_lock; - bool rxthread_enabled; - - /* Wakelocks */ -#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) - struct wake_lock wl_wifi; /* Wifi wakelock */ - struct wake_lock wl_rxwake; /* Wifi rx wakelock */ - struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */ - struct wake_lock wl_wdwake; /* Wifi wd wakelock */ - struct wake_lock wl_evtwake; /* Wifi event wakelock */ - struct wake_lock wl_pmwake; /* Wifi pm handler wakelock */ - struct wake_lock wl_txflwake; /* Wifi tx flow wakelock */ -#ifdef BCMPCIE_OOB_HOST_WAKE - struct wake_lock wl_intrwake; /* Host wakeup wakelock */ -#endif /* BCMPCIE_OOB_HOST_WAKE */ -#ifdef DHD_USE_SCAN_WAKELOCK - struct wake_lock wl_scanwake; /* Wifi scan wakelock */ -#endif /* DHD_USE_SCAN_WAKELOCK */ -#endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */ - -#if 
(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) - /* net_device interface lock, prevent race conditions among net_dev interface - * calls and wifi_on or wifi_off - */ - struct mutex dhd_net_if_mutex; - struct mutex dhd_suspend_mutex; -#if defined(PKT_FILTER_SUPPORT) && defined(APF) - struct mutex dhd_apf_mutex; -#endif /* PKT_FILTER_SUPPORT && APF */ -#endif // endif - spinlock_t wakelock_spinlock; - spinlock_t wakelock_evt_spinlock; - uint32 wakelock_counter; - int wakelock_wd_counter; - int wakelock_rx_timeout_enable; - int wakelock_ctrl_timeout_enable; - bool waive_wakelock; - uint32 wakelock_before_waive; - - /* Thread to issue ioctl for multicast */ - wait_queue_head_t ctrl_wait; - atomic_t pend_8021x_cnt; - dhd_attach_states_t dhd_state; -#ifdef SHOW_LOGTRACE - dhd_event_log_t event_data; -#endif /* SHOW_LOGTRACE */ - -#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) - struct early_suspend early_suspend; -#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */ - -#ifdef ARP_OFFLOAD_SUPPORT - u32 pend_ipaddr; -#endif /* ARP_OFFLOAD_SUPPORT */ -#ifdef DHDTCPACK_SUPPRESS - spinlock_t tcpack_lock; -#endif /* DHDTCPACK_SUPPRESS */ -#ifdef FIX_CPU_MIN_CLOCK - bool cpufreq_fix_status; - struct mutex cpufreq_fix; - struct pm_qos_request dhd_cpu_qos; -#ifdef FIX_BUS_MIN_CLOCK - struct pm_qos_request dhd_bus_qos; -#endif /* FIX_BUS_MIN_CLOCK */ -#endif /* FIX_CPU_MIN_CLOCK */ - void *dhd_deferred_wq; -#ifdef DEBUG_CPU_FREQ - struct notifier_block freq_trans; - int __percpu *new_freq; -#endif // endif - unsigned int unit; - struct notifier_block pm_notifier; -#ifdef DHD_PSTA - uint32 psta_mode; /* PSTA or PSR */ -#endif /* DHD_PSTA */ -#ifdef DHD_WET - uint32 wet_mode; -#endif /* DHD_WET */ -#ifdef DHD_DEBUG - dhd_dump_t *dump; - struct timer_list join_timer; - u32 join_timeout_val; - bool join_timer_active; - uint scan_time_count; - struct timer_list scan_timer; - bool scan_timer_active; -#endif // endif -#if defined(DHD_LB) - /* CPU Load 
Balance dynamic CPU selection */ - - /* Variable that tracks the currect CPUs available for candidacy */ - cpumask_var_t cpumask_curr_avail; - - /* Primary and secondary CPU mask */ - cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */ - cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */ - - struct notifier_block cpu_notifier; - - /* Tasklet to handle Tx Completion packet freeing */ - struct tasklet_struct tx_compl_tasklet; - atomic_t tx_compl_cpu; - - /* Tasklet to handle RxBuf Post during Rx completion */ - struct tasklet_struct rx_compl_tasklet; - atomic_t rx_compl_cpu; - - /* Napi struct for handling rx packet sendup. Packets are removed from - * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then - * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled - * to run to rx_napi_cpu. - */ - struct sk_buff_head rx_pend_queue ____cacheline_aligned; - struct sk_buff_head rx_napi_queue ____cacheline_aligned; - struct napi_struct rx_napi_struct ____cacheline_aligned; - atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */ - struct net_device *rx_napi_netdev; /* netdev of primary interface */ - - struct work_struct rx_napi_dispatcher_work; - struct work_struct tx_compl_dispatcher_work; - struct work_struct tx_dispatcher_work; - struct work_struct rx_compl_dispatcher_work; - - /* Number of times DPC Tasklet ran */ - uint32 dhd_dpc_cnt; - /* Number of times NAPI processing got scheduled */ - uint32 napi_sched_cnt; - /* Number of times NAPI processing ran on each available core */ - uint32 *napi_percpu_run_cnt; - /* Number of times RX Completions got scheduled */ - uint32 rxc_sched_cnt; - /* Number of times RX Completion ran on each available core */ - uint32 *rxc_percpu_run_cnt; - /* Number of times TX Completions got scheduled */ - uint32 txc_sched_cnt; - /* Number of times TX Completions ran on each available core */ - uint32 *txc_percpu_run_cnt; - /* CPU status */ - /* Number of times 
each CPU came online */ - uint32 *cpu_online_cnt; - /* Number of times each CPU went offline */ - uint32 *cpu_offline_cnt; - - /* Number of times TX processing run on each core */ - uint32 *txp_percpu_run_cnt; - /* Number of times TX start run on each core */ - uint32 *tx_start_percpu_run_cnt; - - /* Tx load balancing */ - - /* TODO: Need to see if batch processing is really required in case of TX - * processing. In case of RX the Dongle can send a bunch of rx completions, - * hence we took a 3 queue approach - * enque - adds the skbs to rx_pend_queue - * dispatch - uses a lock and adds the list of skbs from pend queue to - * napi queue - * napi processing - copies the pend_queue into a local queue and works - * on it. - * But for TX its going to be 1 skb at a time, so we are just thinking - * of using only one queue and use the lock supported skb queue functions - * to add and process it. If its in-efficient we'll re-visit the queue - * design. - */ - - /* When the NET_TX tries to send a TX packet put it into tx_pend_queue */ - /* struct sk_buff_head tx_pend_queue ____cacheline_aligned; */ - /* - * From the Tasklet that actually sends out data - * copy the list tx_pend_queue into tx_active_queue. There by we need - * to spinlock to only perform the copy the rest of the code ie to - * construct the tx_pend_queue and the code to process tx_active_queue - * can be lockless. 
The concept is borrowed as is from RX processing - */ - /* struct sk_buff_head tx_active_queue ____cacheline_aligned; */ - - /* Control TXP in runtime, enable by default */ - atomic_t lb_txp_active; - - /* - * When the NET_TX tries to send a TX packet put it into tx_pend_queue - * For now, the processing tasklet will also direcly operate on this - * queue - */ - struct sk_buff_head tx_pend_queue ____cacheline_aligned; - - /* cpu on which the DHD Tx is happenning */ - atomic_t tx_cpu; - - /* CPU on which the Network stack is calling the DHD's xmit function */ - atomic_t net_tx_cpu; - - /* Tasklet context from which the DHD's TX processing happens */ - struct tasklet_struct tx_tasklet; - - /* - * Consumer Histogram - NAPI RX Packet processing - * ----------------------------------------------- - * On Each CPU, when the NAPI RX Packet processing call back was invoked - * how many packets were processed is captured in this data structure. - * Now its difficult to capture the "exact" number of packets processed. - * So considering the packet counter to be a 32 bit one, we have a - * bucket with 8 bins (2^1, 2^2 ... 2^8). The "number" of packets - * processed is rounded off to the next power of 2 and put in the - * approriate "bin" the value in the bin gets incremented. 
- * For example, assume that in CPU 1 if NAPI Rx runs 3 times - * and the packet count processed is as follows (assume the bin counters are 0) - * iteration 1 - 10 (the bin counter 2^4 increments to 1) - * iteration 2 - 30 (the bin counter 2^5 increments to 1) - * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2) - */ - uint32 *napi_rx_hist[HIST_BIN_SIZE]; - uint32 *txc_hist[HIST_BIN_SIZE]; - uint32 *rxc_hist[HIST_BIN_SIZE]; -#endif /* DHD_LB */ - -#ifdef SHOW_LOGTRACE -#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE - tsk_ctl_t thr_logtrace_ctl; -#else - struct delayed_work event_log_dispatcher_work; -#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */ -#endif /* SHOW_LOGTRACE */ - -#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) -#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */ - struct kobject dhd_kobj; - struct timer_list timesync_timer; -#if defined(BT_OVER_SDIO) - char btfw_path[PATH_MAX]; -#endif /* defined (BT_OVER_SDIO) */ -#ifdef WL_MONITOR - struct net_device *monitor_dev; /* monitor pseudo device */ - struct sk_buff *monitor_skb; - uint monitor_len; - uint monitor_type; /* monitor pseudo device */ -#endif /* WL_MONITOR */ -#if defined(BT_OVER_SDIO) - struct mutex bus_user_lock; /* lock for sdio bus apis shared between WLAN & BT */ - int bus_user_count; /* User counts of sdio bus shared between WLAN & BT */ -#endif /* BT_OVER_SDIO */ -#ifdef SHOW_LOGTRACE - struct sk_buff_head evt_trace_queue ____cacheline_aligned; -#endif // endif -#ifdef DHD_PCIE_NATIVE_RUNTIMEPM - struct workqueue_struct *tx_wq; - struct workqueue_struct *rx_wq; -#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ -#ifdef DHD_DEBUG_UART - bool duart_execute; -#endif /* DHD_DEBUG_UART */ - struct mutex logdump_lock; - /* indicates mem_dump was scheduled as work queue or called directly */ - bool scheduled_memdump; - /* indicates sssrdump is called directly instead of scheduling work queue */ - bool no_wq_sssrdump; -#if defined(PCIE_FULL_DONGLE) - /* Spinlock used in Linux 
implementation of dhd_pcie_backplane_access_[un]lock() */ - spinlock_t backplane_access_lock; -#endif /* defined(PCIE_FULL_DONGLE) */ -} dhd_info_t; - -#ifdef WL_MONITOR -#define MONPKT_EXTRA_LEN 48u -#endif /* WL_MONITOR */ - -#define DHDIF_FWDER(dhdif) FALSE +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */ #if defined(BT_OVER_SDIO) /* Flag to indicate if driver is initialized */ @@ -980,9 +435,9 @@ int op_mode = 0; int disable_proptx = 0; module_param(op_mode, int, 0644); extern int wl_control_wl_start(struct net_device *dev); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (defined(BCMLXSDMMC) || defined(BCMDBUS)) +#if defined(BCMLXSDMMC) || defined(BCMDBUS) struct semaphore dhd_registration_sem; -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ +#endif /* BCMXSDMMC */ #ifdef DHD_LOG_DUMP int logdump_max_filesize = LOG_DUMP_MAX_FILESIZE; @@ -998,6 +453,12 @@ int logdump_ecntr_enable = TRUE; int logdump_ecntr_enable = FALSE; #endif /* EWP_ECNTRS_LOGGING */ module_param(logdump_ecntr_enable, int, 0644); +#ifdef EWP_RTT_LOGGING +int logdump_rtt_enable = TRUE; +#else +int logdump_rtt_enable = FALSE; +#endif /* EWP_RTT_LOGGING */ +module_param(logdump_rtt_enable, int, 0644); #endif /* DHD_LOG_DUMP */ #ifdef EWP_EDL int host_edl_support = TRUE; @@ -1025,6 +486,10 @@ extern void dhd_netdev_free(struct net_device *ndev); #endif /* WL_CFG80211 */ static dhd_if_t * dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev); +#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) +static void dhd_bridge_dev_set(dhd_info_t * dhd, int ifidx, struct net_device * dev); +#endif /* defiend(WLDWDS) && defined(FOURADDR_AUTO_BRG) */ + #if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER)) /* update rx_pkt_chainable state of dhd interface */ static void dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx); @@ -1049,9 +514,10 @@ module_param(dhd_arp_enable, uint, 0); /* ARP offload agent mode : Enable ARP Host 
Auto-Reply and ARP Peer Auto-Reply */ #ifdef ENABLE_ARP_SNOOP_MODE -uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP | ARP_OL_HOST_AUTO_REPLY; +uint dhd_arp_mode = (ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP | ARP_OL_HOST_AUTO_REPLY | + ARP_OL_UPDATE_HOST_CACHE); #else -uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY; +uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_UPDATE_HOST_CACHE; #endif /* ENABLE_ARP_SNOOP_MODE */ module_param(dhd_arp_mode, uint, 0); @@ -1146,6 +612,7 @@ module_param(enable_msi, uint, 0); #endif /* PCIE_FULL_DONGLE */ #ifdef DHD_SSSR_DUMP +int dhdpcie_sssr_dump_get_before_after_len(dhd_pub_t *dhd, uint32 *arr_len); extern uint support_sssr_dump; module_param(support_sssr_dump, uint, 0); #endif /* DHD_SSSR_DUMP */ @@ -1155,10 +622,10 @@ static int dhd_found = 0; static int instance_base = 0; /* Starting instance number */ module_param(instance_base, int, 0644); -#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE) +#if defined(DHD_LB_RXP) static int dhd_napi_weight = 32; module_param(dhd_napi_weight, int, 0644); -#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */ +#endif /* DHD_LB_RXP */ #ifdef PCIE_FULL_DONGLE extern int h2d_max_txpost; @@ -1187,62 +654,6 @@ uint32 tpoweron_scale = FORCE_TPOWERON_50US; /* default 50us */ module_param(tpoweron_scale, uint, 0644); #endif /* FORCE_TPOWERON */ -#ifdef DHD_DHCP_DUMP -struct bootp_fmt { - struct iphdr ip_header; - struct udphdr udp_header; - uint8 op; - uint8 htype; - uint8 hlen; - uint8 hops; - uint32 transaction_id; - uint16 secs; - uint16 flags; - uint32 client_ip; - uint32 assigned_ip; - uint32 server_ip; - uint32 relay_ip; - uint8 hw_address[16]; - uint8 server_name[64]; - uint8 file_name[128]; - uint8 options[312]; -}; - -static const uint8 bootp_magic_cookie[4] = { 99, 130, 83, 99 }; -static const char dhcp_ops[][10] = { - "NA", "REQUEST", "REPLY" -}; -static const char dhcp_types[][10] = { - "NA", "DISCOVER", "OFFER", "REQUEST", 
"DECLINE", "ACK", "NAK", "RELEASE", "INFORM" -}; -static void dhd_dhcp_dump(char *ifname, uint8 *pktdata, bool tx); -#endif /* DHD_DHCP_DUMP */ - -#ifdef FILTER_IE -#define FILTER_IE_PATH "/etc/wifi/filter_ie" -#define FILTER_IE_BUFSZ 1024 /* ioc buffsize for FILTER_IE */ -#define FILE_BLOCK_READ_SIZE 256 -#define WL_FILTER_IE_IOV_HDR_SIZE OFFSETOF(wl_filter_ie_iov_v1_t, tlvs) -#endif /* FILTER_IE */ - -#define NULL_CHECK(p, s, err) \ - do { \ - if (!(p)) { \ - printk("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \ - err = BCME_ERROR; \ - return err; \ - } \ - } while (0) - -#ifdef DHD_ICMP_DUMP -#include -static void dhd_icmp_dump(char *ifname, uint8 *pktdata, bool tx); -#endif /* DHD_ICMP_DUMP */ - -/* Functions to manage sysfs interface for dhd */ -static int dhd_sysfs_init(dhd_info_t *dhd); -static void dhd_sysfs_exit(dhd_info_t *dhd); - #ifdef SHOW_LOGTRACE static char *logstrs_path = "/data/misc/wifi/logstrs.bin"; char *st_str_file_path = "/data/misc/wifi/rtecdc.bin"; @@ -1265,2129 +676,1022 @@ static int dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *s char *map_file); #endif /* SHOW_LOGTRACE */ -#if defined(DHD_LB) - -static void -dhd_lb_set_default_cpus(dhd_info_t *dhd) -{ - /* Default CPU allocation for the jobs */ - atomic_set(&dhd->rx_napi_cpu, 1); - atomic_set(&dhd->rx_compl_cpu, 2); - atomic_set(&dhd->tx_compl_cpu, 2); - atomic_set(&dhd->tx_cpu, 2); - atomic_set(&dhd->net_tx_cpu, 0); -} +#ifdef USE_WFA_CERT_CONF +int g_frameburst = 1; +#endif /* USE_WFA_CERT_CONF */ -static void -dhd_cpumasks_deinit(dhd_info_t *dhd) -{ - free_cpumask_var(dhd->cpumask_curr_avail); - free_cpumask_var(dhd->cpumask_primary); - free_cpumask_var(dhd->cpumask_primary_new); - free_cpumask_var(dhd->cpumask_secondary); - free_cpumask_var(dhd->cpumask_secondary_new); -} +static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd); -static int -dhd_cpumasks_init(dhd_info_t *dhd) -{ - int id; - uint32 cpus, num_cpus = num_possible_cpus(); - int ret = 0; +/* DHD 
Perimiter lock only used in router with bypass forwarding. */ +#define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0) +#define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0) +#define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0) - DHD_ERROR(("%s CPU masks primary(big)=0x%x secondary(little)=0x%x\n", __FUNCTION__, - DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS)); +#ifdef PCIE_FULL_DONGLE +#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock) +#define DHD_IF_STA_LIST_LOCK(ifp, flags) \ + spin_lock_irqsave(&(ifp)->sta_list_lock, (flags)) +#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \ + spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags)) - if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) || - !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) || - !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) || - !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) || - !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) { - DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__)); - ret = -ENOMEM; - goto fail; - } - - cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask); - cpumask_clear(dhd->cpumask_primary); - cpumask_clear(dhd->cpumask_secondary); +#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) +static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, + struct list_head *snapshot_list); +static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list); +#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); }) +#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); }) +#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */ +#endif /* PCIE_FULL_DONGLE */ - if (num_cpus > 32) { - DHD_ERROR(("%s max cpus must be 32, %d too big\n", __FUNCTION__, num_cpus)); - ASSERT(0); - } +/* Control fw roaming */ +#ifdef BCMCCX +uint dhd_roam_disable = 0; +#else +uint 
dhd_roam_disable = 0; +#endif /* BCMCCX */ - cpus = DHD_LB_PRIMARY_CPUS; - for (id = 0; id < num_cpus; id++) { - if (isset(&cpus, id)) - cpumask_set_cpu(id, dhd->cpumask_primary); - } +#ifdef BCMDBGFS +extern void dhd_dbgfs_init(dhd_pub_t *dhdp); +extern void dhd_dbgfs_remove(void); +#endif // endif - cpus = DHD_LB_SECONDARY_CPUS; - for (id = 0; id < num_cpus; id++) { - if (isset(&cpus, id)) - cpumask_set_cpu(id, dhd->cpumask_secondary); - } +static uint pcie_txs_metadata_enable = 0; /* Enable TX status metadta report */ +module_param(pcie_txs_metadata_enable, int, 0); - return ret; -fail: - dhd_cpumasks_deinit(dhd); - return ret; -} +/* Control radio state */ +uint dhd_radio_up = 1; -/* - * The CPU Candidacy Algorithm - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * The available CPUs for selection are divided into two groups - * Primary Set - A CPU mask that carries the First Choice CPUs - * Secondary Set - A CPU mask that carries the Second Choice CPUs. - * - * There are two types of Job, that needs to be assigned to - * the CPUs, from one of the above mentioned CPU group. The Jobs are - * 1) Rx Packet Processing - napi_cpu - * 2) Completion Processiong (Tx, RX) - compl_cpu - * - * To begin with both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes - * on-line/off-line the CPU candidacy algorithm is triggerd. The candidacy - * algo tries to pickup the first available non boot CPU (CPU0) for napi_cpu. - * If there are more processors free, it assigns one to compl_cpu. - * It also tries to ensure that both napi_cpu and compl_cpu are not on the same - * CPU, as much as possible. - * - * By design, both Tx and Rx completion jobs are run on the same CPU core, as it - * would allow Tx completion skb's to be released into a local free pool from - * which the rx buffer posts could have been serviced. it is important to note - * that a Tx packet may not have a large enough buffer for rx posting. 
- */ -void dhd_select_cpu_candidacy(dhd_info_t *dhd) -{ - uint32 primary_available_cpus; /* count of primary available cpus */ - uint32 secondary_available_cpus; /* count of secondary available cpus */ - uint32 napi_cpu = 0; /* cpu selected for napi rx processing */ - uint32 compl_cpu = 0; /* cpu selected for completion jobs */ - uint32 tx_cpu = 0; /* cpu selected for tx processing job */ +/* Network inteface name */ +char iface_name[IFNAMSIZ] = {'\0'}; +module_param_string(iface_name, iface_name, IFNAMSIZ, 0); - cpumask_clear(dhd->cpumask_primary_new); - cpumask_clear(dhd->cpumask_secondary_new); +/* The following are specific to the SDIO dongle */ - /* - * Now select from the primary mask. Even if a Job is - * already running on a CPU in secondary group, we still move - * to primary CPU. So no conditional checks. - */ - cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary, - dhd->cpumask_curr_avail); +/* IOCTL response timeout */ +int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT; - cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary, - dhd->cpumask_curr_avail); +/* DS Exit response timeout */ +int ds_exit_timeout_msec = DS_EXIT_TIMEOUT; - primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new); +/* Idle timeout for backplane clock */ +int dhd_idletime = DHD_IDLETIME_TICKS; +module_param(dhd_idletime, int, 0); - if (primary_available_cpus > 0) { - napi_cpu = cpumask_first(dhd->cpumask_primary_new); +/* Use polling */ +uint dhd_poll = FALSE; +module_param(dhd_poll, uint, 0); - /* If no further CPU is available, - * cpumask_next returns >= nr_cpu_ids - */ - tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new); - if (tx_cpu >= nr_cpu_ids) - tx_cpu = 0; +/* Use interrupts */ +uint dhd_intr = TRUE; +module_param(dhd_intr, uint, 0); - /* In case there are no more CPUs, do completions & Tx in same CPU */ - compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_primary_new); - if (compl_cpu >= nr_cpu_ids) - compl_cpu = tx_cpu; - } +/* SDIO Drive 
Strength (in milliamps) */ +uint dhd_sdiod_drive_strength = 6; +module_param(dhd_sdiod_drive_strength, uint, 0); - DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n", - __FUNCTION__, napi_cpu, compl_cpu, tx_cpu)); +#ifdef BCMSDIO +/* Tx/Rx bounds */ +extern uint dhd_txbound; +extern uint dhd_rxbound; +module_param(dhd_txbound, uint, 0); +module_param(dhd_rxbound, uint, 0); - /* -- Now check for the CPUs from the secondary mask -- */ - secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new); +/* Deferred transmits */ +extern uint dhd_deferred_tx; +module_param(dhd_deferred_tx, uint, 0); - DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n", - __FUNCTION__, secondary_available_cpus, nr_cpu_ids)); +#endif /* BCMSDIO */ - if (secondary_available_cpus > 0) { - /* At this point if napi_cpu is unassigned it means no CPU - * is online from Primary Group - */ - if (napi_cpu == 0) { - napi_cpu = cpumask_first(dhd->cpumask_secondary_new); - tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new); - compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new); - } else if (tx_cpu == 0) { - tx_cpu = cpumask_first(dhd->cpumask_secondary_new); - compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new); - } else if (compl_cpu == 0) { - compl_cpu = cpumask_first(dhd->cpumask_secondary_new); - } +#ifdef SDTEST +/* Echo packet generator (pkts/s) */ +uint dhd_pktgen = 0; +module_param(dhd_pktgen, uint, 0); - /* If no CPU was available for tx processing, choose CPU 0 */ - if (tx_cpu >= nr_cpu_ids) - tx_cpu = 0; +/* Echo packet len (0 => sawtooth, max 2040) */ +uint dhd_pktgen_len = 0; +module_param(dhd_pktgen_len, uint, 0); +#endif /* SDTEST */ - /* If no CPU was available for completion, choose CPU 0 */ - if (compl_cpu >= nr_cpu_ids) - compl_cpu = 0; - } - if ((primary_available_cpus == 0) && - (secondary_available_cpus == 0)) { - /* No CPUs available from primary or secondary mask */ - napi_cpu = 1; - compl_cpu = 0; - tx_cpu = 2; 
- } +#if defined(BCMSUP_4WAY_HANDSHAKE) +/* Use in dongle supplicant for 4-way handshake */ +#if defined(WLFBT) || defined(WL_ENABLE_IDSUP) +/* Enable idsup by default (if supported in fw) */ +uint dhd_use_idsup = 1; +#else +uint dhd_use_idsup = 0; +#endif /* WLFBT || WL_ENABLE_IDSUP */ +module_param(dhd_use_idsup, uint, 0); +#endif /* BCMSUP_4WAY_HANDSHAKE */ - DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n", - __FUNCTION__, napi_cpu, compl_cpu, tx_cpu)); +#ifndef BCMDBUS +/* Allow delayed firmware download for debug purpose */ +int allow_delay_fwdl = FALSE; +module_param(allow_delay_fwdl, int, 0); +#endif /* !BCMDBUS */ - ASSERT(napi_cpu < nr_cpu_ids); - ASSERT(compl_cpu < nr_cpu_ids); - ASSERT(tx_cpu < nr_cpu_ids); +#ifdef ECOUNTER_PERIODIC_DISABLE +uint enable_ecounter = FALSE; +#else +uint enable_ecounter = TRUE; +#endif // endif +module_param(enable_ecounter, uint, 0); - atomic_set(&dhd->rx_napi_cpu, napi_cpu); - atomic_set(&dhd->tx_compl_cpu, compl_cpu); - atomic_set(&dhd->rx_compl_cpu, compl_cpu); - atomic_set(&dhd->tx_cpu, tx_cpu); +/* TCM verification flag */ +uint dhd_tcm_test_enable = FALSE; +module_param(dhd_tcm_test_enable, uint, 0644); - return; -} +extern char dhd_version[]; +extern char fw_version[]; +extern char clm_version[]; -/* - * Function to handle CPU Hotplug notifications. - * One of the task it does is to trigger the CPU Candidacy algorithm - * for load balancing. 
- */ +int dhd_net_bus_devreset(struct net_device *dev, uint8 flag); +static void dhd_net_if_lock_local(dhd_info_t *dhd); +static void dhd_net_if_unlock_local(dhd_info_t *dhd); +static void dhd_suspend_lock(dhd_pub_t *dhdp); +static void dhd_suspend_unlock(dhd_pub_t *dhdp); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) +/* Monitor interface */ +int dhd_monitor_init(void *dhd_pub); +int dhd_monitor_uninit(void); -int dhd_cpu_startup_callback(unsigned int cpu) -{ - dhd_info_t *dhd = g_dhd_pub->info; +#ifdef DHD_PM_CONTROL_FROM_FILE +bool g_pm_control; +#ifdef DHD_EXPORT_CNTL_FILE +int pmmode_val; +#endif /* DHD_EXPORT_CNTL_FILE */ +void sec_control_pm(dhd_pub_t *dhd, uint *); +#endif /* DHD_PM_CONTROL_FROM_FILE */ - DHD_INFO(("%s(): \r\n cpu:%d", __FUNCTION__, cpu)); - DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]); - cpumask_set_cpu(cpu, dhd->cpumask_curr_avail); - dhd_select_cpu_candidacy(dhd); +#if defined(WL_WIRELESS_EXT) +struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev); +#endif /* defined(WL_WIRELESS_EXT) */ - return 0; -} +#ifndef BCMDBUS +static void dhd_dpc(ulong data); +#endif /* !BCMDBUS */ +/* forward decl */ +extern int dhd_wait_pend8021x(struct net_device *dev); +void dhd_os_wd_timer_extend(void *bus, bool extend); -int dhd_cpu_teardown_callback(unsigned int cpu) -{ - dhd_info_t *dhd = g_dhd_pub->info; +#ifdef TOE +#ifndef BDC +#error TOE requires BDC +#endif /* !BDC */ +static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol); +static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol); +#endif /* TOE */ - DHD_INFO(("%s(): \r\n cpu:%d", __FUNCTION__, cpu)); - DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]); - cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail); - dhd_select_cpu_candidacy(dhd); +static int dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen, + wl_event_msg_t *event_ptr, void **data_ptr); - return 0; -} -#else -int -dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, 
void *hcpu) +#if defined(CONFIG_PM_SLEEP) +static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored) { - unsigned long int cpu = (unsigned long int)hcpu; + int ret = NOTIFY_DONE; + bool suspend = FALSE; #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wcast-qual" #endif // endif - dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier); + dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier); #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) #pragma GCC diagnostic pop #endif // endif - if (!dhd || !(dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE)) { - DHD_INFO(("%s(): LB data is not initialized yet.\n", - __FUNCTION__)); - return NOTIFY_BAD; - } + BCM_REFERENCE(dhdinfo); + BCM_REFERENCE(suspend); - switch (action) - { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]); - cpumask_set_cpu(cpu, dhd->cpumask_curr_avail); - dhd_select_cpu_candidacy(dhd); - break; + switch (action) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + suspend = TRUE; + break; - case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: - DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]); - cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail); - dhd_select_cpu_candidacy(dhd); - break; - default: - break; + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + suspend = FALSE; + break; } - return NOTIFY_OK; -} -#endif /* LINUX_VERSION_CODE < 4.10.0 */ - -static int dhd_register_cpuhp_callback(dhd_info_t *dhd) -{ - int cpuhp_ret = 0; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) - cpuhp_ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "dhd", - dhd_cpu_startup_callback, dhd_cpu_teardown_callback); - - if (cpuhp_ret < 0) { - DHD_ERROR(("%s(): cpuhp_setup_state failed %d RX LB won't happen \r\n", - __FUNCTION__, cpuhp_ret)); +#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) + if (suspend) { + 
DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub); + dhd_wlfc_suspend(&dhdinfo->pub); + DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub); + } else { + dhd_wlfc_resume(&dhdinfo->pub); } -#else - /* - * If we are able to initialize CPU masks, lets register to the - * CPU Hotplug framework to change the CPU for each job dynamically - * using candidacy algorithm. - */ - dhd->cpu_notifier.notifier_call = dhd_cpu_callback; - register_hotcpu_notifier(&dhd->cpu_notifier); /* Register a callback */ -#endif /* LINUX_VERSION_CODE < 4.10.0 */ - return cpuhp_ret; -} +#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */ + + dhd_mmc_suspend = suspend; + smp_mb(); -static int dhd_unregister_cpuhp_callback(dhd_info_t *dhd) -{ - int ret = 0; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) - /* Don't want to call tear down while unregistering */ - cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN); -#else - if (dhd->cpu_notifier.notifier_call != NULL) { - unregister_cpu_notifier(&dhd->cpu_notifier); - } -#endif // endif return ret; } -#if defined(DHD_LB_STATS) -void dhd_lb_stats_init(dhd_pub_t *dhdp) -{ - dhd_info_t *dhd; - int i, j, num_cpus = num_possible_cpus(); - int alloc_size = sizeof(uint32) * num_cpus; - - if (dhdp == NULL) { - DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n", - __FUNCTION__)); - return; - } +/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be + * created in kernel notifier link list (with 'next' pointing to itself) + */ +static bool dhd_pm_notifier_registered = FALSE; - dhd = dhdp->info; - if (dhd == NULL) { - DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__)); - return; - } +extern int register_pm_notifier(struct notifier_block *nb); +extern int unregister_pm_notifier(struct notifier_block *nb); +#endif /* CONFIG_PM_SLEEP */ - DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt); - DHD_LB_STATS_CLR(dhd->napi_sched_cnt); +/* Request scheduling of the bus rx frame */ +static void dhd_sched_rxf(dhd_pub_t *dhdp, void 
*skb); +static void dhd_os_rxflock(dhd_pub_t *pub); +static void dhd_os_rxfunlock(dhd_pub_t *pub); - dhd->napi_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); - if (!dhd->napi_percpu_run_cnt) { - DHD_ERROR(("%s(): napi_percpu_run_cnt malloc failed \n", - __FUNCTION__)); - return; - } - for (i = 0; i < num_cpus; i++) - DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]); +#if defined(DHD_H2D_LOG_TIME_SYNC) +static void +dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event); +#endif /* DHD_H2D_LOG_TIME_SYNC */ - DHD_LB_STATS_CLR(dhd->rxc_sched_cnt); +/** priv_link is the link between netdev and the dhdif and dhd_info structs. */ +typedef struct dhd_dev_priv { + dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */ + dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */ + int ifidx; /* interface index */ + void * lkup; +} dhd_dev_priv_t; - dhd->rxc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); - if (!dhd->rxc_percpu_run_cnt) { - DHD_ERROR(("%s(): rxc_percpu_run_cnt malloc failed \n", - __FUNCTION__)); - return; - } - for (i = 0; i < num_cpus; i++) - DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]); +#define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t)) +#define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev)) +#define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd) +#define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp) +#define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx) +#define DHD_DEV_LKUP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup) - DHD_LB_STATS_CLR(dhd->txc_sched_cnt); +/** Clear the dhd net_device's private structure. 
*/ +static inline void +dhd_dev_priv_clear(struct net_device * dev) +{ + dhd_dev_priv_t * dev_priv; + ASSERT(dev != (struct net_device *)NULL); + dev_priv = DHD_DEV_PRIV(dev); + dev_priv->dhd = (dhd_info_t *)NULL; + dev_priv->ifp = (dhd_if_t *)NULL; + dev_priv->ifidx = DHD_BAD_IF; + dev_priv->lkup = (void *)NULL; +} - dhd->txc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); - if (!dhd->txc_percpu_run_cnt) { - DHD_ERROR(("%s(): txc_percpu_run_cnt malloc failed \n", - __FUNCTION__)); - return; - } - for (i = 0; i < num_cpus; i++) - DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]); - - dhd->cpu_online_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); - if (!dhd->cpu_online_cnt) { - DHD_ERROR(("%s(): cpu_online_cnt malloc failed \n", - __FUNCTION__)); - return; - } - for (i = 0; i < num_cpus; i++) - DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]); - - dhd->cpu_offline_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); - if (!dhd->cpu_offline_cnt) { - DHD_ERROR(("%s(): cpu_offline_cnt malloc failed \n", - __FUNCTION__)); - return; - } - for (i = 0; i < num_cpus; i++) - DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]); - - dhd->txp_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); - if (!dhd->txp_percpu_run_cnt) { - DHD_ERROR(("%s(): txp_percpu_run_cnt malloc failed \n", - __FUNCTION__)); - return; - } - for (i = 0; i < num_cpus; i++) - DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]); - - dhd->tx_start_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); - if (!dhd->tx_start_percpu_run_cnt) { - DHD_ERROR(("%s(): tx_start_percpu_run_cnt malloc failed \n", - __FUNCTION__)); - return; - } - for (i = 0; i < num_cpus; i++) - DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]); - - for (j = 0; j < HIST_BIN_SIZE; j++) { - dhd->napi_rx_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size); - if (!dhd->napi_rx_hist[j]) { - DHD_ERROR(("%s(): dhd->napi_rx_hist[%d] malloc failed \n", - __FUNCTION__, j)); - return; - } - for (i = 0; i < num_cpus; i++) { - 
DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]); - } - } -#ifdef DHD_LB_TXC - for (j = 0; j < HIST_BIN_SIZE; j++) { - dhd->txc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size); - if (!dhd->txc_hist[j]) { - DHD_ERROR(("%s(): dhd->txc_hist[%d] malloc failed \n", - __FUNCTION__, j)); - return; - } - for (i = 0; i < num_cpus; i++) { - DHD_LB_STATS_CLR(dhd->txc_hist[j][i]); - } - } -#endif /* DHD_LB_TXC */ -#ifdef DHD_LB_RXC - for (j = 0; j < HIST_BIN_SIZE; j++) { - dhd->rxc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size); - if (!dhd->rxc_hist[j]) { - DHD_ERROR(("%s(): dhd->rxc_hist[%d] malloc failed \n", - __FUNCTION__, j)); - return; - } - for (i = 0; i < num_cpus; i++) { - DHD_LB_STATS_CLR(dhd->rxc_hist[j][i]); - } - } -#endif /* DHD_LB_RXC */ - return; +/** Setup the dhd net_device's private structure. */ +static inline void +dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp, + int ifidx) +{ + dhd_dev_priv_t * dev_priv; + ASSERT(dev != (struct net_device *)NULL); + dev_priv = DHD_DEV_PRIV(dev); + dev_priv->dhd = dhd; + dev_priv->ifp = ifp; + dev_priv->ifidx = ifidx; } -void dhd_lb_stats_deinit(dhd_pub_t *dhdp) +/* Return interface pointer */ +struct dhd_if * dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx) { - dhd_info_t *dhd; - int j, num_cpus = num_possible_cpus(); - int alloc_size = sizeof(uint32) * num_cpus; + ASSERT(ifidx < DHD_MAX_IFS); - if (dhdp == NULL) { - DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n", - __FUNCTION__)); - return; - } + if (!dhdp || !dhdp->info || ifidx >= DHD_MAX_IFS) + return NULL; - dhd = dhdp->info; - if (dhd == NULL) { - DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__)); - return; - } + return dhdp->info->iflist[ifidx]; +} - if (dhd->napi_percpu_run_cnt) { - MFREE(dhdp->osh, dhd->napi_percpu_run_cnt, alloc_size); - dhd->napi_percpu_run_cnt = NULL; - } - if (dhd->rxc_percpu_run_cnt) { - MFREE(dhdp->osh, dhd->rxc_percpu_run_cnt, alloc_size); - dhd->rxc_percpu_run_cnt = NULL; - } - if 
(dhd->txc_percpu_run_cnt) { - MFREE(dhdp->osh, dhd->txc_percpu_run_cnt, alloc_size); - dhd->txc_percpu_run_cnt = NULL; - } - if (dhd->cpu_online_cnt) { - MFREE(dhdp->osh, dhd->cpu_online_cnt, alloc_size); - dhd->cpu_online_cnt = NULL; - } - if (dhd->cpu_offline_cnt) { - MFREE(dhdp->osh, dhd->cpu_offline_cnt, alloc_size); - dhd->cpu_offline_cnt = NULL; - } +#ifdef PCIE_FULL_DONGLE - if (dhd->txp_percpu_run_cnt) { - MFREE(dhdp->osh, dhd->txp_percpu_run_cnt, alloc_size); - dhd->txp_percpu_run_cnt = NULL; - } - if (dhd->tx_start_percpu_run_cnt) { - MFREE(dhdp->osh, dhd->tx_start_percpu_run_cnt, alloc_size); - dhd->tx_start_percpu_run_cnt = NULL; - } +/** Dummy objects are defined with state representing bad|down. + * Performance gains from reducing branch conditionals, instruction parallelism, + * dual issue, reducing load shadows, avail of larger pipelines. + * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer + * is accessed via the dhd_sta_t. + */ - for (j = 0; j < HIST_BIN_SIZE; j++) { - if (dhd->napi_rx_hist[j]) { - MFREE(dhdp->osh, dhd->napi_rx_hist[j], alloc_size); - dhd->napi_rx_hist[j] = NULL; - } -#ifdef DHD_LB_TXC - if (dhd->txc_hist[j]) { - MFREE(dhdp->osh, dhd->txc_hist[j], alloc_size); - dhd->txc_hist[j] = NULL; - } -#endif /* DHD_LB_TXC */ -#ifdef DHD_LB_RXC - if (dhd->rxc_hist[j]) { - MFREE(dhdp->osh, dhd->rxc_hist[j], alloc_size); - dhd->rxc_hist[j] = NULL; - } -#endif /* DHD_LB_RXC */ +/* Dummy dhd_info object */ +dhd_info_t dhd_info_null = { + .pub = { + .info = &dhd_info_null, +#ifdef DHDTCPACK_SUPPRESS + .tcpack_sup_mode = TCPACK_SUP_REPLACE, +#endif /* DHDTCPACK_SUPPRESS */ + .up = FALSE, + .busstate = DHD_BUS_DOWN } +}; +#define DHD_INFO_NULL (&dhd_info_null) +#define DHD_PUB_NULL (&dhd_info_null.pub) - return; -} - -static void dhd_lb_stats_dump_histo(dhd_pub_t *dhdp, - struct bcmstrbuf *strbuf, uint32 **hist) -{ - int i, j; - uint32 *per_cpu_total; - uint32 total = 0; - uint32 num_cpus = num_possible_cpus(); - - 
per_cpu_total = (uint32 *)MALLOC(dhdp->osh, sizeof(uint32) * num_cpus); - if (!per_cpu_total) { - DHD_ERROR(("%s(): dhd->per_cpu_total malloc failed \n", __FUNCTION__)); - return; - } - bzero(per_cpu_total, sizeof(uint32) * num_cpus); +/* Dummy netdevice object */ +struct net_device dhd_net_dev_null = { + .reg_state = NETREG_UNREGISTERED +}; +#define DHD_NET_DEV_NULL (&dhd_net_dev_null) - bcm_bprintf(strbuf, "CPU: \t\t"); - for (i = 0; i < num_cpus; i++) - bcm_bprintf(strbuf, "%d\t", i); - bcm_bprintf(strbuf, "\nBin\n"); +/* Dummy dhd_if object */ +dhd_if_t dhd_if_null = { +#ifdef WMF + .wmf = { .wmf_enable = TRUE }, +#endif // endif + .info = DHD_INFO_NULL, + .net = DHD_NET_DEV_NULL, + .idx = DHD_BAD_IF +}; +#define DHD_IF_NULL (&dhd_if_null) - for (i = 0; i < HIST_BIN_SIZE; i++) { - bcm_bprintf(strbuf, "%d:\t\t", 1<osh, per_cpu_total, sizeof(uint32) * num_cpus); - per_cpu_total = NULL; - } - return; -} +/** Interface STA list management. */ -static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p) -{ - int i, num_cpus = num_possible_cpus(); +/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */ +static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta); +static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp); - bcm_bprintf(strbuf, "CPU: \t"); - for (i = 0; i < num_cpus; i++) - bcm_bprintf(strbuf, "%d\t", i); - bcm_bprintf(strbuf, "\n"); +/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */ +static void dhd_if_del_sta_list(dhd_if_t * ifp); +static void dhd_if_flush_sta(dhd_if_t * ifp); - bcm_bprintf(strbuf, "Val: \t"); - for (i = 0; i < num_cpus; i++) - bcm_bprintf(strbuf, "%u\t", *(p+i)); - bcm_bprintf(strbuf, "\n"); - return; -} +/* Construct/Destruct a sta pool. 
*/ +static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta); +static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta); +/* Clear the pool of dhd_sta_t objects for built-in type driver */ +static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta); -void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +/** Reset a dhd_sta object and free into the dhd pool. */ +static void +dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta) { - dhd_info_t *dhd; - - if (dhdp == NULL || strbuf == NULL) { - DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n", - __FUNCTION__, dhdp, strbuf)); - return; - } + int prio; - dhd = dhdp->info; - if (dhd == NULL) { - DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__)); - return; - } + ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID)); - bcm_bprintf(strbuf, "\ncpu_online_cnt:\n"); - dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt); + ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL)); - bcm_bprintf(strbuf, "\ncpu_offline_cnt:\n"); - dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt); + /* + * Flush and free all packets in all flowring's queues belonging to sta. + * Packets in flow ring will be flushed later. + */ + for (prio = 0; prio < (int)NUMPRIO; prio++) { + uint16 flowid = sta->flowid[prio]; - bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n", - dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt, - dhd->txc_sched_cnt); + if (flowid != FLOWID_INVALID) { + unsigned long flags; + flow_ring_node_t * flow_ring_node; -#ifdef DHD_LB_RXP - bcm_bprintf(strbuf, "\nnapi_percpu_run_cnt:\n"); - dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt); - bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n"); - dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->napi_rx_hist); -#endif /* DHD_LB_RXP */ +#ifdef DHDTCPACK_SUPPRESS + /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt, + * when there is a newly coming packet from network stack. 
+ */ + dhd_tcpack_info_tbl_clean(dhdp); +#endif /* DHDTCPACK_SUPPRESS */ -#ifdef DHD_LB_RXC - bcm_bprintf(strbuf, "\nrxc_percpu_run_cnt:\n"); - dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt); - bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n"); - dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->rxc_hist); -#endif /* DHD_LB_RXC */ + flow_ring_node = dhd_flow_ring_node(dhdp, flowid); + if (flow_ring_node) { + flow_queue_t *queue = &flow_ring_node->queue; -#ifdef DHD_LB_TXC - bcm_bprintf(strbuf, "\ntxc_percpu_run_cnt:\n"); - dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt); - bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n"); - dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->txc_hist); -#endif /* DHD_LB_TXC */ + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING; -#ifdef DHD_LB_TXP - bcm_bprintf(strbuf, "\ntxp_percpu_run_cnt:\n"); - dhd_lb_stats_dump_cpu_array(strbuf, dhd->txp_percpu_run_cnt); + if (!DHD_FLOW_QUEUE_EMPTY(queue)) { + void * pkt; + while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != + NULL) { + PKTFREE(dhdp->osh, pkt, TRUE); + } + } - bcm_bprintf(strbuf, "\ntx_start_percpu_run_cnt:\n"); - dhd_lb_stats_dump_cpu_array(strbuf, dhd->tx_start_percpu_run_cnt); -#endif /* DHD_LB_TXP */ -} + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); + } + } -/* Given a number 'n' returns 'm' that is next larger power of 2 after n */ -static inline uint32 next_larger_power2(uint32 num) -{ - num--; - num |= (num >> 1); - num |= (num >> 2); - num |= (num >> 4); - num |= (num >> 8); - num |= (num >> 16); + sta->flowid[prio] = FLOWID_INVALID; + } - return (num + 1); + id16_map_free(dhdp->staid_allocator, sta->idx); + DHD_CUMM_CTR_INIT(&sta->cumm_ctr); + sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */ + sta->ifidx = DHD_BAD_IF; + bzero(sta->ea.octet, ETHER_ADDR_LEN); + INIT_LIST_HEAD(&sta->list); + sta->idx = ID16_INVALID; /* 
implying free */ } -static void dhd_lb_stats_update_histo(uint32 **bin, uint32 count, uint32 cpu) +/** Allocate a dhd_sta object from the dhd pool. */ +static dhd_sta_t * +dhd_sta_alloc(dhd_pub_t * dhdp) { - uint32 bin_power; - uint32 *p; - bin_power = next_larger_power2(count); - - switch (bin_power) { - case 1: p = bin[0] + cpu; break; - case 2: p = bin[1] + cpu; break; - case 4: p = bin[2] + cpu; break; - case 8: p = bin[3] + cpu; break; - case 16: p = bin[4] + cpu; break; - case 32: p = bin[5] + cpu; break; - case 64: p = bin[6] + cpu; break; - case 128: p = bin[7] + cpu; break; - default : p = bin[8] + cpu; break; - } + uint16 idx; + dhd_sta_t * sta; + dhd_sta_pool_t * sta_pool; - *p = *p + 1; - return; -} + ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL)); -extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count) -{ - int cpu; - dhd_info_t *dhd = dhdp->info; + idx = id16_map_alloc(dhdp->staid_allocator); + if (idx == ID16_INVALID) { + DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__)); + return DHD_STA_NULL; + } - cpu = get_cpu(); - put_cpu(); - dhd_lb_stats_update_histo(dhd->napi_rx_hist, count, cpu); + sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool); + sta = &sta_pool[idx]; - return; -} + ASSERT((sta->idx == ID16_INVALID) && + (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF)); -extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count) -{ - int cpu; - dhd_info_t *dhd = dhdp->info; + DHD_CUMM_CTR_INIT(&sta->cumm_ctr); - cpu = get_cpu(); - put_cpu(); - dhd_lb_stats_update_histo(dhd->txc_hist, count, cpu); + sta->idx = idx; /* implying allocated */ - return; + return sta; } -extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count) +/** Delete all STAs in an interface's STA list. 
*/ +static void +dhd_if_del_sta_list(dhd_if_t *ifp) { - int cpu; - dhd_info_t *dhd = dhdp->info; + dhd_sta_t *sta, *next; + unsigned long flags; - cpu = get_cpu(); - put_cpu(); - dhd_lb_stats_update_histo(dhd->rxc_hist, count, cpu); + DHD_IF_STA_LIST_LOCK(ifp, flags); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { + list_del(&sta->list); + dhd_sta_free(&ifp->info->pub, sta); + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + DHD_IF_STA_LIST_UNLOCK(ifp, flags); return; } -extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp) +/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */ +static void +dhd_if_flush_sta(dhd_if_t * ifp) { - dhd_info_t *dhd = dhdp->info; - DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt); } -extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp) -{ - dhd_info_t *dhd = dhdp->info; - DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt); -} -#endif /* DHD_LB_STATS */ +/** Construct a pool of dhd_sta_t objects to be used by interfaces. */ +static int +dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) +{ + int idx, prio, sta_pool_memsz; + dhd_sta_t * sta; + dhd_sta_pool_t * sta_pool; + void * staid_allocator; -#endif /* DHD_LB */ + ASSERT(dhdp != (dhd_pub_t *)NULL); + ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL)); -#ifdef USE_WFA_CERT_CONF -int g_frameburst = 1; -#endif /* USE_WFA_CERT_CONF */ + /* dhd_sta objects per radio are managed in a table. id#0 reserved. */ + staid_allocator = id16_map_init(dhdp->osh, max_sta, 1); + if (staid_allocator == NULL) { + DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__)); + return BCME_ERROR; + } -static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd); + /* Pre allocate a pool of dhd_sta objects (one extra). 
*/ + sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */ + sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz); + if (sta_pool == NULL) { + DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__)); + id16_map_fini(dhdp->osh, staid_allocator); + return BCME_ERROR; + } -/* DHD Perimiter lock only used in router with bypass forwarding. */ -#define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0) -#define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0) -#define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0) + dhdp->sta_pool = sta_pool; + dhdp->staid_allocator = staid_allocator; -#ifdef PCIE_FULL_DONGLE -#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock) -#define DHD_IF_STA_LIST_LOCK(ifp, flags) \ - spin_lock_irqsave(&(ifp)->sta_list_lock, (flags)) -#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \ - spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags)) + /* Initialize all sta(s) for the pre-allocated free pool. */ + bzero((uchar *)sta_pool, sta_pool_memsz); + for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */ + sta = &sta_pool[idx]; + sta->idx = id16_map_alloc(staid_allocator); + ASSERT(sta->idx <= max_sta); + } -#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) -static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, - struct list_head *snapshot_list); -static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list); -#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); }) -#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); }) -#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */ -#endif /* PCIE_FULL_DONGLE */ + /* Now place them into the pre-allocated free pool. 
*/ + for (idx = 1; idx <= max_sta; idx++) { + sta = &sta_pool[idx]; + for (prio = 0; prio < (int)NUMPRIO; prio++) { + sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */ + } + dhd_sta_free(dhdp, sta); + } -/* Control fw roaming */ -#ifdef BCMCCX -uint dhd_roam_disable = 0; -#else -uint dhd_roam_disable = 0; -#endif /* BCMCCX */ + return BCME_OK; +} -#ifdef BCMDBGFS -extern void dhd_dbgfs_init(dhd_pub_t *dhdp); -extern void dhd_dbgfs_remove(void); -#endif // endif +/** Destruct the pool of dhd_sta_t objects. + * Caller must ensure that no STA objects are currently associated with an if. + */ +static void +dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) +{ + dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool; -static uint pcie_txs_metadata_enable = 0; /* Enable TX status metadta report */ -module_param(pcie_txs_metadata_enable, int, 0); + if (sta_pool) { + int idx; + int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); + for (idx = 1; idx <= max_sta; idx++) { + ASSERT(sta_pool[idx].ifp == DHD_IF_NULL); + ASSERT(sta_pool[idx].idx == ID16_INVALID); + } + MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz); + dhdp->sta_pool = NULL; + } -/* Control radio state */ -uint dhd_radio_up = 1; + id16_map_fini(dhdp->osh, dhdp->staid_allocator); + dhdp->staid_allocator = NULL; +} -/* Network inteface name */ -char iface_name[IFNAMSIZ] = {'\0'}; -module_param_string(iface_name, iface_name, IFNAMSIZ, 0); +/* Clear the pool of dhd_sta_t objects for built-in type driver */ +static void +dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) +{ + int idx, prio, sta_pool_memsz; + dhd_sta_t * sta; + dhd_sta_pool_t * sta_pool; + void *staid_allocator; -/* The following are specific to the SDIO dongle */ + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return; + } -/* IOCTL response timeout */ -int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT; + sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool; + staid_allocator = dhdp->staid_allocator; -/* DS Exit 
response timeout */ -int ds_exit_timeout_msec = DS_EXIT_TIMEOUT; + if (!sta_pool) { + DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__)); + return; + } -/* Idle timeout for backplane clock */ -int dhd_idletime = DHD_IDLETIME_TICKS; -module_param(dhd_idletime, int, 0); + if (!staid_allocator) { + DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__)); + return; + } -/* Use polling */ -uint dhd_poll = FALSE; -module_param(dhd_poll, uint, 0); + /* clear free pool */ + sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); + bzero((uchar *)sta_pool, sta_pool_memsz); -/* Use interrupts */ -uint dhd_intr = TRUE; -module_param(dhd_intr, uint, 0); + /* dhd_sta objects per radio are managed in a table. id#0 reserved. */ + id16_map_clear(staid_allocator, max_sta, 1); -/* SDIO Drive Strength (in milliamps) */ -uint dhd_sdiod_drive_strength = 6; -module_param(dhd_sdiod_drive_strength, uint, 0); + /* Initialize all sta(s) for the pre-allocated free pool. */ + for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */ + sta = &sta_pool[idx]; + sta->idx = id16_map_alloc(staid_allocator); + ASSERT(sta->idx <= max_sta); + } + /* Now place them into the pre-allocated free pool. */ + for (idx = 1; idx <= max_sta; idx++) { + sta = &sta_pool[idx]; + for (prio = 0; prio < (int)NUMPRIO; prio++) { + sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */ + } + dhd_sta_free(dhdp, sta); + } +} -#ifdef BCMSDIO -/* Tx/Rx bounds */ -extern uint dhd_txbound; -extern uint dhd_rxbound; -module_param(dhd_txbound, uint, 0); -module_param(dhd_rxbound, uint, 0); +/** Find STA with MAC address ea in an interface's STA list. 
*/ +dhd_sta_t * +dhd_find_sta(void *pub, int ifidx, void *ea) +{ + dhd_sta_t *sta; + dhd_if_t *ifp; + unsigned long flags; -/* Deferred transmits */ -extern uint dhd_deferred_tx; -module_param(dhd_deferred_tx, uint, 0); + ASSERT(ea != NULL); + ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); + if (ifp == NULL) + return DHD_STA_NULL; -#endif /* BCMSDIO */ + DHD_IF_STA_LIST_LOCK(ifp, flags); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry(sta, &ifp->sta_list, list) { + if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) { + DHD_INFO(("%s: Found STA " MACDBG "\n", + __FUNCTION__, MAC2STRDBG((char *)ea))); + DHD_IF_STA_LIST_UNLOCK(ifp, flags); + return sta; + } + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + DHD_IF_STA_LIST_UNLOCK(ifp, flags); -#ifdef SDTEST -/* Echo packet generator (pkts/s) */ -uint dhd_pktgen = 0; -module_param(dhd_pktgen, uint, 0); + return DHD_STA_NULL; +} -/* Echo packet len (0 => sawtooth, max 2040) */ -uint dhd_pktgen_len = 0; -module_param(dhd_pktgen_len, uint, 0); -#endif /* SDTEST */ +/** Add STA into the interface's STA list. 
*/ +dhd_sta_t * +dhd_add_sta(void *pub, int ifidx, void *ea) +{ + dhd_sta_t *sta; + dhd_if_t *ifp; + unsigned long flags; -#if defined(BCMSUP_4WAY_HANDSHAKE) -/* Use in dongle supplicant for 4-way handshake */ -#if defined(WLFBT) || defined(WL_ENABLE_IDSUP) -/* Enable idsup by default (if supported in fw) */ -uint dhd_use_idsup = 1; -#else -uint dhd_use_idsup = 0; -#endif /* WLFBT || WL_ENABLE_IDSUP */ -module_param(dhd_use_idsup, uint, 0); -#endif /* BCMSUP_4WAY_HANDSHAKE */ + ASSERT(ea != NULL); + ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); + if (ifp == NULL) + return DHD_STA_NULL; -#ifndef BCMDBUS -/* Allow delayed firmware download for debug purpose */ -int allow_delay_fwdl = FALSE; -module_param(allow_delay_fwdl, int, 0); -#endif /* !BCMDBUS */ + if (!memcmp(ifp->net->dev_addr, ea, ETHER_ADDR_LEN)) { + DHD_ERROR(("%s: Serious FAILURE, receive own MAC %pM !!\n", __FUNCTION__, ea)); + return DHD_STA_NULL; + } -#ifdef ECOUNTER_PERIODIC_DISABLE -uint enable_ecounter = FALSE; -#else -uint enable_ecounter = TRUE; -#endif // endif -module_param(enable_ecounter, uint, 0); + sta = dhd_sta_alloc((dhd_pub_t *)pub); + if (sta == DHD_STA_NULL) { + DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__)); + return DHD_STA_NULL; + } -extern char dhd_version[]; -extern char fw_version[]; -extern char clm_version[]; - -int dhd_net_bus_devreset(struct net_device *dev, uint8 flag); -static void dhd_net_if_lock_local(dhd_info_t *dhd); -static void dhd_net_if_unlock_local(dhd_info_t *dhd); -static void dhd_suspend_lock(dhd_pub_t *dhdp); -static void dhd_suspend_unlock(dhd_pub_t *dhdp); + memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN); -/* Monitor interface */ -int dhd_monitor_init(void *dhd_pub); -int dhd_monitor_uninit(void); + /* link the sta and the dhd interface */ + sta->ifp = ifp; + sta->ifidx = ifidx; + INIT_LIST_HEAD(&sta->list); -#ifdef DHD_PM_CONTROL_FROM_FILE -bool g_pm_control; -void sec_control_pm(dhd_pub_t *dhd, uint *); -#endif /* DHD_PM_CONTROL_FROM_FILE */ + 
DHD_IF_STA_LIST_LOCK(ifp, flags); -#if defined(WL_WIRELESS_EXT) -struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev); -#endif /* defined(WL_WIRELESS_EXT) */ + list_add_tail(&sta->list, &ifp->sta_list); -#ifndef BCMDBUS -static void dhd_dpc(ulong data); -#endif /* !BCMDBUS */ -/* forward decl */ -extern int dhd_wait_pend8021x(struct net_device *dev); -void dhd_os_wd_timer_extend(void *bus, bool extend); + DHD_ERROR(("%s: Adding STA " MACDBG "\n", + __FUNCTION__, MAC2STRDBG((char *)ea))); -#ifdef TOE -#ifndef BDC -#error TOE requires BDC -#endif /* !BDC */ -static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol); -static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol); -#endif /* TOE */ + DHD_IF_STA_LIST_UNLOCK(ifp, flags); -static int dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen, - wl_event_msg_t *event_ptr, void **data_ptr); + return sta; +} -#if defined(CONFIG_PM_SLEEP) -static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored) +/** Delete all STAs from the interface's STA list. 
*/ +void +dhd_del_all_sta(void *pub, int ifidx) { - int ret = NOTIFY_DONE; - bool suspend = FALSE; + dhd_sta_t *sta, *next; + dhd_if_t *ifp; + unsigned long flags; + + ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); + if (ifp == NULL) + return; + DHD_IF_STA_LIST_LOCK(ifp, flags); #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wcast-qual" #endif // endif - dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier); + list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { + + list_del(&sta->list); + dhd_sta_free(&ifp->info->pub, sta); +#ifdef DHD_L2_FILTER + if (ifp->parp_enable) { + /* clear Proxy ARP cache of specific Ethernet Address */ + bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, + ifp->phnd_arp_table, FALSE, + sta->ea.octet, FALSE, ((dhd_pub_t*)pub)->tickcnt); + } +#endif /* DHD_L2_FILTER */ + } #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) #pragma GCC diagnostic pop #endif // endif + DHD_IF_STA_LIST_UNLOCK(ifp, flags); - BCM_REFERENCE(dhdinfo); - BCM_REFERENCE(suspend); + return; +} - switch (action) { - case PM_HIBERNATION_PREPARE: - case PM_SUSPEND_PREPARE: - suspend = TRUE; - break; +/** Delete STA from the interface's STA list. 
*/ +void +dhd_del_sta(void *pub, int ifidx, void *ea) +{ + dhd_sta_t *sta, *next; + dhd_if_t *ifp; + unsigned long flags; - case PM_POST_HIBERNATION: - case PM_POST_SUSPEND: - suspend = FALSE; - break; - } + ASSERT(ea != NULL); + ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); + if (ifp == NULL) + return; -#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) - if (suspend) { - DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub); - dhd_wlfc_suspend(&dhdinfo->pub); - DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub); - } else { - dhd_wlfc_resume(&dhdinfo->pub); + DHD_IF_STA_LIST_LOCK(ifp, flags); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { + if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) { + DHD_ERROR(("%s: Deleting STA " MACDBG "\n", + __FUNCTION__, MAC2STRDBG(sta->ea.octet))); + list_del(&sta->list); + dhd_sta_free(&ifp->info->pub, sta); + } } -#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */ - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \ - KERNEL_VERSION(2, 6, 39)) - dhd_mmc_suspend = suspend; - smp_mb(); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop #endif // endif + DHD_IF_STA_LIST_UNLOCK(ifp, flags); +#ifdef DHD_L2_FILTER + if (ifp->parp_enable) { + /* clear Proxy ARP cache of specific Ethernet Address */ + bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE, + ea, FALSE, ((dhd_pub_t*)pub)->tickcnt); + } +#endif /* DHD_L2_FILTER */ + return; +} - return ret; +/** Add STA if it doesn't exist. Not reentrant. 
*/ +dhd_sta_t* +dhd_findadd_sta(void *pub, int ifidx, void *ea) +{ + dhd_sta_t *sta; + + sta = dhd_find_sta(pub, ifidx, ea); + + if (!sta) { + /* Add entry */ + sta = dhd_add_sta(pub, ifidx, ea); + } + + return sta; } -/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be - * created in kernel notifier link list (with 'next' pointing to itself) - */ -static bool dhd_pm_notifier_registered = FALSE; +#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) +static struct list_head * +dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list) +{ + unsigned long flags; + dhd_sta_t *sta, *snapshot; -extern int register_pm_notifier(struct notifier_block *nb); -extern int unregister_pm_notifier(struct notifier_block *nb); -#endif /* CONFIG_PM_SLEEP */ + INIT_LIST_HEAD(snapshot_list); -/* Request scheduling of the bus rx frame */ -static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb); -static void dhd_os_rxflock(dhd_pub_t *pub); -static void dhd_os_rxfunlock(dhd_pub_t *pub); + DHD_IF_STA_LIST_LOCK(ifp, flags); -#if defined(DHD_H2D_LOG_TIME_SYNC) -static void -dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event); -#endif /* DHD_H2D_LOG_TIME_SYNC */ + list_for_each_entry(sta, &ifp->sta_list, list) { + /* allocate one and add to snapshot */ + snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t)); + if (snapshot == NULL) { + DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__)); + continue; + } -/** priv_link is the link between netdev and the dhdif and dhd_info structs. 
*/ -typedef struct dhd_dev_priv { - dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */ - dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */ - int ifidx; /* interface index */ - void * lkup; -} dhd_dev_priv_t; + memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN); -#define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t)) -#define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev)) -#define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd) -#define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp) -#define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx) -#define DHD_DEV_LKUP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup) + INIT_LIST_HEAD(&snapshot->list); + list_add_tail(&snapshot->list, snapshot_list); + } -#if defined(DHD_OF_SUPPORT) -extern int dhd_wlan_init(void); -#endif /* defined(DHD_OF_SUPPORT) */ -/** Clear the dhd net_device's private structure. */ -static inline void -dhd_dev_priv_clear(struct net_device * dev) -{ - dhd_dev_priv_t * dev_priv; - ASSERT(dev != (struct net_device *)NULL); - dev_priv = DHD_DEV_PRIV(dev); - dev_priv->dhd = (dhd_info_t *)NULL; - dev_priv->ifp = (dhd_if_t *)NULL; - dev_priv->ifidx = DHD_BAD_IF; - dev_priv->lkup = (void *)NULL; + DHD_IF_STA_LIST_UNLOCK(ifp, flags); + + return snapshot_list; } -/** Setup the dhd net_device's private structure. 
*/ -static inline void -dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp, - int ifidx) +static void +dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list) { - dhd_dev_priv_t * dev_priv; - ASSERT(dev != (struct net_device *)NULL); - dev_priv = DHD_DEV_PRIV(dev); - dev_priv->dhd = dhd; - dev_priv->ifp = ifp; - dev_priv->ifidx = ifidx; + dhd_sta_t *sta, *next; + + list_for_each_entry_safe(sta, next, snapshot_list, list) { + list_del(&sta->list); + MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t)); + } } +#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */ -/* Return interface pointer */ -static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx) -{ - ASSERT(ifidx < DHD_MAX_IFS); +#else +static inline void dhd_if_flush_sta(dhd_if_t * ifp) { } +static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {} +static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; } +static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {} +static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {} +dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; } +dhd_sta_t *dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL; } +void dhd_del_sta(void *pub, int ifidx, void *ea) {} +#endif /* PCIE_FULL_DONGLE */ - if (ifidx >= DHD_MAX_IFS) - return NULL; +#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR) +void +dhd_axi_error_dispatch(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + schedule_work(&dhd->axi_error_dispatcher_work); +} - return dhdp->info->iflist[ifidx]; +static void dhd_axi_error_dispatcher_fn(struct work_struct * work) +{ + struct dhd_info *dhd = + container_of(work, struct dhd_info, axi_error_dispatcher_work); + dhd_axi_error(&dhd->pub); } +#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */ -#ifdef PCIE_FULL_DONGLE +/** Returns dhd iflist index corresponding the the bssidx provided by apps */ +int 
dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx) +{ + dhd_if_t *ifp; + dhd_info_t *dhd = dhdp->info; + int i; -/** Dummy objects are defined with state representing bad|down. - * Performance gains from reducing branch conditionals, instruction parallelism, - * dual issue, reducing load shadows, avail of larger pipelines. - * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer - * is accessed via the dhd_sta_t. - */ + ASSERT(bssidx < DHD_MAX_IFS); + ASSERT(dhdp); -/* Dummy dhd_info object */ -dhd_info_t dhd_info_null = { - .pub = { - .info = &dhd_info_null, -#ifdef DHDTCPACK_SUPPRESS - .tcpack_sup_mode = TCPACK_SUP_REPLACE, -#endif /* DHDTCPACK_SUPPRESS */ - .up = FALSE, - .busstate = DHD_BUS_DOWN + for (i = 0; i < DHD_MAX_IFS; i++) { + ifp = dhd->iflist[i]; + if (ifp && (ifp->bssidx == bssidx)) { + DHD_TRACE(("Index manipulated for %s from %d to %d\n", + ifp->name, bssidx, i)); + break; + } } -}; -#define DHD_INFO_NULL (&dhd_info_null) -#define DHD_PUB_NULL (&dhd_info_null.pub) - -/* Dummy netdevice object */ -struct net_device dhd_net_dev_null = { - .reg_state = NETREG_UNREGISTERED -}; -#define DHD_NET_DEV_NULL (&dhd_net_dev_null) - -/* Dummy dhd_if object */ -dhd_if_t dhd_if_null = { -#ifdef WMF - .wmf = { .wmf_enable = TRUE }, -#endif // endif - .info = DHD_INFO_NULL, - .net = DHD_NET_DEV_NULL, - .idx = DHD_BAD_IF -}; -#define DHD_IF_NULL (&dhd_if_null) - -#define DHD_STA_NULL ((dhd_sta_t *)NULL) + return i; +} -/** Interface STA list management. */ +static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb) +{ + uint32 store_idx; + uint32 sent_idx; -/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */ -static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta); -static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp); + if (!skb) { + DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n")); + return BCME_ERROR; + } -/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. 
*/ -static void dhd_if_del_sta_list(dhd_if_t * ifp); -static void dhd_if_flush_sta(dhd_if_t * ifp); + dhd_os_rxflock(dhdp); + store_idx = dhdp->store_idx; + sent_idx = dhdp->sent_idx; + if (dhdp->skbbuf[store_idx] != NULL) { + /* Make sure the previous packets are processed */ + dhd_os_rxfunlock(dhdp); + DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n", + skb, store_idx, sent_idx)); + /* removed msleep here, should use wait_event_timeout if we + * want to give rx frame thread a chance to run + */ +#if defined(WAIT_DEQUEUE) + OSL_SLEEP(1); +#endif // endif + return BCME_ERROR; + } + DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n", + skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1))); + dhdp->skbbuf[store_idx] = skb; + dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1); + dhd_os_rxfunlock(dhdp); -/* Construct/Destruct a sta pool. */ -static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta); -static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta); -/* Clear the pool of dhd_sta_t objects for built-in type driver */ -static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta); + return BCME_OK; +} -/** Reset a dhd_sta object and free into the dhd pool. */ -static void -dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta) +static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp) { - int prio; - - ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID)); - - ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL)); + uint32 store_idx; + uint32 sent_idx; + void *skb; - /* - * Flush and free all packets in all flowring's queues belonging to sta. - * Packets in flow ring will be flushed later. 
- */ - for (prio = 0; prio < (int)NUMPRIO; prio++) { - uint16 flowid = sta->flowid[prio]; + dhd_os_rxflock(dhdp); - if (flowid != FLOWID_INVALID) { - unsigned long flags; - flow_ring_node_t * flow_ring_node; + store_idx = dhdp->store_idx; + sent_idx = dhdp->sent_idx; + skb = dhdp->skbbuf[sent_idx]; -#ifdef DHDTCPACK_SUPPRESS - /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt, - * when there is a newly coming packet from network stack. - */ - dhd_tcpack_info_tbl_clean(dhdp); -#endif /* DHDTCPACK_SUPPRESS */ + if (skb == NULL) { + dhd_os_rxfunlock(dhdp); + DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n", + store_idx, sent_idx)); + return NULL; + } - flow_ring_node = dhd_flow_ring_node(dhdp, flowid); - if (flow_ring_node) { - flow_queue_t *queue = &flow_ring_node->queue; + dhdp->skbbuf[sent_idx] = NULL; + dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1); - DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); - flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING; + DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n", + skb, sent_idx)); - if (!DHD_FLOW_QUEUE_EMPTY(queue)) { - void * pkt; - while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != - NULL) { - PKTFREE(dhdp->osh, pkt, TRUE); - } - } + dhd_os_rxfunlock(dhdp); - DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); - ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); - } - } + return skb; +} - sta->flowid[prio] = FLOWID_INVALID; +int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost) +{ + if (prepost) { /* pre process */ + dhd_read_cis(dhdp); + dhd_check_module_cid(dhdp); + dhd_check_module_mac(dhdp); + dhd_set_macaddr_from_file(dhdp); + } else { /* post process */ + dhd_write_macaddr(&dhdp->mac); + dhd_clear_cis(dhdp); } - id16_map_free(dhdp->staid_allocator, sta->idx); - DHD_CUMM_CTR_INIT(&sta->cumm_ctr); - sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */ - sta->ifidx = DHD_BAD_IF; - bzero(sta->ea.octet, ETHER_ADDR_LEN); - INIT_LIST_HEAD(&sta->list); - sta->idx = 
ID16_INVALID; /* implying free */ + return 0; } -/** Allocate a dhd_sta object from the dhd pool. */ -static dhd_sta_t * -dhd_sta_alloc(dhd_pub_t * dhdp) +#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) && defined(DHD_FW_COREDUMP) +static int dhd_wait_for_file_dump(dhd_pub_t *dhdp) { - uint16 idx; - dhd_sta_t * sta; - dhd_sta_pool_t * sta_pool; + struct net_device *primary_ndev; + struct bcm_cfg80211 *cfg; + unsigned long flags = 0; + primary_ndev = dhd_linux_get_primary_netdev(dhdp); - ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL)); + if (!primary_ndev) { + DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__)); + return BCME_ERROR; + } + cfg = wl_get_cfg(primary_ndev); - idx = id16_map_alloc(dhdp->staid_allocator); - if (idx == ID16_INVALID) { - DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__)); - return DHD_STA_NULL; + if (!cfg) { + DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__)); + return BCME_ERROR; } - sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool); - sta = &sta_pool[idx]; + DHD_GENERAL_LOCK(dhdp, flags); + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) { + DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp); + dhd_os_busbusy_wake(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); + DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__)); + return BCME_ERROR; + } + DHD_BUS_BUSY_SET_IN_HALDUMP(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); - ASSERT((sta->idx == ID16_INVALID) && - (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF)); + DHD_OS_WAKE_LOCK(dhdp); + /* check for hal started and only then send event if not clear dump state here */ + if (wl_cfg80211_is_hal_started(cfg)) { + int timeleft = 0; - DHD_CUMM_CTR_INIT(&sta->cumm_ctr); + DHD_ERROR(("[DUMP] %s: HAL started. 
send urgent event\n", __FUNCTION__)); + dhd_dbg_send_urgent_evt(dhdp, NULL, 0); - sta->idx = idx; /* implying allocated */ + DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n", + __FUNCTION__, dhdp->dhd_bus_busy_state)); + timeleft = dhd_os_busbusy_wait_bitmask(dhdp, + &dhdp->dhd_bus_busy_state, DHD_BUS_BUSY_IN_HALDUMP, 0); + if ((dhdp->dhd_bus_busy_state & DHD_BUS_BUSY_IN_HALDUMP) != 0) { + DHD_ERROR(("%s: Timed out dhd_bus_busy_state=0x%x\n", + __FUNCTION__, dhdp->dhd_bus_busy_state)); + } + } else { + DHD_ERROR(("[DUMP] %s: HAL Not started. skip urgent event\n", __FUNCTION__)); + } + DHD_OS_WAKE_UNLOCK(dhdp); + /* In case of dhd_os_busbusy_wait_bitmask() timeout, + * hal dump bit will not be cleared. Hence clearing it here. + */ + DHD_GENERAL_LOCK(dhdp, flags); + DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp); + dhd_os_busbusy_wake(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); - return sta; + return BCME_OK; } +#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT && DHD_FW_CORE_DUMP */ -/** Delete all STAs in an interface's STA list. 
*/ -static void -dhd_if_del_sta_list(dhd_if_t *ifp) +// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed +#if defined(PKT_FILTER_SUPPORT) +#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER) +static bool +_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode_param) { - dhd_sta_t *sta, *next; - unsigned long flags; - - DHD_IF_STA_LIST_LOCK(ifp, flags); -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif - list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { - list_del(&sta->list); - dhd_sta_free(&ifp->info->pub, sta); + bool _apply = FALSE; + /* In case of IBSS mode, apply arp pkt filter */ + if (op_mode_param & DHD_FLAG_IBSS_MODE) { + _apply = TRUE; + goto exit; + } + /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */ + if (op_mode_param & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE)) { + _apply = TRUE; + goto exit; } -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif - DHD_IF_STA_LIST_UNLOCK(ifp, flags); - return; +exit: + return _apply; } +#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */ -/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */ -static void -dhd_if_flush_sta(dhd_if_t * ifp) +void +dhd_set_packet_filter(dhd_pub_t *dhd) { -} - -/** Construct a pool of dhd_sta_t objects to be used by interfaces. */ -static int -dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) -{ - int idx, prio, sta_pool_memsz; - dhd_sta_t * sta; - dhd_sta_pool_t * sta_pool; - void * staid_allocator; - - ASSERT(dhdp != (dhd_pub_t *)NULL); - ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL)); - - /* dhd_sta objects per radio are managed in a table. id#0 reserved. 
*/ - staid_allocator = id16_map_init(dhdp->osh, max_sta, 1); - if (staid_allocator == NULL) { - DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__)); - return BCME_ERROR; - } - - /* Pre allocate a pool of dhd_sta objects (one extra). */ - sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */ - sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz); - if (sta_pool == NULL) { - DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__)); - id16_map_fini(dhdp->osh, staid_allocator); - return BCME_ERROR; - } - - dhdp->sta_pool = sta_pool; - dhdp->staid_allocator = staid_allocator; - - /* Initialize all sta(s) for the pre-allocated free pool. */ - bzero((uchar *)sta_pool, sta_pool_memsz); - for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */ - sta = &sta_pool[idx]; - sta->idx = id16_map_alloc(staid_allocator); - ASSERT(sta->idx <= max_sta); - } - - /* Now place them into the pre-allocated free pool. */ - for (idx = 1; idx <= max_sta; idx++) { - sta = &sta_pool[idx]; - for (prio = 0; prio < (int)NUMPRIO; prio++) { - sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */ - } - dhd_sta_free(dhdp, sta); - } - - return BCME_OK; -} - -/** Destruct the pool of dhd_sta_t objects. - * Caller must ensure that no STA objects are currently associated with an if. 
- */ -static void -dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) -{ - dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool; - - if (sta_pool) { - int idx; - int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); - for (idx = 1; idx <= max_sta; idx++) { - ASSERT(sta_pool[idx].ifp == DHD_IF_NULL); - ASSERT(sta_pool[idx].idx == ID16_INVALID); - } - MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz); - dhdp->sta_pool = NULL; - } - - id16_map_fini(dhdp->osh, dhdp->staid_allocator); - dhdp->staid_allocator = NULL; -} - -/* Clear the pool of dhd_sta_t objects for built-in type driver */ -static void -dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) -{ - int idx, prio, sta_pool_memsz; - dhd_sta_t * sta; - dhd_sta_pool_t * sta_pool; - void *staid_allocator; - - if (!dhdp) { - DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); - return; - } - - sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool; - staid_allocator = dhdp->staid_allocator; - - if (!sta_pool) { - DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__)); - return; - } - - if (!staid_allocator) { - DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__)); - return; - } - - /* clear free pool */ - sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); - bzero((uchar *)sta_pool, sta_pool_memsz); - - /* dhd_sta objects per radio are managed in a table. id#0 reserved. */ - id16_map_clear(staid_allocator, max_sta, 1); - - /* Initialize all sta(s) for the pre-allocated free pool. */ - for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */ - sta = &sta_pool[idx]; - sta->idx = id16_map_alloc(staid_allocator); - ASSERT(sta->idx <= max_sta); - } - /* Now place them into the pre-allocated free pool. */ - for (idx = 1; idx <= max_sta; idx++) { - sta = &sta_pool[idx]; - for (prio = 0; prio < (int)NUMPRIO; prio++) { - sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */ - } - dhd_sta_free(dhdp, sta); - } -} - -/** Find STA with MAC address ea in an interface's STA list. 
*/ -dhd_sta_t * -dhd_find_sta(void *pub, int ifidx, void *ea) -{ - dhd_sta_t *sta; - dhd_if_t *ifp; - unsigned long flags; - - ASSERT(ea != NULL); - ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); - if (ifp == NULL) - return DHD_STA_NULL; - - DHD_IF_STA_LIST_LOCK(ifp, flags); -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif - list_for_each_entry(sta, &ifp->sta_list, list) { - if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) { - DHD_INFO(("%s: Found STA " MACDBG "\n", - __FUNCTION__, MAC2STRDBG((char *)ea))); - DHD_IF_STA_LIST_UNLOCK(ifp, flags); - return sta; - } - } -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif - DHD_IF_STA_LIST_UNLOCK(ifp, flags); - - return DHD_STA_NULL; -} - -/** Add STA into the interface's STA list. */ -dhd_sta_t * -dhd_add_sta(void *pub, int ifidx, void *ea) -{ - dhd_sta_t *sta; - dhd_if_t *ifp; - unsigned long flags; - - ASSERT(ea != NULL); - ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); - if (ifp == NULL) - return DHD_STA_NULL; - - if (!memcmp(ifp->net->dev_addr, ea, ETHER_ADDR_LEN)) { - DHD_ERROR(("%s: Serious FAILURE, receive own MAC %pM !!\n", __FUNCTION__, ea)); - return DHD_STA_NULL; - } - - sta = dhd_sta_alloc((dhd_pub_t *)pub); - if (sta == DHD_STA_NULL) { - DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__)); - return DHD_STA_NULL; - } - - memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN); - - /* link the sta and the dhd interface */ - sta->ifp = ifp; - sta->ifidx = ifidx; - INIT_LIST_HEAD(&sta->list); - - DHD_IF_STA_LIST_LOCK(ifp, flags); - - list_add_tail(&sta->list, &ifp->sta_list); - - DHD_ERROR(("%s: Adding STA " MACDBG "\n", - __FUNCTION__, MAC2STRDBG((char *)ea))); - - DHD_IF_STA_LIST_UNLOCK(ifp, flags); - - return sta; -} - -/** Delete all STAs from the interface's STA list. 
*/ -void -dhd_del_all_sta(void *pub, int ifidx) -{ - dhd_sta_t *sta, *next; - dhd_if_t *ifp; - unsigned long flags; - - ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); - if (ifp == NULL) - return; - - DHD_IF_STA_LIST_LOCK(ifp, flags); -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif - list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { - - list_del(&sta->list); - dhd_sta_free(&ifp->info->pub, sta); -#ifdef DHD_L2_FILTER - if (ifp->parp_enable) { - /* clear Proxy ARP cache of specific Ethernet Address */ - bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, - ifp->phnd_arp_table, FALSE, - sta->ea.octet, FALSE, ((dhd_pub_t*)pub)->tickcnt); - } -#endif /* DHD_L2_FILTER */ - } -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif - DHD_IF_STA_LIST_UNLOCK(ifp, flags); - - return; -} - -/** Delete STA from the interface's STA list. */ -void -dhd_del_sta(void *pub, int ifidx, void *ea) -{ - dhd_sta_t *sta, *next; - dhd_if_t *ifp; - unsigned long flags; - - ASSERT(ea != NULL); - ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); - if (ifp == NULL) - return; - - DHD_IF_STA_LIST_LOCK(ifp, flags); -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif - list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { - if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) { - DHD_ERROR(("%s: Deleting STA " MACDBG "\n", - __FUNCTION__, MAC2STRDBG(sta->ea.octet))); - list_del(&sta->list); - dhd_sta_free(&ifp->info->pub, sta); - } - } -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif - DHD_IF_STA_LIST_UNLOCK(ifp, flags); -#ifdef DHD_L2_FILTER - if (ifp->parp_enable) { - /* clear Proxy ARP cache of specific Ethernet Address */ - bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE, 
- ea, FALSE, ((dhd_pub_t*)pub)->tickcnt); - } -#endif /* DHD_L2_FILTER */ - return; -} - -/** Add STA if it doesn't exist. Not reentrant. */ -dhd_sta_t* -dhd_findadd_sta(void *pub, int ifidx, void *ea) -{ - dhd_sta_t *sta; - - sta = dhd_find_sta(pub, ifidx, ea); - - if (!sta) { - /* Add entry */ - sta = dhd_add_sta(pub, ifidx, ea); - } - - return sta; -} - -#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) -static struct list_head * -dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list) -{ - unsigned long flags; - dhd_sta_t *sta, *snapshot; - - INIT_LIST_HEAD(snapshot_list); - - DHD_IF_STA_LIST_LOCK(ifp, flags); - - list_for_each_entry(sta, &ifp->sta_list, list) { - /* allocate one and add to snapshot */ - snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t)); - if (snapshot == NULL) { - DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__)); - continue; - } - - memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN); - - INIT_LIST_HEAD(&snapshot->list); - list_add_tail(&snapshot->list, snapshot_list); - } - - DHD_IF_STA_LIST_UNLOCK(ifp, flags); - - return snapshot_list; -} - -static void -dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list) -{ - dhd_sta_t *sta, *next; - - list_for_each_entry_safe(sta, next, snapshot_list, list) { - list_del(&sta->list); - MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t)); - } -} -#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */ - -#else -static inline void dhd_if_flush_sta(dhd_if_t * ifp) { } -static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {} -static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; } -static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {} -static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {} -dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; } -dhd_sta_t *dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL; } -void dhd_del_sta(void *pub, int 
ifidx, void *ea) {} -#endif /* PCIE_FULL_DONGLE */ - -#if defined(DHD_LB) - -#if defined(DHD_LB_TXC) || defined(DHD_LB_RXC) || defined(DHD_LB_TXP) || \ - defined(DHD_LB_RXP) -/** - * dhd_tasklet_schedule - Function that runs in IPI context of the destination - * CPU and schedules a tasklet. - * @tasklet: opaque pointer to the tasklet - */ -INLINE void -dhd_tasklet_schedule(void *tasklet) -{ - tasklet_schedule((struct tasklet_struct *)tasklet); -} -/** - * dhd_tasklet_schedule_on - Executes the passed takslet in a given CPU - * @tasklet: tasklet to be scheduled - * @on_cpu: cpu core id - * - * If the requested cpu is online, then an IPI is sent to this cpu via the - * smp_call_function_single with no wait and the tasklet_schedule function - * will be invoked to schedule the specified tasklet on the requested CPU. - */ -INLINE void -dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu) -{ - const int wait = 0; - smp_call_function_single(on_cpu, - dhd_tasklet_schedule, (void *)tasklet, wait); -} - -/** - * dhd_work_schedule_on - Executes the passed work in a given CPU - * @work: work to be scheduled - * @on_cpu: cpu core id - * - * If the requested cpu is online, then an IPI is sent to this cpu via the - * schedule_work_on and the work function - * will be invoked to schedule the specified work on the requested CPU. - */ - -INLINE void -dhd_work_schedule_on(struct work_struct *work, int on_cpu) -{ - schedule_work_on(on_cpu, work); -} -#endif /* DHD_LB_TXC || DHD_LB_RXC || DHD_LB_TXP || DHD_LB_RXP */ - -#if defined(DHD_LB_TXC) -/** - * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet - * on another cpu. 
The tx_compl_tasklet will take care of DMA unmapping and - * freeing the packets placed in the tx_compl workq - */ -void -dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp) -{ - dhd_info_t *dhd = dhdp->info; - int curr_cpu, on_cpu; - - if (dhd->rx_napi_netdev == NULL) { - DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__)); - return; - } - - DHD_LB_STATS_INCR(dhd->txc_sched_cnt); - /* - * If the destination CPU is NOT online or is same as current CPU - * no need to schedule the work - */ - curr_cpu = get_cpu(); - put_cpu(); - - on_cpu = atomic_read(&dhd->tx_compl_cpu); - - if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) { - dhd_tasklet_schedule(&dhd->tx_compl_tasklet); - } else { - schedule_work(&dhd->tx_compl_dispatcher_work); - } -} - -static void dhd_tx_compl_dispatcher_fn(struct work_struct * work) -{ - struct dhd_info *dhd = - container_of(work, struct dhd_info, tx_compl_dispatcher_work); - int cpu; - - get_online_cpus(); - cpu = atomic_read(&dhd->tx_compl_cpu); - if (!cpu_online(cpu)) - dhd_tasklet_schedule(&dhd->tx_compl_tasklet); - else - dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu); - put_online_cpus(); -} -#endif /* DHD_LB_TXC */ - -#if defined(DHD_LB_RXC) -/** - * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet - * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers - * in the H2D RxBuffer Post common ring, by using the recycled pktids that were - * placed in the rx_compl workq. 
- * - * @dhdp: pointer to dhd_pub object - */ -void -dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp) -{ - dhd_info_t *dhd = dhdp->info; - int curr_cpu, on_cpu; - - if (dhd->rx_napi_netdev == NULL) { - DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__)); - return; - } - - DHD_LB_STATS_INCR(dhd->rxc_sched_cnt); - /* - * If the destination CPU is NOT online or is same as current CPU - * no need to schedule the work - */ - curr_cpu = get_cpu(); - put_cpu(); - on_cpu = atomic_read(&dhd->rx_compl_cpu); - - if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) { - dhd_tasklet_schedule(&dhd->rx_compl_tasklet); - } else { - schedule_work(&dhd->rx_compl_dispatcher_work); - } -} - -static void dhd_rx_compl_dispatcher_fn(struct work_struct * work) -{ - struct dhd_info *dhd = - container_of(work, struct dhd_info, rx_compl_dispatcher_work); - int cpu; - - get_online_cpus(); - cpu = atomic_read(&dhd->rx_compl_cpu); - if (!cpu_online(cpu)) - dhd_tasklet_schedule(&dhd->rx_compl_tasklet); - else { - dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu); - } - put_online_cpus(); -} -#endif /* DHD_LB_RXC */ - -#if defined(DHD_LB_TXP) -static void dhd_tx_dispatcher_work(struct work_struct * work) -{ -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif - struct dhd_info *dhd = - container_of(work, struct dhd_info, tx_dispatcher_work); -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif - dhd_tasklet_schedule(&dhd->tx_tasklet); -} - -static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp) -{ - int cpu; - int net_tx_cpu; - dhd_info_t *dhd = dhdp->info; - - preempt_disable(); - cpu = atomic_read(&dhd->tx_cpu); - net_tx_cpu = atomic_read(&dhd->net_tx_cpu); - - /* - * Now if the NET_TX has pushed the packet in the same - * CPU that is chosen for Tx processing, seperate it out - * i.e run the TX processing tasklet in compl_cpu - */ - if (net_tx_cpu == cpu) - 
cpu = atomic_read(&dhd->tx_compl_cpu); - - if (!cpu_online(cpu)) { - /* - * Ooohh... but the Chosen CPU is not online, - * Do the job in the current CPU itself. - */ - dhd_tasklet_schedule(&dhd->tx_tasklet); - } else { - /* - * Schedule tx_dispatcher_work to on the cpu which - * in turn will schedule tx_tasklet. - */ - dhd_work_schedule_on(&dhd->tx_dispatcher_work, cpu); - } - preempt_enable(); -} - -/** - * dhd_lb_tx_dispatch - load balance by dispatching the tx_tasklet - * on another cpu. The tx_tasklet will take care of actually putting - * the skbs into appropriate flow ring and ringing H2D interrupt - * - * @dhdp: pointer to dhd_pub object - */ -static void -dhd_lb_tx_dispatch(dhd_pub_t *dhdp) -{ - dhd_info_t *dhd = dhdp->info; - int curr_cpu; - - curr_cpu = get_cpu(); - put_cpu(); - - /* Record the CPU in which the TX request from Network stack came */ - atomic_set(&dhd->net_tx_cpu, curr_cpu); - - /* Schedule the work to dispatch ... */ - dhd_tx_dispatcher_fn(dhdp); -} -#endif /* DHD_LB_TXP */ - -#if defined(DHD_LB_RXP) -/** - * dhd_napi_poll - Load balance napi poll function to process received - * packets and send up the network stack using netif_receive_skb() - * - * @napi: napi object in which context this poll function is invoked - * @budget: number of packets to be processed. - * - * Fetch the dhd_info given the rx_napi_struct. Move all packets from the - * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock). - * Dequeue each packet from head of rx_process_queue, fetch the ifid from the - * packet tag and sendup. 
- */ -static int -dhd_napi_poll(struct napi_struct *napi, int budget) -{ - int ifid; - const int pkt_count = 1; - const int chan = 0; - struct sk_buff * skb; - unsigned long flags; - struct dhd_info *dhd; - int processed = 0; - struct sk_buff_head rx_process_queue; - -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif - dhd = container_of(napi, struct dhd_info, rx_napi_struct); -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif - - DHD_INFO(("%s napi_queue<%d> budget<%d>\n", - __FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget)); - __skb_queue_head_init(&rx_process_queue); - - /* extract the entire rx_napi_queue into local rx_process_queue */ - spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags); - skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue); - spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags); - - while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) { - OSL_PREFETCH(skb->data); - - ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb)); - - DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n", - __FUNCTION__, skb, ifid)); - - dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan); - processed++; - } - - DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed); - - DHD_INFO(("%s processed %d\n", __FUNCTION__, processed)); - napi_complete(napi); - - return budget - 1; -} - -/** - * dhd_napi_schedule - Place the napi struct into the current cpus softnet napi - * poll list. This function may be invoked via the smp_call_function_single - * from a remote CPU. 
- * - * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ) - * after the napi_struct is added to the softnet data's poll_list - * - * @info: pointer to a dhd_info struct - */ -static void -dhd_napi_schedule(void *info) -{ - dhd_info_t *dhd = (dhd_info_t *)info; - - DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n", - __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu))); - - /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */ - if (napi_schedule_prep(&dhd->rx_napi_struct)) { - __napi_schedule(&dhd->rx_napi_struct); - DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt); - } - - /* - * If the rx_napi_struct was already running, then we let it complete - * processing all its packets. The rx_napi_struct may only run on one - * core at a time, to avoid out-of-order handling. - */ -} - -/** - * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ - * action after placing the dhd's rx_process napi object in the the remote CPU's - * softnet data's poll_list. - * - * @dhd: dhd_info which has the rx_process napi object - * @on_cpu: desired remote CPU id - */ -static INLINE int -dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu) -{ - int wait = 0; /* asynchronous IPI */ - DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n", - __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu)); - - if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) { - DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n", - __FUNCTION__, on_cpu)); - } - - DHD_LB_STATS_INCR(dhd->napi_sched_cnt); - - return 0; -} - -/* - * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on - * Why should we do this? - * The candidacy algorithm is run from the call back function - * registered to CPU hotplug notifier. This call back happens from Worker - * context. The dhd_napi_schedule_on is also from worker context. - * Note that both of this can run on two different CPUs at the same time. 
- * So we can possibly have a window where a given CPUn is being brought - * down from CPUm while we try to run a function on CPUn. - * To prevent this its better have the whole code to execute an SMP - * function under get_online_cpus. - * This function call ensures that hotplug mechanism does not kick-in - * until we are done dealing with online CPUs - * If the hotplug worker is already running, no worries because the - * candidacy algo would then reflect the same in dhd->rx_napi_cpu. - * - * The below mentioned code structure is proposed in - * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt - * for the question - * Q: I need to ensure that a particular cpu is not removed when there is some - * work specific to this cpu is in progress - * - * According to the documentation calling get_online_cpus is NOT required, if - * we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn can - * run from Work Queue context we have to call these functions - */ -static void dhd_rx_napi_dispatcher_fn(struct work_struct * work) -{ -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif - struct dhd_info *dhd = - container_of(work, struct dhd_info, rx_napi_dispatcher_work); -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif - - dhd_napi_schedule(dhd); -} - -/** - * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct - * to run on another CPU. The rx_napi_struct's poll function will retrieve all - * the packets enqueued into the rx_napi_queue and sendup. - * The producer's rx packet queue is appended to the rx_napi_queue before - * dispatching the rx_napi_struct. 
- */ -void -dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp) -{ - unsigned long flags; - dhd_info_t *dhd = dhdp->info; - int curr_cpu; - int on_cpu; -#ifdef DHD_LB_IRQSET - cpumask_t cpus; -#endif /* DHD_LB_IRQSET */ - - if (dhd->rx_napi_netdev == NULL) { - DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__)); - return; - } - - DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__, - skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue))); - - /* append the producer's queue of packets to the napi's rx process queue */ - spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags); - skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue); - spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags); - - /* - * If the destination CPU is NOT online or is same as current CPU - * no need to schedule the work - */ - curr_cpu = get_cpu(); - put_cpu(); - - preempt_disable(); - on_cpu = atomic_read(&dhd->rx_napi_cpu); -#ifdef DHD_LB_IRQSET - if (cpumask_and(&cpus, cpumask_of(curr_cpu), dhd->cpumask_primary) || - (!cpu_online(on_cpu))) -#else - if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) -#endif /* DHD_LB_IRQSET */ - { - DHD_INFO(("%s : curr_cpu : %d, cpumask : 0x%lx\n", __FUNCTION__, - curr_cpu, *cpumask_bits(dhd->cpumask_primary))); - dhd_napi_schedule(dhd); - } else { - DHD_INFO(("%s : schedule to curr_cpu : %d, cpumask : 0x%lx\n", - __FUNCTION__, curr_cpu, *cpumask_bits(dhd->cpumask_primary))); - dhd_work_schedule_on(&dhd->rx_napi_dispatcher_work, on_cpu); - } - preempt_enable(); -} - -/** - * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue - */ -void -dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx) -{ - dhd_info_t *dhd = dhdp->info; - - DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__, - pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue))); - DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx); - __skb_queue_tail(&dhd->rx_pend_queue, pkt); -} -#endif /* DHD_LB_RXP */ - 
-#ifdef DHD_LB_IRQSET -void -dhd_irq_set_affinity(dhd_pub_t *dhdp) -{ - unsigned int irq = (unsigned int)-1; - int err = BCME_OK; - - if (!dhdp) { - DHD_ERROR(("%s : dhdp is NULL\n", __FUNCTION__)); - return; - } - - if (!dhdp->bus) { - DHD_ERROR(("%s : bus is NULL\n", __FUNCTION__)); - return; - } - - dhdpcie_get_pcieirq(dhdp->bus, &irq); - err = irq_set_affinity(irq, dhdp->info->cpumask_primary); - if (err) - DHD_ERROR(("%s : irq set affinity is failed cpu:0x%lx\n", - __FUNCTION__, *cpumask_bits(dhdp->info->cpumask_primary))); -} -#endif /* DHD_LB_IRQSET */ -#endif /* DHD_LB */ - -/** Returns dhd iflist index corresponding the the bssidx provided by apps */ -int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx) -{ - dhd_if_t *ifp; - dhd_info_t *dhd = dhdp->info; - int i; - - ASSERT(bssidx < DHD_MAX_IFS); - ASSERT(dhdp); - - for (i = 0; i < DHD_MAX_IFS; i++) { - ifp = dhd->iflist[i]; - if (ifp && (ifp->bssidx == bssidx)) { - DHD_TRACE(("Index manipulated for %s from %d to %d\n", - ifp->name, bssidx, i)); - break; - } - } - return i; -} - -static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb) -{ - uint32 store_idx; - uint32 sent_idx; - - if (!skb) { - DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n")); - return BCME_ERROR; - } - - dhd_os_rxflock(dhdp); - store_idx = dhdp->store_idx; - sent_idx = dhdp->sent_idx; - if (dhdp->skbbuf[store_idx] != NULL) { - /* Make sure the previous packets are processed */ - dhd_os_rxfunlock(dhdp); - DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n", - skb, store_idx, sent_idx)); - /* removed msleep here, should use wait_event_timeout if we - * want to give rx frame thread a chance to run - */ -#if defined(WAIT_DEQUEUE) - OSL_SLEEP(1); -#endif // endif - return BCME_ERROR; - } - DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. 
idx %d -> %d\n", - skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1))); - dhdp->skbbuf[store_idx] = skb; - dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1); - dhd_os_rxfunlock(dhdp); - - return BCME_OK; -} - -static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp) -{ - uint32 store_idx; - uint32 sent_idx; - void *skb; - - dhd_os_rxflock(dhdp); - - store_idx = dhdp->store_idx; - sent_idx = dhdp->sent_idx; - skb = dhdp->skbbuf[sent_idx]; - - if (skb == NULL) { - dhd_os_rxfunlock(dhdp); - DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n", - store_idx, sent_idx)); - return NULL; - } - - dhdp->skbbuf[sent_idx] = NULL; - dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1); - - DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n", - skb, sent_idx)); - - dhd_os_rxfunlock(dhdp); - - return skb; -} - -int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost) -{ - if (prepost) { /* pre process */ - dhd_read_cis(dhdp); - dhd_check_module_cid(dhdp); - dhd_check_module_mac(dhdp); - dhd_set_macaddr_from_file(dhdp); - } else { /* post process */ - dhd_write_macaddr(&dhdp->mac); - dhd_clear_cis(dhdp); - } - - return 0; -} - -// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed -#if defined(PKT_FILTER_SUPPORT) -#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER) -static bool -_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode_param) -{ - bool _apply = FALSE; - /* In case of IBSS mode, apply arp pkt filter */ - if (op_mode_param & DHD_FLAG_IBSS_MODE) { - _apply = TRUE; - goto exit; - } - /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */ - if (op_mode_param & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE)) { - _apply = TRUE; - goto exit; - } - -exit: - return _apply; -} -#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */ - -void -dhd_set_packet_filter(dhd_pub_t *dhd) -{ - int i; + int i; DHD_TRACE(("%s: enter\n", __FUNCTION__)); if (dhd_pkt_filter_enable) { @@ -3403,14 +1707,16 @@ 
dhd_enable_packet_filter(int value, dhd_pub_t *dhd) int i; DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value)); - if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) { + if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value && + !dhd_conf_get_insuspend(dhd, AP_FILTER_IN_SUSPEND)) { DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__)); return; } /* 1 - Enable packet filter, only allow unicast packet to send up */ /* 0 - Disable packet filter */ if (dhd_pkt_filter_enable && (!value || - (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress))) + (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress) || + dhd_conf_get_insuspend(dhd, AP_FILTER_IN_SUSPEND))) { for (i = 0; i < dhd->pktfilter_count; i++) { // terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed @@ -3519,7 +1825,7 @@ static int dhd_set_suspend(int value, dhd_pub_t *dhd) uint roamvar = dhd->conf->roam_off_suspend; #endif /* ENABLE_FW_ROAM_SUSPEND */ #ifdef ENABLE_BCN_LI_BCN_WAKEUP - int bcn_li_bcn; + int bcn_li_bcn = 1; #endif /* ENABLE_BCN_LI_BCN_WAKEUP */ uint nd_ra_filter = 0; #ifdef ENABLE_IPMCAST_FILTER @@ -3598,6 +1904,9 @@ static int dhd_set_suspend(int value, dhd_pub_t *dhd) dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd)); #endif /* APF */ #endif /* PKT_FILTER_SUPPORT */ +#ifdef ARP_OFFLOAD_SUPPORT + dhd_arp_offload_enable(dhd, TRUE); +#endif /* ARP_OFFLOAD_SUPPORT */ #ifdef PASS_ALL_MCAST_PKTS allmulti = 0; @@ -3668,7 +1977,14 @@ static int dhd_set_suspend(int value, dhd_pub_t *dhd) sizeof(bcn_li_dtim), NULL, 0, TRUE) < 0) DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__)); #endif /* OEM_ANDROID && BCMPCIE */ - +#ifdef WL_CFG80211 + /* Disable cfg80211 feature events during suspend */ + ret = wl_cfg80211_config_suspend_events( + dhd_linux_get_primary_netdev(dhd), FALSE); + if (ret < 0) { + DHD_ERROR(("failed to disable events (%d)\n", ret)); + } +#endif /* WL_CFG80211 */ #ifdef DHD_USE_EARLYSUSPEND #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND bcn_timeout = 
CUSTOM_BCN_TIMEOUT_IN_SUSPEND; @@ -3695,7 +2011,9 @@ static int dhd_set_suspend(int value, dhd_pub_t *dhd) } #endif /* ENABLE_FW_ROAM_SUSPEND */ #ifdef ENABLE_BCN_LI_BCN_WAKEUP - bcn_li_bcn = 0; + if (bcn_li_dtim) { + bcn_li_bcn = 0; + } ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, sizeof(bcn_li_bcn), NULL, 0, TRUE); if (ret < 0) { @@ -3767,10 +2085,18 @@ static int dhd_set_suspend(int value, dhd_pub_t *dhd) __FUNCTION__, ret)); } #endif /* CUSTOM_EVENT_PM_WAKE */ +#ifdef CONFIG_SILENT_ROAM + if (!dhd->sroamed) { + ret = dhd_sroam_set_mon(dhd, TRUE); + if (ret < 0) { + DHD_ERROR(("%s set sroam failed %d\n", + __FUNCTION__, ret)); + } + } + dhd->sroamed = FALSE; +#endif /* CONFIG_SILENT_ROAM */ #endif /* DHD_USE_EARLYSUSPEND */ - dhd_conf_set_suspend_resume(dhd, value); } else { - dhd_conf_set_suspend_resume(dhd, value); #ifdef PKT_FILTER_SUPPORT dhd->early_suspended = 0; #endif // endif @@ -3793,6 +2119,9 @@ static int dhd_set_suspend(int value, dhd_pub_t *dhd) ret)); } #endif /* WL_CF80211 && WL_BCNRECV */ +#ifdef ARP_OFFLOAD_SUPPORT + dhd_arp_offload_enable(dhd, FALSE); +#endif /* ARP_OFFLOAD_SUPPORT */ #ifdef PKT_FILTER_SUPPORT /* disable pkt filter */ dhd_enable_packet_filter(0, dhd); @@ -3868,7 +2197,6 @@ static int dhd_set_suspend(int value, dhd_pub_t *dhd) } #endif /* ENABLE_FW_ROAM_SUSPEND */ #ifdef ENABLE_BCN_LI_BCN_WAKEUP - bcn_li_bcn = 1; ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, sizeof(bcn_li_bcn), NULL, 0, TRUE); if (ret < 0) { @@ -3920,15 +2248,24 @@ static int dhd_set_suspend(int value, dhd_pub_t *dhd) __FUNCTION__, ret)); } #endif /* CUSTOM_EVENT_PM_WAKE */ +#ifdef CONFIG_SILENT_ROAM + ret = dhd_sroam_set_mon(dhd, FALSE); + if (ret < 0) { + DHD_ERROR(("%s set sroam failed %d\n", __FUNCTION__, ret)); + } +#endif /* CONFIG_SILENT_ROAM */ #endif /* DHD_USE_EARLYSUSPEND */ +#ifdef WL_CFG80211 + /* Enable cfg80211 feature events during resume */ + ret = wl_cfg80211_config_suspend_events( + dhd_linux_get_primary_netdev(dhd), 
TRUE); + if (ret < 0) { + DHD_ERROR(("failed to enable events (%d)\n", ret)); + } +#endif /* WL_CFG80211 */ #ifdef DHD_LB_IRQSET - dhd_irq_set_affinity(dhd); + dhd_irq_set_affinity(dhd, dhd->info->cpumask_primary); #endif /* DHD_LB_IRQSET */ - - /* terence 2017029: Reject in early suspend */ - if (dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) { - dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF); - } } } dhd_suspend_unlock(dhd); @@ -3947,7 +2284,7 @@ static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force) /* Set flag when early suspend was called */ dhdp->in_suspend = val; if ((force || !dhdp->suspend_disable_flag) && - (dhd_support_sta_mode(dhdp) || dhd_conf_get_insuspend(dhdp))) + (dhd_support_sta_mode(dhdp) || dhd_conf_get_insuspend(dhdp, ALL_IN_SUSPEND))) { ret = dhd_set_suspend(val, dhdp); } @@ -4019,14 +2356,13 @@ dhd_timeout_expired(dhd_timeout_t *tmo) if (tmo->increment > tmo->tick) tmo->increment = tmo->tick; } else { - wait_queue_head_t delay_wait; - DECLARE_WAITQUEUE(wait, current); - init_waitqueue_head(&delay_wait); - add_wait_queue(&delay_wait, &wait); - set_current_state(TASK_INTERRUPTIBLE); - (void)schedule_timeout(1); - remove_wait_queue(&delay_wait, &wait); - set_current_state(TASK_RUNNING); + /* + * OSL_SLEEP() is corresponding to usleep_range(). In non-atomic + * context where the exact wakeup time is flexible, it would be good + * to use usleep_range() instead of udelay(). It takes a few advantages + * such as improving responsiveness and reducing power. 
+ */ + OSL_SLEEP(jiffies_to_msecs(1)); } return 0; @@ -4124,11 +2460,7 @@ static void _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx) { struct net_device *dev; -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) struct netdev_hw_addr *ha; -#else - struct dev_mc_list *mclist; -#endif // endif uint32 allmulti, cnt; wl_ioctl_t ioc; @@ -4147,18 +2479,10 @@ _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx) dev = dhd->iflist[i]->net; if (!dev) continue; -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) netif_addr_lock_bh(dev); -#endif /* LINUX >= 2.6.27 */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) cnt_iface[i] = netdev_mc_count(dev); cnt += cnt_iface[i]; -#else - cnt += dev->mc_count; -#endif /* LINUX >= 2.6.35 */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) netif_addr_unlock_bh(dev); -#endif /* LINUX >= 2.6.27 */ /* Determine initial value of allmulti flag */ allmulti |= (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE; @@ -4172,17 +2496,9 @@ _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx) dev = dhd->iflist[ifidx]->net; if (!dev) return; -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) netif_addr_lock_bh(dev); -#endif /* LINUX >= 2.6.27 */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) cnt = netdev_mc_count(dev); -#else - cnt = dev->mc_count; -#endif /* LINUX >= 2.6.35 */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) netif_addr_unlock_bh(dev); -#endif /* LINUX >= 2.6.27 */ /* Determine initial value of allmulti flag */ allmulti = (dev->flags & IFF_ALLMULTI) ? 
TRUE : FALSE; @@ -4218,10 +2534,7 @@ _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx) DHD_TRACE(("_dhd_set_multicast_list: ifidx %d\n", i)); dev = dhd->iflist[i]->net; -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) netif_addr_lock_bh(dev); -#endif /* LINUX >= 2.6.27 */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wcast-qual" @@ -4239,23 +2552,11 @@ _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx) cnt_iface[i], MAC2STRDBG(ha->addr))); cnt_iface[i]--; } -#else /* LINUX < 2.6.35 */ - for (mclist = dev->mc_list; (mclist && (cnt_iface[i] > 0)); - cnt_iface[i]--, mclist = mclist->next) { - memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN); - bufp += ETHER_ADDR_LEN; - } -#endif /* LINUX >= 2.6.35 */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) netif_addr_unlock_bh(dev); -#endif /* LINUX >= 2.6.27 */ } } #else /* !MCAST_LIST_ACCUMULATION */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) netif_addr_lock_bh(dev); -#endif /* LINUX >= 2.6.27 */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wcast-qual" @@ -4270,16 +2571,7 @@ _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx) #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) #pragma GCC diagnostic pop #endif // endif -#else /* LINUX < 2.6.35 */ - for (mclist = dev->mc_list; (mclist && (cnt > 0)); - cnt--, mclist = mclist->next) { - memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN); - bufp += ETHER_ADDR_LEN; - } -#endif /* LINUX >= 2.6.35 */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) netif_addr_unlock_bh(dev); -#endif /* LINUX >= 2.6.27 */ #endif /* MCAST_LIST_ACCUMULATION */ memset(&ioc, 0, sizeof(ioc)); @@ -4352,6 +2644,7 @@ _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr) memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, 
ETHER_ADDR_LEN); if (ifidx == 0) memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN); + DHD_ERROR(("%s: MACID is overwritten ifidx=%d, mac=%pM\n", __FUNCTION__, ifidx, addr)); } return ret; @@ -4450,6 +2743,9 @@ dhd_ifadd_event_handler(void *handle, void *event_info, u8 event) int ret; #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) struct wl_if_event_info info; +#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) + struct net_device *ndev = NULL; +#endif #else struct net_device *ndev; #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ @@ -4492,8 +2788,14 @@ dhd_ifadd_event_handler(void *handle, void *event_info, u8 event) mac_addr = NULL; } +#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) + if ((ndev = wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net, + &info, mac_addr, NULL, true)) == NULL) +#else if (wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net, - &info, mac_addr, NULL, true) == NULL) { + &info, mac_addr, NULL, true) == NULL) +#endif + { /* Do the post interface create ops */ DHD_ERROR(("Post ifcreate ops failed. 
Returning \n")); goto done; @@ -4535,6 +2837,9 @@ dhd_ifadd_event_handler(void *handle, void *event_info, u8 event) done: MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t)); +#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) + dhd_bridge_dev_set(dhd, ifidx, ndev); +#endif /* defiend(WLDWDS) && defined(FOURADDR_AUTO_BRG) */ DHD_PERIM_UNLOCK(&dhd->pub); DHD_OS_WAKE_UNLOCK(&dhd->pub); @@ -4569,6 +2874,9 @@ dhd_ifdel_event_handler(void *handle, void *event_info, u8 event) ifidx = if_event->event.ifidx; DHD_TRACE(("Removing interface with idx %d\n", ifidx)); +#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) + dhd_bridge_dev_set(dhd, ifidx, NULL); +#endif /* defiend(WLDWDS) && defined(FOURADDR_AUTO_BRG) */ DHD_PERIM_UNLOCK(&dhd->pub); if (!dhd->pub.info->iflist[ifidx]) { @@ -4850,61 +3158,13 @@ dhd_os_wlfc_unblock(dhd_pub_t *pub) return 1; #ifdef BCMDBUS spin_unlock_irqrestore(&di->wlfc_spinlock, di->wlfc_lock_flags); -#else - spin_unlock_bh(&di->wlfc_spinlock); -#endif /* BCMDBUS */ - return 1; -} - -#endif /* PROP_TXSTATUS */ - -#if defined(DHD_RX_DUMP) || defined(DHD_TX_DUMP) -typedef struct { - uint16 type; - const char *str; -} PKTTYPE_INFO; - -static const PKTTYPE_INFO packet_type_info[] = -{ - { ETHER_TYPE_IP, "IP" }, - { ETHER_TYPE_ARP, "ARP" }, - { ETHER_TYPE_BRCM, "BRCM" }, - { ETHER_TYPE_802_1X, "802.1X" }, - { ETHER_TYPE_WAI, "WAPI" }, - { 0, ""} -}; - -static const char *_get_packet_type_str(uint16 type) -{ - int i; - int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1; - - for (i = 0; i < n; i++) { - if (packet_type_info[i].type == type) - return packet_type_info[i].str; - } - - return packet_type_info[n].str; -} - -void -dhd_trx_dump(struct net_device *ndev, uint8 *dump_data, uint datalen, bool tx) -{ - uint16 protocol; - char *ifname; - - protocol = (dump_data[12] << 8) | dump_data[13]; - ifname = ndev ? 
ndev->name : "N/A"; - - if (protocol != ETHER_TYPE_BRCM) { - DHD_ERROR(("%s DUMP[%s] - %s\n", tx?"Tx":"Rx", ifname, - _get_packet_type_str(protocol))); -#if defined(DHD_TX_FULL_DUMP) || defined(DHD_RX_FULL_DUMP) - prhex("Data", dump_data, datalen); -#endif /* DHD_TX_FULL_DUMP || DHD_RX_FULL_DUMP */ - } +#else + spin_unlock_bh(&di->wlfc_spinlock); +#endif /* BCMDBUS */ + return 1; } -#endif /* DHD_TX_DUMP || DHD_RX_DUMP */ + +#endif /* PROP_TXSTATUS */ /* This routine do not support Packet chain feature, Currently tested for * proxy arp feature @@ -4944,15 +3204,7 @@ int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p) */ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__); -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) netif_rx_ni(skb); -#else - ulong flags; - netif_rx(skb); - local_irq_save(flags); - RAISE_RX_SOFTIRQ(); - local_irq_restore(flags); -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ } } @@ -4968,6 +3220,9 @@ __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) int ret = BCME_OK; dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); struct ether_header *eh = NULL; + bool pkt_ether_type_802_1x = FALSE; + uint8 pkt_flow_prio; + #if defined(DHD_L2_FILTER) dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx); #endif // endif @@ -4983,11 +3238,7 @@ __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) if (dhdp->busstate == DHD_BUS_SUSPEND) { DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__)); PKTCFREE(dhdp->osh, pktbuf, TRUE); -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) - return -ENODEV; -#else return NETDEV_TX_BUSY; -#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) */ } #endif /* PCIE_FULL_DONGLE */ @@ -5051,26 +3302,19 @@ __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) /* back up 802.1x's priority */ dhdp->prio_8021x = prio; #endif /* DHD_LOSSLESS_ROAMING */ + pkt_ether_type_802_1x = TRUE; DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED); atomic_inc(&dhd->pend_8021x_cnt); #if 
defined(WL_CFG80211) && defined(WL_WPS_SYNC) wl_handle_wps_states(dhd_idx2net(dhdp, ifidx), pktdata, PKTLEN(dhdp->osh, pktbuf), TRUE); #endif /* WL_CFG80211 && WL_WPS_SYNC */ - dhd_dump_eapol_4way_message(dhdp, dhd_ifname(dhdp, ifidx), pktdata, TRUE); - } - - if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) { -#ifdef DHD_DHCP_DUMP - dhd_dhcp_dump(dhd_ifname(dhdp, ifidx), pktdata, TRUE); -#endif /* DHD_DHCP_DUMP */ -#ifdef DHD_ICMP_DUMP - dhd_icmp_dump(dhd_ifname(dhdp, ifidx), pktdata, TRUE); -#endif /* DHD_ICMP_DUMP */ } + dhd_dump_pkt(dhdp, ifidx, pktdata, + (uint32)PKTLEN(dhdp->osh, pktbuf), TRUE, NULL, NULL); } else { - PKTCFREE(dhdp->osh, pktbuf, TRUE); - return BCME_ERROR; + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return BCME_ERROR; } { @@ -5096,23 +3340,36 @@ __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) #endif /* !PKTPRIO_OVERRIDE */ } + BCM_REFERENCE(pkt_ether_type_802_1x); + BCM_REFERENCE(pkt_flow_prio); + +#ifdef SUPPORT_SET_TID + dhd_set_tid_based_on_uid(dhdp, pktbuf); +#endif /* SUPPORT_SET_TID */ + #ifdef PCIE_FULL_DONGLE /* * Lkup the per interface hash table, for a matching flowring. If one is not * available, allocate a unique flowid and add a flowring entry. * The found or newly created flowid is placed into the pktbuf's tag. 
*/ - ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf); + +#ifdef DHD_LOSSLESS_ROAMING + /* For LLR override and use flowring with prio 7 for 802.1x packets */ + if (pkt_ether_type_802_1x) { + pkt_flow_prio = PRIO_8021D_NC; + } else +#endif /* DHD_LOSSLESS_ROAMING */ + { + pkt_flow_prio = dhdp->flow_prio_map[(PKTPRIO(pktbuf))]; + } + + ret = dhd_flowid_update(dhdp, ifidx, pkt_flow_prio, pktbuf); if (ret != BCME_OK) { PKTCFREE(dhd->pub.osh, pktbuf, TRUE); return ret; } -#endif // endif - -#if defined(DHD_TX_DUMP) - dhd_trx_dump(dhd_idx2net(dhdp, ifidx), PKTDATA(dhdp->osh, pktbuf), - PKTLEN(dhdp->osh, pktbuf), TRUE); -#endif +#endif /* PCIE_FULL_DONGLE */ /* terence 20150901: Micky add to ajust the 802.1X priority */ /* Set the 802.1X packet with the highest priority 7 */ if (dhdp->conf->pktprio8021x >= 0) @@ -5219,36 +3476,6 @@ dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) return ret; } -#if defined(DHD_LB_TXP) - -int BCMFASTPATH -dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net, - int ifidx, void *skb) -{ - DHD_LB_STATS_PERCPU_ARR_INCR(dhd->tx_start_percpu_run_cnt); - - /* If the feature is disabled run-time do TX from here */ - if (atomic_read(&dhd->lb_txp_active) == 0) { - DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt); - return __dhd_sendpkt(&dhd->pub, ifidx, skb); - } - - /* Store the address of net device and interface index in the Packet tag */ - DHD_LB_TX_PKTTAG_SET_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), net); - DHD_LB_TX_PKTTAG_SET_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), ifidx); - - /* Enqueue the skb into tx_pend_queue */ - skb_queue_tail(&dhd->tx_pend_queue, skb); - - DHD_TRACE(("%s(): Added skb %p for netdev %p \r\n", __FUNCTION__, skb, net)); - - /* Dispatch the Tx job to be processed by the tx_tasklet */ - dhd_lb_tx_dispatch(&dhd->pub); - - return NETDEV_TX_OK; -} -#endif /* DHD_LB_TXP */ - int BCMFASTPATH dhd_start_xmit(struct sk_buff *skb, struct net_device *net) { @@ -5267,16 +3494,6 
@@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) return -ENODEV; } - /* terence 2017029: Reject in early suspend */ - if ((dhd->pub.conf->insuspend & NO_TXDATA_IN_SUSPEND) && dhd->pub.early_suspended) { - dhd_txflowcontrol(&dhd->pub, ALL_INTERFACES, ON); -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) - return -ENODEV; -#else - return NETDEV_TX_BUSY; -#endif - } - DHD_GENERAL_LOCK(&dhd->pub, flags); DHD_BUS_BUSY_SET_IN_TX(&dhd->pub); DHD_GENERAL_UNLOCK(&dhd->pub, flags); @@ -5295,11 +3512,7 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) #endif /* PCIE_FULL_DONGLE */ dhd_os_busbusy_wake(&dhd->pub); DHD_GENERAL_UNLOCK(&dhd->pub, flags); -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) - return -ENODEV; -#else return NETDEV_TX_BUSY; -#endif // endif } #else if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) { @@ -5327,11 +3540,7 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) DHD_GENERAL_UNLOCK(&dhd->pub, flags); DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); DHD_OS_WAKE_UNLOCK(&dhd->pub); -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) - return -ENODEV; -#else return NETDEV_TX_BUSY; -#endif // endif } ifp = DHD_DEV_IFP(net); @@ -5346,11 +3555,7 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) DHD_GENERAL_UNLOCK(&dhd->pub, flags); DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); DHD_OS_WAKE_UNLOCK(&dhd->pub); -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) - return -ENODEV; -#else return NETDEV_TX_BUSY; -#endif // endif } DHD_IF_SET_TX_ACTIVE(ifp, DHD_TX_START_XMIT); @@ -5393,6 +3598,12 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__); } + /* move from dhdsdio_sendfromq(), try to orphan skb early */ + if (dhd->pub.conf->orphan_move == 2) + PKTORPHAN(skb, dhd->pub.conf->tsq); + else if (dhd->pub.conf->orphan_move == 3) + skb_orphan(skb); + /* Convert to packet */ if (!(pktbuf = 
PKTFRMNATIVE(dhd->pub.osh, skb))) { DHD_ERROR(("%s: PKTFRMNATIVE failed\n", @@ -5429,6 +3640,12 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) } #endif /* DHD_PSTA */ +#ifdef DHDTCPSYNC_FLOOD_BLK + if (dhd_tcpdata_get_flag(&dhd->pub, pktbuf) == FLAG_SYNCACK) { + ifp->tsyncack_txed ++; + } +#endif /* DHDTCPSYNC_FLOOD_BLK */ + #ifdef DHDTCPACK_SUPPRESS if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) { /* If this packet has been hold or got freed, just return */ @@ -5480,11 +3697,7 @@ done: DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); DHD_OS_WAKE_UNLOCK(&dhd->pub); /* Return ok: we always eat the packet */ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) - return 0; -#else return NETDEV_TX_OK; -#endif // endif } #ifdef DHD_PCIE_NATIVE_RUNTIMEPM @@ -5603,6 +3816,25 @@ dhd_bus_wakeup_work(dhd_pub_t *dhdp) } #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +static void +__dhd_txflowcontrol(dhd_pub_t *dhdp, struct net_device *net, bool state) +{ + + if ((state == ON) && (dhdp->txoff == FALSE)) { + netif_stop_queue(net); + dhd_prot_update_pktid_txq_stop_cnt(dhdp); + } else if (state == ON) { + DHD_ERROR(("%s: Netif Queue has already stopped\n", __FUNCTION__)); + } + if ((state == OFF) && (dhdp->txoff == TRUE)) { + netif_wake_queue(net); + dhd_prot_update_pktid_txq_start_cnt(dhdp); + } else if (state == OFF) { + DHD_ERROR(("%s: Netif Queue has already started\n", __FUNCTION__)); + } +} + void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state) { @@ -5622,26 +3854,19 @@ dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state) #endif // endif if (ifidx == ALL_INTERFACES) { - /* Flow control on all active interfaces */ - dhdp->txoff = state; for (i = 0; i < DHD_MAX_IFS; i++) { if (dhd->iflist[i]) { net = dhd->iflist[i]->net; - if (state == ON) - netif_stop_queue(net); - else - netif_wake_queue(net); + __dhd_txflowcontrol(dhdp, net, state); } } } else { if (dhd->iflist[ifidx]) { net = dhd->iflist[ifidx]->net; - if (state == ON) - 
netif_stop_queue(net); - else - netif_wake_queue(net); + __dhd_txflowcontrol(dhdp, net, state); } } + dhdp->txoff = state; } #ifdef DHD_MCAST_REGEN @@ -5690,15 +3915,7 @@ dhd_netif_rx_ni(struct sk_buff * skb) * does netif_rx, disables irq, raise NET_IF_RX softirq and * enables interrupts back */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) netif_rx_ni(skb); -#else - ulong flags; - netif_rx(skb); - local_irq_save(flags); - RAISE_RX_SOFTIRQ(); - local_irq_restore(flags); -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ } static int @@ -5720,12 +3937,7 @@ dhd_event_logtrace_pkt_process(dhd_pub_t *dhdp, struct sk_buff * skb) */ pktlen = skb->len + ETH_HLEN; -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) pktdata = (void *)skb_mac_header(skb); -#else - pktdata = (void *)skb->mac.raw; -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */ - ret = wl_host_event_get_data(pktdata, pktlen, &evu); if (ret != BCME_OK) { @@ -5798,6 +4010,9 @@ dhd_event_logtrace_process_items(dhd_info_t *dhd) dhd_event_logtrace_pkt_process(dhdp, skb); } + /* Dummy sleep so that scheduler kicks in after processing any logprints */ + OSL_SLEEP(0); + /* Send packet up if logtrace_pkt_sendup is TRUE */ if (dhdp->logtrace_pkt_sendup) { #ifdef DHD_USE_STATIC_CTRLBUF @@ -5852,12 +4067,17 @@ dhd_logtrace_thread(void *data) int ret; while (1) { - dhdp->logtrace_thr_ts.entry_time = OSL_SYSUPTIME_US(); + dhdp->logtrace_thr_ts.entry_time = OSL_LOCALTIME_NS(); if (!binary_sema_down(tsk)) { - dhdp->logtrace_thr_ts.sem_down_time = OSL_SYSUPTIME_US(); + dhdp->logtrace_thr_ts.sem_down_time = OSL_LOCALTIME_NS(); SMP_RD_BARRIER_DEPENDS(); if (dhd->pub.dongle_reset == FALSE) { do { + /* Check terminated before processing the items */ + if (tsk->terminated) { + DHD_ERROR(("%s: task terminated\n", __FUNCTION__)); + goto exit; + } #ifdef EWP_EDL /* check if EDL is being used */ if (dhd->pub.dongle_edl_support) { @@ -5887,25 +4107,21 @@ dhd_logtrace_thread(void *data) } } while (ret > 0); } - 
/* Check terminated after processing the items */ - if (tsk->terminated) { - DHD_ERROR(("%s: task terminated\n", __FUNCTION__)); - break; - } if (tsk->flush_ind) { DHD_ERROR(("%s: flushed\n", __FUNCTION__)); - dhdp->logtrace_thr_ts.flush_time = OSL_SYSUPTIME_US(); + dhdp->logtrace_thr_ts.flush_time = OSL_LOCALTIME_NS(); tsk->flush_ind = 0; complete(&tsk->flushed); } } else { DHD_ERROR(("%s: unexpted break\n", __FUNCTION__)); - dhdp->logtrace_thr_ts.unexpected_break_time = OSL_SYSUPTIME_US(); + dhdp->logtrace_thr_ts.unexpected_break_time = OSL_LOCALTIME_NS(); break; } } +exit: complete_and_exit(&tsk->completed, 0); - dhdp->logtrace_thr_ts.complete_time = OSL_SYSUPTIME_US(); + dhdp->logtrace_thr_ts.complete_time = OSL_LOCALTIME_NS(); } #else static void @@ -6079,8 +4295,8 @@ dhd_sendup_info_buf(dhd_pub_t *dhdp, uint8 *msg) /* msg = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|| */ infobuf = (info_buf_payload_hdr_t *)(msg + sizeof(uint32)); - pktsize = ltoh16(infobuf->length) + sizeof(info_buf_payload_hdr_t) + - sizeof(uint32); + pktsize = (uint32)(ltoh16(infobuf->length) + sizeof(info_buf_payload_hdr_t) + + sizeof(uint32)); pkt = PKTGET(dhdp->osh, pktsize, FALSE); if (!pkt) { DHD_ERROR(("%s: skb alloc failed ! not sending event log up.\n", __FUNCTION__)); @@ -6128,6 +4344,7 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) #endif /* DHD_WAKE_STATUS */ DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + BCM_REFERENCE(dump_data); for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) { struct ether_header *eh; @@ -6179,6 +4396,21 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf); + if (ifidx >= DHD_MAX_IFS) { + DHD_ERROR(("%s: ifidx(%d) Out of bound. 
drop packet\n", + __FUNCTION__, ifidx)); + if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) { +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE); +#else + PKTFREE(dhdp->osh, pktbuf, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + } else { + PKTCFREE(dhdp->osh, pktbuf, FALSE); + } + continue; + } + ifp = dhd->iflist[ifidx]; if (ifp == NULL) { DHD_ERROR(("%s: ifp is NULL. drop packet\n", @@ -6282,6 +4514,29 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) } #endif /* MCAST_REGEN */ +#ifdef DHDTCPSYNC_FLOOD_BLK + if (dhd_tcpdata_get_flag(dhdp, pktbuf) == FLAG_SYNC) { + int delta_sec; + int delta_sync; + int sync_per_sec; + u64 curr_time = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC); + ifp->tsync_rcvd ++; + delta_sync = ifp->tsync_rcvd - ifp->tsyncack_txed; + delta_sec = curr_time - ifp->last_sync; + if (delta_sec > 1) { + sync_per_sec = delta_sync/delta_sec; + if (sync_per_sec > TCP_SYNC_FLOOD_LIMIT) { + schedule_work(&ifp->blk_tsfl_work); + DHD_ERROR(("ifx %d TCP SYNC Flood attack suspected! " + "sync recvied %d pkt/sec \n", + ifidx, sync_per_sec)); + } + dhd_reset_tcpsync_info_by_ifp(ifp); + } + + } +#endif /* DHDTCPSYNC_FLOOD_BLK */ + #ifdef DHDTCPACK_SUPPRESS dhd_tcpdata_info_get(dhdp, pktbuf); #endif // endif @@ -6326,7 +4581,16 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) } } #endif /* PCIE_FULL_DONGLE */ - +#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT + if (IS_STA_IFACE(ndev_to_wdev(ifp->net)) && + (ifp->recv_reassoc_evt == TRUE) && (ifp->post_roam_evt == FALSE) && + (dhd_is_4way_msg((char *)(skb->data)) == EAPOL_4WAY_M1)) { + DHD_ERROR(("%s: Reassoc is in progress. 
" + "Drop EAPOL M1 frame\n", __FUNCTION__)); + PKTFREE(dhdp->osh, pktbuf, FALSE); + continue; + } +#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */ /* Get the protocol, maintain skb around eth_type_trans() * The main reason for this hack is for the limitation of * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len' @@ -6338,29 +4602,21 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) */ eth = skb->data; len = skb->len; - dump_data = skb->data; - protocol = (skb->data[12] << 8) | skb->data[13]; + if (protocol == ETHER_TYPE_802_1X) { DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED); #if defined(WL_CFG80211) && defined(WL_WPS_SYNC) wl_handle_wps_states(ifp->net, dump_data, len, FALSE); #endif /* WL_CFG80211 && WL_WPS_SYNC */ - dhd_dump_eapol_4way_message(dhdp, dhd_ifname(dhdp, ifidx), dump_data, FALSE); - } - - if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) { -#ifdef DHD_DHCP_DUMP - dhd_dhcp_dump(dhd_ifname(dhdp, ifidx), dump_data, FALSE); -#endif /* DHD_DHCP_DUMP */ -#ifdef DHD_ICMP_DUMP - dhd_icmp_dump(dhd_ifname(dhdp, ifidx), dump_data, FALSE); -#endif /* DHD_ICMP_DUMP */ +#ifdef DHD_4WAYM4_FAIL_DISCONNECT + if (dhd_is_4way_msg((uint8 *)(skb->data)) == EAPOL_4WAY_M3) { + OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M3_RXED); + } +#endif /* DHD_4WAYM4_FAIL_DISCONNECT */ } -#ifdef DHD_RX_DUMP - dhd_trx_dump(dhd_idx2net(dhdp, ifidx), dump_data, skb->len, FALSE); -#endif /* DHD_RX_DUMP */ + dhd_dump_pkt(dhdp, ifidx, dump_data, len, FALSE, NULL, NULL); skb->protocol = eth_type_trans(skb, skb->dev); @@ -6381,16 +4637,10 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) { bcm_event_msg_u_t evu; - int ret_event; - int event_type; + int ret_event, event_type; + void *pkt_data = skb_mac_header(skb); - ret_event = wl_host_event_get_data( -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) - skb_mac_header(skb), -#else - skb->mac.raw, 
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */ - len, &evu); + ret_event = wl_host_event_get_data(pkt_data, len, &evu); if (ret_event != BCME_OK) { DHD_ERROR(("%s: wl_host_event_get_data err = %d\n", @@ -6423,13 +4673,7 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) } #endif /* SHOW_LOGTRACE */ - ret_event = dhd_wl_host_event(dhd, ifidx, -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) - skb_mac_header(skb), -#else - skb->mac.raw, -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */ - len, &event, &data); + ret_event = dhd_wl_host_event(dhd, ifidx, pkt_data, len, &event, &data); wl_event_to_host_order(&event); if (!tout_ctrl) @@ -6625,25 +4869,13 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) #if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS) dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF); #endif /* BCMPCIE && DHDTCPACK_SUPPRESS */ -#if defined(DHD_LB_RXP) DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); +#if defined(DHD_LB_RXP) netif_receive_skb(skb); - DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); #else /* !defined(DHD_LB_RXP) */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) - DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); netif_rx_ni(skb); +#endif /* defined(DHD_LB_RXP) */ DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); -#else - ulong flags; - DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); - netif_rx(skb); - DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); - local_irq_save(flags); - RAISE_RX_SOFTIRQ(); - local_irq_restore(flags); -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ -#endif /* !defined(DHD_LB_RXP) */ } } } @@ -6791,19 +5023,9 @@ dhd_watchdog_thread(void *data) complete_and_exit(&tsk->completed, 0); } -static void dhd_watchdog( -#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) - struct timer_list *t -#else - ulong data -#endif -) +static void dhd_watchdog(ulong data) { -#if 
LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) - dhd_info_t *dhd = from_timer(dhd, t, timer); -#else dhd_info_t *dhd = (dhd_info_t *)data; -#endif unsigned long flags; if (dhd->pub.dongle_reset) { @@ -6977,9 +5199,6 @@ dhd_rxf_thread(void *data) } if (down_interruptible(&tsk->sema) == 0) { void *skb; -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) - ulong flags; -#endif // endif #ifdef ENABLE_ADAPTIVE_SCHED dhd_sched_policy(dhd_rxf_prio); #endif /* ENABLE_ADAPTIVE_SCHED */ @@ -6999,15 +5218,7 @@ dhd_rxf_thread(void *data) PKTSETNEXT(pub->osh, skb, NULL); bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__); -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) netif_rx_ni(skb); -#else - netif_rx(skb); - local_irq_save(flags); - RAISE_RX_SOFTIRQ(); - local_irq_restore(flags); - -#endif // endif skb = skbnext; } #if defined(WAIT_DEQUEUE) @@ -7150,6 +5361,7 @@ dhd_sched_dpc(dhd_pub_t *dhdp) } return; } else { + dhd_bus_set_dpc_sched_time(dhdp); tasklet_schedule(&dhd->tasklet); } } @@ -7256,7 +5468,6 @@ void dhd_set_scb_probe(dhd_pub_t *dhd) } #endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) static void dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) { @@ -7269,9 +5480,7 @@ dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) struct ethtool_ops dhd_ethtool_ops = { .get_drvinfo = dhd_ethtool_get_drvinfo }; -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */ -#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) static int dhd_ethtool(dhd_info_t *dhd, void *uaddr) { @@ -7381,7 +5590,6 @@ dhd_ethtool(dhd_info_t *dhd, void *uaddr) return 0; } -#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */ static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error) { @@ -7415,12 +5623,17 @@ static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error) dhdp->hang_reason = HANG_REASON_DONGLE_TRAP; #ifdef BCMPCIE } else if 
(dhdp->d3ackcnt_timeout) { - dhdp->hang_reason = HANG_REASON_D3_ACK_TIMEOUT; + dhdp->hang_reason = dhdp->is_sched_error ? + HANG_REASON_D3_ACK_TIMEOUT_SCHED_ERROR : + HANG_REASON_D3_ACK_TIMEOUT; #endif /* BCMPCIE */ } else { - dhdp->hang_reason = HANG_REASON_IOCTL_RESP_TIMEOUT; + dhdp->hang_reason = dhdp->is_sched_error ? + HANG_REASON_IOCTL_RESP_TIMEOUT_SCHED_ERROR : + HANG_REASON_IOCTL_RESP_TIMEOUT; } } + printf("%s\n", info_string); net_os_send_hang_message(net); return TRUE; } @@ -7517,19 +5730,9 @@ dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx) bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__); -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); netif_rx_ni(dhd->monitor_skb); DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); -#else - ulong flags; - DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); - netif_rx(dhd->monitor_skb); - DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); - local_irq_save(flags); - RAISE_RX_SOFTIRQ(); - local_irq_restore(flags); -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ } dhd->monitor_skb = NULL; @@ -7766,13 +5969,9 @@ void dhd_bus_retry_hang_recovery(wlan_bt_handle_t handle) dhd_pub_t *dhdp = (dhd_pub_t *)handle; dhd_info_t *dhd = (dhd_info_t*)dhdp->info; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) - dhdp->hang_was_sent = 0; + dhdp->hang_was_sent = 0; - dhd_os_send_hang_message(&dhd->pub); -#else - DHD_ERROR(("%s: unsupported\n", __FUNCTION__)); -#endif // endif + dhd_os_send_hang_message(&dhd->pub); } EXPORT_SYMBOL(dhd_bus_retry_hang_recovery); @@ -7798,21 +5997,20 @@ static const struct net_device_ops netdev_monitor_ops = }; static void -dhd_add_monitor_if(void *handle, void *event_info, u8 event) +dhd_add_monitor_if(dhd_info_t *dhd) { - dhd_info_t *dhd = handle; struct net_device *dev; char *devname; uint32 scan_suppress = FALSE; int ret = BCME_OK; - if (event != 
DHD_WQ_WORK_IF_ADD) { - DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); return; } - if (!dhd) { - DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + if (dhd->monitor_dev) { + DHD_ERROR(("%s: monitor i/f already exists", __FUNCTION__)); return; } @@ -7836,18 +6034,13 @@ dhd_add_monitor_if(void *handle, void *event_info, u8 event) dev->type = ARPHRD_IEEE80211_RADIOTAP; -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) - dev->hard_start_xmit = dhd_monitor_start; - dev->do_ioctl = dhd_monitor_ioctl; - dev->get_stats = dhd_monitor_get_stats; -#else dev->netdev_ops = &netdev_monitor_ops; -#endif // endif - if (register_netdev(dev)) { + if (register_netdevice(dev)) { DHD_ERROR(("%s, register_netdev failed for %s\n", __FUNCTION__, dev->name)); free_netdev(dev); + return; } if (FW_SUPPORTED((&dhd->pub), monitor)) { @@ -7864,57 +6057,58 @@ dhd_add_monitor_if(void *handle, void *event_info, u8 event) } static void -dhd_del_monitor_if(void *handle, void *event_info, u8 event) +dhd_del_monitor_if(dhd_info_t *dhd) { - dhd_info_t *dhd = handle; - - if (event != DHD_WQ_WORK_IF_DEL) { - DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); - return; - } + int ret = BCME_OK; + uint32 scan_suppress = FALSE; if (!dhd) { DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); return; } - if (dhd->monitor_dev) { - unregister_netdev(dhd->monitor_dev); + if (!dhd->monitor_dev) { + DHD_ERROR(("%s: monitor i/f doesn't exist", __FUNCTION__)); + return; + } -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) - MFREE(dhd->osh, dhd->monitor_dev->priv, DHD_MON_DEV_PRIV_SIZE); - MFREE(dhd->osh, dhd->monitor_dev, sizeof(struct net_device)); -#else - free_netdev(dhd->monitor_dev); -#endif /* 2.6.24 */ + if (FW_SUPPORTED((&dhd->pub), monitor)) { + scan_suppress = FALSE; + /* Unset the SCAN SUPPRESS Flag in the firmware to enable scan */ + ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char 
*)&scan_suppress, + sizeof(scan_suppress), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__, ret)); + } + } + if (dhd->monitor_dev) { + if (dhd->monitor_dev->reg_state == NETREG_UNINITIALIZED) { + free_netdev(dhd->monitor_dev); + } else { + unregister_netdevice(dhd->monitor_dev); + } dhd->monitor_dev = NULL; } } static void -dhd_set_monitor(dhd_pub_t *dhd, int ifidx, int val) +dhd_set_monitor(dhd_pub_t *pub, int ifidx, int val) { - dhd_info_t *info = dhd->info; + dhd_info_t *dhd = pub->info; DHD_TRACE(("%s: val %d\n", __FUNCTION__, val)); - if ((val && info->monitor_dev) || (!val && !info->monitor_dev)) { - DHD_ERROR(("%s: Mismatched params, return\n", __FUNCTION__)); - return; - } - /* Delete monitor */ + dhd_net_if_lock_local(dhd); if (!val) { - info->monitor_type = val; - dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL, DHD_WQ_WORK_IF_DEL, - dhd_del_monitor_if, DHD_WQ_WORK_PRIORITY_LOW); - return; + /* Delete monitor */ + dhd_del_monitor_if(dhd); + } else { + /* Add monitor */ + dhd_add_monitor_if(dhd); } - - /* Add monitor */ - info->monitor_type = val; - dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL, DHD_WQ_WORK_IF_ADD, - dhd_add_monitor_if, DHD_WQ_WORK_PRIORITY_LOW); + dhd->monitor_type = val; + dhd_net_if_unlock_local(dhd); } #endif /* WL_MONITOR */ @@ -8042,6 +6236,7 @@ int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_bu bcmerror = BCME_UNSUPPORTED; goto done; } + bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen); #ifdef WL_MONITOR @@ -8132,14 +6327,12 @@ dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd) } #endif /* defined(WL_WIRELESS_EXT) */ -#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) if (cmd == SIOCETHTOOL) { ret = dhd_ethtool(dhd, (void*)ifr->ifr_data); DHD_PERIM_UNLOCK(&dhd->pub); DHD_OS_WAKE_UNLOCK(&dhd->pub); return ret; } -#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */ if (cmd == SIOCDEVPRIVATE+1) 
{ ret = wl_android_priv_cmd(net, ifr); @@ -8292,9 +6485,7 @@ int trigger_deep_sleep = 0; static int dhd_init_cpufreq_fix(dhd_info_t *dhd) { if (dhd) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) mutex_init(&dhd->cpufreq_fix); -#endif // endif dhd->cpufreq_fix_status = FALSE; } return 0; @@ -8302,9 +6493,7 @@ static int dhd_init_cpufreq_fix(dhd_info_t *dhd) static void dhd_fix_cpu_freq(dhd_info_t *dhd) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) mutex_lock(&dhd->cpufreq_fix); -#endif // endif if (dhd && !dhd->cpufreq_fix_status) { pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000); #ifdef FIX_BUS_MIN_CLOCK @@ -8314,20 +6503,14 @@ static void dhd_fix_cpu_freq(dhd_info_t *dhd) dhd->cpufreq_fix_status = TRUE; } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) mutex_unlock(&dhd->cpufreq_fix); -#endif // endif } static void dhd_rollback_cpu_freq(dhd_info_t *dhd) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) mutex_lock(&dhd ->cpufreq_fix); -#endif // endif if (dhd && dhd->cpufreq_fix_status != TRUE) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) mutex_unlock(&dhd->cpufreq_fix); -#endif // endif return; } @@ -8338,9 +6521,7 @@ static void dhd_rollback_cpu_freq(dhd_info_t *dhd) DHD_ERROR(("pm_qos_add_requests called\n")); dhd->cpufreq_fix_status = FALSE; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) mutex_unlock(&dhd->cpufreq_fix); -#endif // endif } #endif /* FIX_CPU_MIN_CLOCK */ @@ -8380,7 +6561,7 @@ dhd_stop(struct net_device *net) dhd_info_t *dhd = DHD_DEV_INFO(net); DHD_OS_WAKE_LOCK(&dhd->pub); DHD_PERIM_LOCK(&dhd->pub); - printf("%s: Enter %p\n", __FUNCTION__, net); + printf("%s: Enter %s\n", __FUNCTION__, net->name); dhd->pub.rxcnt_timeout = 0; dhd->pub.txcnt_timeout = 0; @@ -8404,16 +6585,19 @@ dhd_stop(struct net_device *net) ifidx = dhd_net2idx(dhd, net); BCM_REFERENCE(ifidx); - /* Set state and stop OS transmissions */ - netif_stop_queue(net); + DHD_ERROR(("%s: ######### dhd_stop called for 
ifidx=%d #########\n", __FUNCTION__, ifidx)); + #if defined(WL_STATIC_IF) && defined(WL_CFG80211) /* If static if is operational, don't reset the chip */ if (IS_CFG80211_STATIC_IF_ACTIVE(cfg)) { - DHD_INFO(("[STATIC_IF] static if operational. Avoiding chip reset!\n")); + DHD_ERROR(("static if operational. skip chip reset.\n")); skip_reset = true; + wl_cfg80211_sta_ifdown(net); goto exit; } #endif /* WL_STATIC_IF && WL_CFG80211 */ + + DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__)); #ifdef WL_CFG80211 /* Disable Runtime PM before interface down */ @@ -8437,12 +6621,19 @@ dhd_stop(struct net_device *net) * when the primary Interface is brought down. [ifconfig wlan0 down] */ if (!dhd_download_fw_on_driverload) { + DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_OFF), ifidx, 0); if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) && (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) { int i; #ifdef WL_CFG80211_P2P_DEV_IF wl_cfg80211_del_p2p_wdev(net); #endif /* WL_CFG80211_P2P_DEV_IF */ +#ifdef DHD_4WAYM4_FAIL_DISCONNECT + dhd_cleanup_m4_state_work(&dhd->pub, ifidx); +#endif /* DHD_4WAYM4_FAIL_DISCONNECT */ +#ifdef DHD_PKTDUMP_ROAM + dhd_dump_pkt_clear(&dhd->pub); +#endif /* DHD_PKTDUMP_ROAM */ dhd_net_if_lock_local(dhd); for (i = 1; i < DHD_MAX_IFS; i++) @@ -8546,25 +6737,32 @@ dhd_stop(struct net_device *net) OLD_MOD_DEC_USE_COUNT; exit: + if (skip_reset == false) { #if defined(WL_WIRELESS_EXT) - if (ifidx == 0) { + if (ifidx == 0) { + wl_iw_down(net, &dhd->pub); + } +#endif /* defined(WL_WIRELESS_EXT) */ #ifdef WL_ESCAN - wl_escan_down(&dhd->pub); -#else - wl_iw_down(&dhd->pub); + if (ifidx == 0) { + wl_escan_down(net, &dhd->pub); + } #endif /* WL_ESCAN */ - } -#endif /* defined(WL_WIRELESS_EXT) */ - if (skip_reset == false) { if (ifidx == 0 && !dhd_download_fw_on_driverload) { #if defined(BT_OVER_SDIO) dhd_bus_put(&dhd->pub, WLAN_MODULE); wl_android_set_wifi_on_flag(FALSE); #else wl_android_wifi_off(net, TRUE); +#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || 
defined(WL_ESCAN) #ifdef WL_EXT_IAPSTA wl_ext_iapsta_dettach_netdev(net, ifidx); -#endif +#endif /* WL_EXT_IAPSTA */ +#ifdef WL_ESCAN + wl_escan_event_dettach(net, &dhd->pub); +#endif /* WL_ESCAN */ + wl_ext_event_dettach_netdev(net, ifidx); +#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */ #endif /* BT_OVER_SDIO */ } #ifdef SUPPORT_DEEP_SLEEP @@ -8580,6 +6778,7 @@ exit: } #endif /* SUPPORT_DEEP_SLEEP */ dhd->pub.hang_was_sent = 0; + dhd->pub.hang_was_pending = 0; /* Clear country spec for for built-in type driver */ if (!dhd_download_fw_on_driverload) { @@ -8603,7 +6802,7 @@ exit: DHD_OS_WAKE_LOCK_DESTROY(dhd); dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT; } - printf("%s: Exit\n", __FUNCTION__); + printf("%s: Exit %s\n", __FUNCTION__, net->name); mutex_unlock(&dhd->pub.ndev_op_sync); return 0; @@ -8646,7 +6845,6 @@ dhd_open(struct net_device *net) #endif #if defined(WL_EXT_IAPSTA) && defined(ISAM_PREINIT) int bytes_written = 0; - struct dhd_conf *conf; #endif mutex_lock(&dhd->pub.ndev_op_sync); @@ -8666,7 +6864,7 @@ dhd_open(struct net_device *net) } } - printf("%s: Enter %p\n", __FUNCTION__, net); + printf("%s: Enter %s\n", __FUNCTION__, net->name); DHD_MUTEX_LOCK(); /* Init wakelock */ if (!dhd_download_fw_on_driverload) { @@ -8695,15 +6893,18 @@ dhd_open(struct net_device *net) DHD_PERIM_LOCK(&dhd->pub); dhd->pub.dongle_trap_occured = 0; dhd->pub.hang_was_sent = 0; + dhd->pub.hang_was_pending = 0; dhd->pub.hang_reason = 0; dhd->pub.iovar_timeout_occured = 0; #ifdef PCIE_FULL_DONGLE dhd->pub.d3ack_timeout_occured = 0; dhd->pub.livelock_occured = 0; + dhd->pub.pktid_audit_failed = 0; #endif /* PCIE_FULL_DONGLE */ -#ifdef DHD_MAP_LOGGING + dhd->pub.iface_op_failed = 0; + dhd->pub.scan_timeout_occurred = 0; + dhd->pub.scan_busy_occurred = 0; dhd->pub.smmu_fault_occurred = 0; -#endif /* DHD_MAP_LOGGING */ #ifdef DHD_LOSSLESS_ROAMING dhd->pub.dequeue_prec_map = ALLPRIO; @@ -8743,9 +6944,16 @@ dhd_open(struct net_device *net) atomic_set(&dhd->pend_8021x_cnt, 0); 
if (!dhd_download_fw_on_driverload) { DHD_ERROR(("\n%s\n", dhd_version)); + DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_ON), ifidx, 0); +#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN) + wl_ext_event_attach_netdev(net, ifidx, dhd->iflist[ifidx]->bssidx); +#ifdef WL_ESCAN + wl_escan_event_attach(net, &dhd->pub); +#endif /* WL_ESCAN */ #ifdef WL_EXT_IAPSTA wl_ext_iapsta_attach_netdev(net, ifidx, dhd->iflist[ifidx]->bssidx); -#endif +#endif /* WL_EXT_IAPSTA */ +#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */ #if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME) g_first_broadcast_scan = TRUE; #endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */ @@ -8884,9 +7092,6 @@ dhd_open(struct net_device *net) skb_queue_head_init(&dhd->rx_napi_queue); } /* rx_napi_netdev == NULL */ #endif /* DHD_LB_RXP */ -#ifdef DHD_LB_IRQSET - dhd_irq_set_affinity(&dhd->pub); -#endif /* DHD_LB_IRQSET */ #if defined(DHD_LB_TXP) /* Use the variant that uses locks */ @@ -8915,6 +7120,15 @@ dhd_open(struct net_device *net) #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ } +#if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON) + dhd_bus_aspm_enable_rc_ep(dhd->pub.bus, TRUE); +#endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */ +#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON) + dhd_irq_set_affinity(&dhd->pub, cpumask_of(0)); +#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */ +#ifdef DHD_LB_IRQSET + dhd_irq_set_affinity(&dhd->pub, dhd->cpumask_primary); +#endif /* DHD_LB_IRQSET */ #if defined(ARGOS_NOTIFY_CB) argos_register_notifier_init(net); #endif // endif @@ -8923,28 +7137,30 @@ dhd_open(struct net_device *net) #endif /* NUM_SCB_MAX_PROBE */ #endif /* WL_CFG80211 */ #if defined(WL_WIRELESS_EXT) + if (unlikely(wl_iw_up(net, &dhd->pub))) { + DHD_ERROR(("%s: failed to bring up wext\n", __FUNCTION__)); + ret = -1; + goto exit; + } +#endif #ifdef WL_ESCAN if (unlikely(wl_escan_up(net, &dhd->pub))) { DHD_ERROR(("%s: failed to bring up escan\n", __FUNCTION__)); ret = 
-1; goto exit; } -#endif -#endif -#if defined(WL_EXT_IAPSTA) && defined(ISAM_PREINIT) +#endif /* WL_ESCAN */ +#if defined(ISAM_PREINIT) if (!dhd_download_fw_on_driverload) { - conf = dhd_get_conf(net); - if (conf) { - wl_android_ext_priv_cmd(net, conf->isam_init, 0, &bytes_written); - wl_android_ext_priv_cmd(net, conf->isam_config, 0, &bytes_written); - wl_android_ext_priv_cmd(net, conf->isam_enable, 0, &bytes_written); + if (dhd->pub.conf) { + wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_init, 0, &bytes_written); + wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_config, 0, &bytes_written); + wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_enable, 0, &bytes_written); } } #endif } - /* Allow transmit calls */ - netif_start_queue(net); dhd->pub.up = 1; if (wl_event_enable) { @@ -8961,23 +7177,64 @@ dhd_open(struct net_device *net) dhd->pub.logtrace_pkt_sendup = false; } - OLD_MOD_INC_USE_COUNT; + OLD_MOD_INC_USE_COUNT; + +#ifdef BCMDBGFS + dhd_dbgfs_init(&dhd->pub); +#endif // endif + +exit: + mutex_unlock(&dhd->pub.ndev_op_sync); + if (ret) { + dhd_stop(net); + } + + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + DHD_MUTEX_UNLOCK(); + + printf("%s: Exit %s ret=%d\n", __FUNCTION__, net->name, ret); + return ret; +} + +/* + * ndo_start handler for primary ndev + */ +static int +dhd_pri_open(struct net_device *net) +{ + s32 ret; + + ret = dhd_open(net); + if (unlikely(ret)) { + DHD_ERROR(("Failed to open primary dev ret %d\n", ret)); + return ret; + } + + /* Allow transmit calls */ + netif_start_queue(net); + DHD_ERROR(("[%s] tx queue started\n", net->name)); + return ret; +} -#ifdef BCMDBGFS - dhd_dbgfs_init(&dhd->pub); -#endif // endif +/* + * ndo_stop handler for primary ndev + */ +static int +dhd_pri_stop(struct net_device *net) +{ + s32 ret; -exit: - mutex_unlock(&dhd->pub.ndev_op_sync); - if (ret) { - dhd_stop(net); - } + /* stop tx queue */ + netif_stop_queue(net); + DHD_ERROR(("[%s] tx queue stopped\n", net->name)); - 
DHD_PERIM_UNLOCK(&dhd->pub); - DHD_OS_WAKE_UNLOCK(&dhd->pub); - DHD_MUTEX_UNLOCK(); + ret = dhd_stop(net); + if (unlikely(ret)) { + DHD_ERROR(("dhd_stop failed: %d\n", ret)); + return ret; + } - printf("%s: Exit ret=%d\n", __FUNCTION__, ret); return ret; } @@ -9002,7 +7259,7 @@ dhd_static_if_open(struct net_device *net) goto done; } - DHD_INFO(("[%s][STATIC_IF] Enter \n", net->name)); + printf("%s: Enter %s\n", __FUNCTION__, net->name); /* Ensure fw is initialized. If it is already initialized, * dhd_open will return success. */ @@ -9018,6 +7275,7 @@ dhd_static_if_open(struct net_device *net) netif_start_queue(net); } done: + printf("%s: Exit %s ret=%d\n", __FUNCTION__, net->name, ret); return ret; } @@ -9027,8 +7285,9 @@ dhd_static_if_stop(struct net_device *net) struct bcm_cfg80211 *cfg; struct net_device *primary_netdev = NULL; int ret = BCME_OK; + dhd_info_t *dhd = DHD_DEV_INFO(net); - DHD_INFO(("[%s][STATIC_IF] Enter \n", net->name)); + printf("%s: Enter %s\n", __FUNCTION__, net->name); /* Ensure queue is disabled */ netif_tx_disable(net); @@ -9041,13 +7300,21 @@ dhd_static_if_stop(struct net_device *net) ret = wl_cfg80211_static_if_close(net); + if (dhd->pub.up == 0) { + /* If fw is down, return */ + DHD_ERROR(("fw down\n")); + return BCME_OK; + } /* If STA iface is not in operational, invoke dhd_close from this * context. 
*/ primary_netdev = bcmcfg_to_prmry_ndev(cfg); if (!(primary_netdev->flags & IFF_UP)) { ret = dhd_stop(primary_netdev); + } else { + DHD_ERROR(("Skipped dhd_stop, as sta is operational\n")); } + printf("%s: Exit %s ret=%d\n", __FUNCTION__, net->name, ret); return ret; } @@ -9381,8 +7648,28 @@ dhd_update_iflist_info(dhd_pub_t *dhdp, struct net_device *ndev, int ifidx, } else if (ndev->name[0] != '\0') { strlcpy(ifp->dngl_name, ndev->name, IFNAMSIZ); } - if (mac != NULL) - memcpy_s(&ifp->mac_addr, ETHER_ADDR_LEN, mac, ETHER_ADDR_LEN); + if (mac != NULL) { + (void)memcpy_s(&ifp->mac_addr, ETHER_ADDR_LEN, mac, ETHER_ADDR_LEN); + } +#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN) + wl_ext_event_attach_netdev(ndev, ifidx, bssidx); +#ifdef WL_ESCAN + wl_escan_event_attach(ndev, dhdp); +#endif /* WL_ESCAN */ +#ifdef WL_EXT_IAPSTA + wl_ext_iapsta_ifadding(ndev, ifidx); + wl_ext_iapsta_attach_netdev(ndev, ifidx, bssidx); + wl_ext_iapsta_attach_name(ndev, ifidx); +#endif /* WL_EXT_IAPSTA */ + } else if (if_state == NDEV_STATE_FW_IF_DELETED) { +#ifdef WL_EXT_IAPSTA + wl_ext_iapsta_dettach_netdev(ndev, cur_idx); +#endif /* WL_EXT_IAPSTA */ +#ifdef WL_ESCAN + wl_escan_event_dettach(ndev, dhdp); +#endif /* WL_ESCAN */ + wl_ext_event_dettach_netdev(ndev, cur_idx); +#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */ } DHD_INFO(("[STATIC_IF] ifp ptr updated for ifidx:%d curidx:%d if_state:%d\n", ifidx, cur_idx, if_state)); @@ -9510,6 +7797,20 @@ dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name, DHD_CUMM_CTR_INIT(&ifp->cumm_ctr); +#ifdef DHD_4WAYM4_FAIL_DISCONNECT + INIT_DELAYED_WORK(&ifp->m4state_work, dhd_m4_state_handler); +#endif /* DHD_4WAYM4_FAIL_DISCONNECT */ + +#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT + ifp->recv_reassoc_evt = FALSE; + ifp->post_roam_evt = FALSE; +#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */ + +#ifdef DHDTCPSYNC_FLOOD_BLK + INIT_WORK(&ifp->blk_tsfl_work, dhd_blk_tsfl_handler); + dhd_reset_tcpsync_info_by_ifp(ifp); +#endif /* 
DHDTCPSYNC_FLOOD_BLK */ + return ifp->net; fail: @@ -9536,16 +7837,19 @@ fail: } static void -dhd_cleanup_ifp(dhd_pub_t *dhdp, s32 ifidx) +dhd_cleanup_ifp(dhd_pub_t *dhdp, dhd_if_t *ifp) { - dhd_if_t *ifp; - dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info; #ifdef PCIE_FULL_DONGLE + s32 ifidx = 0; if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; #endif /* PCIE_FULL_DONGLE */ - ifp = dhdinfo->iflist[ifidx]; if (ifp != NULL) { + if ((ifp->idx < 0) || (ifp->idx >= DHD_MAX_IFS)) { + DHD_ERROR(("Wrong idx:%d \n", ifp->idx)); + ASSERT(0); + return; + } #ifdef DHD_L2_FILTER bcm_l2_filter_arp_table_update(dhdpub->osh, ifp->phnd_arp_table, TRUE, NULL, FALSE, dhdpub->tickcnt); @@ -9556,6 +7860,7 @@ dhd_cleanup_ifp(dhd_pub_t *dhdp, s32 ifidx) dhd_if_del_sta_list(ifp); #ifdef PCIE_FULL_DONGLE /* Delete flowrings of virtual interface */ + ifidx = ifp->idx; if ((ifidx != 0) && (if_flow_lkup[ifidx].role != WLC_E_IF_ROLE_AP)) { dhd_flow_rings_delete(dhdp, ifidx); } @@ -9577,7 +7882,7 @@ dhd_cleanup_if(struct net_device *net) return; } - dhd_cleanup_ifp(dhdp, ifp->idx); + dhd_cleanup_ifp(dhdp, ifp); } /* unregister and free the the net_device interface associated with the indexed @@ -9590,11 +7895,19 @@ dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock) dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info; dhd_if_t *ifp; unsigned long flags; - u32 timeout; + long timeout; ifp = dhdinfo->iflist[ifidx]; if (ifp != NULL) { +#ifdef DHD_4WAYM4_FAIL_DISCONNECT + cancel_delayed_work_sync(&ifp->m4state_work); +#endif /* DHD_4WAYM4_FAIL_DISCONNECT */ + +#ifdef DHDTCPSYNC_FLOOD_BLK + cancel_work_sync(&ifp->blk_tsfl_work); +#endif /* DHDTCPSYNC_FLOOD_BLK */ + #ifdef WL_STATIC_IF /* static IF will be handled in detach */ if (ifp->static_if) { @@ -9644,16 +7957,22 @@ dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock) unregister_netdev(ifp->net); else unregister_netdevice(ifp->net); +#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN) 
#ifdef WL_EXT_IAPSTA - wl_ext_iapsta_dettach_netdev(ifp->net, ifidx); -#endif + wl_ext_iapsta_dettach_netdev(ifp->net, ifidx); +#endif /* WL_EXT_IAPSTA */ +#ifdef WL_ESCAN + wl_escan_event_dettach(ifp->net, dhdpub); +#endif /* WL_ESCAN */ + wl_ext_event_dettach_netdev(ifp->net, ifidx); +#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */ } ifp->net = NULL; DHD_GENERAL_LOCK(dhdpub, flags); ifp->del_in_progress = false; DHD_GENERAL_UNLOCK(dhdpub, flags); } - dhd_cleanup_ifp(dhdpub, ifidx); + dhd_cleanup_ifp(dhdpub, ifp); DHD_CUMM_CTR_INIT(&ifp->cumm_ctr); MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp)); @@ -9663,10 +7982,9 @@ dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock) return BCME_OK; } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) static struct net_device_ops dhd_ops_pri = { - .ndo_open = dhd_open, - .ndo_stop = dhd_stop, + .ndo_open = dhd_pri_open, + .ndo_stop = dhd_pri_stop, .ndo_get_stats = dhd_get_stats, #ifdef DHD_PCIE_NATIVE_RUNTIMEPM .ndo_do_ioctl = dhd_ioctl_entry_wrapper, @@ -9703,7 +8021,6 @@ static struct net_device_ops dhd_ops_virt = { .ndo_set_multicast_list = dhd_set_multicast_list, #endif // endif }; -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */ int dhd_os_write_file_posn(void *fp, unsigned long *posn, void *buf, @@ -9714,7 +8031,7 @@ dhd_os_write_file_posn(void *fp, unsigned long *posn, void *buf, if (!fp || !buf || buflen == 0) return -1; - if (vfs_write((struct file *)fp, buf, buflen, &wr_posn) < 0) + if (compat_vfs_write((struct file *)fp, buf, buflen, &wr_posn) < 0) return -1; *posn = wr_posn; @@ -9950,43 +8267,6 @@ fail1: return error; } /* dhd_init_static_strs_array */ -static int -dhd_trace_open_proc(struct inode *inode, struct file *file) -{ - return single_open(file, 0, NULL); -} - -ssize_t -dhd_trace_read_proc(struct file *file, char __user *buffer, size_t tt, loff_t *loff) -{ - trace_buf_info_t *trace_buf_info; - int ret = BCME_ERROR; - - ASSERT(g_dhd_pub); - mutex_lock(&g_dhd_pub->dhd_trace_lock); - 
trace_buf_info = (trace_buf_info_t *)MALLOC(g_dhd_pub->osh, - sizeof(trace_buf_info_t)); - if (trace_buf_info) { - dhd_get_read_buf_ptr(g_dhd_pub, trace_buf_info); - if (copy_to_user(buffer, (void*)trace_buf_info->buf, MIN(trace_buf_info->size, tt))) - { - ret = -EFAULT; - goto exit; - } - if (trace_buf_info->availability == BUF_NOT_AVAILABLE) - ret = BUF_NOT_AVAILABLE; - else - ret = trace_buf_info->size; - } else - DHD_ERROR(("Memory allocation Failed\n")); - -exit: - if (trace_buf_info) { - MFREE(g_dhd_pub->osh, trace_buf_info, sizeof(trace_buf_info_t)); - } - mutex_unlock(&g_dhd_pub->dhd_trace_lock); - return ret; -} #endif /* SHOW_LOGTRACE */ #ifdef DHD_ERPOM @@ -10006,11 +8286,7 @@ dhd_wlan_power_off_handler(void *handler, unsigned char reason) /* save core dump to a file */ if (dhdp->memdump_enabled) { #ifdef DHD_SSSR_DUMP - if (dhdp->sssr_inited) { - dhdp->info->no_wq_sssrdump = TRUE; - dhd_bus_sssr_dump(dhdp); - dhdp->info->no_wq_sssrdump = FALSE; - } + dhdp->collect_sssr = TRUE; #endif /* DHD_SSSR_DUMP */ dhdp->memdump_type = DUMP_TYPE_DUE_TO_BT; dhd_bus_mem_dump(dhdp); @@ -10152,7 +8428,6 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen #endif /* BT_OVER_SDIO */ g_dhd_pub = &dhd->pub; - DHD_INFO(("%s: g_dhd_pub %p\n", __FUNCTION__, g_dhd_pub)); #ifdef DHD_DEBUG dll_init(&(dhd->pub.mw_list_head)); @@ -10188,6 +8463,7 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen /* Link to bus module */ dhd->pub.bus = bus; dhd->pub.hdrlen = bus_hdrlen; + dhd->pub.txoff = FALSE; /* dhd_conf must be attached after linking dhd to dhd->pub.info, * because dhd_detech will check .info is NULL or not. 
@@ -10232,11 +8508,7 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen /* initialize the l2_filter_cnt */ dhd->pub.l2_filter_cnt = 0; #endif // endif -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) - net->open = NULL; -#else net->netdev_ops = NULL; -#endif // endif mutex_init(&dhd->dhd_iovar_mutex); sema_init(&dhd->proto_sem, 1); @@ -10272,7 +8544,6 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen init_waitqueue_head(&dhd->dmaxfer_wait); init_waitqueue_head(&dhd->pub.tx_completion_wait); dhd->pub.dhd_bus_busy_state = 0; - /* Initialize the spinlocks */ spin_lock_init(&dhd->sdlock); spin_lock_init(&dhd->txqlock); @@ -10281,9 +8552,6 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen #ifdef WLTDLS spin_lock_init(&dhd->pub.tdls_lock); #endif /* WLTDLS */ -#if defined(PCIE_FULL_DONGLE) - spin_lock_init(&dhd->backplane_access_lock); -#endif /* defined(PCIE_FULL_DONGLE) */ #if defined(RXFRAME_THREAD) dhd->rxthread_enabled = TRUE; #endif /* defined(RXFRAME_THREAD) */ @@ -10304,13 +8572,11 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake"); #endif /* CONFIG_HAS_WAKELOCK */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) mutex_init(&dhd->dhd_net_if_mutex); mutex_init(&dhd->dhd_suspend_mutex); #if defined(PKT_FILTER_SUPPORT) && defined(APF) mutex_init(&dhd->dhd_apf_mutex); #endif /* PKT_FILTER_SUPPORT && APF */ -#endif // endif dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT; /* Attach and link in the protocol */ @@ -10332,27 +8598,33 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen dhd_state |= DHD_ATTACH_STATE_CFG80211; #endif // endif -#if defined(WL_WIRELESS_EXT) +#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN) + if (wl_ext_event_attach(net, &dhd->pub) != 0) { + DHD_ERROR(("wl_ext_event_attach failed\n")); + goto fail; + } #ifdef WL_ESCAN + /* Attach and link in the escan */ if (wl_escan_attach(net, &dhd->pub) != 
0) { DHD_ERROR(("wl_escan_attach failed\n")); goto fail; } -#else +#endif /* WL_ESCAN */ +#ifdef WL_EXT_IAPSTA + if (wl_ext_iapsta_attach(&dhd->pub) != 0) { + DHD_ERROR(("wl_ext_iapsta_attach failed\n")); + goto fail; +} +#endif /* WL_EXT_IAPSTA */ +#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */ +#if defined(WL_WIRELESS_EXT) /* Attach and link in the iw */ if (wl_iw_attach(net, &dhd->pub) != 0) { DHD_ERROR(("wl_iw_attach failed\n")); goto fail; } dhd_state |= DHD_ATTACH_STATE_WL_ATTACH; -#endif /* WL_ESCAN */ #endif /* defined(WL_WIRELESS_EXT) */ -#ifdef WL_EXT_IAPSTA - if (wl_ext_iapsta_attach(&dhd->pub) != 0) { - DHD_ERROR(("wl_ext_iapsta_attach failed\n")); - goto fail; - } -#endif #ifdef SHOW_LOGTRACE ret = dhd_init_logstrs_array(osh, &dhd->event_data); @@ -10364,13 +8636,12 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen } #endif /* SHOW_LOGTRACE */ -#ifdef DEBUGABILITY /* attach debug if support */ if (dhd_os_dbg_attach(&dhd->pub)) { DHD_ERROR(("%s debug module attach failed\n", __FUNCTION__)); goto fail; } - +#ifdef DEBUGABILITY #if defined(SHOW_LOGTRACE) && defined(DBG_RING_LOG_INIT_DEFAULT) /* enable verbose ring to support dump_trace_buf */ dhd_os_start_logging(&dhd->pub, FW_VERBOSE_RING_NAME, 3, 0, 0, 0); @@ -10384,9 +8655,20 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen #endif /* DBG_PKT_MON */ #endif /* DEBUGABILITY */ +#ifdef DHD_STATUS_LOGGING + dhd->pub.statlog = dhd_attach_statlog(&dhd->pub, MAX_STATLOG_ITEM, + MAX_STATLOG_REQ_ITEM, STATLOG_LOGBUF_LEN); + if (dhd->pub.statlog == NULL) { + DHD_ERROR(("%s: alloc statlog failed\n", __FUNCTION__)); + } +#endif /* DHD_STATUS_LOGGING */ + #ifdef DHD_LOG_DUMP dhd_log_dump_init(&dhd->pub); #endif /* DHD_LOG_DUMP */ +#ifdef DHD_PKTDUMP_ROAM + dhd_dump_pkt_init(&dhd->pub); +#endif /* DHD_PKTDUMP_ROAM */ if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) { DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA)); @@ -10410,13 +8692,7 @@ dhd_attach(osl_t 
*osh, struct dhd_bus *bus, uint bus_hdrlen #ifndef BCMDBUS /* Set up the watchdog timer */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) - timer_setup(&dhd->timer, dhd_watchdog, 0); -#else - init_timer(&dhd->timer); - dhd->timer.data = (ulong)dhd; - dhd->timer.function = dhd_watchdog; -#endif + init_timer_compat(&dhd->timer, dhd_watchdog, dhd); dhd->default_wd_interval = dhd_watchdog_ms; if (dhd_watchdog_prio >= 0) { @@ -10432,9 +8708,9 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen #ifdef SHOW_LOGTRACE skb_queue_head_init(&dhd->evt_trace_queue); - if (proc_create("dhd_trace", S_IRUSR, NULL, &proc_file_fops) == NULL) - DHD_ERROR(("Failed to create /proc/dhd_trace procfs interface\n")); - mutex_init(&dhd->pub.dhd_trace_lock); + + /* Create ring proc entries */ + dhd_dbg_ring_proc_create(&dhd->pub); #endif /* SHOW_LOGTRACE */ /* Set up the bottom half handler */ @@ -10495,6 +8771,7 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen } #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd); + INIT_WORK(&dhd->dhd_hang_process_work, dhd_hang_process); #ifdef DEBUG_CPU_FREQ dhd->new_freq = alloc_percpu(int); dhd->freq_trans.notifier_call = dhd_cpufreq_notifier; @@ -10551,6 +8828,11 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen #endif /* DHD_LB_TXP_DEFAULT_ENAB */ #endif /* DHD_LB_TXP */ +#ifdef DHD_LB_RXP + /* Trun ON the feature by default */ + atomic_set(&dhd->lb_rxp_active, 1); +#endif /* DHD_LB_RXP */ + /* Initialize the Load Balancing Tasklets and Napi object */ #if defined(DHD_LB_TXC) tasklet_init(&dhd->tx_compl_tasklet, @@ -10558,7 +8840,6 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn); DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__)); #endif /* DHD_LB_TXC */ - #if defined(DHD_LB_RXC) tasklet_init(&dhd->rx_compl_tasklet, dhd_lb_rx_compl_handler, 
(ulong)(&dhd->pub)); @@ -10586,11 +8867,21 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen dhd_state |= DHD_ATTACH_STATE_LB_ATTACH_DONE; #endif /* DHD_LB */ +#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR) + INIT_WORK(&dhd->axi_error_dispatcher_work, dhd_axi_error_dispatcher_fn); +#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */ + #if defined(BCMPCIE) dhd->pub.extended_trap_data = MALLOCZ(osh, BCMPCIE_EXT_TRAP_DATA_MAXLEN); if (dhd->pub.extended_trap_data == NULL) { DHD_ERROR(("%s: Failed to alloc extended_trap_data\n", __FUNCTION__)); } +#ifdef DNGL_AXI_ERROR_LOGGING + dhd->pub.axi_err_dump = MALLOCZ(osh, sizeof(dhd_axi_error_dump_t)); + if (dhd->pub.axi_err_dump == NULL) { + DHD_ERROR(("%s: Failed to alloc axi_err_dump\n", __FUNCTION__)); + } +#endif /* DNGL_AXI_ERROR_LOGGING */ #endif /* BCMPCIE && ETD */ #ifdef SHOW_LOGTRACE @@ -10631,6 +8922,8 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen } #endif /* DHD_DUMP_MNGR */ #ifdef DHD_FW_COREDUMP + /* Set memdump default values */ + dhd->pub.memdump_enabled = DUMP_MEMFILE_BUGON; /* Check the memdump capability */ dhd_get_memdump_info(&dhd->pub); #endif /* DHD_FW_COREDUMP */ @@ -10988,20 +9281,37 @@ dhd_bus_start(dhd_pub_t *dhdp) DHD_TRACE(("Enter %s:\n", __FUNCTION__)); dhdp->dongle_trap_occured = 0; +#ifdef DHD_SSSR_DUMP + /* Flag to indicate sssr dump is collected */ + dhdp->sssr_dump_collected = 0; +#endif /* DHD_SSSR_DUMP */ dhdp->iovar_timeout_occured = 0; #ifdef PCIE_FULL_DONGLE dhdp->d3ack_timeout_occured = 0; dhdp->livelock_occured = 0; + dhdp->pktid_audit_failed = 0; #endif /* PCIE_FULL_DONGLE */ -#ifdef DHD_MAP_LOGGING + dhd->pub.iface_op_failed = 0; + dhd->pub.scan_timeout_occurred = 0; + dhd->pub.scan_busy_occurred = 0; + /* Clear induced error during initialize */ + dhd->pub.dhd_induce_error = DHD_INDUCE_ERROR_CLEAR; + + /* set default value for now. 
Will be updated again in dhd_preinit_ioctls() + * after querying FW + */ + dhdp->event_log_max_sets = NUM_EVENT_LOG_SETS; + dhdp->event_log_max_sets_queried = FALSE; dhdp->smmu_fault_occurred = 0; -#endif /* DHD_MAP_LOGGING */ +#ifdef DNGL_AXI_ERROR_LOGGING + dhdp->axi_error = FALSE; +#endif /* DNGL_AXI_ERROR_LOGGING */ DHD_PERIM_LOCK(dhdp); /* try to download image and nvram to the dongle */ if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) { /* Indicate FW Download has not yet done */ - dhd->pub.fw_download_done = FALSE; + dhd->pub.fw_download_status = FW_DOWNLOAD_IN_PROGRESS; DHD_INFO(("%s download fw %s, nv %s, conf %s\n", __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path)); #if defined(DHD_DEBUG) && defined(BCMSDIO) @@ -11019,7 +9329,7 @@ dhd_bus_start(dhd_pub_t *dhdp) return ret; } /* Indicate FW Download has succeeded */ - dhd->pub.fw_download_done = TRUE; + dhd->pub.fw_download_status = FW_DOWNLOAD_DONE; } if (dhd->pub.busstate != DHD_BUS_LOAD) { DHD_PERIM_UNLOCK(dhdp); @@ -11296,6 +9606,10 @@ int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event) uint32 reason = ntoh32(event->reason); unsigned long flags; + /* No handling needed for peer discovered reason */ + if (reason == WLC_E_TDLS_PEER_DISCOVERED) { + return BCME_ERROR; + } if (reason == WLC_E_TDLS_PEER_CONNECTED) connect = TRUE; else if (reason == WLC_E_TDLS_PEER_DISCONNECTED) @@ -11490,6 +9804,7 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) uint32 buf_key_b4_m4 = 1; uint8 msglen; eventmsgs_ext_t *eventmask_msg = NULL; + uint32 event_log_max_sets = 0; char* iov_buf = NULL; int ret2 = 0; uint32 wnm_cap = 0; @@ -11527,10 +9842,6 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) uint32 cac = 1; #endif /* SUPPORT_SET_CAC */ -#if defined(DHD_NON_DMA_M2M_CORRUPTION) - dhd_pcie_dmaxfer_lpbk_t pcie_dmaxfer_lpbk; -#endif /* DHD_NON_DMA_M2M_CORRUPTION */ - #ifdef DHD_ENABLE_LPC uint32 lpc = 1; #endif /* DHD_ENABLE_LPC */ @@ -11549,7 +9860,7 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #endif /* 
ENABLE_BCN_LI_BCN_WAKEUP */ uint retry_max = CUSTOM_ASSOC_RETRY_MAX; #if defined(ARP_OFFLOAD_SUPPORT) - int arpoe = 1; + int arpoe = 0; #endif // endif int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME; int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME; @@ -11648,6 +9959,12 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #ifdef BCMPCIE_OOB_HOST_WAKE uint32 hostwake_oob = 0; #endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef EVENT_LOG_RATE_HC + /* threshold number of lines per second */ +#define EVENT_LOG_RATE_HC_THRESHOLD 1000 + uint32 event_log_rate_hc = EVENT_LOG_RATE_HC_THRESHOLD; +#endif /* EVENT_LOG_RATE_HC */ + wl_wlc_version_t wlc_ver; #ifdef PKT_FILTER_SUPPORT dhd_pkt_filter_enable = TRUE; @@ -11655,16 +9972,18 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) dhd->apf_set = FALSE; #endif /* APF */ #endif /* PKT_FILTER_SUPPORT */ -#ifdef WLTDLS - dhd->tdls_enable = FALSE; - dhd_tdls_set_mode(dhd, false); -#endif /* WLTDLS */ dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM; #ifdef ENABLE_MAX_DTIM_IN_SUSPEND dhd->max_dtim_enable = TRUE; #else dhd->max_dtim_enable = FALSE; #endif /* ENABLE_MAX_DTIM_IN_SUSPEND */ + dhd->disable_dtim_in_suspend = FALSE; +#ifdef SUPPORT_SET_TID + dhd->tid_mode = SET_TID_OFF; + dhd->target_uid = 0; + dhd->target_tid = 0; +#endif /* SUPPORT_SET_TID */ DHD_TRACE(("Enter %s\n", __FUNCTION__)); #ifdef DHDTCPACK_SUPPRESS @@ -11677,6 +9996,26 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) dhd->dhd_cflags &= ~WLAN_PLAT_AP_FLAG; #endif /* CUSTOM_COUNTRY_CODE && (CUSTOMER_HW2 || BOARD_HIKEY) */ + /* query for 'ver' to get version info from firmware */ + memset(buf, 0, sizeof(buf)); + ptr = buf; + ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE); + if (ret < 0) + DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret)); + else { + bcmstrtok(&ptr, "\n", 0); + /* Print fw version info */ + strncpy(fw_version, buf, FW_VER_STR_LEN); + fw_version[FW_VER_STR_LEN-1] = '\0'; + } + + /* Set op_mode as MFG_MODE if WLTEST is present in "wl ver" */ + if 
(strstr(fw_version, "WLTEST") != NULL) { + DHD_ERROR(("%s: wl ver has WLTEST, setting op_mode as DHD_FLAG_MFG_MODE\n", + __FUNCTION__)); + op_mode = DHD_FLAG_MFG_MODE; + } + if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) || (op_mode == DHD_FLAG_MFG_MODE)) { dhd->op_mode = DHD_FLAG_MFG_MODE; @@ -11708,10 +10047,43 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) } } #endif /* BCMPCIE_OOB_HOST_WAKE */ + +#ifdef DNGL_AXI_ERROR_LOGGING + ret = dhd_iovar(dhd, 0, "axierror_logbuf_addr", NULL, 0, (char *)&dhd->axierror_logbuf_addr, + sizeof(dhd->axierror_logbuf_addr), FALSE); + if (ret < 0) { + DHD_ERROR(("%s: axierror_logbuf_addr IOVAR not present, proceed\n", __FUNCTION__)); + dhd->axierror_logbuf_addr = 0; + } else { + DHD_ERROR(("%s: axierror_logbuf_addr : 0x%x\n", __FUNCTION__, + dhd->axierror_logbuf_addr)); + } +#endif /* DNGL_AXI_ERROR_LOGGING */ + +#ifdef EVENT_LOG_RATE_HC + ret = dhd_iovar(dhd, 0, "event_log_rate_hc", (char *)&event_log_rate_hc, + sizeof(event_log_rate_hc), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s event_log_rate_hc set failed %d\n", __FUNCTION__, ret)); + } else { + DHD_ERROR(("%s event_log_rate_hc set with threshold:%d\n", __FUNCTION__, + event_log_rate_hc)); + } +#endif /* EVENT_LOG_RATE_HC */ + #ifdef GET_CUSTOM_MAC_ENABLE + memset(hw_ether, 0, sizeof(hw_ether)); ret = wifi_platform_get_mac_addr(dhd->info->adapter, hw_ether); +#ifdef GET_CUSTOM_MAC_FROM_CONFIG + if (!memcmp(ðer_null, &dhd->conf->hw_ether, ETHER_ADDR_LEN)) { + ret = 0; + } else +#endif if (!ret) { memset(buf, 0, sizeof(buf)); +#ifdef GET_CUSTOM_MAC_FROM_CONFIG + memcpy(hw_ether, &dhd->conf->hw_ether, sizeof(dhd->conf->hw_ether)); +#endif bcopy(hw_ether, ea_addr.octet, sizeof(struct ether_addr)); bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf)); ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0); @@ -11720,14 +10092,9 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) bcm_mkiovar("hw_ether", hw_ether, sizeof(hw_ether), buf, 
sizeof(buf)); ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0); if (ret) { - int i; DHD_ERROR(("%s: can't set MAC address MAC="MACDBG", error=%d\n", __FUNCTION__, MAC2STRDBG(hw_ether), ret)); - for (i=0; iconf->sd_f2_blocksize) - dhdsdio_func_blocksize(dhd, 2, dhd->conf->sd_f2_blocksize); -#endif #ifdef DISABLE_PRUNED_SCAN if (FW_SUPPORTED(dhd, rsdb)) { @@ -12004,14 +10370,10 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #ifdef BCMCCX ret = dhd_iovar(dhd, 0, "ccx_enable", (char *)&ccx, sizeof(ccx), NULL, 0, TRUE); #endif /* BCMCCX */ + #ifdef WLTDLS -#ifdef ENABLE_TDLS_AUTO_MODE - /* by default TDLS on and auto mode on */ - _dhd_tdls_enable(dhd, true, true, NULL); -#else - /* by default TDLS on and auto mode off */ - _dhd_tdls_enable(dhd, true, false, NULL); -#endif /* ENABLE_TDLS_AUTO_MODE */ + dhd->tdls_enable = FALSE; + dhd_tdls_set_mode(dhd, false); #endif /* WLTDLS */ #ifdef DHD_ENABLE_LPC @@ -12122,6 +10484,21 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) DHD_ERROR(("%s Set scancache failed %d\n", __FUNCTION__, ret)); } + ret = dhd_iovar(dhd, 0, "event_log_max_sets", NULL, 0, (char *)&event_log_max_sets, + sizeof(event_log_max_sets), FALSE); + if (ret == BCME_OK) { + dhd->event_log_max_sets = event_log_max_sets; + } else { + dhd->event_log_max_sets = NUM_EVENT_LOG_SETS; + } + /* Make sure max_sets is set first with wmb and then sets_queried, + * this will be used during parsing the logsets in the reverse order. 
+ */ + OSL_SMP_WMB(); + dhd->event_log_max_sets_queried = TRUE; + DHD_ERROR(("%s: event_log_max_sets: %d ret: %d\n", + __FUNCTION__, dhd->event_log_max_sets, ret)); + #ifdef DISABLE_TXBFR ret = dhd_iovar(dhd, 0, "txbf_bfr_cap", (char *)&txbf_bfr_cap, sizeof(txbf_bfr_cap), NULL, 0, TRUE); @@ -12278,6 +10655,13 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) DHD_ERROR(("%s ht_features set failed %d\n", __FUNCTION__, ret)); } #endif /* DISABLE_11N_PROPRIETARY_RATES */ +#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB) +#if defined(DISABLE_HE_ENAB) + control_he_enab = 0; +#endif /* DISABLE_HE_ENAB */ + dhd_control_he_enab(dhd, control_he_enab); +#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */ + #ifdef CUSTOM_PSPRETEND_THR /* Turn off MPC in AP mode */ ret = dhd_iovar(dhd, 0, "pspretend_threshold", (char *)&pspretend_thr, @@ -12354,7 +10738,9 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #endif /* PNO_SUPPORT */ /* enable dongle roaming event */ #ifdef WL_CFG80211 +#if !defined(ROAM_EVT_DISABLE) setbit(eventmask, WLC_E_ROAM); +#endif /* !ROAM_EVT_DISABLE */ setbit(eventmask, WLC_E_BSSID); #endif /* WL_CFG80211 */ #ifdef BCMCCX @@ -12461,6 +10847,17 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #ifdef WL_MBO setbit(eventmask_msg->mask, WLC_E_MBO); #endif /* WL_MBO */ +#ifdef WL_BCNRECV + setbit(eventmask_msg->mask, WLC_E_BCNRECV_ABORTED); +#endif /* WL_BCNRECV */ +#ifdef WL_CAC_TS + setbit(eventmask_msg->mask, WLC_E_ADDTS_IND); + setbit(eventmask_msg->mask, WLC_E_DELTS_IND); +#endif /* WL_CAC_TS */ +#ifdef WL_CHAN_UTIL + setbit(eventmask_msg->mask, WLC_E_BSS_LOAD); +#endif /* WL_CHAN_UTIL */ + /* Write updated Event mask */ eventmask_msg->ver = EVENTMSGS_VER; eventmask_msg->command = EVENTMSGS_SET_MASK; @@ -12611,36 +11008,51 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) if (ret < 0) DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret)); else { - char *clmver_temp_buf = NULL; + char *ver_temp_buf = NULL, *ver_date_buf = NULL; + int len; - if ((clmver_temp_buf = bcmstrstr(buf, "Data:")) == 
NULL) { + if ((ver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) { DHD_ERROR(("Couldn't find \"Data:\"\n")); } else { - ptr = (clmver_temp_buf + strlen("Data:")); - if ((clmver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) { + ver_date_buf = bcmstrstr(buf, "Creation:"); + ptr = (ver_temp_buf + strlen("Data:")); + if ((ver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) { DHD_ERROR(("Couldn't find New line character\n")); } else { memset(clm_version, 0, CLM_VER_STR_LEN); - strncpy(clm_version, clmver_temp_buf, - MIN(strlen(clmver_temp_buf), CLM_VER_STR_LEN - 1)); + len = snprintf(clm_version, CLM_VER_STR_LEN - 1, "%s", ver_temp_buf); + if (ver_date_buf) { + ptr = (ver_date_buf + strlen("Creation:")); + ver_date_buf = bcmstrtok(&ptr, "\n", 0); + if (ver_date_buf) + snprintf(clm_version+len, CLM_VER_STR_LEN-1-len, + " (%s)", ver_date_buf); + } + DHD_INFO(("CLM version = %s\n", clm_version)); } } + + if (strlen(clm_version)) { + DHD_INFO(("CLM version = %s\n", clm_version)); + } else { + DHD_ERROR(("Couldn't find CLM version!\n")); + } } + dhd_set_version_info(dhd, fw_version); - /* query for 'ver' to get version info from firmware */ - memset(buf, 0, sizeof(buf)); - ptr = buf; - ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE); +#ifdef WRITE_WLANINFO + sec_save_wlinfo(fw_version, EPI_VERSION_STR, dhd->info->nv_path, clm_version); +#endif /* WRITE_WLANINFO */ + + /* query for 'wlc_ver' to get version info from firmware */ + memset(&wlc_ver, 0, sizeof(wl_wlc_version_t)); + ret = dhd_iovar(dhd, 0, "wlc_ver", NULL, 0, (char *)&wlc_ver, + sizeof(wl_wlc_version_t), FALSE); if (ret < 0) DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret)); else { - bcmstrtok(&ptr, "\n", 0); - strncpy(fw_version, buf, FW_VER_STR_LEN); - fw_version[FW_VER_STR_LEN-1] = '\0'; - dhd_set_version_info(dhd, buf); -#ifdef WRITE_WLANINFO - sec_save_wlinfo(buf, EPI_VERSION_STR, dhd->info->nv_path, clm_version); -#endif /* WRITE_WLANINFO */ + dhd->wlc_ver_major = wlc_ver.wlc_ver_major; 
+ dhd->wlc_ver_minor = wlc_ver.wlc_ver_minor; } #ifdef GEN_SOFTAP_INFO_FILE sec_save_softap_info(); @@ -12794,31 +11206,11 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) } } -#if defined(DHD_NON_DMA_M2M_CORRUPTION) +#ifdef DHD_NON_DMA_M2M_CORRUPTION /* check pcie non dma loopback */ - if (dhd->op_mode == DHD_FLAG_MFG_MODE) { - memset(&pcie_dmaxfer_lpbk, 0, sizeof(dhd_pcie_dmaxfer_lpbk_t)); - pcie_dmaxfer_lpbk.u.length = PCIE_DMAXFER_LPBK_LENGTH; - pcie_dmaxfer_lpbk.lpbkmode = M2M_NON_DMA_LPBK; - pcie_dmaxfer_lpbk.wait = TRUE; - - if ((ret = dhd_bus_iovar_op(dhd, "pcie_dmaxfer", NULL, 0, - (char *)&pcie_dmaxfer_lpbk, sizeof(dhd_pcie_dmaxfer_lpbk_t), - IOV_SET)) < 0) { - DHD_ERROR(("failed to check PCIe Non DMA Loopback Test!!! Reason : %d\n", - ret)); - goto done; - } - - if (pcie_dmaxfer_lpbk.u.status != BCME_OK) { - DHD_ERROR(("failed to check PCIe Non DMA Loopback Test!!! Reason : %d" - " Status : %d\n", ret, pcie_dmaxfer_lpbk.u.status)); - ret = BCME_ERROR; + if (dhd->op_mode == DHD_FLAG_MFG_MODE && + (dhd_bus_dmaxfer_lpbk(dhd, M2M_NON_DMA_LPBK) < 0)) { goto done; - } else { - - DHD_ERROR(("successful to check PCIe Non DMA Loopback Test\n")); - } } #endif /* DHD_NON_DMA_M2M_CORRUPTION */ @@ -12828,17 +11220,17 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) | WL_WNM_BSSTRANS | WL_WNM_NOTIF #endif // endif ; +#if defined(WL_MBO) && defined(WL_OCE) + if (FW_SUPPORTED(dhd, estm)) { + wnm_cap |= WL_WNM_ESTM; + } +#endif /* WL_MBO && WL_OCE */ if (dhd_iovar(dhd, 0, "wnm", (char *)&wnm_cap, sizeof(wnm_cap), NULL, 0, TRUE) < 0) { DHD_ERROR(("failed to set WNM capabilities\n")); } if (FW_SUPPORTED(dhd, ecounters) && enable_ecounter) { - if (dhd_start_ecounters(dhd) != BCME_OK) { - DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__)); - } else if (dhd_start_event_ecounters(dhd) != BCME_OK) { - DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__)); - } - + dhd_ecounter_configure(dhd, TRUE); } /* store the preserve log set numbers */ @@ -12857,6 +11249,11 @@ 
dhd_preinit_ioctls(dhd_pub_t *dhd) } #endif /* WL_MONITOR */ +#ifdef CONFIG_SILENT_ROAM + dhd->sroam_turn_on = TRUE; + dhd->sroamed = FALSE; +#endif /* CONFIG_SILENT_ROAM */ + dhd_conf_postinit_ioctls(dhd); done: @@ -13102,7 +11499,6 @@ static int dhd_inetaddr_notifier_call(struct notifier_block *this, if (!ifa || !(ifa->ifa_dev->dev)) return NOTIFY_DONE; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) /* Filter notifications meant for non Broadcom devices */ if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) && (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) { @@ -13111,7 +11507,6 @@ static int dhd_inetaddr_notifier_call(struct notifier_block *this, #endif /* WL_ENABLE_P2P_IF */ return NOTIFY_DONE; } -#endif /* LINUX_VERSION_CODE */ dhd = DHD_DEV_INFO(ifa->ifa_dev->dev); if (!dhd) @@ -13161,6 +11556,7 @@ static int dhd_inetaddr_notifier_call(struct notifier_block *this, __FUNCTION__)); aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx); #endif /* AOE_IP_ALIAS_SUPPORT */ + dhd_conf_set_garp(dhd_pub, idx, ifa->ifa_address, TRUE); break; case NETDEV_DOWN: @@ -13179,6 +11575,7 @@ static int dhd_inetaddr_notifier_call(struct notifier_block *this, dhd_aoe_hostip_clr(&dhd->pub, idx); dhd_aoe_arp_clr(&dhd->pub, idx); } + dhd_conf_set_garp(dhd_pub, idx, ifa->ifa_address, FALSE); break; default: @@ -13300,12 +11697,10 @@ int dhd_inet6addr_notifier_call(struct notifier_block *this, unsigned long event struct ipv6_work_info_t *ndo_info; int idx; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) /* Filter notifications meant for non Broadcom devices */ if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) { return NOTIFY_DONE; } -#endif /* LINUX_VERSION_CODE */ dhd = DHD_DEV_INFO(inet6_ifa->idev->dev); if (!dhd) { @@ -13342,6 +11737,29 @@ int dhd_inet6addr_notifier_call(struct notifier_block *this, unsigned long event } #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ +/* Network attach to be invoked from the bus probe handlers */ +int 
+dhd_attach_net(dhd_pub_t *dhdp, bool need_rtnl_lock) +{ + struct net_device *primary_ndev; + BCM_REFERENCE(primary_ndev); + + /* Register primary net device */ + if (dhd_register_if(dhdp, 0, need_rtnl_lock) != 0) { + return BCME_ERROR; + } + +#if defined(WL_CFG80211) + primary_ndev = dhd_linux_get_primary_netdev(dhdp); + if (wl_cfg80211_net_attach(primary_ndev) < 0) { + /* fail the init */ + dhd_remove_if(dhdp, 0, TRUE); + return BCME_ERROR; + } +#endif /* WL_CFG80211 */ + return BCME_OK; +} + int dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock) { @@ -13363,36 +11781,15 @@ dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock) net = ifp->net; ASSERT(net && (ifp->idx == ifidx)); -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) - ASSERT(!net->open); - net->get_stats = dhd_get_stats; -#ifdef DHD_PCIE_NATIVE_RUNTIMEPM - net->do_ioctl = dhd_ioctl_entry_wrapper; - net->hard_start_xmit = dhd_start_xmit_wrapper; -#else - net->do_ioctl = dhd_ioctl_entry; - net->hard_start_xmit = dhd_start_xmit; -#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ - - net->set_mac_address = dhd_set_mac_address; - net->set_multicast_list = dhd_set_multicast_list; - net->open = net->stop = NULL; -#else ASSERT(!net->netdev_ops); net->netdev_ops = &dhd_ops_virt; -#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */ /* Ok, link into the network layer... 
*/ if (ifidx == 0) { /* * device functions for the primary interface only */ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) - net->open = dhd_open; - net->stop = dhd_stop; -#else net->netdev_ops = &dhd_ops_pri; -#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */ if (!ETHER_ISNULLADDR(dhd->pub.mac.octet)) memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN); } else { @@ -13414,9 +11811,7 @@ dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock) } net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen; -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) net->ethtool_ops = &dhd_ethtool_ops; -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */ #if defined(WL_WIRELESS_EXT) #if WIRELESS_EXT < 19 @@ -13440,10 +11835,17 @@ dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock) if (ifidx == 0) printf("%s\n", dhd_version); + else { +#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN) + wl_ext_event_attach_netdev(net, ifidx, ifp->bssidx); +#ifdef WL_ESCAN + wl_escan_event_attach(net, dhdp); +#endif /* WL_ESCAN */ #ifdef WL_EXT_IAPSTA - else wl_ext_iapsta_attach_netdev(net, ifidx, ifp->bssidx); -#endif +#endif /* WL_EXT_IAPSTA */ +#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */ + } if (ifidx != 0) { if (_dhd_set_mac_address(dhd, ifidx, net->dev_addr) == 0) DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__)); @@ -13460,11 +11862,20 @@ dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock) DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err)); goto fail; } + if (ifidx == 0) { +#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN) + wl_ext_event_attach_netdev(net, ifidx, ifp->bssidx); +#ifdef WL_ESCAN + wl_escan_event_attach(net, dhdp); +#endif /* WL_ESCAN */ #ifdef WL_EXT_IAPSTA - if (ifidx == 0) wl_ext_iapsta_attach_netdev(net, ifidx, ifp->bssidx); +#endif /* WL_EXT_IAPSTA */ +#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */ + } +#ifdef WL_EXT_IAPSTA 
wl_ext_iapsta_attach_name(net, ifidx); -#endif +#endif /* WL_EXT_IAPSTA */ printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name, MAC2STRDBG(net->dev_addr)); @@ -13473,8 +11884,7 @@ dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock) // wl_iw_iscan_set_scan_broadcast_prep(net, 1); #endif // endif -#if (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \ - KERNEL_VERSION(2, 6, 27))) || defined(BCMDBUS)) +#if (defined(BCMPCIE) || defined(BCMLXSDMMC) || defined(BCMDBUS)) if (ifidx == 0) { #if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD) up(&dhd_registration_sem); @@ -13509,20 +11919,14 @@ dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock) #endif /* BT_OVER_SDIO */ } #if defined(WL_WIRELESS_EXT) -#ifdef WL_ESCAN - wl_escan_down(&dhd->pub); -#endif /* WL_ESCAN */ + wl_iw_down(net, &dhd->pub); #endif /* defined(WL_WIRELESS_EXT) */ } -#endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */ +#endif /* OEM_ANDROID && (BCMPCIE || BCMLXSDMMC) */ return 0; fail: -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) - net->open = NULL; -#else net->netdev_ops = NULL; -#endif // endif return err; } @@ -13599,6 +12003,7 @@ void dhd_detach(dhd_pub_t *dhdp) DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state)); + DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__)); dhd->pub.up = 0; if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) { /* Give sufficient time for threads to start running in case @@ -13667,18 +12072,20 @@ void dhd_detach(dhd_pub_t *dhdp) #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */ #if defined(WL_WIRELESS_EXT) -#ifdef WL_ESCAN - wl_escan_detach(dhdp); -#else if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) { /* Detatch and unlink in the iw */ - wl_iw_detach(dhdp); + wl_iw_detach(dev, dhdp); } -#endif /* WL_ESCAN */ #endif /* defined(WL_WIRELESS_EXT) */ +#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN) #ifdef WL_EXT_IAPSTA 
wl_ext_iapsta_dettach(dhdp); -#endif +#endif /* WL_EXT_IAPSTA */ +#ifdef WL_ESCAN + wl_escan_detach(dev, dhdp); +#endif /* WL_ESCAN */ + wl_ext_event_dettach(dhdp); +#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */ #ifdef DHD_ULP dhd_ulp_deinit(dhd->pub.osh, dhdp); @@ -13807,6 +12214,10 @@ void dhd_detach(dhd_pub_t *dhdp) } #endif /* DHD_LB */ +#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR) + cancel_work_sync(&dhd->axi_error_dispatcher_work); +#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */ + DHD_SSSR_MEMPOOL_DEINIT(&dhd->pub); #ifdef WL_CFG80211 @@ -13833,18 +12244,26 @@ void dhd_detach(dhd_pub_t *dhdp) dhd_os_dbg_detach_pkt_monitor(dhdp); dhd_os_spin_lock_deinit(dhd->pub.osh, dhd->pub.dbg->pkt_mon_lock); #endif /* DBG_PKT_MON */ - dhd_os_dbg_detach(dhdp); } #endif /* DEBUGABILITY */ + if (dhdp->dbg) { + dhd_os_dbg_detach(dhdp); + } +#ifdef DHD_STATUS_LOGGING + dhd_detach_statlog(dhdp); +#endif /* DHD_STATUS_LOGGING */ +#ifdef DHD_PKTDUMP_ROAM + dhd_dump_pkt_deinit(dhdp); +#endif /* DHD_PKTDUMP_ROAM */ #ifdef SHOW_LOGTRACE /* Release the skbs from queue for WLC_E_TRACE event */ dhd_event_logtrace_flush_queue(dhdp); /* Wait till event logtrace context finishes */ dhd_cancel_logtrace_process_sync(dhd); - mutex_lock(&dhd->pub.dhd_trace_lock); - remove_proc_entry("dhd_trace", NULL); - mutex_unlock(&dhd->pub.dhd_trace_lock); + + /* Remove ring proc entries */ + dhd_dbg_ring_proc_destroy(&dhd->pub); if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) { if (dhd->event_data.fmts) { @@ -13926,7 +12345,7 @@ void dhd_detach(dhd_pub_t *dhdp) dhd_mw_list_delete(dhdp, &(dhdp->mw_list_head)); #endif /* DHD_DEBUG */ #ifdef WL_MONITOR - dhd_del_monitor_if(dhd, NULL, DHD_WQ_WORK_IF_DEL); + dhd_del_monitor_if(dhd); #endif /* WL_MONITOR */ #ifdef DHD_ERPOM @@ -13935,6 +12354,8 @@ void dhd_detach(dhd_pub_t *dhdp) } #endif /* DHD_ERPOM */ + cancel_work_sync(&dhd->dhd_hang_process_work); + /* Prefer adding de-init code above this comment 
unless necessary. * The idea is to cancel work queue, sysfs and flags at the end. */ @@ -13951,6 +12372,13 @@ void dhd_detach(dhd_pub_t *dhdp) MFREE(dhdp->osh, dhdp->extended_trap_data, BCMPCIE_EXT_TRAP_DATA_MAXLEN); dhdp->extended_trap_data = NULL; } +#ifdef DNGL_AXI_ERROR_LOGGING + if (dhdp->axi_err_dump) + { + MFREE(dhdp->osh, dhdp->axi_err_dump, sizeof(dhd_axi_error_dump_t)); + dhdp->axi_err_dump = NULL; + } +#endif /* DNGL_AXI_ERROR_LOGGING */ #endif /* BCMPCIE */ #ifdef DHD_DUMP_MNGR @@ -13960,7 +12388,7 @@ void dhd_detach(dhd_pub_t *dhdp) } #endif /* DHD_DUMP_MNGR */ dhd_sysfs_exit(dhd); - dhd->pub.fw_download_done = FALSE; + dhd->pub.fw_download_status = FW_UNLOADED; #if defined(BT_OVER_SDIO) mutex_destroy(&dhd->bus_user_lock); @@ -14156,7 +12584,6 @@ dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unuse return NOTIFY_DONE; } -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) #if defined(CONFIG_DEFERRED_INITCALLS) && !defined(EXYNOS_PCIE_MODULE_PATCH) #if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \ defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998) || \ @@ -14176,9 +12603,6 @@ late_initcall_sync(dhd_module_init); #else late_initcall(dhd_module_init); #endif /* USE_LATE_INITCALL_SYNC */ -#else -module_init(dhd_module_init); -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ module_exit(dhd_module_exit); @@ -14300,31 +12724,18 @@ dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec) } int -dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool resched) +dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition) { dhd_info_t * dhd = (dhd_info_t *)(pub->info); - int timeout, timeout_tmp = dhd_ioctl_timeout_msec; - - if (!resched && pub->conf->ctrl_resched>0 && pub->conf->dhd_ioctl_timeout_msec>0) { - timeout_tmp = dhd_ioctl_timeout_msec; - dhd_ioctl_timeout_msec = pub->conf->dhd_ioctl_timeout_msec; - } + int timeout; /* Convert timeout in millsecond to jiffies */ -#if 
(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec); -#else - timeout = dhd_ioctl_timeout_msec * HZ / 1000; -#endif // endif DHD_PERIM_UNLOCK(pub); timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout); - if (!resched && pub->conf->ctrl_resched>0 && pub->conf->dhd_ioctl_timeout_msec>0) { - dhd_ioctl_timeout_msec = timeout_tmp; - } - DHD_PERIM_LOCK(pub); return timeout; @@ -14346,11 +12757,7 @@ dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition) int timeout; /* Convert timeout in millsecond to jiffies */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) timeout = msecs_to_jiffies(D3_ACK_RESP_TIMEOUT); -#else - timeout = D3_ACK_RESP_TIMEOUT * HZ / 1000; -#endif // endif DHD_PERIM_UNLOCK(pub); @@ -14381,11 +12788,7 @@ dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition) * so that IOCTL timeout should not get affected. */ /* Convert timeout in millsecond to jiffies */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT); -#else - timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000; -#endif // endif timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout); @@ -14404,11 +12807,7 @@ dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition) int timeout; /* Convert timeout in millsecond to jiffies */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT); -#else - timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000; -#endif // endif timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, (*var == condition), timeout); @@ -14428,11 +12827,7 @@ dhd_os_busbusy_wait_bitmask(dhd_pub_t *pub, uint *var, int timeout; /* Convert timeout in millsecond to jiffies */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT); -#else - timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000; -#endif // endif timeout = 
wait_event_timeout(dhd->dhd_bus_busy_state_wait, ((*var & bitmask) == condition), timeout); @@ -14445,9 +12840,12 @@ dhd_os_dmaxfer_wait(dhd_pub_t *pub, uint *condition) { int ret = 0; dhd_info_t * dhd = (dhd_info_t *)(pub->info); + int timeout; + + timeout = msecs_to_jiffies(IOCTL_DMAXFER_TIMEOUT); DHD_PERIM_UNLOCK(pub); - ret = wait_event_interruptible(dhd->dmaxfer_wait, (*condition)); + ret = wait_event_timeout(dhd->dmaxfer_wait, (*condition), timeout); DHD_PERIM_LOCK(pub); return ret; @@ -14471,11 +12869,8 @@ dhd_os_tx_completion_wake(dhd_pub_t *dhd) wake_up(&dhd->tx_completion_wait); } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)) /* Fix compilation error for FC11 */ -INLINE -#endif // endif -int +INLINE int dhd_os_busbusy_wake(dhd_pub_t *pub) { dhd_info_t *dhd = (dhd_info_t *)(pub->info); @@ -14492,4458 +12887,5411 @@ dhd_os_wd_timer_extend(void *bus, bool extend) dhd_pub_t *pub = bus; dhd_info_t *dhd = (dhd_info_t *)pub->info; - if (extend) - dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL); - else - dhd_os_wd_timer(bus, dhd->default_wd_interval); -#endif /* !BCMDBUS */ + if (extend) + dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL); + else + dhd_os_wd_timer(bus, dhd->default_wd_interval); +#endif /* !BCMDBUS */ +} + +void +dhd_os_wd_timer(void *bus, uint wdtick) +{ +#ifndef BCMDBUS + dhd_pub_t *pub = bus; + dhd_info_t *dhd = (dhd_info_t *)pub->info; + unsigned long flags; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!dhd) { + DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__)); + return; + } + + DHD_GENERAL_LOCK(pub, flags); + + /* don't start the wd until fw is loaded */ + if (pub->busstate == DHD_BUS_DOWN) { + DHD_GENERAL_UNLOCK(pub, flags); +#ifdef BCMSDIO + if (!wdtick) { + DHD_OS_WD_WAKE_UNLOCK(pub); + } +#endif /* BCMSDIO */ + return; + } + + /* Totally stop the timer */ + if (!wdtick && dhd->wd_timer_valid == TRUE) { + dhd->wd_timer_valid = FALSE; + DHD_GENERAL_UNLOCK(pub, flags); + del_timer_sync(&dhd->timer); +#ifdef BCMSDIO + 
DHD_OS_WD_WAKE_UNLOCK(pub); +#endif /* BCMSDIO */ + return; + } + + if (wdtick) { +#ifdef BCMSDIO + DHD_OS_WD_WAKE_LOCK(pub); + dhd_watchdog_ms = (uint)wdtick; +#endif /* BCMSDIO */ + /* Re arm the timer, at last watchdog period */ + mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms)); + dhd->wd_timer_valid = TRUE; + } + DHD_GENERAL_UNLOCK(pub, flags); +#endif /* !BCMDBUS */ +} + +void * +dhd_os_open_image1(dhd_pub_t *pub, char *filename) +{ + struct file *fp; + int size; + + fp = filp_open(filename, O_RDONLY, 0); + /* + * 2.6.11 (FC4) supports filp_open() but later revs don't? + * Alternative: + * fp = open_namei(AT_FDCWD, filename, O_RD, 0); + * ??? + */ + if (IS_ERR(fp)) { + fp = NULL; + goto err; + } + + if (!S_ISREG(file_inode(fp)->i_mode)) { + DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename)); + fp = NULL; + goto err; + } + + size = i_size_read(file_inode(fp)); + if (size <= 0) { + DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size)); + fp = NULL; + goto err; + } + + DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size)); + +err: + return fp; +} + +int +dhd_os_get_image_block(char *buf, int len, void *image) +{ + struct file *fp = (struct file *)image; + int rdlen; + int size; + + if (!image) { + return 0; + } + + size = i_size_read(file_inode(fp)); + rdlen = compat_kernel_read(fp, fp->f_pos, buf, MIN(len, size)); + + if (len >= size && size != rdlen) { + return -EIO; + } + + if (rdlen > 0) { + fp->f_pos += rdlen; + } + + return rdlen; +} + +#if defined(BT_OVER_SDIO) +int +dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image) +{ + struct file *fp = (struct file *)image; + int rd_len; + uint str_len = 0; + char *str_end = NULL; + + if (!image) + return 0; + + rd_len = compat_kernel_read(fp, fp->f_pos, str, len); + str_end = strnchr(str, len, '\n'); + if (str_end == NULL) { + goto err; + } + str_len = (uint)(str_end - str); + + /* Advance file pointer past the string 
length */ + fp->f_pos += str_len + 1; + bzero(str_end, rd_len - str_len); + +err: + return str_len; +} +#endif /* defined (BT_OVER_SDIO) */ + +int +dhd_os_get_image_size(void *image) +{ + struct file *fp = (struct file *)image; + int size; + if (!image) { + return 0; + } + + size = i_size_read(file_inode(fp)); + + return size; +} + +void +dhd_os_close_image1(dhd_pub_t *pub, void *image) +{ + if (image) { + filp_close((struct file *)image, NULL); + } +} + +void +dhd_os_sdlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + +#ifdef BCMDBUS + spin_lock_bh(&dhd->sdlock); +#else + if (dhd_dpc_prio >= 0) + down(&dhd->sdsem); + else + spin_lock_bh(&dhd->sdlock); +#endif /* !BCMDBUS */ +} + +void +dhd_os_sdunlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + +#ifdef BCMDBUS + spin_unlock_bh(&dhd->sdlock); +#else + if (dhd_dpc_prio >= 0) + up(&dhd->sdsem); + else + spin_unlock_bh(&dhd->sdlock); +#endif /* !BCMDBUS */ +} + +void +dhd_os_sdlock_txq(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); +#ifdef BCMDBUS + spin_lock_irqsave(&dhd->txqlock, dhd->txqlock_flags); +#else + spin_lock_bh(&dhd->txqlock); +#endif /* BCMDBUS */ +} + +void +dhd_os_sdunlock_txq(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); +#ifdef BCMDBUS + spin_unlock_irqrestore(&dhd->txqlock, dhd->txqlock_flags); +#else + spin_unlock_bh(&dhd->txqlock); +#endif /* BCMDBUS */ +} + +void +dhd_os_sdlock_rxq(dhd_pub_t *pub) +{ +} + +void +dhd_os_sdunlock_rxq(dhd_pub_t *pub) +{ +} + +static void +dhd_os_rxflock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + spin_lock_bh(&dhd->rxf_lock); + } -void -dhd_os_wd_timer(void *bus, uint wdtick) +static void +dhd_os_rxfunlock(dhd_pub_t *pub) { -#ifndef BCMDBUS - dhd_pub_t *pub = bus; - dhd_info_t *dhd = (dhd_info_t *)pub->info; - unsigned long flags; + dhd_info_t *dhd; - DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + dhd = (dhd_info_t 
*)(pub->info); + spin_unlock_bh(&dhd->rxf_lock); +} - if (!dhd) { - DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__)); - return; - } +#ifdef DHDTCPACK_SUPPRESS +unsigned long +dhd_os_tcpacklock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + unsigned long flags = 0; - DHD_GENERAL_LOCK(pub, flags); + dhd = (dhd_info_t *)(pub->info); - /* don't start the wd until fw is loaded */ - if (pub->busstate == DHD_BUS_DOWN) { - DHD_GENERAL_UNLOCK(pub, flags); + if (dhd) { #ifdef BCMSDIO - if (!wdtick) { - DHD_OS_WD_WAKE_UNLOCK(pub); - } + spin_lock_bh(&dhd->tcpack_lock); +#else + spin_lock_irqsave(&dhd->tcpack_lock, flags); #endif /* BCMSDIO */ - return; } - /* Totally stop the timer */ - if (!wdtick && dhd->wd_timer_valid == TRUE) { - dhd->wd_timer_valid = FALSE; - DHD_GENERAL_UNLOCK(pub, flags); - del_timer_sync(&dhd->timer); + return flags; +} + +void +dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags) +{ + dhd_info_t *dhd; + #ifdef BCMSDIO - DHD_OS_WD_WAKE_UNLOCK(pub); + BCM_REFERENCE(flags); #endif /* BCMSDIO */ - return; - } - if (wdtick) { + dhd = (dhd_info_t *)(pub->info); + + if (dhd) { #ifdef BCMSDIO - DHD_OS_WD_WAKE_LOCK(pub); - dhd_watchdog_ms = (uint)wdtick; + spin_unlock_bh(&dhd->tcpack_lock); +#else + spin_unlock_irqrestore(&dhd->tcpack_lock, flags); #endif /* BCMSDIO */ - /* Re arm the timer, at last watchdog period */ - mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms)); - dhd->wd_timer_valid = TRUE; } - DHD_GENERAL_UNLOCK(pub, flags); -#endif /* !BCMDBUS */ } +#endif /* DHDTCPACK_SUPPRESS */ -void * -dhd_os_open_image1(dhd_pub_t *pub, char *filename) +uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail) { - struct file *fp; - int size; + uint8* buf; + gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC; - fp = filp_open(filename, O_RDONLY, 0); - /* - * 2.6.11 (FC4) supports filp_open() but later revs don't? - * Alternative: - * fp = open_namei(AT_FDCWD, filename, O_RD, 0); - * ??? 
- */ - if (IS_ERR(fp)) { - fp = NULL; - goto err; - } + buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size); + if (buf == NULL && kmalloc_if_fail) + buf = kmalloc(size, flags); - if (!S_ISREG(file_inode(fp)->i_mode)) { - DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename)); - fp = NULL; - goto err; - } + return buf; +} - size = i_size_read(file_inode(fp)); - if (size <= 0) { - DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size)); - fp = NULL; - goto err; - } +void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size) +{ +} - DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size)); +#if defined(WL_WIRELESS_EXT) +struct iw_statistics * +dhd_get_wireless_stats(struct net_device *dev) +{ + int res = 0; + dhd_info_t *dhd = DHD_DEV_INFO(dev); -err: - return fp; + if (!dhd->pub.up) { + return NULL; + } + + res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats); + + if (res == 0) + return &dhd->iw.wstats; + else + return NULL; } +#endif /* defined(WL_WIRELESS_EXT) */ -int -dhd_os_get_image_block(char *buf, int len, void *image) +static int +dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen, + wl_event_msg_t *event, void **data) { - struct file *fp = (struct file *)image; - int rdlen; - int size; + int bcmerror = 0; +#ifdef WL_CFG80211 + unsigned long flags = 0; +#endif /* WL_CFG80211 */ + ASSERT(dhd != NULL); - if (!image) { - return 0; +#ifdef SHOW_LOGTRACE + bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data, + &dhd->event_data); +#else + bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data, + NULL); +#endif /* SHOW_LOGTRACE */ + if (unlikely(bcmerror != BCME_OK)) { + return bcmerror; } - size = i_size_read(file_inode(fp)); - rdlen = compat_kernel_read(fp, fp->f_pos, buf, MIN(len, size)); - - if (len >= size && size != rdlen) { - return -EIO; + if (ntoh32(event->event_type) == WLC_E_IF) { + /* WLC_E_IF event 
types are consumed by wl_process_host_event. + * For ifadd/del ops, the netdev ptr may not be valid at this + * point. so return before invoking cfg80211/wext handlers. + */ + return BCME_OK; } - if (rdlen > 0) { - fp->f_pos += rdlen; +#if defined(WL_EXT_IAPSTA) || defined(USE_IW) + wl_ext_event_send(dhd->pub.event_params, event, *data); +#endif + +#ifdef WL_CFG80211 + if (dhd->iflist[ifidx]->net) { + spin_lock_irqsave(&dhd->pub.up_lock, flags); + if (dhd->pub.up) { + wl_cfg80211_event(dhd->iflist[ifidx]->net, event, *data); + } + spin_unlock_irqrestore(&dhd->pub.up_lock, flags); } +#endif /* defined(WL_CFG80211) */ - return rdlen; + return (bcmerror); } -#if defined(BT_OVER_SDIO) -int -dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image) +/* send up locally generated event */ +void +dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data) { - struct file *fp = (struct file *)image; - int rd_len; - uint str_len = 0; - char *str_end = NULL; + switch (ntoh32(event->event_type)) { + /* Handle error case or further events here */ + default: + break; + } +} - if (!image) - return 0; +#ifdef LOG_INTO_TCPDUMP +void +dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len) +{ + struct sk_buff *p, *skb; + uint32 pktlen; + int len; + dhd_if_t *ifp; + dhd_info_t *dhd; + uchar *skb_data; + int ifidx = 0; + struct ether_header eth; - rd_len = compat_kernel_read(fp, fp->f_pos, str, len); - str_end = strnchr(str, len, '\n'); - if (str_end == NULL) { - goto err; - } - str_len = (uint)(str_end - str); + pktlen = sizeof(eth) + data_len; + dhd = dhdp->info; - /* Advance file pointer past the string length */ - fp->f_pos += str_len + 1; - bzero(str_end, rd_len - str_len); + if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) { + ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32))); -err: - return str_len; + bcopy(&dhdp->mac, ð.ether_dhost, ETHER_ADDR_LEN); + bcopy(&dhdp->mac, ð.ether_shost, ETHER_ADDR_LEN); + ETHER_TOGGLE_LOCALADDR(ð.ether_shost); + 
eth.ether_type = hton16(ETHER_TYPE_BRCM); + + bcopy((void *)ð, PKTDATA(dhdp->osh, p), sizeof(eth)); + bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len); + skb = PKTTONATIVE(dhdp->osh, p); + skb_data = skb->data; + len = skb->len; + + ifidx = dhd_ifname2idx(dhd, "wlan0"); + ifp = dhd->iflist[ifidx]; + if (ifp == NULL) + ifp = dhd->iflist[0]; + + ASSERT(ifp); + skb->dev = ifp->net; + skb->protocol = eth_type_trans(skb, skb->dev); + skb->data = skb_data; + skb->len = len; + + /* Strip header, count, deliver upward */ + skb_pull(skb, ETH_HLEN); + + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + /* Send the packet */ + if (in_interrupt()) { + netif_rx(skb); + } else { + netif_rx_ni(skb); + } + } else { + /* Could not allocate a sk_buf */ + DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__)); + } } -#endif /* defined (BT_OVER_SDIO) */ +#endif /* LOG_INTO_TCPDUMP */ -int -dhd_os_get_image_size(void *image) +void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar) { - struct file *fp = (struct file *)image; - int size; - if (!image) { - return 0; - } +#if defined(BCMSDIO) + struct dhd_info *dhdinfo = dhd->info; - size = i_size_read(file_inode(fp)); + int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT); - return size; -} + dhd_os_sdunlock(dhd); + wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout); + dhd_os_sdlock(dhd); +#endif /* defined(BCMSDIO) */ + return; +} /* dhd_init_static_strs_array */ -void -dhd_os_close_image1(dhd_pub_t *pub, void *image) +void dhd_wait_event_wakeup(dhd_pub_t *dhd) { - if (image) { - filp_close((struct file *)image, NULL); - } +#if defined(BCMSDIO) + struct dhd_info *dhdinfo = dhd->info; + if (waitqueue_active(&dhdinfo->ctrl_wait)) + wake_up(&dhdinfo->ctrl_wait); +#endif // endif + return; } -void -dhd_os_sdlock(dhd_pub_t *pub) +#if defined(BCMSDIO) || defined(BCMPCIE) || defined(BCMDBUS) +int +dhd_net_bus_devreset(struct net_device *dev, uint8 flag) { - dhd_info_t *dhd; + int ret; - dhd = 
(dhd_info_t *)(pub->info); + dhd_info_t *dhd = DHD_DEV_INFO(dev); -#ifdef BCMDBUS - spin_lock_bh(&dhd->sdlock); -#else - if (dhd_dpc_prio >= 0) - down(&dhd->sdsem); - else - spin_lock_bh(&dhd->sdlock); -#endif /* !BCMDBUS */ -} +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0) + return BCME_ERROR; +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ -void -dhd_os_sdunlock(dhd_pub_t *pub) -{ - dhd_info_t *dhd; + if (flag == TRUE) { + /* Issue wl down command before resetting the chip */ + if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) { + DHD_TRACE(("%s: wl down failed\n", __FUNCTION__)); + } +#ifdef PROP_TXSTATUS + if (dhd->pub.wlfc_enabled) { + dhd_wlfc_deinit(&dhd->pub); + } +#endif /* PROP_TXSTATUS */ +#ifdef PNO_SUPPORT + if (dhd->pub.pno_state) { + dhd_pno_deinit(&dhd->pub); + } +#endif // endif +#ifdef RTT_SUPPORT + if (dhd->pub.rtt_state) { + dhd_rtt_deinit(&dhd->pub); + } +#endif /* RTT_SUPPORT */ - dhd = (dhd_info_t *)(pub->info); +#if defined(DBG_PKT_MON) && !defined(DBG_PKT_MON_INIT_DEFAULT) + dhd_os_dbg_detach_pkt_monitor(&dhd->pub); +#endif /* DBG_PKT_MON */ + } -#ifdef BCMDBUS - spin_unlock_bh(&dhd->sdlock); -#else - if (dhd_dpc_prio >= 0) - up(&dhd->sdsem); - else - spin_unlock_bh(&dhd->sdlock); -#endif /* !BCMDBUS */ -} +#ifdef BCMSDIO + if (!flag) { + dhd_update_fw_nv_path(dhd); + /* update firmware and nvram path to sdio bus */ + dhd_bus_update_fw_nv_path(dhd->pub.bus, + dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path); + } +#endif /* BCMSDIO */ -void -dhd_os_sdlock_txq(dhd_pub_t *pub) -{ - dhd_info_t *dhd; + ret = dhd_bus_devreset(&dhd->pub, flag); - dhd = (dhd_info_t *)(pub->info); -#ifdef BCMDBUS - spin_lock_irqsave(&dhd->txqlock, dhd->txqlock_flags); -#else - spin_lock_bh(&dhd->txqlock); -#endif /* BCMDBUS */ -} +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus)); + pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus)); +#endif /* 
DHD_PCIE_NATIVE_RUNTIMEPM */ -void -dhd_os_sdunlock_txq(dhd_pub_t *pub) -{ - dhd_info_t *dhd; + if (flag) { + /* Clear some flags for recovery logic */ + dhd->pub.dongle_trap_occured = 0; + dhd->pub.iovar_timeout_occured = 0; +#ifdef PCIE_FULL_DONGLE + dhd->pub.d3ack_timeout_occured = 0; + dhd->pub.livelock_occured = 0; + dhd->pub.pktid_audit_failed = 0; +#endif /* PCIE_FULL_DONGLE */ + dhd->pub.iface_op_failed = 0; + dhd->pub.scan_timeout_occurred = 0; + dhd->pub.scan_busy_occurred = 0; + dhd->pub.smmu_fault_occurred = 0; + } - dhd = (dhd_info_t *)(pub->info); -#ifdef BCMDBUS - spin_unlock_irqrestore(&dhd->txqlock, dhd->txqlock_flags); -#else - spin_unlock_bh(&dhd->txqlock); -#endif /* BCMDBUS */ -} + if (ret) { + DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret)); + } -void -dhd_os_sdlock_rxq(dhd_pub_t *pub) -{ + return ret; } -void -dhd_os_sdunlock_rxq(dhd_pub_t *pub) +#ifdef BCMSDIO +int +dhd_net_bus_suspend(struct net_device *dev) { + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return dhd_bus_suspend(&dhd->pub); } -static void -dhd_os_rxflock(dhd_pub_t *pub) +int +dhd_net_bus_resume(struct net_device *dev, uint8 stage) { - dhd_info_t *dhd; - - dhd = (dhd_info_t *)(pub->info); - spin_lock_bh(&dhd->rxf_lock); - + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return dhd_bus_resume(&dhd->pub, stage); } -static void -dhd_os_rxfunlock(dhd_pub_t *pub) -{ - dhd_info_t *dhd; - - dhd = (dhd_info_t *)(pub->info); - spin_unlock_bh(&dhd->rxf_lock); -} +#endif /* BCMSDIO */ +#endif /* BCMSDIO || BCMPCIE || BCMDBUS */ -#ifdef DHDTCPACK_SUPPRESS -unsigned long -dhd_os_tcpacklock(dhd_pub_t *pub) +int net_os_set_suspend_disable(struct net_device *dev, int val) { - dhd_info_t *dhd; - unsigned long flags = 0; - - dhd = (dhd_info_t *)(pub->info); + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; if (dhd) { -#ifdef BCMSDIO - spin_lock_bh(&dhd->tcpack_lock); -#else - spin_lock_irqsave(&dhd->tcpack_lock, flags); -#endif /* BCMSDIO */ + ret = dhd->pub.suspend_disable_flag; + 
dhd->pub.suspend_disable_flag = val; } - - return flags; + return ret; } -void -dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags) +int net_os_set_suspend(struct net_device *dev, int val, int force) { - dhd_info_t *dhd; - -#ifdef BCMSDIO - BCM_REFERENCE(flags); -#endif /* BCMSDIO */ - - dhd = (dhd_info_t *)(pub->info); + int ret = 0; + dhd_info_t *dhd = DHD_DEV_INFO(dev); if (dhd) { -#ifdef BCMSDIO - spin_unlock_bh(&dhd->tcpack_lock); +#ifdef CONFIG_MACH_UNIVERSAL7420 +#endif /* CONFIG_MACH_UNIVERSAL7420 */ +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) + ret = dhd_set_suspend(val, &dhd->pub); #else - spin_unlock_irqrestore(&dhd->tcpack_lock, flags); -#endif /* BCMSDIO */ + ret = dhd_suspend_resume_helper(dhd, val, force); +#endif // endif +#ifdef WL_CFG80211 + wl_cfg80211_update_power_mode(dev); +#endif // endif } + return ret; } -#endif /* DHDTCPACK_SUPPRESS */ -uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail) +int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val) { - uint8* buf; - gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC; + dhd_info_t *dhd = DHD_DEV_INFO(dev); - buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size); - if (buf == NULL && kmalloc_if_fail) - buf = kmalloc(size, flags); + if (dhd) { + DHD_ERROR(("%s: Set bcn_li_dtim in suspend %d\n", + __FUNCTION__, val)); + dhd->pub.suspend_bcn_li_dtim = val; + } - return buf; + return 0; } -void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size) +int net_os_set_max_dtim_enable(struct net_device *dev, int val) { + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (dhd) { + DHD_ERROR(("%s: use MAX bcn_li_dtim in suspend %s\n", + __FUNCTION__, (val ? 
"Enable" : "Disable"))); + if (val) { + dhd->pub.max_dtim_enable = TRUE; + } else { + dhd->pub.max_dtim_enable = FALSE; + } + } else { + return -1; + } + + return 0; } -#if defined(WL_WIRELESS_EXT) -struct iw_statistics * -dhd_get_wireless_stats(struct net_device *dev) +#ifdef DISABLE_DTIM_IN_SUSPEND +int net_os_set_disable_dtim_in_suspend(struct net_device *dev, int val) { - int res = 0; dhd_info_t *dhd = DHD_DEV_INFO(dev); - if (!dhd->pub.up) { - return NULL; + if (dhd) { + DHD_ERROR(("%s: Disable bcn_li_dtim in suspend %s\n", + __FUNCTION__, (val ? "Enable" : "Disable"))); + if (val) { + dhd->pub.disable_dtim_in_suspend = TRUE; + } else { + dhd->pub.disable_dtim_in_suspend = FALSE; + } + } else { + return -1; } - res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats); - - if (res == 0) - return &dhd->iw.wstats; - else - return NULL; + return 0; } -#endif /* defined(WL_WIRELESS_EXT) */ +#endif /* DISABLE_DTIM_IN_SUSPEND */ -static int -dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen, - wl_event_msg_t *event, void **data) +#ifdef PKT_FILTER_SUPPORT +int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num) { - int bcmerror = 0; -#ifdef WL_CFG80211 - unsigned long flags = 0; -#endif /* WL_CFG80211 */ - ASSERT(dhd != NULL); + int ret = 0; -#ifdef SHOW_LOGTRACE - bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data, - &dhd->event_data); -#else - bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data, - NULL); -#endif /* SHOW_LOGTRACE */ - if (unlikely(bcmerror != BCME_OK)) { - return bcmerror; - } +#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER + dhd_info_t *dhd = DHD_DEV_INFO(dev); - if (ntoh32(event->event_type) == WLC_E_IF) { - /* WLC_E_IF event types are consumed by wl_process_host_event. - * For ifadd/del ops, the netdev ptr may not be valid at this - * point. so return before invoking cfg80211/wext handlers. 
- */ - return BCME_OK; + if (!dhd_master_mode) + add_remove = !add_remove; + DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num)); + if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) { + return 0; } -#if defined(WL_EXT_IAPSTA) - wl_ext_iapsta_event(dhd->iflist[ifidx]->net, event, *data); -#endif /* defined(WL_EXT_IAPSTA) */ -#if defined(WL_WIRELESS_EXT) - if (event->bsscfgidx == 0) { - /* - * Wireless ext is on primary interface only - */ - ASSERT(dhd->iflist[ifidx] != NULL); - ASSERT(dhd->iflist[ifidx]->net != NULL); - - if (dhd->iflist[ifidx]->net) { - wl_iw_event(dhd->iflist[ifidx]->net, event, *data); - } +#ifdef BLOCK_IPV6_PACKET + /* customer want to use NO IPV6 packets only */ + if (num == DHD_MULTICAST6_FILTER_NUM) { + return 0; } -#endif /* defined(WL_WIRELESS_EXT) */ +#endif /* BLOCK_IPV6_PACKET */ -#ifdef WL_CFG80211 - if (dhd->iflist[ifidx]->net) { - spin_lock_irqsave(&dhd->pub.up_lock, flags); - if (dhd->pub.up) { - wl_cfg80211_event(dhd->iflist[ifidx]->net, event, *data); - } - spin_unlock_irqrestore(&dhd->pub.up_lock, flags); + if (num >= dhd->pub.pktfilter_count) { + return -EINVAL; } -#endif /* defined(WL_CFG80211) */ - return (bcmerror); + ret = dhd_packet_filter_add_remove(&dhd->pub, add_remove, num); +#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */ + + return ret; } -/* send up locally generated event */ -void -dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data) +int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val) + { - switch (ntoh32(event->event_type)) { - /* Handle error case or further events here */ - default: - break; + int ret = 0; + + /* Packet filtering is set only if we still in early-suspend and + * we need either to turn it ON or turn it OFF + * We can always turn it OFF in case of early-suspend, but we turn it + * back ON only if suspend_disable_flag was not set + */ + if (dhdp && dhdp->up) { + if (dhdp->in_suspend) { + if (!val || (val && !dhdp->suspend_disable_flag)) + 
dhd_enable_packet_filter(val, dhdp); + } } + return ret; } -#ifdef LOG_INTO_TCPDUMP -void -dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len) +/* function to enable/disable packet for Network device */ +int net_os_enable_packet_filter(struct net_device *dev, int val) { - struct sk_buff *p, *skb; - uint32 pktlen; - int len; - dhd_if_t *ifp; - dhd_info_t *dhd; - uchar *skb_data; - int ifidx = 0; - struct ether_header eth; - - pktlen = sizeof(eth) + data_len; - dhd = dhdp->info; - - if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) { - ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32))); - - bcopy(&dhdp->mac, ð.ether_dhost, ETHER_ADDR_LEN); - bcopy(&dhdp->mac, ð.ether_shost, ETHER_ADDR_LEN); - ETHER_TOGGLE_LOCALADDR(ð.ether_shost); - eth.ether_type = hton16(ETHER_TYPE_BRCM); - - bcopy((void *)ð, PKTDATA(dhdp->osh, p), sizeof(eth)); - bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len); - skb = PKTTONATIVE(dhdp->osh, p); - skb_data = skb->data; - len = skb->len; + dhd_info_t *dhd = DHD_DEV_INFO(dev); - ifidx = dhd_ifname2idx(dhd, "wlan0"); - ifp = dhd->iflist[ifidx]; - if (ifp == NULL) - ifp = dhd->iflist[0]; + DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val)); + return dhd_os_enable_packet_filter(&dhd->pub, val); +} +#endif /* PKT_FILTER_SUPPORT */ - ASSERT(ifp); - skb->dev = ifp->net; - skb->protocol = eth_type_trans(skb, skb->dev); - skb->data = skb_data; - skb->len = len; +int +dhd_dev_init_ioctl(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret; - /* Strip header, count, deliver upward */ - skb_pull(skb, ETH_HLEN); + if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) + goto done; - bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, - __FUNCTION__, __LINE__); - /* Send the packet */ - if (in_interrupt()) { - netif_rx(skb); - } else { - netif_rx_ni(skb); - } - } else { - /* Could not allocate a sk_buf */ - DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__)); - } +done: + return ret; } -#endif /* LOG_INTO_TCPDUMP */ -void 
dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar) +int +dhd_dev_get_feature_set(struct net_device *dev) { -#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) - struct dhd_info *dhdinfo = dhd->info; - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) - int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT); -#else - int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ; -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ + dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev); + dhd_pub_t *dhd = (&ptr->pub); + int feature_set = 0; - dhd_os_sdunlock(dhd); - wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout); - dhd_os_sdlock(dhd); -#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */ - return; -} /* dhd_init_static_strs_array */ + if (FW_SUPPORTED(dhd, sta)) + feature_set |= WIFI_FEATURE_INFRA; + if (FW_SUPPORTED(dhd, dualband)) + feature_set |= WIFI_FEATURE_INFRA_5G; + if (FW_SUPPORTED(dhd, p2p)) + feature_set |= WIFI_FEATURE_P2P; + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) + feature_set |= WIFI_FEATURE_SOFT_AP; + if (FW_SUPPORTED(dhd, tdls)) + feature_set |= WIFI_FEATURE_TDLS; + if (FW_SUPPORTED(dhd, vsdb)) + feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL; + if (FW_SUPPORTED(dhd, nan)) { + feature_set |= WIFI_FEATURE_NAN; + /* NAN is essentail for d2d rtt */ + if (FW_SUPPORTED(dhd, rttd2d)) + feature_set |= WIFI_FEATURE_D2D_RTT; + } +#ifdef RTT_SUPPORT + if (dhd->rtt_supported) { + feature_set |= WIFI_FEATURE_D2D_RTT; + feature_set |= WIFI_FEATURE_D2AP_RTT; + } +#endif /* RTT_SUPPORT */ +#ifdef LINKSTAT_SUPPORT + feature_set |= WIFI_FEATURE_LINKSTAT; +#endif /* LINKSTAT_SUPPORT */ -void dhd_wait_event_wakeup(dhd_pub_t *dhd) -{ -#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) - struct dhd_info *dhdinfo = dhd->info; - if (waitqueue_active(&dhdinfo->ctrl_wait)) - wake_up(&dhdinfo->ctrl_wait); -#endif // endif - return; +#if defined(PNO_SUPPORT) && !defined(DISABLE_ANDROID_PNO) + if 
(dhd_is_pno_supported(dhd)) { + feature_set |= WIFI_FEATURE_PNO; +#ifdef GSCAN_SUPPORT + /* terence 20171115: remove to get GTS PASS + * com.google.android.gts.wifi.WifiHostTest#testWifiScannerBatchTimestamp + */ +// feature_set |= WIFI_FEATURE_GSCAN; +// feature_set |= WIFI_FEATURE_HAL_EPNO; +#endif /* GSCAN_SUPPORT */ + } +#endif /* PNO_SUPPORT && !DISABLE_ANDROID_PNO */ +#ifdef RSSI_MONITOR_SUPPORT + if (FW_SUPPORTED(dhd, rssi_mon)) { + feature_set |= WIFI_FEATURE_RSSI_MONITOR; + } +#endif /* RSSI_MONITOR_SUPPORT */ +#ifdef WL11U + feature_set |= WIFI_FEATURE_HOTSPOT; +#endif /* WL11U */ +#ifdef NDO_CONFIG_SUPPORT + feature_set |= WIFI_FEATURE_CONFIG_NDO; +#endif /* NDO_CONFIG_SUPPORT */ +#ifdef KEEP_ALIVE + feature_set |= WIFI_FEATURE_MKEEP_ALIVE; +#endif /* KEEP_ALIVE */ +#ifdef FILTER_IE + if (FW_SUPPORTED(dhd, fie)) { + feature_set |= WIFI_FEATURE_FILTER_IE; + } +#endif /* FILTER_IE */ +#ifdef ROAMEXP_SUPPORT + /* Check if the Android O roam feature is supported by FW */ + if (!(BCME_UNSUPPORTED == dhd_dev_set_whitelist_ssid(dev, NULL, 0, true))) { + feature_set |= WIFI_FEATURE_CONTROL_ROAMING; + } +#endif /* ROAMEXP_SUPPORT */ + return feature_set; } -#if defined(BCMSDIO) || defined(BCMPCIE) || defined(BCMDBUS) int -dhd_net_bus_devreset(struct net_device *dev, uint8 flag) +dhd_dev_get_feature_set_matrix(struct net_device *dev, int num) { - int ret; - - dhd_info_t *dhd = DHD_DEV_INFO(dev); - -#ifdef DHD_PCIE_NATIVE_RUNTIMEPM - if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0) - return BCME_ERROR; -#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ - - if (flag == TRUE) { - /* Issue wl down command before resetting the chip */ - if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) { - DHD_TRACE(("%s: wl down failed\n", __FUNCTION__)); - } -#ifdef PROP_TXSTATUS - if (dhd->pub.wlfc_enabled) { - dhd_wlfc_deinit(&dhd->pub); - } -#endif /* PROP_TXSTATUS */ -#ifdef PNO_SUPPORT - if (dhd->pub.pno_state) { - dhd_pno_deinit(&dhd->pub); - } -#endif // endif 
-#ifdef RTT_SUPPORT - if (dhd->pub.rtt_state) { - dhd_rtt_deinit(&dhd->pub); - } -#endif /* RTT_SUPPORT */ + int feature_set_full; + int ret = 0; -#if defined(DBG_PKT_MON) && !defined(DBG_PKT_MON_INIT_DEFAULT) - dhd_os_dbg_detach_pkt_monitor(&dhd->pub); -#endif /* DBG_PKT_MON */ - } + feature_set_full = dhd_dev_get_feature_set(dev); -#ifdef BCMSDIO - if (!flag) { - dhd_update_fw_nv_path(dhd); - /* update firmware and nvram path to sdio bus */ - dhd_bus_update_fw_nv_path(dhd->pub.bus, - dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path); - } -#endif /* BCMSDIO */ + /* Common feature set for all interface */ + ret = (feature_set_full & WIFI_FEATURE_INFRA) | + (feature_set_full & WIFI_FEATURE_INFRA_5G) | + (feature_set_full & WIFI_FEATURE_D2D_RTT) | + (feature_set_full & WIFI_FEATURE_D2AP_RTT) | + (feature_set_full & WIFI_FEATURE_RSSI_MONITOR) | + (feature_set_full & WIFI_FEATURE_EPR); - ret = dhd_bus_devreset(&dhd->pub, flag); + /* Specific feature group for each interface */ + switch (num) { + case 0: + ret |= (feature_set_full & WIFI_FEATURE_P2P) | + /* Not supported yet */ + /* (feature_set_full & WIFI_FEATURE_NAN) | */ + (feature_set_full & WIFI_FEATURE_TDLS) | + (feature_set_full & WIFI_FEATURE_PNO) | + (feature_set_full & WIFI_FEATURE_HAL_EPNO) | + (feature_set_full & WIFI_FEATURE_BATCH_SCAN) | + (feature_set_full & WIFI_FEATURE_GSCAN) | + (feature_set_full & WIFI_FEATURE_HOTSPOT) | + (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA); + break; -#ifdef DHD_PCIE_NATIVE_RUNTIMEPM - pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus)); - pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus)); -#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + case 1: + ret |= (feature_set_full & WIFI_FEATURE_P2P); + /* Not yet verified NAN with P2P */ + /* (feature_set_full & WIFI_FEATURE_NAN) | */ + break; - if (flag) { - /* Clear some flags for recovery logic */ - dhd->pub.dongle_trap_occured = 0; - dhd->pub.iovar_timeout_occured = 0; -#ifdef PCIE_FULL_DONGLE - 
dhd->pub.d3ack_timeout_occured = 0; - dhd->pub.livelock_occured = 0; -#endif /* PCIE_FULL_DONGLE */ -#ifdef DHD_MAP_LOGGING - dhd->pub.smmu_fault_occurred = 0; -#endif /* DHD_MAP_LOGGING */ - } + case 2: + ret |= (feature_set_full & WIFI_FEATURE_NAN) | + (feature_set_full & WIFI_FEATURE_TDLS) | + (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL); + break; - if (ret) { - DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret)); + default: + ret = WIFI_FEATURE_INVALID; + DHD_ERROR(("%s: Out of index(%d) for get feature set\n", __FUNCTION__, num)); + break; } return ret; } -#ifdef BCMSDIO +#ifdef CUSTOM_FORCE_NODFS_FLAG int -dhd_net_bus_suspend(struct net_device *dev) +dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs) { dhd_info_t *dhd = DHD_DEV_INFO(dev); - return dhd_bus_suspend(&dhd->pub); + + if (nodfs) + dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG; + else + dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG; + dhd->pub.force_country_change = TRUE; + return 0; } +#endif /* CUSTOM_FORCE_NODFS_FLAG */ +#ifdef NDO_CONFIG_SUPPORT int -dhd_net_bus_resume(struct net_device *dev, uint8 stage) +dhd_dev_ndo_cfg(struct net_device *dev, u8 enable) { - dhd_info_t *dhd = DHD_DEV_INFO(dev); - return dhd_bus_resume(&dhd->pub, stage); -} + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + dhd_pub_t *dhdp = &dhd->pub; + int ret = 0; -#endif /* BCMSDIO */ -#endif /* BCMSDIO || BCMPCIE || BCMDBUS */ + if (enable) { + /* enable ND offload feature (will be enabled in FW on suspend) */ + dhdp->ndo_enable = TRUE; -int net_os_set_suspend_disable(struct net_device *dev, int val) -{ - dhd_info_t *dhd = DHD_DEV_INFO(dev); - int ret = 0; + /* Update changes of anycast address & DAD failed address */ + ret = dhd_dev_ndo_update_inet6addr(dev); + if ((ret < 0) && (ret != BCME_NORESOURCE)) { + DHD_ERROR(("%s: failed to update host ip addr: %d\n", __FUNCTION__, ret)); + return ret; + } + } else { + /* disable ND offload feature */ + dhdp->ndo_enable = FALSE; - if (dhd) { - ret = 
dhd->pub.suspend_disable_flag; - dhd->pub.suspend_disable_flag = val; + /* disable ND offload in FW */ + ret = dhd_ndo_enable(dhdp, FALSE); + if (ret < 0) { + DHD_ERROR(("%s: failed to disable NDO: %d\n", __FUNCTION__, ret)); + } } return ret; } -int net_os_set_suspend(struct net_device *dev, int val, int force) +/* #pragma used as a WAR to fix build failure, +* ignore dropping of 'const' qualifier in 'list_entry' macro +* this pragma disables the warning only for the following function +*/ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" + +static int +dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev *inet6) { - int ret = 0; - dhd_info_t *dhd = DHD_DEV_INFO(dev); + struct inet6_ifaddr *ifa; + struct ifacaddr6 *acaddr = NULL; + int addr_count = 0; - if (dhd) { -#ifdef CONFIG_MACH_UNIVERSAL7420 -#endif /* CONFIG_MACH_UNIVERSAL7420 */ -#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) - ret = dhd_set_suspend(val, &dhd->pub); -#else - ret = dhd_suspend_resume_helper(dhd, val, force); -#endif // endif -#ifdef WL_CFG80211 - wl_cfg80211_update_power_mode(dev); -#endif // endif + /* lock */ + read_lock_bh(&inet6->lock); + + /* Count valid unicast address */ + list_for_each_entry(ifa, &inet6->addr_list, if_list) { + if ((ifa->flags & IFA_F_DADFAILED) == 0) { + addr_count++; + } } - return ret; + + /* Count anycast address */ + acaddr = inet6->ac_list; + while (acaddr) { + addr_count++; + acaddr = acaddr->aca_next; + } + + /* unlock */ + read_unlock_bh(&inet6->lock); + + return addr_count; } -int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val) +int +dhd_dev_ndo_update_inet6addr(struct net_device *dev) { - dhd_info_t *dhd = DHD_DEV_INFO(dev); + dhd_info_t *dhd; + dhd_pub_t *dhdp; + struct inet6_dev *inet6; + struct inet6_ifaddr *ifa; + struct ifacaddr6 *acaddr = NULL; + struct in6_addr *ipv6_addr = NULL; + int cnt, i; + int ret = BCME_OK; - if (dhd) - dhd->pub.suspend_bcn_li_dtim = val; + /* + * this 
function evaulates host ip address in struct inet6_dev + * unicast addr in inet6_dev->addr_list + * anycast addr in inet6_dev->ac_list + * while evaluating inet6_dev, read_lock_bh() is required to prevent + * access on null(freed) pointer. + */ - return 0; -} + if (dev) { + inet6 = dev->ip6_ptr; + if (!inet6) { + DHD_ERROR(("%s: Invalid inet6_dev\n", __FUNCTION__)); + return BCME_ERROR; + } + + dhd = DHD_DEV_INFO(dev); + if (!dhd) { + DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__)); + return BCME_ERROR; + } + dhdp = &dhd->pub; + + if (dhd_net2idx(dhd, dev) != 0) { + DHD_ERROR(("%s: Not primary interface\n", __FUNCTION__)); + return BCME_ERROR; + } + } else { + DHD_ERROR(("%s: Invalid net_device\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* Check host IP overflow */ + cnt = dhd_dev_ndo_get_valid_inet6addr_count(inet6); + if (cnt > dhdp->ndo_max_host_ip) { + if (!dhdp->ndo_host_ip_overflow) { + dhdp->ndo_host_ip_overflow = TRUE; + /* Disable ND offload in FW */ + DHD_INFO(("%s: Host IP overflow, disable NDO\n", __FUNCTION__)); + ret = dhd_ndo_enable(dhdp, FALSE); + } + + return ret; + } -int net_os_set_max_dtim_enable(struct net_device *dev, int val) -{ - dhd_info_t *dhd = DHD_DEV_INFO(dev); + /* + * Allocate ipv6 addr buffer to store addresses to be added/removed. + * driver need to lock inet6_dev while accessing structure. but, driver + * cannot use ioctl while inet6_dev locked since it requires scheduling + * hence, copy addresses to the buffer and do ioctl after unlock. + */ + ipv6_addr = (struct in6_addr *)MALLOC(dhdp->osh, + sizeof(struct in6_addr) * dhdp->ndo_max_host_ip); + if (!ipv6_addr) { + DHD_ERROR(("%s: failed to alloc ipv6 addr buffer\n", __FUNCTION__)); + return BCME_NOMEM; + } - if (dhd) { - DHD_ERROR(("%s: use MAX bcn_li_dtim in suspend %s\n", - __FUNCTION__, (val ? 
"Enable" : "Disable"))); - if (val) { - dhd->pub.max_dtim_enable = TRUE; - } else { - dhd->pub.max_dtim_enable = FALSE; + /* Find DAD failed unicast address to be removed */ + cnt = 0; + read_lock_bh(&inet6->lock); + list_for_each_entry(ifa, &inet6->addr_list, if_list) { + /* DAD failed unicast address */ + if ((ifa->flags & IFA_F_DADFAILED) && + (cnt < dhdp->ndo_max_host_ip)) { + memcpy(&ipv6_addr[cnt], &ifa->addr, sizeof(struct in6_addr)); + cnt++; } - } else { - return -1; } + read_unlock_bh(&inet6->lock); - return 0; -} - -#if defined(PCIE_FULL_DONGLE) -void -dhd_pcie_backplane_access_lock(dhd_pub_t * pub) -{ - dhd_info_t *dhd = (dhd_info_t *)(pub->info); - spin_lock_bh(&dhd->backplane_access_lock); -} + /* Remove DAD failed unicast address */ + for (i = 0; i < cnt; i++) { + DHD_INFO(("%s: Remove DAD failed addr\n", __FUNCTION__)); + ret = dhd_ndo_remove_ip_by_addr(dhdp, (char *)&ipv6_addr[i], 0); + if (ret < 0) { + goto done; + } + } -void -dhd_pcie_backplane_access_unlock(dhd_pub_t * pub) -{ - dhd_info_t *dhd = (dhd_info_t *)(pub->info); - spin_unlock_bh(&dhd->backplane_access_lock); -} -#endif /* defined(PCIE_FULL_DONGLE) */ + /* Remove all anycast address */ + ret = dhd_ndo_remove_ip_by_type(dhdp, WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0); + if (ret < 0) { + goto done; + } -#ifdef PKT_FILTER_SUPPORT -int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num) -{ - int ret = 0; + /* + * if ND offload was disabled due to host ip overflow, + * attempt to add valid unicast address. 
+ */ + if (dhdp->ndo_host_ip_overflow) { + /* Find valid unicast address */ + cnt = 0; + read_lock_bh(&inet6->lock); + list_for_each_entry(ifa, &inet6->addr_list, if_list) { + /* valid unicast address */ + if (!(ifa->flags & IFA_F_DADFAILED) && + (cnt < dhdp->ndo_max_host_ip)) { + memcpy(&ipv6_addr[cnt], &ifa->addr, + sizeof(struct in6_addr)); + cnt++; + } + } + read_unlock_bh(&inet6->lock); -#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER - dhd_info_t *dhd = DHD_DEV_INFO(dev); + /* Add valid unicast address */ + for (i = 0; i < cnt; i++) { + ret = dhd_ndo_add_ip_with_type(dhdp, + (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_UNICAST, 0); + if (ret < 0) { + goto done; + } + } + } - if (!dhd_master_mode) - add_remove = !add_remove; - DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num)); - if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) { - return 0; + /* Find anycast address */ + cnt = 0; + read_lock_bh(&inet6->lock); + acaddr = inet6->ac_list; + while (acaddr) { + if (cnt < dhdp->ndo_max_host_ip) { + memcpy(&ipv6_addr[cnt], &acaddr->aca_addr, sizeof(struct in6_addr)); + cnt++; + } + acaddr = acaddr->aca_next; } + read_unlock_bh(&inet6->lock); -#ifdef BLOCK_IPV6_PACKET - /* customer want to use NO IPV6 packets only */ - if (num == DHD_MULTICAST6_FILTER_NUM) { - return 0; + /* Add anycast address */ + for (i = 0; i < cnt; i++) { + ret = dhd_ndo_add_ip_with_type(dhdp, + (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0); + if (ret < 0) { + goto done; + } } -#endif /* BLOCK_IPV6_PACKET */ - if (num >= dhd->pub.pktfilter_count) { - return -EINVAL; + /* Now All host IP addr were added successfully */ + if (dhdp->ndo_host_ip_overflow) { + dhdp->ndo_host_ip_overflow = FALSE; + if (dhdp->in_suspend) { + /* drvier is in (early) suspend state, need to enable ND offload in FW */ + DHD_INFO(("%s: enable NDO\n", __FUNCTION__)); + ret = dhd_ndo_enable(dhdp, TRUE); + } } - ret = dhd_packet_filter_add_remove(&dhd->pub, add_remove, num); -#endif /* 
!GAN_LITE_NAT_KEEPALIVE_FILTER */ +done: + if (ipv6_addr) { + MFREE(dhdp->osh, ipv6_addr, sizeof(struct in6_addr) * dhdp->ndo_max_host_ip); + } return ret; } +#pragma GCC diagnostic pop -int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val) +#endif /* NDO_CONFIG_SUPPORT */ +#ifdef PNO_SUPPORT +/* Linux wrapper to call common dhd_pno_stop_for_ssid */ +int +dhd_dev_pno_stop_for_ssid(struct net_device *dev) { - int ret = 0; + dhd_info_t *dhd = DHD_DEV_INFO(dev); - /* Packet filtering is set only if we still in early-suspend and - * we need either to turn it ON or turn it OFF - * We can always turn it OFF in case of early-suspend, but we turn it - * back ON only if suspend_disable_flag was not set - */ - if (dhdp && dhdp->up) { - if (dhdp->in_suspend) { - if (!val || (val && !dhdp->suspend_disable_flag)) - dhd_enable_packet_filter(val, dhdp); - } - } - return ret; + return (dhd_pno_stop_for_ssid(&dhd->pub)); } -/* function to enable/disable packet for Network device */ -int net_os_enable_packet_filter(struct net_device *dev, int val) +/* Linux wrapper to call common dhd_pno_set_for_ssid */ +int +dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid, + uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan) { dhd_info_t *dhd = DHD_DEV_INFO(dev); - DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val)); - return dhd_os_enable_packet_filter(&dhd->pub, val); + return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr, + pno_repeat, pno_freq_expo_max, channel_list, nchan)); } -#endif /* PKT_FILTER_SUPPORT */ +/* Linux wrapper to call common dhd_pno_enable */ int -dhd_dev_init_ioctl(struct net_device *dev) +dhd_dev_pno_enable(struct net_device *dev, int enable) { dhd_info_t *dhd = DHD_DEV_INFO(dev); - int ret; - - if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) - goto done; -done: - return ret; + return (dhd_pno_enable(&dhd->pub, enable)); } +/* Linux wrapper to call common dhd_pno_set_for_hotlist */ 
int -dhd_dev_get_feature_set(struct net_device *dev) +dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid, + struct dhd_pno_hotlist_params *hotlist_params) { - dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev); - dhd_pub_t *dhd = (&ptr->pub); - int feature_set = 0; - - if (FW_SUPPORTED(dhd, sta)) - feature_set |= WIFI_FEATURE_INFRA; - if (FW_SUPPORTED(dhd, dualband)) - feature_set |= WIFI_FEATURE_INFRA_5G; - if (FW_SUPPORTED(dhd, p2p)) - feature_set |= WIFI_FEATURE_P2P; - if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) - feature_set |= WIFI_FEATURE_SOFT_AP; - if (FW_SUPPORTED(dhd, tdls)) - feature_set |= WIFI_FEATURE_TDLS; - if (FW_SUPPORTED(dhd, vsdb)) - feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL; - if (FW_SUPPORTED(dhd, nan)) { - feature_set |= WIFI_FEATURE_NAN; - /* NAN is essentail for d2d rtt */ - if (FW_SUPPORTED(dhd, rttd2d)) - feature_set |= WIFI_FEATURE_D2D_RTT; - } -#ifdef RTT_SUPPORT - if (dhd->rtt_supported) { - feature_set |= WIFI_FEATURE_D2D_RTT; - feature_set |= WIFI_FEATURE_D2AP_RTT; - } -#endif /* RTT_SUPPORT */ -#ifdef LINKSTAT_SUPPORT - feature_set |= WIFI_FEATURE_LINKSTAT; -#endif /* LINKSTAT_SUPPORT */ - -#if defined(PNO_SUPPORT) && !defined(DISABLE_ANDROID_PNO) - if (dhd_is_pno_supported(dhd)) { - feature_set |= WIFI_FEATURE_PNO; -#ifdef GSCAN_SUPPORT - /* terence 20171115: remove to get GTS PASS - * com.google.android.gts.wifi.WifiHostTest#testWifiScannerBatchTimestamp - */ -// feature_set |= WIFI_FEATURE_GSCAN; -// feature_set |= WIFI_FEATURE_HAL_EPNO; -#endif /* GSCAN_SUPPORT */ - } -#endif /* PNO_SUPPORT && !DISABLE_ANDROID_PNO */ -#ifdef RSSI_MONITOR_SUPPORT - if (FW_SUPPORTED(dhd, rssi_mon)) { - feature_set |= WIFI_FEATURE_RSSI_MONITOR; - } -#endif /* RSSI_MONITOR_SUPPORT */ -#ifdef WL11U - feature_set |= WIFI_FEATURE_HOTSPOT; -#endif /* WL11U */ -#ifdef NDO_CONFIG_SUPPORT - feature_set |= WIFI_FEATURE_CONFIG_NDO; -#endif /* NDO_CONFIG_SUPPORT */ -#ifdef KEEP_ALIVE - feature_set |= WIFI_FEATURE_MKEEP_ALIVE; -#endif 
/* KEEP_ALIVE */ -#ifdef FILTER_IE - if (FW_SUPPORTED(dhd, fie)) { - feature_set |= WIFI_FEATURE_FILTER_IE; - } -#endif /* FILTER_IE */ -#ifdef ROAMEXP_SUPPORT - /* Check if the Android O roam feature is supported by FW */ - if (!(BCME_UNSUPPORTED == dhd_dev_set_whitelist_ssid(dev, NULL, 0, true))) { - feature_set |= WIFI_FEATURE_CONTROL_ROAMING; - } -#endif /* ROAMEXP_SUPPORT */ - return feature_set; + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params)); +} +/* Linux wrapper to call common dhd_dev_pno_stop_for_batch */ +int +dhd_dev_pno_stop_for_batch(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return (dhd_pno_stop_for_batch(&dhd->pub)); } +/* Linux wrapper to call common dhd_dev_pno_set_for_batch */ int -dhd_dev_get_feature_set_matrix(struct net_device *dev, int num) +dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params) { - int feature_set_full; - int ret = 0; - - feature_set_full = dhd_dev_get_feature_set(dev); - - /* Common feature set for all interface */ - ret = (feature_set_full & WIFI_FEATURE_INFRA) | - (feature_set_full & WIFI_FEATURE_INFRA_5G) | - (feature_set_full & WIFI_FEATURE_D2D_RTT) | - (feature_set_full & WIFI_FEATURE_D2AP_RTT) | - (feature_set_full & WIFI_FEATURE_RSSI_MONITOR) | - (feature_set_full & WIFI_FEATURE_EPR); + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return (dhd_pno_set_for_batch(&dhd->pub, batch_params)); +} - /* Specific feature group for each interface */ - switch (num) { - case 0: - ret |= (feature_set_full & WIFI_FEATURE_P2P) | - /* Not supported yet */ - /* (feature_set_full & WIFI_FEATURE_NAN) | */ - (feature_set_full & WIFI_FEATURE_TDLS) | - (feature_set_full & WIFI_FEATURE_PNO) | - (feature_set_full & WIFI_FEATURE_HAL_EPNO) | - (feature_set_full & WIFI_FEATURE_BATCH_SCAN) | - (feature_set_full & WIFI_FEATURE_GSCAN) | - (feature_set_full & WIFI_FEATURE_HOTSPOT) | - (feature_set_full & 
WIFI_FEATURE_ADDITIONAL_STA); - break; +/* Linux wrapper to call common dhd_dev_pno_get_for_batch */ +int +dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL)); +} +#endif /* PNO_SUPPORT */ - case 1: - ret |= (feature_set_full & WIFI_FEATURE_P2P); - /* Not yet verified NAN with P2P */ - /* (feature_set_full & WIFI_FEATURE_NAN) | */ - break; +#if defined(PNO_SUPPORT) +#ifdef GSCAN_SUPPORT +bool +dhd_dev_is_legacy_pno_enabled(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - case 2: - ret |= (feature_set_full & WIFI_FEATURE_NAN) | - (feature_set_full & WIFI_FEATURE_TDLS) | - (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL); - break; + return (dhd_is_legacy_pno_enabled(&dhd->pub)); +} - default: - ret = WIFI_FEATURE_INVALID; - DHD_ERROR(("%s: Out of index(%d) for get feature set\n", __FUNCTION__, num)); - break; +int +dhd_dev_set_epno(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + if (!dhd) { + return BCME_ERROR; } - - return ret; + return dhd_pno_set_epno(&dhd->pub); } - -#ifdef CUSTOM_FORCE_NODFS_FLAG int -dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs) +dhd_dev_flush_fw_epno(struct net_device *dev) { dhd_info_t *dhd = DHD_DEV_INFO(dev); - - if (nodfs) - dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG; - else - dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG; - dhd->pub.force_country_change = TRUE; - return 0; + if (!dhd) { + return BCME_ERROR; + } + return dhd_pno_flush_fw_epno(&dhd->pub); } -#endif /* CUSTOM_FORCE_NODFS_FLAG */ -#ifdef NDO_CONFIG_SUPPORT +/* Linux wrapper to call common dhd_pno_set_cfg_gscan */ int -dhd_dev_ndo_cfg(struct net_device *dev, u8 enable) +dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, + void *buf, bool flush) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - dhd_pub_t *dhdp = &dhd->pub; - int ret = 0; - if 
(enable) { - /* enable ND offload feature (will be enabled in FW on suspend) */ - dhdp->ndo_enable = TRUE; + return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush)); +} - /* Update changes of anycast address & DAD failed address */ - ret = dhd_dev_ndo_update_inet6addr(dev); - if ((ret < 0) && (ret != BCME_NORESOURCE)) { - DHD_ERROR(("%s: failed to update host ip addr: %d\n", __FUNCTION__, ret)); - return ret; - } - } else { - /* disable ND offload feature */ - dhdp->ndo_enable = FALSE; +/* Linux wrapper to call common dhd_wait_batch_results_complete */ +int +dhd_dev_wait_batch_results_complete(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - /* disable ND offload in FW */ - ret = dhd_ndo_enable(dhdp, FALSE); - if (ret < 0) { - DHD_ERROR(("%s: failed to disable NDO: %d\n", __FUNCTION__, ret)); - } - } - return ret; + return (dhd_wait_batch_results_complete(&dhd->pub)); } -/* #pragma used as a WAR to fix build failure, -* ignore dropping of 'const' qualifier in 'list_entry' macro -* this pragma disables the warning only for the following function -*/ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" +/* Linux wrapper to call common dhd_pno_lock_batch_results */ +int +dhd_dev_pno_lock_access_batch_results(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); -static int -dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev *inet6) + return (dhd_pno_lock_batch_results(&dhd->pub)); +} +/* Linux wrapper to call common dhd_pno_unlock_batch_results */ +void +dhd_dev_pno_unlock_access_batch_results(struct net_device *dev) { - struct inet6_ifaddr *ifa; - struct ifacaddr6 *acaddr = NULL; - int addr_count = 0; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - /* lock */ - read_lock_bh(&inet6->lock); + return (dhd_pno_unlock_batch_results(&dhd->pub)); +} - /* Count valid unicast address */ - list_for_each_entry(ifa, &inet6->addr_list, if_list) { - if ((ifa->flags & IFA_F_DADFAILED) 
== 0) { - addr_count++; - } - } +/* Linux wrapper to call common dhd_pno_initiate_gscan_request */ +int +dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - /* Count anycast address */ - acaddr = inet6->ac_list; - while (acaddr) { - addr_count++; - acaddr = acaddr->aca_next; - } + return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush)); +} - /* unlock */ - read_unlock_bh(&inet6->lock); +/* Linux wrapper to call common dhd_pno_enable_full_scan_result */ +int +dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - return addr_count; + return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag)); } -int -dhd_dev_ndo_update_inet6addr(struct net_device *dev) +/* Linux wrapper to call common dhd_handle_hotlist_scan_evt */ +void * +dhd_dev_hotlist_scan_event(struct net_device *dev, + const void *data, int *send_evt_bytes, hotlist_type_t type, u32 *buf_len) { - dhd_info_t *dhd; - dhd_pub_t *dhdp; - struct inet6_dev *inet6; - struct inet6_ifaddr *ifa; - struct ifacaddr6 *acaddr = NULL; - struct in6_addr *ipv6_addr = NULL; - int cnt, i; - int ret = BCME_OK; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - /* - * this function evaulates host ip address in struct inet6_dev - * unicast addr in inet6_dev->addr_list - * anycast addr in inet6_dev->ac_list - * while evaluating inet6_dev, read_lock_bh() is required to prevent - * access on null(freed) pointer. 
- */ + return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type, buf_len)); +} - if (dev) { - inet6 = dev->ip6_ptr; - if (!inet6) { - DHD_ERROR(("%s: Invalid inet6_dev\n", __FUNCTION__)); - return BCME_ERROR; - } +/* Linux wrapper to call common dhd_process_full_gscan_result */ +void * +dhd_dev_process_full_gscan_result(struct net_device *dev, +const void *data, uint32 len, int *send_evt_bytes) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - dhd = DHD_DEV_INFO(dev); - if (!dhd) { - DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__)); - return BCME_ERROR; - } - dhdp = &dhd->pub; + return (dhd_process_full_gscan_result(&dhd->pub, data, len, send_evt_bytes)); +} - if (dhd_net2idx(dhd, dev) != 0) { - DHD_ERROR(("%s: Not primary interface\n", __FUNCTION__)); - return BCME_ERROR; - } - } else { - DHD_ERROR(("%s: Invalid net_device\n", __FUNCTION__)); - return BCME_ERROR; - } +void +dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - /* Check host IP overflow */ - cnt = dhd_dev_ndo_get_valid_inet6addr_count(inet6); - if (cnt > dhdp->ndo_max_host_ip) { - if (!dhdp->ndo_host_ip_overflow) { - dhdp->ndo_host_ip_overflow = TRUE; - /* Disable ND offload in FW */ - DHD_INFO(("%s: Host IP overflow, disable NDO\n", __FUNCTION__)); - ret = dhd_ndo_enable(dhdp, FALSE); - } + dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type); - return ret; - } + return; +} + +int +dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - /* - * Allocate ipv6 addr buffer to store addresses to be added/removed. - * driver need to lock inet6_dev while accessing structure. but, driver - * cannot use ioctl while inet6_dev locked since it requires scheduling - * hence, copy addresses to the buffer and do ioctl after unlock. 
- */ - ipv6_addr = (struct in6_addr *)MALLOC(dhdp->osh, - sizeof(struct in6_addr) * dhdp->ndo_max_host_ip); - if (!ipv6_addr) { - DHD_ERROR(("%s: failed to alloc ipv6 addr buffer\n", __FUNCTION__)); - return BCME_NOMEM; - } + return (dhd_gscan_batch_cache_cleanup(&dhd->pub)); +} - /* Find DAD failed unicast address to be removed */ - cnt = 0; - read_lock_bh(&inet6->lock); - list_for_each_entry(ifa, &inet6->addr_list, if_list) { - /* DAD failed unicast address */ - if ((ifa->flags & IFA_F_DADFAILED) && - (cnt < dhdp->ndo_max_host_ip)) { - memcpy(&ipv6_addr[cnt], &ifa->addr, sizeof(struct in6_addr)); - cnt++; - } - } - read_unlock_bh(&inet6->lock); +/* Linux wrapper to call common dhd_retreive_batch_scan_results */ +int +dhd_dev_retrieve_batch_scan(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - /* Remove DAD failed unicast address */ - for (i = 0; i < cnt; i++) { - DHD_INFO(("%s: Remove DAD failed addr\n", __FUNCTION__)); - ret = dhd_ndo_remove_ip_by_addr(dhdp, (char *)&ipv6_addr[i], 0); - if (ret < 0) { - goto done; - } - } + return (dhd_retreive_batch_scan_results(&dhd->pub)); +} - /* Remove all anycast address */ - ret = dhd_ndo_remove_ip_by_type(dhdp, WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0); - if (ret < 0) { - goto done; - } +/* Linux wrapper to call common dhd_pno_process_epno_result */ +void * dhd_dev_process_epno_result(struct net_device *dev, + const void *data, uint32 event, int *send_evt_bytes) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - /* - * if ND offload was disabled due to host ip overflow, - * attempt to add valid unicast address. 
- */ - if (dhdp->ndo_host_ip_overflow) { - /* Find valid unicast address */ - cnt = 0; - read_lock_bh(&inet6->lock); - list_for_each_entry(ifa, &inet6->addr_list, if_list) { - /* valid unicast address */ - if (!(ifa->flags & IFA_F_DADFAILED) && - (cnt < dhdp->ndo_max_host_ip)) { - memcpy(&ipv6_addr[cnt], &ifa->addr, - sizeof(struct in6_addr)); - cnt++; - } - } - read_unlock_bh(&inet6->lock); + return (dhd_pno_process_epno_result(&dhd->pub, data, event, send_evt_bytes)); +} - /* Add valid unicast address */ - for (i = 0; i < cnt; i++) { - ret = dhd_ndo_add_ip_with_type(dhdp, - (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_UNICAST, 0); - if (ret < 0) { - goto done; - } - } - } +int +dhd_dev_set_lazy_roam_cfg(struct net_device *dev, + wlc_roam_exp_params_t *roam_param) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + wl_roam_exp_cfg_t roam_exp_cfg; + int err; - /* Find anycast address */ - cnt = 0; - read_lock_bh(&inet6->lock); - acaddr = inet6->ac_list; - while (acaddr) { - if (cnt < dhdp->ndo_max_host_ip) { - memcpy(&ipv6_addr[cnt], &acaddr->aca_addr, sizeof(struct in6_addr)); - cnt++; - } - acaddr = acaddr->aca_next; + if (!roam_param) { + return BCME_BADARG; } - read_unlock_bh(&inet6->lock); - /* Add anycast address */ - for (i = 0; i < cnt; i++) { - ret = dhd_ndo_add_ip_with_type(dhdp, - (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0); - if (ret < 0) { - goto done; - } - } + DHD_INFO(("a_band_boost_thr %d a_band_penalty_thr %d\n", + roam_param->a_band_boost_threshold, roam_param->a_band_penalty_threshold)); + DHD_INFO(("a_band_boost_factor %d a_band_penalty_factor %d cur_bssid_boost %d\n", + roam_param->a_band_boost_factor, roam_param->a_band_penalty_factor, + roam_param->cur_bssid_boost)); + DHD_INFO(("alert_roam_trigger_thr %d a_band_max_boost %d\n", + roam_param->alert_roam_trigger_threshold, roam_param->a_band_max_boost)); - /* Now All host IP addr were added successfully */ - if (dhdp->ndo_host_ip_overflow) { - dhdp->ndo_host_ip_overflow = 
FALSE; - if (dhdp->in_suspend) { - /* drvier is in (early) suspend state, need to enable ND offload in FW */ - DHD_INFO(("%s: enable NDO\n", __FUNCTION__)); - ret = dhd_ndo_enable(dhdp, TRUE); - } + memcpy(&roam_exp_cfg.params, roam_param, sizeof(*roam_param)); + roam_exp_cfg.version = ROAM_EXP_CFG_VERSION; + roam_exp_cfg.flags = ROAM_EXP_CFG_PRESENT; + if (dhd->pub.lazy_roam_enable) { + roam_exp_cfg.flags |= ROAM_EXP_ENABLE_FLAG; } - -done: - if (ipv6_addr) { - MFREE(dhdp->osh, ipv6_addr, sizeof(struct in6_addr) * dhdp->ndo_max_host_ip); + err = dhd_iovar(&dhd->pub, 0, "roam_exp_params", + (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0, + TRUE); + if (err < 0) { + DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err)); } - - return ret; + return err; } -#pragma GCC diagnostic pop - -#endif /* NDO_CONFIG_SUPPORT */ -#ifdef PNO_SUPPORT -/* Linux wrapper to call common dhd_pno_stop_for_ssid */ int -dhd_dev_pno_stop_for_ssid(struct net_device *dev) +dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable) { - dhd_info_t *dhd = DHD_DEV_INFO(dev); + int err; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + wl_roam_exp_cfg_t roam_exp_cfg; - return (dhd_pno_stop_for_ssid(&dhd->pub)); + memset(&roam_exp_cfg, 0, sizeof(roam_exp_cfg)); + roam_exp_cfg.version = ROAM_EXP_CFG_VERSION; + if (enable) { + roam_exp_cfg.flags = ROAM_EXP_ENABLE_FLAG; + } + + err = dhd_iovar(&dhd->pub, 0, "roam_exp_params", + (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0, + TRUE); + if (err < 0) { + DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err)); + } else { + dhd->pub.lazy_roam_enable = (enable != 0); + } + return err; } -/* Linux wrapper to call common dhd_pno_set_for_ssid */ int -dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid, - uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan) +dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev, + 
wl_bssid_pref_cfg_t *bssid_pref, uint32 flush) { - dhd_info_t *dhd = DHD_DEV_INFO(dev); + int err; + uint len; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr, - pno_repeat, pno_freq_expo_max, channel_list, nchan)); + bssid_pref->version = BSSID_PREF_LIST_VERSION; + /* By default programming bssid pref flushes out old values */ + bssid_pref->flags = (flush && !bssid_pref->count) ? ROAM_EXP_CLEAR_BSSID_PREF: 0; + len = sizeof(wl_bssid_pref_cfg_t); + if (bssid_pref->count) { + len += (bssid_pref->count - 1) * sizeof(wl_bssid_pref_list_t); + } + err = dhd_iovar(&dhd->pub, 0, "roam_exp_bssid_pref", + (char *)bssid_pref, len, NULL, 0, TRUE); + if (err != BCME_OK) { + DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err)); + } + return err; } +#endif /* GSCAN_SUPPORT */ -/* Linux wrapper to call common dhd_pno_enable */ +#if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT) int -dhd_dev_pno_enable(struct net_device *dev, int enable) +dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist, + uint32 len, uint32 flush) { - dhd_info_t *dhd = DHD_DEV_INFO(dev); + int err; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + int macmode; - return (dhd_pno_enable(&dhd->pub, enable)); + if (blacklist) { + err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACLIST, (char *)blacklist, + len, TRUE, 0); + if (err != BCME_OK) { + DHD_ERROR(("%s : WLC_SET_MACLIST failed %d\n", __FUNCTION__, err)); + return err; + } + } + /* By default programming blacklist flushes out old values */ + macmode = (flush && !blacklist) ? 
WLC_MACMODE_DISABLED : WLC_MACMODE_DENY; + err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACMODE, (char *)&macmode, + sizeof(macmode), TRUE, 0); + if (err != BCME_OK) { + DHD_ERROR(("%s : WLC_SET_MACMODE failed %d\n", __FUNCTION__, err)); + } + return err; } -/* Linux wrapper to call common dhd_pno_set_for_hotlist */ int -dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid, - struct dhd_pno_hotlist_params *hotlist_params) +dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *ssid_whitelist, + uint32 len, uint32 flush) { - dhd_info_t *dhd = DHD_DEV_INFO(dev); - return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params)); + int err; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + wl_ssid_whitelist_t whitelist_ssid_flush; + + if (!ssid_whitelist) { + if (flush) { + ssid_whitelist = &whitelist_ssid_flush; + ssid_whitelist->ssid_count = 0; + } else { + DHD_ERROR(("%s : Nothing to do here\n", __FUNCTION__)); + return BCME_BADARG; + } + } + ssid_whitelist->version = SSID_WHITELIST_VERSION; + ssid_whitelist->flags = flush ? 
ROAM_EXP_CLEAR_SSID_WHITELIST : 0; + err = dhd_iovar(&dhd->pub, 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist, len, NULL, + 0, TRUE); + if (err != BCME_OK) { + DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err)); + } + return err; } -/* Linux wrapper to call common dhd_dev_pno_stop_for_batch */ -int -dhd_dev_pno_stop_for_batch(struct net_device *dev) +#endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */ + +#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS) +/* Linux wrapper to call common dhd_pno_get_gscan */ +void * +dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, + void *info, uint32 *len) { - dhd_info_t *dhd = DHD_DEV_INFO(dev); - return (dhd_pno_stop_for_batch(&dhd->pub)); -} + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); -/* Linux wrapper to call common dhd_dev_pno_set_for_batch */ -int -dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params) -{ - dhd_info_t *dhd = DHD_DEV_INFO(dev); - return (dhd_pno_set_for_batch(&dhd->pub, batch_params)); + return (dhd_pno_get_gscan(&dhd->pub, type, info, len)); } +#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */ +#endif // endif -/* Linux wrapper to call common dhd_dev_pno_get_for_batch */ +#ifdef RSSI_MONITOR_SUPPORT int -dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize) +dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start, + int8 max_rssi, int8 min_rssi) { - dhd_info_t *dhd = DHD_DEV_INFO(dev); - return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL)); + int err; + wl_rssi_monitor_cfg_t rssi_monitor; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + rssi_monitor.version = RSSI_MONITOR_VERSION; + rssi_monitor.max_rssi = max_rssi; + rssi_monitor.min_rssi = min_rssi; + rssi_monitor.flags = start ? 
0: RSSI_MONITOR_STOP; + err = dhd_iovar(&dhd->pub, 0, "rssi_monitor", (char *)&rssi_monitor, sizeof(rssi_monitor), + NULL, 0, TRUE); + if (err < 0 && err != BCME_UNSUPPORTED) { + DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__, err)); + } + return err; } -#endif /* PNO_SUPPORT */ +#endif /* RSSI_MONITOR_SUPPORT */ -#if defined(PNO_SUPPORT) -#ifdef GSCAN_SUPPORT -bool -dhd_dev_is_legacy_pno_enabled(struct net_device *dev) +#ifdef DHDTCPACK_SUPPRESS +int +dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable) { + int err; dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - return (dhd_is_legacy_pno_enabled(&dhd->pub)); + err = dhd_tcpack_suppress_set(&dhd->pub, enable); + if (err != BCME_OK) { + DHD_ERROR(("%s : Failed to set tcpack_suppress mode: %d\n", __FUNCTION__, err)); + } + return err; } +#endif /* DHDTCPACK_SUPPRESS */ int -dhd_dev_set_epno(struct net_device *dev) +dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui) { - dhd_info_t *dhd = DHD_DEV_INFO(dev); - if (!dhd) { + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + dhd_pub_t *dhdp = &dhd->pub; + + if (!dhdp || !oui) { + DHD_ERROR(("NULL POINTER : %s\n", + __FUNCTION__)); return BCME_ERROR; } - return dhd_pno_set_epno(&dhd->pub); + if (ETHER_ISMULTI(oui)) { + DHD_ERROR(("Expected unicast OUI\n")); + return BCME_ERROR; + } else { + uint8 *rand_mac_oui = dhdp->rand_mac_oui; + memcpy(rand_mac_oui, oui, DOT11_OUI_LEN); + DHD_ERROR(("Random MAC OUI to be used - "MACOUIDBG"\n", + MACOUI2STRDBG(rand_mac_oui))); + } + return BCME_OK; } + int -dhd_dev_flush_fw_epno(struct net_device *dev) +dhd_set_rand_mac_oui(dhd_pub_t *dhd) { - dhd_info_t *dhd = DHD_DEV_INFO(dev); - if (!dhd) { - return BCME_ERROR; + int err; + wl_pfn_macaddr_cfg_t wl_cfg; + uint8 *rand_mac_oui = dhd->rand_mac_oui; + + memset(&wl_cfg.macaddr, 0, ETHER_ADDR_LEN); + memcpy(&wl_cfg.macaddr, rand_mac_oui, DOT11_OUI_LEN); + wl_cfg.version = WL_PFN_MACADDR_CFG_VER; + if 
(ETHER_ISNULLADDR(&wl_cfg.macaddr)) { + wl_cfg.flags = 0; + } else { + wl_cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK); } - return dhd_pno_flush_fw_epno(&dhd->pub); + + DHD_ERROR(("Setting rand mac oui to FW - "MACOUIDBG"\n", + MACOUI2STRDBG(rand_mac_oui))); + + err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&wl_cfg, sizeof(wl_cfg), NULL, 0, TRUE); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_macaddr %d\n", __FUNCTION__, err)); + } + return err; } +#if defined(RTT_SUPPORT) && defined(WL_CFG80211) /* Linux wrapper to call common dhd_pno_set_cfg_gscan */ int -dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, - void *buf, bool flush) +dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush)); + return (dhd_rtt_set_cfg(&dhd->pub, buf)); } -/* Linux wrapper to call common dhd_wait_batch_results_complete */ int -dhd_dev_wait_batch_results_complete(struct net_device *dev) +dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - return (dhd_wait_batch_results_complete(&dhd->pub)); + return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt)); } -/* Linux wrapper to call common dhd_pno_lock_batch_results */ int -dhd_dev_pno_lock_access_batch_results(struct net_device *dev) +dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - return (dhd_pno_lock_batch_results(&dhd->pub)); + return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn)); } -/* Linux wrapper to call common dhd_pno_unlock_batch_results */ -void -dhd_dev_pno_unlock_access_batch_results(struct net_device *dev) + +int +dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn) { dhd_info_t *dhd = *(dhd_info_t 
**)netdev_priv(dev); - return (dhd_pno_unlock_batch_results(&dhd->pub)); + return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn)); } -/* Linux wrapper to call common dhd_pno_initiate_gscan_request */ int -dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush) +dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush)); + return (dhd_rtt_capability(&dhd->pub, capa)); } -/* Linux wrapper to call common dhd_pno_enable_full_scan_result */ int -dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag) +dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - - return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag)); + return (dhd_rtt_avail_channel(&dhd->pub, channel_info)); } -/* Linux wrapper to call common dhd_handle_hotlist_scan_evt */ -void * -dhd_dev_hotlist_scan_event(struct net_device *dev, - const void *data, int *send_evt_bytes, hotlist_type_t type, u32 *buf_len) +int +dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - - return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type, buf_len)); + return (dhd_rtt_enable_responder(&dhd->pub, channel_info)); } -/* Linux wrapper to call common dhd_process_full_gscan_result */ -void * -dhd_dev_process_full_gscan_result(struct net_device *dev, -const void *data, uint32 len, int *send_evt_bytes) +int dhd_dev_rtt_cancel_responder(struct net_device *dev) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + return (dhd_rtt_cancel_responder(&dhd->pub)); +} + +#endif /* RTT_SUPPORT */ + +#ifdef KEEP_ALIVE +#define KA_TEMP_BUF_SIZE 512 +#define KA_FRAME_SIZE 300 + +int +dhd_dev_start_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id, uint8 *ip_pkt, + 
uint16 ip_pkt_len, uint8* src_mac, uint8* dst_mac, uint32 period_msec) +{ + const int ETHERTYPE_LEN = 2; + char *pbuf = NULL; + const char *str; + wl_mkeep_alive_pkt_t mkeep_alive_pkt; + wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL; + int buf_len = 0; + int str_len = 0; + int res = BCME_ERROR; + int len_bytes = 0; + int i = 0; + + /* ether frame to have both max IP pkt (256 bytes) and ether header */ + char *pmac_frame = NULL; + char *pmac_frame_begin = NULL; + + /* + * The mkeep_alive packet is for STA interface only; if the bss is configured as AP, + * dongle shall reject a mkeep_alive request. + */ + if (!dhd_support_sta_mode(dhd_pub)) + return res; + + DHD_TRACE(("%s execution\n", __FUNCTION__)); + + if ((pbuf = MALLOCZ(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) { + DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE)); + res = BCME_NOMEM; + return res; + } + + if ((pmac_frame = MALLOCZ(dhd_pub->osh, KA_FRAME_SIZE)) == NULL) { + DHD_ERROR(("failed to allocate mac_frame with size %d\n", KA_FRAME_SIZE)); + res = BCME_NOMEM; + goto exit; + } + pmac_frame_begin = pmac_frame; + + /* + * Get current mkeep-alive status. 
+ */ + res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id, sizeof(mkeep_alive_id), pbuf, + KA_TEMP_BUF_SIZE, FALSE); + if (res < 0) { + DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res)); + goto exit; + } else { + /* Check available ID whether it is occupied */ + mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf; + if (dtoh32(mkeep_alive_pktp->period_msec != 0)) { + DHD_ERROR(("%s: Get mkeep_alive failed, ID %u is in use.\n", + __FUNCTION__, mkeep_alive_id)); + + /* Current occupied ID info */ + DHD_ERROR(("%s: mkeep_alive\n", __FUNCTION__)); + DHD_ERROR((" Id : %d\n" + " Period: %d msec\n" + " Length: %d\n" + " Packet: 0x", + mkeep_alive_pktp->keep_alive_id, + dtoh32(mkeep_alive_pktp->period_msec), + dtoh16(mkeep_alive_pktp->len_bytes))); + + for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) { + DHD_ERROR(("%02x", mkeep_alive_pktp->data[i])); + } + DHD_ERROR(("\n")); + + res = BCME_NOTFOUND; + goto exit; + } + } + + /* Request the specified ID */ + memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t)); + memset(pbuf, 0, KA_TEMP_BUF_SIZE); + str = "mkeep_alive"; + str_len = strlen(str); + strncpy(pbuf, str, str_len); + pbuf[str_len] = '\0'; + + mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (pbuf + str_len + 1); + mkeep_alive_pkt.period_msec = htod32(period_msec); + buf_len = str_len + 1; + mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION); + mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN); + + /* ID assigned */ + mkeep_alive_pkt.keep_alive_id = mkeep_alive_id; + + buf_len += WL_MKEEP_ALIVE_FIXED_LEN; + + /* + * Build up Ethernet Frame + */ + + /* Mapping dest mac addr */ + memcpy(pmac_frame, dst_mac, ETHER_ADDR_LEN); + pmac_frame += ETHER_ADDR_LEN; - return (dhd_process_full_gscan_result(&dhd->pub, data, len, send_evt_bytes)); -} + /* Mapping src mac addr */ + memcpy(pmac_frame, src_mac, ETHER_ADDR_LEN); + pmac_frame += ETHER_ADDR_LEN; -void -dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t 
type) -{ - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + /* Mapping Ethernet type (ETHERTYPE_IP: 0x0800) */ + *(pmac_frame++) = 0x08; + *(pmac_frame++) = 0x00; - dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type); + /* Mapping IP pkt */ + memcpy(pmac_frame, ip_pkt, ip_pkt_len); + pmac_frame += ip_pkt_len; - return; -} + /* + * Length of ether frame (assume to be all hexa bytes) + * = src mac + dst mac + ether type + ip pkt len + */ + len_bytes = ETHER_ADDR_LEN*2 + ETHERTYPE_LEN + ip_pkt_len; + memcpy(mkeep_alive_pktp->data, pmac_frame_begin, len_bytes); + buf_len += len_bytes; + mkeep_alive_pkt.len_bytes = htod16(len_bytes); -int -dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev) -{ - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + /* + * Keep-alive attributes are set in local variable (mkeep_alive_pkt), and + * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no + * guarantee that the buffer is properly aligned. + */ + memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN); - return (dhd_gscan_batch_cache_cleanup(&dhd->pub)); + res = dhd_wl_ioctl_cmd(dhd_pub, WLC_SET_VAR, pbuf, buf_len, TRUE, 0); +exit: + if (pmac_frame_begin) { + MFREE(dhd_pub->osh, pmac_frame_begin, KA_FRAME_SIZE); + pmac_frame_begin = NULL; + } + if (pbuf) { + MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE); + pbuf = NULL; + } + return res; } -/* Linux wrapper to call common dhd_retreive_batch_scan_results */ int -dhd_dev_retrieve_batch_scan(struct net_device *dev) +dhd_dev_stop_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id) { - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + char *pbuf = NULL; + wl_mkeep_alive_pkt_t mkeep_alive_pkt; + wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL; + int res = BCME_ERROR; + int i = 0; - return (dhd_retreive_batch_scan_results(&dhd->pub)); -} + /* + * The mkeep_alive packet is for STA interface only; if the bss is configured as AP, + * dongle shall reject a mkeep_alive request. 
+ */ + if (!dhd_support_sta_mode(dhd_pub)) + return res; -/* Linux wrapper to call common dhd_pno_process_epno_result */ -void * dhd_dev_process_epno_result(struct net_device *dev, - const void *data, uint32 event, int *send_evt_bytes) -{ - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + DHD_TRACE(("%s execution\n", __FUNCTION__)); - return (dhd_pno_process_epno_result(&dhd->pub, data, event, send_evt_bytes)); -} + /* + * Get current mkeep-alive status. Skip ID 0 which is being used for NULL pkt. + */ + if ((pbuf = MALLOC(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) { + DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE)); + return res; + } -int -dhd_dev_set_lazy_roam_cfg(struct net_device *dev, - wlc_roam_exp_params_t *roam_param) -{ - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - wl_roam_exp_cfg_t roam_exp_cfg; - int err; + res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id, + sizeof(mkeep_alive_id), pbuf, KA_TEMP_BUF_SIZE, FALSE); + if (res < 0) { + DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res)); + goto exit; + } else { + /* Check occupied ID */ + mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf; + DHD_INFO(("%s: mkeep_alive\n", __FUNCTION__)); + DHD_INFO((" Id : %d\n" + " Period: %d msec\n" + " Length: %d\n" + " Packet: 0x", + mkeep_alive_pktp->keep_alive_id, + dtoh32(mkeep_alive_pktp->period_msec), + dtoh16(mkeep_alive_pktp->len_bytes))); - if (!roam_param) { - return BCME_BADARG; + for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) { + DHD_INFO(("%02x", mkeep_alive_pktp->data[i])); + } + DHD_INFO(("\n")); } - DHD_INFO(("a_band_boost_thr %d a_band_penalty_thr %d\n", - roam_param->a_band_boost_threshold, roam_param->a_band_penalty_threshold)); - DHD_INFO(("a_band_boost_factor %d a_band_penalty_factor %d cur_bssid_boost %d\n", - roam_param->a_band_boost_factor, roam_param->a_band_penalty_factor, - roam_param->cur_bssid_boost)); - DHD_INFO(("alert_roam_trigger_thr %d a_band_max_boost %d\n", - 
roam_param->alert_roam_trigger_threshold, roam_param->a_band_max_boost)); + /* Make it stop if available */ + if (dtoh32(mkeep_alive_pktp->period_msec != 0)) { + DHD_INFO(("stop mkeep_alive on ID %d\n", mkeep_alive_id)); + memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t)); - memcpy(&roam_exp_cfg.params, roam_param, sizeof(*roam_param)); - roam_exp_cfg.version = ROAM_EXP_CFG_VERSION; - roam_exp_cfg.flags = ROAM_EXP_CFG_PRESENT; - if (dhd->pub.lazy_roam_enable) { - roam_exp_cfg.flags |= ROAM_EXP_ENABLE_FLAG; + mkeep_alive_pkt.period_msec = 0; + mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION); + mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN); + mkeep_alive_pkt.keep_alive_id = mkeep_alive_id; + + res = dhd_iovar(dhd_pub, 0, "mkeep_alive", + (char *)&mkeep_alive_pkt, + WL_MKEEP_ALIVE_FIXED_LEN, NULL, 0, TRUE); + } else { + DHD_ERROR(("%s: ID %u does not exist.\n", __FUNCTION__, mkeep_alive_id)); + res = BCME_NOTFOUND; } - err = dhd_iovar(&dhd->pub, 0, "roam_exp_params", - (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0, - TRUE); - if (err < 0) { - DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err)); +exit: + if (pbuf) { + MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE); + pbuf = NULL; } - return err; + return res; } +#endif /* KEEP_ALIVE */ -int -dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable) +#if defined(PKT_FILTER_SUPPORT) && defined(APF) +static void _dhd_apf_lock_local(dhd_info_t *dhd) { - int err; - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - wl_roam_exp_cfg_t roam_exp_cfg; - - memset(&roam_exp_cfg, 0, sizeof(roam_exp_cfg)); - roam_exp_cfg.version = ROAM_EXP_CFG_VERSION; - if (enable) { - roam_exp_cfg.flags = ROAM_EXP_ENABLE_FLAG; + if (dhd) { + mutex_lock(&dhd->dhd_apf_mutex); } +} - err = dhd_iovar(&dhd->pub, 0, "roam_exp_params", - (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0, - TRUE); - if (err < 0) { - DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", 
__FUNCTION__, err)); - } else { - dhd->pub.lazy_roam_enable = (enable != 0); +static void _dhd_apf_unlock_local(dhd_info_t *dhd) +{ + if (dhd) { + mutex_unlock(&dhd->dhd_apf_mutex); } - return err; } -int -dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev, - wl_bssid_pref_cfg_t *bssid_pref, uint32 flush) +static int +__dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id, + u8* program, uint32 program_len) { - int err; - uint len; - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + dhd_info_t *dhd = DHD_DEV_INFO(ndev); + dhd_pub_t *dhdp = &dhd->pub; + wl_pkt_filter_t * pkt_filterp; + wl_apf_program_t *apf_program; + char *buf; + u32 cmd_len, buf_len; + int ifidx, ret; + char cmd[] = "pkt_filter_add"; - bssid_pref->version = BSSID_PREF_LIST_VERSION; - /* By default programming bssid pref flushes out old values */ - bssid_pref->flags = (flush && !bssid_pref->count) ? ROAM_EXP_CLEAR_BSSID_PREF: 0; - len = sizeof(wl_bssid_pref_cfg_t); - if (bssid_pref->count) { - len += (bssid_pref->count - 1) * sizeof(wl_bssid_pref_list_t); + ifidx = dhd_net2idx(dhd, ndev); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__)); + return -ENODEV; } - err = dhd_iovar(&dhd->pub, 0, "roam_exp_bssid_pref", - (char *)bssid_pref, len, NULL, 0, TRUE); - if (err != BCME_OK) { - DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err)); + + cmd_len = sizeof(cmd); + + /* Check if the program_len is more than the expected len + * and if the program is NULL return from here. 
+ */ + if ((program_len > WL_APF_PROGRAM_MAX_SIZE) || (program == NULL)) { + DHD_ERROR(("%s Invalid program_len: %d, program: %pK\n", + __FUNCTION__, program_len, program)); + return -EINVAL; } - return err; -} -#endif /* GSCAN_SUPPORT */ + buf_len = cmd_len + WL_PKT_FILTER_FIXED_LEN + + WL_APF_PROGRAM_FIXED_LEN + program_len; -#if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT) -int -dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist, - uint32 len, uint32 flush) -{ - int err; - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - int macmode; + buf = MALLOCZ(dhdp->osh, buf_len); + if (unlikely(!buf)) { + DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len)); + return -ENOMEM; + } - if (blacklist) { - err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACLIST, (char *)blacklist, - len, TRUE, 0); - if (err != BCME_OK) { - DHD_ERROR(("%s : WLC_SET_MACLIST failed %d\n", __FUNCTION__, err)); - return err; - } + memcpy(buf, cmd, cmd_len); + + pkt_filterp = (wl_pkt_filter_t *) (buf + cmd_len); + pkt_filterp->id = htod32(filter_id); + pkt_filterp->negate_match = htod32(FALSE); + pkt_filterp->type = htod32(WL_PKT_FILTER_TYPE_APF_MATCH); + + apf_program = &pkt_filterp->u.apf_program; + apf_program->version = htod16(WL_APF_INTERNAL_VERSION); + apf_program->instr_len = htod16(program_len); + memcpy(apf_program->instrs, program, program_len); + + ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx); + if (unlikely(ret)) { + DHD_ERROR(("%s: failed to add APF filter, id=%d, ret=%d\n", + __FUNCTION__, filter_id, ret)); } - /* By default programming blacklist flushes out old values */ - macmode = (flush && !blacklist) ? 
WLC_MACMODE_DISABLED : WLC_MACMODE_DENY; - err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACMODE, (char *)&macmode, - sizeof(macmode), TRUE, 0); - if (err != BCME_OK) { - DHD_ERROR(("%s : WLC_SET_MACMODE failed %d\n", __FUNCTION__, err)); + + if (buf) { + MFREE(dhdp->osh, buf, buf_len); } - return err; + return ret; } -int -dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *ssid_whitelist, - uint32 len, uint32 flush) +static int +__dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id, + uint32 mode, uint32 enable) { - int err; - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - wl_ssid_whitelist_t whitelist_ssid_flush; + dhd_info_t *dhd = DHD_DEV_INFO(ndev); + dhd_pub_t *dhdp = &dhd->pub; + wl_pkt_filter_enable_t * pkt_filterp; + char *buf; + u32 cmd_len, buf_len; + int ifidx, ret; + char cmd[] = "pkt_filter_enable"; - if (!ssid_whitelist) { - if (flush) { - ssid_whitelist = &whitelist_ssid_flush; - ssid_whitelist->ssid_count = 0; - } else { - DHD_ERROR(("%s : Nothing to do here\n", __FUNCTION__)); - return BCME_BADARG; - } + ifidx = dhd_net2idx(dhd, ndev); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__)); + return -ENODEV; } - ssid_whitelist->version = SSID_WHITELIST_VERSION; - ssid_whitelist->flags = flush ? 
ROAM_EXP_CLEAR_SSID_WHITELIST : 0; - err = dhd_iovar(&dhd->pub, 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist, len, NULL, - 0, TRUE); - if (err != BCME_OK) { - DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err)); + + cmd_len = sizeof(cmd); + buf_len = cmd_len + sizeof(*pkt_filterp); + + buf = MALLOCZ(dhdp->osh, buf_len); + if (unlikely(!buf)) { + DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len)); + return -ENOMEM; } - return err; -} -#endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */ -#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS) -/* Linux wrapper to call common dhd_pno_get_gscan */ -void * -dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, - void *info, uint32 *len) -{ - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + memcpy(buf, cmd, cmd_len); - return (dhd_pno_get_gscan(&dhd->pub, type, info, len)); + pkt_filterp = (wl_pkt_filter_enable_t *) (buf + cmd_len); + pkt_filterp->id = htod32(filter_id); + pkt_filterp->enable = htod32(enable); + + ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx); + if (unlikely(ret)) { + DHD_ERROR(("%s: failed to enable APF filter, id=%d, ret=%d\n", + __FUNCTION__, filter_id, ret)); + goto exit; + } + + ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_mode", dhd_master_mode, + WLC_SET_VAR, TRUE, ifidx); + if (unlikely(ret)) { + DHD_ERROR(("%s: failed to set APF filter mode, id=%d, ret=%d\n", + __FUNCTION__, filter_id, ret)); + } + +exit: + if (buf) { + MFREE(dhdp->osh, buf, buf_len); + } + return ret; } -#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */ -#endif // endif -#ifdef RSSI_MONITOR_SUPPORT -int -dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start, - int8 max_rssi, int8 min_rssi) +static int +__dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id) { - int err; - wl_rssi_monitor_cfg_t rssi_monitor; - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + dhd_info_t *dhd = *(dhd_info_t 
**)netdev_priv(ndev); + dhd_pub_t *dhdp = &dhd->pub; + int ifidx, ret; - rssi_monitor.version = RSSI_MONITOR_VERSION; - rssi_monitor.max_rssi = max_rssi; - rssi_monitor.min_rssi = min_rssi; - rssi_monitor.flags = start ? 0: RSSI_MONITOR_STOP; - err = dhd_iovar(&dhd->pub, 0, "rssi_monitor", (char *)&rssi_monitor, sizeof(rssi_monitor), - NULL, 0, TRUE); - if (err < 0 && err != BCME_UNSUPPORTED) { - DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__, err)); + ifidx = dhd_net2idx(dhd, ndev); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__)); + return -ENODEV; } - return err; + + ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_delete", + htod32(filter_id), WLC_SET_VAR, TRUE, ifidx); + if (unlikely(ret)) { + DHD_ERROR(("%s: failed to delete APF filter, id=%d, ret=%d\n", + __FUNCTION__, filter_id, ret)); + } + + return ret; } -#endif /* RSSI_MONITOR_SUPPORT */ -#ifdef DHDTCPACK_SUPPRESS -int -dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable) +void dhd_apf_lock(struct net_device *dev) { - int err; - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + dhd_info_t *dhd = DHD_DEV_INFO(dev); + _dhd_apf_lock_local(dhd); +} - err = dhd_tcpack_suppress_set(&dhd->pub, enable); - if (err != BCME_OK) { - DHD_ERROR(("%s : Failed to set tcpack_suppress mode: %d\n", __FUNCTION__, err)); - } - return err; +void dhd_apf_unlock(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + _dhd_apf_unlock_local(dhd); } -#endif /* DHDTCPACK_SUPPRESS */ int -dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui) +dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version) { - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + dhd_info_t *dhd = DHD_DEV_INFO(ndev); dhd_pub_t *dhdp = &dhd->pub; + int ifidx, ret; - if (!dhdp || !oui) { - DHD_ERROR(("NULL POINTER : %s\n", - __FUNCTION__)); - return BCME_ERROR; + if (!FW_SUPPORTED(dhdp, apf)) { + DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__)); + 
+ /* + * Notify Android framework that APF is not supported by setting + * version as zero. + */ + *version = 0; + return BCME_OK; } - if (ETHER_ISMULTI(oui)) { - DHD_ERROR(("Expected unicast OUI\n")); - return BCME_ERROR; - } else { - uint8 *rand_mac_oui = dhdp->rand_mac_oui; - memcpy(rand_mac_oui, oui, DOT11_OUI_LEN); - DHD_ERROR(("Random MAC OUI to be used - "MACOUIDBG"\n", - MACOUI2STRDBG(rand_mac_oui))); + + ifidx = dhd_net2idx(dhd, ndev); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__)); + return -ENODEV; } - return BCME_OK; + + ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_ver", version, + WLC_GET_VAR, FALSE, ifidx); + if (unlikely(ret)) { + DHD_ERROR(("%s: failed to get APF version, ret=%d\n", + __FUNCTION__, ret)); + } + + return ret; } int -dhd_set_rand_mac_oui(dhd_pub_t *dhd) +dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len) { - int err; - wl_pfn_macaddr_cfg_t wl_cfg; - uint8 *rand_mac_oui = dhd->rand_mac_oui; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev); + dhd_pub_t *dhdp = &dhd->pub; + int ifidx, ret; - memset(&wl_cfg.macaddr, 0, ETHER_ADDR_LEN); - memcpy(&wl_cfg.macaddr, rand_mac_oui, DOT11_OUI_LEN); - wl_cfg.version = WL_PFN_MACADDR_CFG_VER; - if (ETHER_ISNULLADDR(&wl_cfg.macaddr)) { - wl_cfg.flags = 0; - } else { - wl_cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK); + if (!FW_SUPPORTED(dhdp, apf)) { + DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__)); + *max_len = 0; + return BCME_OK; } - DHD_ERROR(("Setting rand mac oui to FW - "MACOUIDBG"\n", - MACOUI2STRDBG(rand_mac_oui))); + ifidx = dhd_net2idx(dhd, ndev); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s bad ifidx\n", __FUNCTION__)); + return -ENODEV; + } - err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&wl_cfg, sizeof(wl_cfg), NULL, 0, TRUE); - if (err < 0) { - DHD_ERROR(("%s : failed to execute pfn_macaddr %d\n", __FUNCTION__, err)); + ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_size_limit", max_len, + 
WLC_GET_VAR, FALSE, ifidx); + if (unlikely(ret)) { + DHD_ERROR(("%s: failed to get APF size limit, ret=%d\n", + __FUNCTION__, ret)); } - return err; + + return ret; } -#ifdef RTT_SUPPORT -/* Linux wrapper to call common dhd_pno_set_cfg_gscan */ int -dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf) +dhd_dev_apf_add_filter(struct net_device *ndev, u8* program, + uint32 program_len) { - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + dhd_info_t *dhd = DHD_DEV_INFO(ndev); + dhd_pub_t *dhdp = &dhd->pub; + int ret; - return (dhd_rtt_set_cfg(&dhd->pub, buf)); -} + DHD_APF_LOCK(ndev); -int -dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt) -{ - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + /* delete, if filter already exists */ + if (dhdp->apf_set) { + ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID); + if (unlikely(ret)) { + goto exit; + } + dhdp->apf_set = FALSE; + } - return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt)); -} + ret = __dhd_apf_add_filter(ndev, PKT_FILTER_APF_ID, program, program_len); + if (ret) { + goto exit; + } + dhdp->apf_set = TRUE; -int -dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn) -{ - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + if (dhdp->in_suspend && dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) { + /* Driver is still in (early) suspend state, enable APF filter back */ + ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID, + PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE); + } +exit: + DHD_APF_UNLOCK(ndev); - return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn)); + return ret; } int -dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn) +dhd_dev_apf_enable_filter(struct net_device *ndev) { - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + dhd_info_t *dhd = DHD_DEV_INFO(ndev); + dhd_pub_t *dhdp = &dhd->pub; + int ret = 0; + bool nan_dp_active = false; - return 
(dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn)); -} + DHD_APF_LOCK(ndev); +#ifdef WL_NAN + nan_dp_active = wl_cfgnan_is_dp_active(ndev); +#endif /* WL_NAN */ + if (dhdp->apf_set && (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE) && + !nan_dp_active)) { + ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID, + PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE); + } -int -dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa) -{ - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + DHD_APF_UNLOCK(ndev); - return (dhd_rtt_capability(&dhd->pub, capa)); + return ret; } int -dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info) +dhd_dev_apf_disable_filter(struct net_device *ndev) { - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - return (dhd_rtt_avail_channel(&dhd->pub, channel_info)); -} + dhd_info_t *dhd = DHD_DEV_INFO(ndev); + dhd_pub_t *dhdp = &dhd->pub; + int ret = 0; -int -dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info) -{ - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - return (dhd_rtt_enable_responder(&dhd->pub, channel_info)); -} + DHD_APF_LOCK(ndev); -int dhd_dev_rtt_cancel_responder(struct net_device *dev) -{ - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - return (dhd_rtt_cancel_responder(&dhd->pub)); -} + if (dhdp->apf_set) { + ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID, + PKT_FILTER_MODE_FORWARD_ON_MATCH, FALSE); + } -#endif /* RTT_SUPPORT */ + DHD_APF_UNLOCK(ndev); -#ifdef KEEP_ALIVE -#define KA_TEMP_BUF_SIZE 512 -#define KA_FRAME_SIZE 300 + return ret; +} int -dhd_dev_start_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id, uint8 *ip_pkt, - uint16 ip_pkt_len, uint8* src_mac, uint8* dst_mac, uint32 period_msec) +dhd_dev_apf_delete_filter(struct net_device *ndev) { - const int ETHERTYPE_LEN = 2; - char *pbuf = NULL; - const char *str; - wl_mkeep_alive_pkt_t mkeep_alive_pkt; - wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL; - int buf_len = 0; - int 
str_len = 0; - int res = BCME_ERROR; - int len_bytes = 0; - int i = 0; - - /* ether frame to have both max IP pkt (256 bytes) and ether header */ - char *pmac_frame = NULL; - char *pmac_frame_begin = NULL; - - /* - * The mkeep_alive packet is for STA interface only; if the bss is configured as AP, - * dongle shall reject a mkeep_alive request. - */ - if (!dhd_support_sta_mode(dhd_pub)) - return res; - - DHD_TRACE(("%s execution\n", __FUNCTION__)); - - if ((pbuf = MALLOCZ(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) { - DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE)); - res = BCME_NOMEM; - return res; - } - - if ((pmac_frame = MALLOCZ(dhd_pub->osh, KA_FRAME_SIZE)) == NULL) { - DHD_ERROR(("failed to allocate mac_frame with size %d\n", KA_FRAME_SIZE)); - res = BCME_NOMEM; - goto exit; - } - pmac_frame_begin = pmac_frame; - - /* - * Get current mkeep-alive status. - */ - res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id, sizeof(mkeep_alive_id), pbuf, - KA_TEMP_BUF_SIZE, FALSE); - if (res < 0) { - DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res)); - goto exit; - } else { - /* Check available ID whether it is occupied */ - mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf; - if (dtoh32(mkeep_alive_pktp->period_msec != 0)) { - DHD_ERROR(("%s: Get mkeep_alive failed, ID %u is in use.\n", - __FUNCTION__, mkeep_alive_id)); - - /* Current occupied ID info */ - DHD_ERROR(("%s: mkeep_alive\n", __FUNCTION__)); - DHD_ERROR((" Id : %d\n" - " Period: %d msec\n" - " Length: %d\n" - " Packet: 0x", - mkeep_alive_pktp->keep_alive_id, - dtoh32(mkeep_alive_pktp->period_msec), - dtoh16(mkeep_alive_pktp->len_bytes))); + dhd_info_t *dhd = DHD_DEV_INFO(ndev); + dhd_pub_t *dhdp = &dhd->pub; + int ret = 0; - for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) { - DHD_ERROR(("%02x", mkeep_alive_pktp->data[i])); - } - DHD_ERROR(("\n")); + DHD_APF_LOCK(ndev); - res = BCME_NOTFOUND; - goto exit; + if (dhdp->apf_set) { + ret = 
__dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID); + if (!ret) { + dhdp->apf_set = FALSE; } } - /* Request the specified ID */ - memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t)); - memset(pbuf, 0, KA_TEMP_BUF_SIZE); - str = "mkeep_alive"; - str_len = strlen(str); - strncpy(pbuf, str, str_len); - pbuf[str_len] = '\0'; - - mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (pbuf + str_len + 1); - mkeep_alive_pkt.period_msec = htod32(period_msec); - buf_len = str_len + 1; - mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION); - mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN); - - /* ID assigned */ - mkeep_alive_pkt.keep_alive_id = mkeep_alive_id; - - buf_len += WL_MKEEP_ALIVE_FIXED_LEN; - - /* - * Build up Ethernet Frame - */ - - /* Mapping dest mac addr */ - memcpy(pmac_frame, dst_mac, ETHER_ADDR_LEN); - pmac_frame += ETHER_ADDR_LEN; - - /* Mapping src mac addr */ - memcpy(pmac_frame, src_mac, ETHER_ADDR_LEN); - pmac_frame += ETHER_ADDR_LEN; + DHD_APF_UNLOCK(ndev); - /* Mapping Ethernet type (ETHERTYPE_IP: 0x0800) */ - *(pmac_frame++) = 0x08; - *(pmac_frame++) = 0x00; + return ret; +} +#endif /* PKT_FILTER_SUPPORT && APF */ - /* Mapping IP pkt */ - memcpy(pmac_frame, ip_pkt, ip_pkt_len); - pmac_frame += ip_pkt_len; +static void dhd_hang_process(struct work_struct *work_data) +{ + struct net_device *dev; +#ifdef IFACE_HANG_FORCE_DEV_CLOSE + struct net_device *ndev; + uint8 i = 0; +#endif /* IFACE_HANG_FORCE_DEV_CLOSE */ +/* Ignore compiler warnings due to -Werror=cast-qual */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + struct dhd_info *dhd = + container_of(work_data, dhd_info_t, dhd_hang_process_work); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif - /* - * Length of ether frame (assume to be all hexa bytes) - * = src mac + dst mac + ether type + ip pkt len - */ - len_bytes = ETHER_ADDR_LEN*2 + 
ETHERTYPE_LEN + ip_pkt_len; - memcpy(mkeep_alive_pktp->data, pmac_frame_begin, len_bytes); - buf_len += len_bytes; - mkeep_alive_pkt.len_bytes = htod16(len_bytes); + if (!dhd || !dhd->iflist[0]) + return; + dev = dhd->iflist[0]->net; + if (dev) { +#if defined(WL_WIRELESS_EXT) + wl_iw_send_priv_event(dev, "HANG"); +#endif // endif +#if defined(WL_CFG80211) + wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED); +#endif // endif + } +#ifdef IFACE_HANG_FORCE_DEV_CLOSE /* - * Keep-alive attributes are set in local variable (mkeep_alive_pkt), and - * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no - * guarantee that the buffer is properly aligned. + * For HW2, dev_close need to be done to recover + * from upper layer after hang. For Interposer skip + * dev_close so that dhd iovars can be used to take + * socramdump after crash, also skip for HW4 as + * handling of hang event is different */ - memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN); - res = dhd_wl_ioctl_cmd(dhd_pub, WLC_SET_VAR, pbuf, buf_len, TRUE, 0); -exit: - if (pmac_frame_begin) { - MFREE(dhd_pub->osh, pmac_frame_begin, KA_FRAME_SIZE); - pmac_frame_begin = NULL; - } - if (pbuf) { - MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE); - pbuf = NULL; + rtnl_lock(); + for (i = 0; i < DHD_MAX_IFS; i++) { + ndev = dhd->iflist[i] ? 
dhd->iflist[i]->net : NULL; + if (ndev && (ndev->flags & IFF_UP)) { + DHD_ERROR(("ndev->name : %s dev close\n", + ndev->name)); + dev_close(ndev); + } } - return res; + rtnl_unlock(); +#endif /* IFACE_HANG_FORCE_DEV_CLOSE */ } -int -dhd_dev_stop_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id) +#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY +extern dhd_pub_t *link_recovery; +void dhd_host_recover_link(void) { - char *pbuf = NULL; - wl_mkeep_alive_pkt_t mkeep_alive_pkt; - wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL; - int res = BCME_ERROR; - int i = 0; - - /* - * The mkeep_alive packet is for STA interface only; if the bss is configured as AP, - * dongle shall reject a mkeep_alive request. - */ - if (!dhd_support_sta_mode(dhd_pub)) - return res; + DHD_ERROR(("****** %s ******\n", __FUNCTION__)); + link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN_RC_DETECT; + dhd_bus_set_linkdown(link_recovery, TRUE); + dhd_os_send_hang_message(link_recovery); +} +EXPORT_SYMBOL(dhd_host_recover_link); +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ - DHD_TRACE(("%s execution\n", __FUNCTION__)); +int dhd_os_send_hang_message(dhd_pub_t *dhdp) +{ + int ret = 0; +#ifdef WL_CFG80211 + struct net_device *primary_ndev; + struct bcm_cfg80211 *cfg; +#ifdef DHD_FILE_DUMP_EVENT + dhd_info_t *dhd_info = NULL; +#endif /* DHD_FILE_DUMP_EVENT */ +#endif /* WL_CFG80211 */ - /* - * Get current mkeep-alive status. Skip ID 0 which is being used for NULL pkt. 
- */ - if ((pbuf = MALLOC(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) { - DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE)); - return res; + if (!dhdp) { + DHD_ERROR(("%s: dhdp is null\n", __FUNCTION__)); + return -EINVAL; } - res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id, - sizeof(mkeep_alive_id), pbuf, KA_TEMP_BUF_SIZE, FALSE); - if (res < 0) { - DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res)); - goto exit; - } else { - /* Check occupied ID */ - mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf; - DHD_INFO(("%s: mkeep_alive\n", __FUNCTION__)); - DHD_INFO((" Id : %d\n" - " Period: %d msec\n" - " Length: %d\n" - " Packet: 0x", - mkeep_alive_pktp->keep_alive_id, - dtoh32(mkeep_alive_pktp->period_msec), - dtoh16(mkeep_alive_pktp->len_bytes))); +#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) + dhd_info = (dhd_info_t *)dhdp->info; - for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) { - DHD_INFO(("%02x", mkeep_alive_pktp->data[i])); - } - DHD_INFO(("\n")); + if (dhd_info->scheduled_memdump) { + DHD_ERROR_RLMT(("[DUMP]:%s, memdump in progress. 
return\n", __FUNCTION__)); + dhdp->hang_was_pending = 1; + return BCME_OK; } +#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */ - /* Make it stop if available */ - if (dtoh32(mkeep_alive_pktp->period_msec != 0)) { - DHD_INFO(("stop mkeep_alive on ID %d\n", mkeep_alive_id)); - memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t)); - - mkeep_alive_pkt.period_msec = 0; - mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION); - mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN); - mkeep_alive_pkt.keep_alive_id = mkeep_alive_id; +#ifdef WL_CFG80211 + primary_ndev = dhd_linux_get_primary_netdev(dhdp); + if (!primary_ndev) { + DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__)); + return -ENODEV; + } + cfg = wl_get_cfg(primary_ndev); + if (!cfg) { + DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__)); + return -EINVAL; + } - res = dhd_iovar(dhd_pub, 0, "mkeep_alive", - (char *)&mkeep_alive_pkt, - WL_MKEEP_ALIVE_FIXED_LEN, NULL, 0, TRUE); - } else { - DHD_ERROR(("%s: ID %u does not exist.\n", __FUNCTION__, mkeep_alive_id)); - res = BCME_NOTFOUND; + /* Skip sending HANG event to framework if driver is not ready */ + if (!wl_get_drv_status(cfg, READY, primary_ndev)) { + DHD_ERROR(("%s: device is not ready\n", __FUNCTION__)); + return -ENODEV; } -exit: - if (pbuf) { - MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE); - pbuf = NULL; +#endif /* WL_CFG80211 */ + + if (!dhdp->hang_was_sent) { +#if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG) + dhdp->hang_counts++; + if (dhdp->hang_counts >= MAX_CONSECUTIVE_HANG_COUNTS) { + DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n", + __func__, dhdp->hang_counts)); + BUG_ON(1); + } +#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */ +#ifdef DHD_DEBUG_UART + /* If PCIe lane has broken, execute the debug uart application + * to gether a ramdump data from dongle via uart + */ + if (!dhdp->info->duart_execute) { + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, + (void *)dhdp, DHD_WQ_WORK_DEBUG_UART_DUMP, + 
dhd_debug_uart_exec_rd, DHD_WQ_WORK_PRIORITY_HIGH); + } +#endif /* DHD_DEBUG_UART */ + dhdp->hang_was_sent = 1; +#ifdef BT_OVER_SDIO + dhdp->is_bt_recovery_required = TRUE; +#endif // endif + schedule_work(&dhdp->info->dhd_hang_process_work); + DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d s=%d\n", __FUNCTION__, + dhdp->rxcnt_timeout, dhdp->txcnt_timeout, dhdp->busstate)); } - return res; + return ret; } -#endif /* KEEP_ALIVE */ -#if defined(PKT_FILTER_SUPPORT) && defined(APF) -static void _dhd_apf_lock_local(dhd_info_t *dhd) +int net_os_send_hang_message(struct net_device *dev) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + if (dhd) { - mutex_lock(&dhd->dhd_apf_mutex); + /* Report FW problem when enabled */ + if (dhd->pub.hang_report) { +#ifdef BT_OVER_SDIO + if (netif_running(dev)) { +#endif /* BT_OVER_SDIO */ + ret = dhd_os_send_hang_message(&dhd->pub); +#ifdef BT_OVER_SDIO + } + DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__)); + bcmsdh_btsdio_process_dhd_hang_notification(!netif_running(dev)); +#endif /* BT_OVER_SDIO */ + } else { + DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n", + __FUNCTION__)); + } } -#endif // endif + return ret; } -static void _dhd_apf_unlock_local(dhd_info_t *dhd) +int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + dhd_info_t *dhd = NULL; + dhd_pub_t *dhdp = NULL; + int reason; + + dhd = DHD_DEV_INFO(dev); if (dhd) { - mutex_unlock(&dhd->dhd_apf_mutex); + dhdp = &dhd->pub; } -#endif // endif + + if (!dhd || !dhdp) { + return 0; + } + + reason = bcm_strtoul(string_num, NULL, 0); + DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason)); + + if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) { + reason = 0; + } + + dhdp->hang_reason = reason; + + return net_os_send_hang_message(dev); } -static int -__dhd_apf_add_filter(struct 
net_device *ndev, uint32 filter_id, - u8* program, uint32 program_len) +int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec) { - dhd_info_t *dhd = DHD_DEV_INFO(ndev); - dhd_pub_t *dhdp = &dhd->pub; - wl_pkt_filter_t * pkt_filterp; - wl_apf_program_t *apf_program; - char *buf; - u32 cmd_len, buf_len; - int ifidx, ret; - char cmd[] = "pkt_filter_add"; + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return wifi_platform_set_power(dhd->adapter, on, delay_msec); +} - ifidx = dhd_net2idx(dhd, ndev); - if (ifidx == DHD_BAD_IF) { - DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__)); - return -ENODEV; - } +bool dhd_force_country_change(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); - cmd_len = sizeof(cmd); + if (dhd && dhd->pub.up) + return dhd->pub.force_country_change; + return FALSE; +} - /* Check if the program_len is more than the expected len - * and if the program is NULL return from here. - */ - if ((program_len > WL_APF_PROGRAM_MAX_SIZE) || (program == NULL)) { - DHD_ERROR(("%s Invalid program_len: %d, program: %pK\n", - __FUNCTION__, program_len, program)); - return -EINVAL; +void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code, + wl_country_t *cspec) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); +#if defined(DHD_BLOB_EXISTENCE_CHECK) + if (!dhd->pub.is_blob) +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + { +#if defined(CUSTOM_COUNTRY_CODE) + get_customized_country_code(dhd->adapter, country_iso_code, cspec, + dhd->pub.dhd_cflags); +#else + get_customized_country_code(dhd->adapter, country_iso_code, cspec); +#endif /* CUSTOM_COUNTRY_CODE */ } - buf_len = cmd_len + WL_PKT_FILTER_FIXED_LEN + - WL_APF_PROGRAM_FIXED_LEN + program_len; - - buf = MALLOCZ(dhdp->osh, buf_len); - if (unlikely(!buf)) { - DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len)); - return -ENOMEM; +#if defined(DHD_BLOB_EXISTENCE_CHECK) && !defined(CUSTOM_COUNTRY_CODE) + else { + /* Replace the ccode to XZ if 
ccode is undefined country */ + if (strncmp(country_iso_code, "", WLC_CNTRY_BUF_SZ) == 0) { + strlcpy(country_iso_code, "XZ", WLC_CNTRY_BUF_SZ); + strlcpy(cspec->country_abbrev, country_iso_code, WLC_CNTRY_BUF_SZ); + strlcpy(cspec->ccode, country_iso_code, WLC_CNTRY_BUF_SZ); + DHD_ERROR(("%s: ccode change to %s\n", __FUNCTION__, country_iso_code)); + } } +#endif /* DHD_BLOB_EXISTENCE_CHECK && !CUSTOM_COUNTRY_CODE */ - memcpy(buf, cmd, cmd_len); - - pkt_filterp = (wl_pkt_filter_t *) (buf + cmd_len); - pkt_filterp->id = htod32(filter_id); - pkt_filterp->negate_match = htod32(FALSE); - pkt_filterp->type = htod32(WL_PKT_FILTER_TYPE_APF_MATCH); + BCM_REFERENCE(dhd); +} - apf_program = &pkt_filterp->u.apf_program; - apf_program->version = htod16(WL_APF_INTERNAL_VERSION); - apf_program->instr_len = htod16(program_len); - memcpy(apf_program->instrs, program, program_len); +void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); +#ifdef WL_CFG80211 + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); +#endif // endif - ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx); - if (unlikely(ret)) { - DHD_ERROR(("%s: failed to add APF filter, id=%d, ret=%d\n", - __FUNCTION__, filter_id, ret)); + if (dhd && dhd->pub.up) { + memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t)); +#ifdef WL_CFG80211 + wl_update_wiphybands(cfg, notify); +#endif // endif } +} - if (buf) { - MFREE(dhdp->osh, buf, buf_len); +void dhd_bus_band_set(struct net_device *dev, uint band) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); +#ifdef WL_CFG80211 + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); +#endif // endif + if (dhd && dhd->pub.up) { +#ifdef WL_CFG80211 + wl_update_wiphybands(cfg, true); +#endif // endif } - return ret; } -static int -__dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id, - uint32 mode, uint32 enable) +int dhd_net_set_fw_path(struct net_device *dev, char *fw) { - dhd_info_t *dhd = 
DHD_DEV_INFO(ndev); - dhd_pub_t *dhdp = &dhd->pub; - wl_pkt_filter_enable_t * pkt_filterp; - char *buf; - u32 cmd_len, buf_len; - int ifidx, ret; - char cmd[] = "pkt_filter_enable"; + dhd_info_t *dhd = DHD_DEV_INFO(dev); - ifidx = dhd_net2idx(dhd, ndev); - if (ifidx == DHD_BAD_IF) { - DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__)); - return -ENODEV; - } + if (!fw || fw[0] == '\0') + return -EINVAL; - cmd_len = sizeof(cmd); - buf_len = cmd_len + sizeof(*pkt_filterp); + strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1); + dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0'; - buf = MALLOCZ(dhdp->osh, buf_len); - if (unlikely(!buf)) { - DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len)); - return -ENOMEM; +#if defined(SOFTAP) + if (strstr(fw, "apsta") != NULL) { + DHD_INFO(("GOT APSTA FIRMWARE\n")); + ap_fw_loaded = TRUE; + } else { + DHD_INFO(("GOT STA FIRMWARE\n")); + ap_fw_loaded = FALSE; } +#endif // endif + return 0; +} - memcpy(buf, cmd, cmd_len); +void dhd_net_if_lock(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + dhd_net_if_lock_local(dhd); +} - pkt_filterp = (wl_pkt_filter_enable_t *) (buf + cmd_len); - pkt_filterp->id = htod32(filter_id); - pkt_filterp->enable = htod32(enable); +void dhd_net_if_unlock(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + dhd_net_if_unlock_local(dhd); +} - ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx); - if (unlikely(ret)) { - DHD_ERROR(("%s: failed to enable APF filter, id=%d, ret=%d\n", - __FUNCTION__, filter_id, ret)); - goto exit; - } +static void dhd_net_if_lock_local(dhd_info_t *dhd) +{ + if (dhd) + mutex_lock(&dhd->dhd_net_if_mutex); +} - ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_mode", dhd_master_mode, - WLC_SET_VAR, TRUE, ifidx); - if (unlikely(ret)) { - DHD_ERROR(("%s: failed to set APF filter mode, id=%d, ret=%d\n", - __FUNCTION__, filter_id, ret)); - } +static void dhd_net_if_unlock_local(dhd_info_t *dhd) +{ + if (dhd) + 
mutex_unlock(&dhd->dhd_net_if_mutex); +} -exit: - if (buf) { - MFREE(dhdp->osh, buf, buf_len); - } - return ret; +static void dhd_suspend_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + if (dhd) + mutex_lock(&dhd->dhd_suspend_mutex); } -static int -__dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id) +static void dhd_suspend_unlock(dhd_pub_t *pub) { - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev); - dhd_pub_t *dhdp = &dhd->pub; - int ifidx, ret; + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + if (dhd) + mutex_unlock(&dhd->dhd_suspend_mutex); +} - ifidx = dhd_net2idx(dhd, ndev); - if (ifidx == DHD_BAD_IF) { - DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__)); - return -ENODEV; - } +unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags = 0; - ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_delete", - htod32(filter_id), WLC_SET_VAR, TRUE, ifidx); - if (unlikely(ret)) { - DHD_ERROR(("%s: failed to delete APF filter, id=%d, ret=%d\n", - __FUNCTION__, filter_id, ret)); - } + if (dhd) + spin_lock_irqsave(&dhd->dhd_lock, flags); - return ret; + return flags; } -void dhd_apf_lock(struct net_device *dev) +void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags) { - dhd_info_t *dhd = DHD_DEV_INFO(dev); - _dhd_apf_lock_local(dhd); + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) + spin_unlock_irqrestore(&dhd->dhd_lock, flags); } -void dhd_apf_unlock(struct net_device *dev) +/* Linux specific multipurpose spinlock API */ +void * +dhd_os_spin_lock_init(osl_t *osh) { - dhd_info_t *dhd = DHD_DEV_INFO(dev); - _dhd_apf_unlock_local(dhd); + /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */ + /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */ + /* and this results in kernel asserts in internal builds */ + spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4); + if (lock) + spin_lock_init(lock); + return ((void *)lock); +} 
+void +dhd_os_spin_lock_deinit(osl_t *osh, void *lock) +{ + if (lock) + MFREE(osh, lock, sizeof(spinlock_t) + 4); } +unsigned long +dhd_os_spin_lock(void *lock) +{ + unsigned long flags = 0; -int -dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version) + if (lock) + spin_lock_irqsave((spinlock_t *)lock, flags); + + return flags; +} +void +dhd_os_spin_unlock(void *lock, unsigned long flags) { - dhd_info_t *dhd = DHD_DEV_INFO(ndev); - dhd_pub_t *dhdp = &dhd->pub; - int ifidx, ret; + if (lock) + spin_unlock_irqrestore((spinlock_t *)lock, flags); +} - if (!FW_SUPPORTED(dhdp, apf)) { - DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__)); +void * +dhd_os_dbgring_lock_init(osl_t *osh) +{ + struct mutex *mtx = NULL; - /* - * Notify Android framework that APF is not supported by setting - * version as zero. - */ - *version = 0; - return BCME_OK; - } + mtx = MALLOCZ(osh, sizeof(*mtx)); + if (mtx) + mutex_init(mtx); - ifidx = dhd_net2idx(dhd, ndev); - if (ifidx == DHD_BAD_IF) { - DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__)); - return -ENODEV; - } + return mtx; +} - ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_ver", version, - WLC_GET_VAR, FALSE, ifidx); - if (unlikely(ret)) { - DHD_ERROR(("%s: failed to get APF version, ret=%d\n", - __FUNCTION__, ret)); +void +dhd_os_dbgring_lock_deinit(osl_t *osh, void *mtx) +{ + if (mtx) { + mutex_destroy(mtx); + MFREE(osh, mtx, sizeof(struct mutex)); } +} - return ret; +static int +dhd_get_pend_8021x_cnt(dhd_info_t *dhd) +{ + return (atomic_read(&dhd->pend_8021x_cnt)); } +#define MAX_WAIT_FOR_8021X_TX 100 + int -dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len) +dhd_wait_pend8021x(struct net_device *dev) { - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev); - dhd_pub_t *dhdp = &dhd->pub; - int ifidx, ret; - - if (!FW_SUPPORTED(dhdp, apf)) { - DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__)); - *max_len = 0; - return BCME_OK; - } + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int timeout = 
msecs_to_jiffies(10); + int ntimes = MAX_WAIT_FOR_8021X_TX; + int pend = dhd_get_pend_8021x_cnt(dhd); - ifidx = dhd_net2idx(dhd, ndev); - if (ifidx == DHD_BAD_IF) { - DHD_ERROR(("%s bad ifidx\n", __FUNCTION__)); - return -ENODEV; + while (ntimes && pend) { + if (pend) { + set_current_state(TASK_INTERRUPTIBLE); + DHD_PERIM_UNLOCK(&dhd->pub); + schedule_timeout(timeout); + DHD_PERIM_LOCK(&dhd->pub); + set_current_state(TASK_RUNNING); + ntimes--; + } + pend = dhd_get_pend_8021x_cnt(dhd); } - - ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_size_limit", max_len, - WLC_GET_VAR, FALSE, ifidx); - if (unlikely(ret)) { - DHD_ERROR(("%s: failed to get APF size limit, ret=%d\n", - __FUNCTION__, ret)); + if (ntimes == 0) + { + atomic_set(&dhd->pend_8021x_cnt, 0); + DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__)); } - - return ret; + return pend; } -int -dhd_dev_apf_add_filter(struct net_device *ndev, u8* program, - uint32 program_len) +#if defined(DHD_DEBUG) +int write_file(const char * file_name, uint32 flags, uint8 *buf, int size) { - dhd_info_t *dhd = DHD_DEV_INFO(ndev); - dhd_pub_t *dhdp = &dhd->pub; - int ret; + int ret = 0; + struct file *fp = NULL; + mm_segment_t old_fs; + loff_t pos = 0; + /* change to KERNEL_DS address limit */ + old_fs = get_fs(); + set_fs(KERNEL_DS); - DHD_APF_LOCK(ndev); + /* open file to write */ + fp = filp_open(file_name, flags, 0664); + if (IS_ERR(fp)) { + DHD_ERROR(("open file error, err = %ld\n", PTR_ERR(fp))); + goto exit; + } - /* delete, if filter already exists */ - if (dhdp->apf_set) { - ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID); - if (unlikely(ret)) { - goto exit; - } - dhdp->apf_set = FALSE; + /* Write buf to file */ + ret = compat_vfs_write(fp, buf, size, &pos); + if (ret < 0) { + DHD_ERROR(("write file error, err = %d\n", ret)); + goto exit; } - ret = __dhd_apf_add_filter(ndev, PKT_FILTER_APF_ID, program, program_len); - if (ret) { + /* Sync file from filesystem to physical media */ + ret = vfs_fsync(fp, 0); + if (ret < 0) { + 
DHD_ERROR(("sync file error, error = %d\n", ret)); goto exit; } - dhdp->apf_set = TRUE; + ret = BCME_OK; - if (dhdp->in_suspend && dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) { - /* Driver is still in (early) suspend state, enable APF filter back */ - ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID, - PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE); - } exit: - DHD_APF_UNLOCK(ndev); + /* close file before return */ + if (!IS_ERR(fp)) + filp_close(fp, current->files); + + /* restore previous address limit */ + set_fs(old_fs); return ret; } +#endif // endif -int -dhd_dev_apf_enable_filter(struct net_device *ndev) +#ifdef DHD_DEBUG +static void +dhd_convert_memdump_type_to_str(uint32 type, char *buf, int substr_type) { - dhd_info_t *dhd = DHD_DEV_INFO(ndev); - dhd_pub_t *dhdp = &dhd->pub; - int ret = 0; - bool nan_dp_active = false; + char *type_str = NULL; - DHD_APF_LOCK(ndev); -#ifdef WL_NAN - nan_dp_active = wl_cfgnan_is_dp_active(ndev); -#endif /* WL_NAN */ - if (dhdp->apf_set && (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE) && - !nan_dp_active)) { - ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID, - PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE); + switch (type) { + case DUMP_TYPE_RESUMED_ON_TIMEOUT: + type_str = "resumed_on_timeout"; + break; + case DUMP_TYPE_D3_ACK_TIMEOUT: + type_str = "D3_ACK_timeout"; + break; + case DUMP_TYPE_DONGLE_TRAP: + type_str = "Dongle_Trap"; + break; + case DUMP_TYPE_MEMORY_CORRUPTION: + type_str = "Memory_Corruption"; + break; + case DUMP_TYPE_PKTID_AUDIT_FAILURE: + type_str = "PKTID_AUDIT_Fail"; + break; + case DUMP_TYPE_PKTID_INVALID: + type_str = "PKTID_INVALID"; + break; + case DUMP_TYPE_SCAN_TIMEOUT: + type_str = "SCAN_timeout"; + break; + case DUMP_TYPE_SCAN_BUSY: + type_str = "SCAN_Busy"; + break; + case DUMP_TYPE_BY_SYSDUMP: + if (substr_type == CMD_UNWANTED) { + type_str = "BY_SYSDUMP_FORUSER_unwanted"; + } else if (substr_type == CMD_DISCONNECTED) { + type_str = "BY_SYSDUMP_FORUSER_disconnected"; + } else { + 
type_str = "BY_SYSDUMP_FORUSER"; + } + break; + case DUMP_TYPE_BY_LIVELOCK: + type_str = "BY_LIVELOCK"; + break; + case DUMP_TYPE_AP_LINKUP_FAILURE: + type_str = "BY_AP_LINK_FAILURE"; + break; + case DUMP_TYPE_AP_ABNORMAL_ACCESS: + type_str = "INVALID_ACCESS"; + break; + case DUMP_TYPE_RESUMED_ON_TIMEOUT_RX: + type_str = "ERROR_RX_TIMED_OUT"; + break; + case DUMP_TYPE_RESUMED_ON_TIMEOUT_TX: + type_str = "ERROR_TX_TIMED_OUT"; + break; + case DUMP_TYPE_CFG_VENDOR_TRIGGERED: + type_str = "CFG_VENDOR_TRIGGERED"; + break; + case DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR: + type_str = "BY_INVALID_RING_RDWR"; + break; + case DUMP_TYPE_IFACE_OP_FAILURE: + type_str = "BY_IFACE_OP_FAILURE"; + break; + case DUMP_TYPE_TRANS_ID_MISMATCH: + type_str = "BY_TRANS_ID_MISMATCH"; + break; +#ifdef DEBUG_DNGL_INIT_FAIL + case DUMP_TYPE_DONGLE_INIT_FAILURE: + type_str = "DONGLE_INIT_FAIL"; + break; +#endif /* DEBUG_DNGL_INIT_FAIL */ + case DUMP_TYPE_DONGLE_HOST_EVENT: + type_str = "BY_DONGLE_HOST_EVENT"; + break; + case DUMP_TYPE_SMMU_FAULT: + type_str = "SMMU_FAULT"; + break; + case DUMP_TYPE_BY_USER: + type_str = "BY_USER"; + break; +#ifdef DHD_ERPOM + case DUMP_TYPE_DUE_TO_BT: + type_str = "DUE_TO_BT"; + break; +#endif /* DHD_ERPOM */ + case DUMP_TYPE_LOGSET_BEYOND_RANGE: + type_str = "LOGSET_BEYOND_RANGE"; + break; + case DUMP_TYPE_CTO_RECOVERY: + type_str = "CTO_RECOVERY"; + break; + case DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR: + type_str = "SEQUENTIAL_PRIVCMD_ERROR"; + break; + case DUMP_TYPE_PROXD_TIMEOUT: + type_str = "PROXD_TIMEOUT"; + break; + case DUMP_TYPE_PKTID_POOL_DEPLETED: + type_str = "PKTID_POOL_DEPLETED"; + break; + default: + type_str = "Unknown_type"; + break; } - DHD_APF_UNLOCK(ndev); - - return ret; + strncpy(buf, type_str, strlen(type_str)); + buf[strlen(type_str)] = 0; } -int -dhd_dev_apf_disable_filter(struct net_device *ndev) +void +dhd_get_memdump_filename(struct net_device *ndev, char *memdump_path, int len, char *fname) { - dhd_info_t *dhd = DHD_DEV_INFO(ndev); + 
char memdump_type[32]; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev); dhd_pub_t *dhdp = &dhd->pub; - int ret = 0; - - DHD_APF_LOCK(ndev); - if (dhdp->apf_set) { - ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID, - PKT_FILTER_MODE_FORWARD_ON_MATCH, FALSE); + /* Init file name */ + memset(memdump_path, 0, len); + memset(memdump_type, 0, sizeof(memdump_type)); + dhd_convert_memdump_type_to_str(dhdp->memdump_type, memdump_type, dhdp->debug_dump_subcmd); + clear_debug_dump_time(dhdp->debug_dump_time_str); + get_debug_dump_time(dhdp->debug_dump_time_str); + snprintf(memdump_path, len, "%s%s_%s_" "%s", + DHD_COMMON_DUMP_PATH, fname, memdump_type, dhdp->debug_dump_time_str); + if (strstr(fname, "sssr_dump")) { + DHD_SSSR_PRINT_FILEPATH(dhdp, memdump_path); + } else { + DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__, + memdump_path, FILE_NAME_HAL_TAG)); } - - DHD_APF_UNLOCK(ndev); - - return ret; } int -dhd_dev_apf_delete_filter(struct net_device *ndev) +write_dump_to_file(dhd_pub_t *dhd, uint8 *buf, int size, char *fname) { - dhd_info_t *dhd = DHD_DEV_INFO(ndev); - dhd_pub_t *dhdp = &dhd->pub; int ret = 0; + char memdump_path[128]; + char memdump_type[32]; + uint32 file_mode; - DHD_APF_LOCK(ndev); - - if (dhdp->apf_set) { - ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID); - if (!ret) { - dhdp->apf_set = FALSE; - } - } - - DHD_APF_UNLOCK(ndev); + /* Init file name */ + memset(memdump_path, 0, sizeof(memdump_path)); + memset(memdump_type, 0, sizeof(memdump_type)); + dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type, dhd->debug_dump_subcmd); + clear_debug_dump_time(dhd->debug_dump_time_str); + get_debug_dump_time(dhd->debug_dump_time_str); + snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s", + DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str); + file_mode = O_CREAT | O_WRONLY | O_SYNC; - return ret; -} -#endif /* PKT_FILTER_SUPPORT && APF */ + /* print SOCRAM dump file path */ + DHD_ERROR(("%s: file_path = 
%s\n", __FUNCTION__, memdump_path)); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) -static void dhd_hang_process(void *dhd_info, void *event_info, u8 event) -{ - dhd_info_t *dhd; - struct net_device *dev; +#ifdef DHD_LOG_DUMP + dhd_print_buf_addr(dhd, "write_dump_to_file", buf, size); +#endif /* DHD_LOG_DUMP */ - dhd = (dhd_info_t *)dhd_info; - if (!dhd || !dhd->iflist[0]) - return; - dev = dhd->iflist[0]->net; + /* Write file */ + ret = write_file(memdump_path, file_mode, buf, size); - if (dev) { - /* - * For HW2, dev_close need to be done to recover - * from upper layer after hang. For Interposer skip - * dev_close so that dhd iovars can be used to take - * socramdump after crash, also skip for HW4 as - * handling of hang event is different - */ -#if defined(WL_WIRELESS_EXT) - wl_iw_send_priv_event(dev, "HANG"); -#endif // endif -#if defined(WL_CFG80211) - wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED); -#endif // endif +#ifdef DHD_DUMP_MNGR + if (ret == BCME_OK) { + dhd_dump_file_manage_enqueue(dhd, memdump_path, fname); } -} +#endif /* DHD_DUMP_MNGR */ -#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY -extern dhd_pub_t *link_recovery; -void dhd_host_recover_link(void) -{ - DHD_ERROR(("****** %s ******\n", __FUNCTION__)); - link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN; - dhd_bus_set_linkdown(link_recovery, TRUE); - dhd_os_send_hang_message(link_recovery); + return ret; } -EXPORT_SYMBOL(dhd_host_recover_link); -#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ +#endif /* DHD_DEBUG */ -int dhd_os_send_hang_message(dhd_pub_t *dhdp) +int dhd_os_wake_lock_timeout(dhd_pub_t *pub) { + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; int ret = 0; - if (dhdp) { -#ifdef WL_CFG80211 - struct net_device *primary_ndev; - struct bcm_cfg80211 *cfg; - - primary_ndev = dhd_linux_get_primary_netdev(dhdp); - if (!primary_ndev) { - DHD_ERROR(("%s: Cannot find primary netdev\n", - __FUNCTION__)); - return -ENODEV; - } - - cfg = wl_get_cfg(primary_ndev); - if 
(!cfg) { - DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__)); - return -EINVAL; - } - - /* Skip sending HANG event to framework if driver is not ready */ - if (!wl_get_drv_status(cfg, READY, primary_ndev)) { - DHD_ERROR(("%s: device is not ready\n", __FUNCTION__)); - return -ENODEV; - } -#endif /* WL_CFG80211 */ - - if (!dhdp->hang_was_sent) { -#if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG) - dhdp->hang_counts++; - if (dhdp->hang_counts >= MAX_CONSECUTIVE_HANG_COUNTS) { - DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n", - __func__, dhdp->hang_counts)); - BUG_ON(1); - } -#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */ -#ifdef DHD_DEBUG_UART - /* If PCIe lane has broken, execute the debug uart application - * to gether a ramdump data from dongle via uart - */ - if (!dhdp->info->duart_execute) { - dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, - (void *)dhdp, DHD_WQ_WORK_DEBUG_UART_DUMP, - dhd_debug_uart_exec_rd, DHD_WQ_WORK_PRIORITY_HIGH); - } -#endif /* DHD_DEBUG_UART */ - dhdp->hang_was_sent = 1; -#ifdef BT_OVER_SDIO - dhdp->is_bt_recovery_required = TRUE; + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ? 
+ dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable; +#ifdef CONFIG_HAS_WAKELOCK + if (dhd->wakelock_rx_timeout_enable) + wake_lock_timeout(&dhd->wl_rxwake, + msecs_to_jiffies(dhd->wakelock_rx_timeout_enable)); + if (dhd->wakelock_ctrl_timeout_enable) + wake_lock_timeout(&dhd->wl_ctrlwake, + msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable)); #endif // endif - dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp, - DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WQ_WORK_PRIORITY_HIGH); - DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d s=%d\n", __FUNCTION__, - dhdp->rxcnt_timeout, dhdp->txcnt_timeout, dhdp->busstate)); - } + dhd->wakelock_rx_timeout_enable = 0; + dhd->wakelock_ctrl_timeout_enable = 0; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); } return ret; } -int net_os_send_hang_message(struct net_device *dev) +int net_os_wake_lock_timeout(struct net_device *dev) { dhd_info_t *dhd = DHD_DEV_INFO(dev); int ret = 0; - if (dhd) { - /* Report FW problem when enabled */ - if (dhd->pub.hang_report) { -#ifdef BT_OVER_SDIO - if (netif_running(dev)) { -#endif /* BT_OVER_SDIO */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) - ret = dhd_os_send_hang_message(&dhd->pub); -#else - ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED); -#endif // endif -#ifdef BT_OVER_SDIO - } - DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__)); - bcmsdh_btsdio_process_dhd_hang_notification(!netif_running(dev)); -#endif /* BT_OVER_SDIO */ - } else { - DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n", - __FUNCTION__)); - } - } + if (dhd) + ret = dhd_os_wake_lock_timeout(&dhd->pub); return ret; } -int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num) +int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val) { - dhd_info_t *dhd = NULL; - dhd_pub_t *dhdp = NULL; - int reason; - - dhd = DHD_DEV_INFO(dev); - if (dhd) { - dhdp = &dhd->pub; - } + dhd_info_t *dhd = (dhd_info_t 
*)(pub->info); + unsigned long flags; - if (!dhd || !dhdp) { - return 0; + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + if (val > dhd->wakelock_rx_timeout_enable) + dhd->wakelock_rx_timeout_enable = val; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); } + return 0; +} - reason = bcm_strtoul(string_num, NULL, 0); - DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason)); +int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; - if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) { - reason = 0; + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + if (val > dhd->wakelock_ctrl_timeout_enable) + dhd->wakelock_ctrl_timeout_enable = val; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); } + return 0; +} - dhdp->hang_reason = reason; +int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; - return net_os_send_hang_message(dev); + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + dhd->wakelock_ctrl_timeout_enable = 0; +#ifdef CONFIG_HAS_WAKELOCK + if (wake_lock_active(&dhd->wl_ctrlwake)) + wake_unlock(&dhd->wl_ctrlwake); +#endif // endif + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return 0; } -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */ -int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec) +int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val) { dhd_info_t *dhd = DHD_DEV_INFO(dev); - return wifi_platform_set_power(dhd->adapter, on, delay_msec); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val); + return ret; } -bool 
dhd_force_country_change(struct net_device *dev) +int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val) { dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; - if (dhd && dhd->pub.up) - return dhd->pub.force_country_change; - return FALSE; + if (dhd) + ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val); + return ret; } -void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code, - wl_country_t *cspec) +#if defined(DHD_TRACE_WAKE_LOCK) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) +#include +#else +#include +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) +/* Define 2^5 = 32 bucket size hash table */ +DEFINE_HASHTABLE(wklock_history, 5); +#else +/* Define 2^5 = 32 bucket size hash table */ +struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT }; +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + +atomic_t trace_wklock_onoff; +typedef enum dhd_wklock_type { + DHD_WAKE_LOCK, + DHD_WAKE_UNLOCK, + DHD_WAIVE_LOCK, + DHD_RESTORE_LOCK +} dhd_wklock_t; + +struct wk_trace_record { + unsigned long addr; /* Address of the instruction */ + dhd_wklock_t lock_type; /* lock_type */ + unsigned long long counter; /* counter information */ + struct hlist_node wklock_node; /* hash node */ +}; + +static struct wk_trace_record *find_wklock_entry(unsigned long addr) { - dhd_info_t *dhd = DHD_DEV_INFO(dev); -#if defined(DHD_BLOB_EXISTENCE_CHECK) - if (!dhd->pub.is_blob) -#endif /* DHD_BLOB_EXISTENCE_CHECK */ - { -#if defined(CUSTOM_COUNTRY_CODE) - get_customized_country_code(dhd->adapter, country_iso_code, cspec, - dhd->pub.dhd_cflags); + struct wk_trace_record *wklock_info; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr) #else - get_customized_country_code(dhd->adapter, country_iso_code, cspec); -#endif /* CUSTOM_COUNTRY_CODE */ - } -#if defined(DHD_BLOB_EXISTENCE_CHECK) && 
!defined(CUSTOM_COUNTRY_CODE) - else { - /* Replace the ccode to XZ if ccode is undefined country */ - if (strncmp(country_iso_code, "", WLC_CNTRY_BUF_SZ) == 0) { - strlcpy(country_iso_code, "XZ", WLC_CNTRY_BUF_SZ); - strlcpy(cspec->country_abbrev, country_iso_code, WLC_CNTRY_BUF_SZ); - strlcpy(cspec->ccode, country_iso_code, WLC_CNTRY_BUF_SZ); - DHD_ERROR(("%s: ccode change to %s\n", __FUNCTION__, country_iso_code)); + struct hlist_node *entry; + int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history))); + hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node) +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + { + if (wklock_info->addr == addr) { + return wklock_info; } } -#endif /* DHD_BLOB_EXISTENCE_CHECK && !CUSTOM_COUNTRY_CODE */ - - BCM_REFERENCE(dhd); + return NULL; } -void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify) -{ - dhd_info_t *dhd = DHD_DEV_INFO(dev); -#ifdef WL_CFG80211 - struct bcm_cfg80211 *cfg = wl_get_cfg(dev); -#endif // endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) +#define HASH_ADD(hashtable, node, key) \ + do { \ + hash_add(hashtable, node, key); \ + } while (0); +#else +#define HASH_ADD(hashtable, node, key) \ + do { \ + int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \ + hlist_add_head(node, &hashtable[index]); \ + } while (0); +#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */ - if (dhd && dhd->pub.up) { - memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t)); -#ifdef WL_CFG80211 - wl_update_wiphybands(cfg, notify); -#endif // endif - } -} +#define STORE_WKLOCK_RECORD(wklock_type) \ + do { \ + struct wk_trace_record *wklock_info = NULL; \ + unsigned long func_addr = (unsigned long)__builtin_return_address(0); \ + wklock_info = find_wklock_entry(func_addr); \ + if (wklock_info) { \ + if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \ + wklock_info->counter = dhd->wakelock_counter; \ + } else { \ + wklock_info->counter++; \ + } \ 
+ } else { \ + wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \ + if (!wklock_info) {\ + printk("Can't allocate wk_trace_record \n"); \ + } else { \ + wklock_info->addr = func_addr; \ + wklock_info->lock_type = wklock_type; \ + if (wklock_type == DHD_WAIVE_LOCK || \ + wklock_type == DHD_RESTORE_LOCK) { \ + wklock_info->counter = dhd->wakelock_counter; \ + } else { \ + wklock_info->counter++; \ + } \ + HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \ + } \ + } \ + } while (0); -void dhd_bus_band_set(struct net_device *dev, uint band) +static inline void dhd_wk_lock_rec_dump(void) { - dhd_info_t *dhd = DHD_DEV_INFO(dev); -#ifdef WL_CFG80211 - struct bcm_cfg80211 *cfg = wl_get_cfg(dev); -#endif // endif - if (dhd && dhd->pub.up) { -#ifdef WL_CFG80211 - wl_update_wiphybands(cfg, true); -#endif // endif - } + int bkt; + struct wk_trace_record *wklock_info; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_for_each(wklock_history, bkt, wklock_info, wklock_node) +#else + struct hlist_node *entry = NULL; + int max_index = ARRAY_SIZE(wklock_history); + for (bkt = 0; bkt < max_index; bkt++) + hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node) +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + { + switch (wklock_info->lock_type) { + case DHD_WAKE_LOCK: + printk("wakelock lock : %pS lock_counter : %llu \n", + (void *)wklock_info->addr, wklock_info->counter); + break; + case DHD_WAKE_UNLOCK: + printk("wakelock unlock : %pS, unlock_counter : %llu \n", + (void *)wklock_info->addr, wklock_info->counter); + break; + case DHD_WAIVE_LOCK: + printk("wakelock waive : %pS before_waive : %llu \n", + (void *)wklock_info->addr, wklock_info->counter); + break; + case DHD_RESTORE_LOCK: + printk("wakelock restore : %pS, after_waive : %llu \n", + (void *)wklock_info->addr, wklock_info->counter); + break; + } + } } -int dhd_net_set_fw_path(struct net_device *dev, char *fw) +static void dhd_wk_lock_trace_init(struct dhd_info 
*dhd) { - dhd_info_t *dhd = DHD_DEV_INFO(dev); - - if (!fw || fw[0] == '\0') - return -EINVAL; - - strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1); - dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0'; + unsigned long flags; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) + int i; +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ -#if defined(SOFTAP) - if (strstr(fw, "apsta") != NULL) { - DHD_INFO(("GOT APSTA FIRMWARE\n")); - ap_fw_loaded = TRUE; - } else { - DHD_INFO(("GOT STA FIRMWARE\n")); - ap_fw_loaded = FALSE; - } -#endif // endif - return 0; + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_init(wklock_history); +#else + for (i = 0; i < ARRAY_SIZE(wklock_history); i++) + INIT_HLIST_HEAD(&wklock_history[i]); +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + atomic_set(&trace_wklock_onoff, 1); } -void dhd_net_if_lock(struct net_device *dev) +static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd) { - dhd_info_t *dhd = DHD_DEV_INFO(dev); - dhd_net_if_lock_local(dhd); -} + int bkt; + struct wk_trace_record *wklock_info; + struct hlist_node *tmp; + unsigned long flags; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) + struct hlist_node *entry = NULL; + int max_index = ARRAY_SIZE(wklock_history); +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ -void dhd_net_if_unlock(struct net_device *dev) -{ - dhd_info_t *dhd = DHD_DEV_INFO(dev); - dhd_net_if_unlock_local(dhd); + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node) +#else + for (bkt = 0; bkt < max_index; bkt++) + hlist_for_each_entry_safe(wklock_info, entry, tmp, + &wklock_history[bkt], wklock_node) +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */ + { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_del(&wklock_info->wklock_node); +#else + 
hlist_del_init(&wklock_info->wklock_node); +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */ + kfree(wklock_info); + } + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); } -static void dhd_net_if_lock_local(dhd_info_t *dhd) +void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) - if (dhd) - mutex_lock(&dhd->dhd_net_if_mutex); -#endif // endif -} + dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); + unsigned long flags; -static void dhd_net_if_unlock_local(dhd_info_t *dhd) -{ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) - if (dhd) - mutex_unlock(&dhd->dhd_net_if_mutex); -#endif // endif -} + printk(KERN_ERR"DHD Printing wl_wake Lock/Unlock Record \r\n"); + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + dhd_wk_lock_rec_dump(); + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); -static void dhd_suspend_lock(dhd_pub_t *pub) -{ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) - dhd_info_t *dhd = (dhd_info_t *)(pub->info); - if (dhd) - mutex_lock(&dhd->dhd_suspend_mutex); -#endif // endif } +#else +#define STORE_WKLOCK_RECORD(wklock_type) +#endif /* ! 
DHD_TRACE_WAKE_LOCK */ -static void dhd_suspend_unlock(dhd_pub_t *pub) +int dhd_os_wake_lock(dhd_pub_t *pub) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) dhd_info_t *dhd = (dhd_info_t *)(pub->info); - if (dhd) - mutex_unlock(&dhd->dhd_suspend_mutex); + unsigned long flags; + int ret = 0; + + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) { +#ifdef CONFIG_HAS_WAKELOCK + wake_lock(&dhd->wl_wifi); +#elif defined(BCMSDIO) + dhd_bus_dev_pm_stay_awake(pub); #endif // endif + } +#ifdef DHD_TRACE_WAKE_LOCK + if (atomic_read(&trace_wklock_onoff)) { + STORE_WKLOCK_RECORD(DHD_WAKE_LOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ + dhd->wakelock_counter++; + ret = dhd->wakelock_counter; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + + return ret; } -unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub) +void dhd_event_wake_lock(dhd_pub_t *pub) { dhd_info_t *dhd = (dhd_info_t *)(pub->info); - unsigned long flags = 0; - - if (dhd) - spin_lock_irqsave(&dhd->dhd_lock, flags); - return flags; + if (dhd) { +#ifdef CONFIG_HAS_WAKELOCK + wake_lock(&dhd->wl_evtwake); +#elif defined(BCMSDIO) + dhd_bus_dev_pm_stay_awake(pub); +#endif // endif + } } -void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags) +void +dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val) { +#ifdef CONFIG_HAS_WAKELOCK dhd_info_t *dhd = (dhd_info_t *)(pub->info); - if (dhd) - spin_unlock_irqrestore(&dhd->dhd_lock, flags); + if (dhd) { + wake_lock_timeout(&dhd->wl_pmwake, msecs_to_jiffies(val)); + } +#endif /* CONFIG_HAS_WAKE_LOCK */ } -/* Linux specific multipurpose spinlock API */ -void * -dhd_os_spin_lock_init(osl_t *osh) -{ - /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */ - /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */ - /* and this results in kernel asserts in internal builds */ - spinlock_t * lock = MALLOC(osh, 
sizeof(spinlock_t) + 4); - if (lock) - spin_lock_init(lock); - return ((void *)lock); -} void -dhd_os_spin_lock_deinit(osl_t *osh, void *lock) -{ - if (lock) - MFREE(osh, lock, sizeof(spinlock_t) + 4); -} -unsigned long -dhd_os_spin_lock(void *lock) +dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val) { - unsigned long flags = 0; - - if (lock) - spin_lock_irqsave((spinlock_t *)lock, flags); +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); - return flags; -} -void -dhd_os_spin_unlock(void *lock, unsigned long flags) -{ - if (lock) - spin_unlock_irqrestore((spinlock_t *)lock, flags); + if (dhd) { + wake_lock_timeout(&dhd->wl_txflwake, msecs_to_jiffies(val)); + } +#endif /* CONFIG_HAS_WAKE_LOCK */ } -void * -dhd_os_dbgring_lock_init(osl_t *osh) +int net_os_wake_lock(struct net_device *dev) { - struct mutex *mtx = NULL; - - mtx = MALLOCZ(osh, sizeof(*mtx)); - if (mtx) - mutex_init(mtx); - - return mtx; -} + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; -void -dhd_os_dbgring_lock_deinit(osl_t *osh, void *mtx) -{ - if (mtx) { - mutex_destroy(mtx); - MFREE(osh, mtx, sizeof(struct mutex)); - } + if (dhd) + ret = dhd_os_wake_lock(&dhd->pub); + return ret; } -static int -dhd_get_pend_8021x_cnt(dhd_info_t *dhd) +int dhd_os_wake_unlock(dhd_pub_t *pub) { - return (atomic_read(&dhd->pend_8021x_cnt)); -} - -#define MAX_WAIT_FOR_8021X_TX 100 + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; -int -dhd_wait_pend8021x(struct net_device *dev) -{ - dhd_info_t *dhd = DHD_DEV_INFO(dev); - int timeout = msecs_to_jiffies(10); - int ntimes = MAX_WAIT_FOR_8021X_TX; - int pend = dhd_get_pend_8021x_cnt(dhd); + dhd_os_wake_lock_timeout(pub); + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); - while (ntimes && pend) { - if (pend) { - set_current_state(TASK_INTERRUPTIBLE); - DHD_PERIM_UNLOCK(&dhd->pub); - schedule_timeout(timeout); - DHD_PERIM_LOCK(&dhd->pub); - 
set_current_state(TASK_RUNNING); - ntimes--; + if (dhd->wakelock_counter > 0) { + dhd->wakelock_counter--; +#ifdef DHD_TRACE_WAKE_LOCK + if (atomic_read(&trace_wklock_onoff)) { + STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ + if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) { +#ifdef CONFIG_HAS_WAKELOCK + wake_unlock(&dhd->wl_wifi); +#elif defined(BCMSDIO) + dhd_bus_dev_pm_relax(pub); +#endif // endif + } + ret = dhd->wakelock_counter; } - pend = dhd_get_pend_8021x_cnt(dhd); - } - if (ntimes == 0) - { - atomic_set(&dhd->pend_8021x_cnt, 0); - DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__)); + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); } - return pend; + return ret; } -#if defined(DHD_DEBUG) -int write_file(const char * file_name, uint32 flags, uint8 *buf, int size) +void dhd_event_wake_unlock(dhd_pub_t *pub) { - int ret = 0; - struct file *fp = NULL; - mm_segment_t old_fs; - loff_t pos = 0; - /* change to KERNEL_DS address limit */ - old_fs = get_fs(); - set_fs(KERNEL_DS); + dhd_info_t *dhd = (dhd_info_t *)(pub->info); - /* open file to write */ - fp = filp_open(file_name, flags, 0664); - if (IS_ERR(fp)) { - DHD_ERROR(("open file error, err = %ld\n", PTR_ERR(fp))); - goto exit; + if (dhd) { +#ifdef CONFIG_HAS_WAKELOCK + wake_unlock(&dhd->wl_evtwake); +#elif defined(BCMSDIO) + dhd_bus_dev_pm_relax(pub); +#endif // endif } +} - /* Write buf to file */ - ret = vfs_write(fp, buf, size, &pos); - if (ret < 0) { - DHD_ERROR(("write file error, err = %d\n", ret)); - goto exit; +void dhd_pm_wake_unlock(dhd_pub_t *pub) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + /* if wl_pmwake is active, unlock it */ + if (wake_lock_active(&dhd->wl_pmwake)) { + wake_unlock(&dhd->wl_pmwake); + } } +#endif /* CONFIG_HAS_WAKELOCK */ +} - /* Sync file from filesystem to physical media */ - ret = vfs_fsync(fp, 0); - if (ret < 0) { - DHD_ERROR(("sync file error, error = %d\n", ret)); - goto exit; +void 
dhd_txfl_wake_unlock(dhd_pub_t *pub) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + /* if wl_txflwake is active, unlock it */ + if (wake_lock_active(&dhd->wl_txflwake)) { + wake_unlock(&dhd->wl_txflwake); + } } - ret = BCME_OK; +#endif /* CONFIG_HAS_WAKELOCK */ +} -exit: - /* close file before return */ - if (!IS_ERR(fp)) - filp_close(fp, current->files); +int dhd_os_check_wakelock(dhd_pub_t *pub) +{ +#if defined(CONFIG_HAS_WAKELOCK) || defined(BCMSDIO) + dhd_info_t *dhd; - /* restore previous address limit */ - set_fs(old_fs); + if (!pub) + return 0; + dhd = (dhd_info_t *)(pub->info); +#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */ - return ret; -} +#ifdef CONFIG_HAS_WAKELOCK + /* Indicate to the SD Host to avoid going to suspend if internal locks are up */ + if (dhd && (wake_lock_active(&dhd->wl_wifi) || + (wake_lock_active(&dhd->wl_wdwake)))) + return 1; +#elif defined(BCMSDIO) + if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) + return 1; #endif // endif + return 0; +} -#ifdef DHD_DEBUG -static void -dhd_convert_memdump_type_to_str(uint32 type, char *buf, int substr_type) +int +dhd_os_check_wakelock_all(dhd_pub_t *pub) { - char *type_str = NULL; +#if defined(CONFIG_HAS_WAKELOCK) || defined(BCMSDIO) +#if defined(CONFIG_HAS_WAKELOCK) + int l1, l2, l3, l4, l7, l8, l9; + int l5 = 0, l6 = 0; + int c, lock_active; +#endif /* CONFIG_HAS_WAKELOCK */ + dhd_info_t *dhd; - switch (type) { - case DUMP_TYPE_RESUMED_ON_TIMEOUT: - type_str = "resumed_on_timeout"; - break; - case DUMP_TYPE_D3_ACK_TIMEOUT: - type_str = "D3_ACK_timeout"; - break; - case DUMP_TYPE_DONGLE_TRAP: - type_str = "Dongle_Trap"; - break; - case DUMP_TYPE_MEMORY_CORRUPTION: - type_str = "Memory_Corruption"; - break; - case DUMP_TYPE_PKTID_AUDIT_FAILURE: - type_str = "PKTID_AUDIT_Fail"; - break; - case DUMP_TYPE_PKTID_INVALID: - type_str = "PKTID_INVALID"; - break; - case DUMP_TYPE_SCAN_TIMEOUT: - type_str = "SCAN_timeout"; - break; - case 
DUMP_TYPE_SCAN_BUSY: - type_str = "SCAN_Busy"; - break; - case DUMP_TYPE_BY_SYSDUMP: - if (substr_type == CMD_UNWANTED) { - type_str = "BY_SYSDUMP_FORUSER_unwanted"; - } else if (substr_type == CMD_DISCONNECTED) { - type_str = "BY_SYSDUMP_FORUSER_disconnected"; - } else { - type_str = "BY_SYSDUMP_FORUSER"; - } - break; - case DUMP_TYPE_BY_LIVELOCK: - type_str = "BY_LIVELOCK"; - break; - case DUMP_TYPE_AP_LINKUP_FAILURE: - type_str = "BY_AP_LINK_FAILURE"; - break; - case DUMP_TYPE_AP_ABNORMAL_ACCESS: - type_str = "INVALID_ACCESS"; - break; - case DUMP_TYPE_RESUMED_ON_TIMEOUT_RX: - type_str = "ERROR_RX_TIMED_OUT"; - break; - case DUMP_TYPE_RESUMED_ON_TIMEOUT_TX: - type_str = "ERROR_TX_TIMED_OUT"; - break; - case DUMP_TYPE_CFG_VENDOR_TRIGGERED: - type_str = "CFG_VENDOR_TRIGGERED"; - break; - case DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR: - type_str = "BY_INVALID_RING_RDWR"; - break; - case DUMP_TYPE_IFACE_OP_FAILURE: - type_str = "BY_IFACE_OP_FAILURE"; - break; - case DUMP_TYPE_TRANS_ID_MISMATCH: - type_str = "BY_TRANS_ID_MISMATCH"; - break; -#ifdef DEBUG_DNGL_INIT_FAIL - case DUMP_TYPE_DONGLE_INIT_FAILURE: - type_str = "DONGLE_INIT_FAIL"; - break; -#endif /* DEBUG_DNGL_INIT_FAIL */ - case DUMP_TYPE_DONGLE_HOST_EVENT: - type_str = "BY_DONGLE_HOST_EVENT"; - break; - case DUMP_TYPE_SMMU_FAULT: - type_str = "SMMU_FAULT"; - break; - case DUMP_TYPE_BY_USER: - type_str = "BY_USER"; - break; -#ifdef DHD_ERPOM - case DUMP_TYPE_DUE_TO_BT: - type_str = "DUE_TO_BT"; - break; -#endif /* DHD_ERPOM */ - case DUMP_TYPE_LOGSET_BEYOND_RANGE: - type_str = "LOGSET_BEYOND_RANGE"; - break; - default: - type_str = "Unknown_type"; - break; + if (!pub) { + return 0; + } + dhd = (dhd_info_t *)(pub->info); + if (!dhd) { + return 0; } +#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */ - strncpy(buf, type_str, strlen(type_str)); - buf[strlen(type_str)] = 0; +#ifdef CONFIG_HAS_WAKELOCK + c = dhd->wakelock_counter; + l1 = wake_lock_active(&dhd->wl_wifi); + l2 = wake_lock_active(&dhd->wl_wdwake); + l3 = 
wake_lock_active(&dhd->wl_rxwake); + l4 = wake_lock_active(&dhd->wl_ctrlwake); + l7 = wake_lock_active(&dhd->wl_evtwake); +#ifdef BCMPCIE_OOB_HOST_WAKE + l5 = wake_lock_active(&dhd->wl_intrwake); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK + l6 = wake_lock_active(&dhd->wl_scanwake); +#endif /* DHD_USE_SCAN_WAKELOCK */ + l8 = wake_lock_active(&dhd->wl_pmwake); + l9 = wake_lock_active(&dhd->wl_txflwake); + lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7 || l8 || l9); + + /* Indicate to the Host to avoid going to suspend if internal locks are up */ + if (lock_active) { + DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d " + "ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d\n", + __FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7, l8, l9)); + return 1; + } +#elif defined(BCMSDIO) + if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) { + return 1; + } +#endif /* defined(BCMSDIO) */ + return 0; } -int -write_dump_to_file(dhd_pub_t *dhd, uint8 *buf, int size, char *fname) +int net_os_wake_unlock(struct net_device *dev) { + dhd_info_t *dhd = DHD_DEV_INFO(dev); int ret = 0; - char memdump_path[128]; - char memdump_type[32]; - struct timeval curtime; - uint32 file_mode; - - /* Init file name */ - memset(memdump_path, 0, sizeof(memdump_path)); - memset(memdump_type, 0, sizeof(memdump_type)); - do_gettimeofday(&curtime); - dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type, dhd->debug_dump_subcmd); - snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld", - "/data/misc/wifi/", fname, memdump_type, - (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec); - file_mode = O_CREAT | O_WRONLY | O_SYNC; - - /* print SOCRAM dump file path */ - DHD_ERROR(("%s: file_path = %s\n", __FUNCTION__, memdump_path)); -#ifdef DHD_LOG_DUMP - dhd_print_buf_addr(dhd, "write_dump_to_file", buf, size); -#endif /* DHD_LOG_DUMP */ + if (dhd) + ret = dhd_os_wake_unlock(&dhd->pub); + return ret; +} - /* Write file */ - ret = 
write_file(memdump_path, file_mode, buf, size); +int dhd_os_wd_wake_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; -#ifdef DHD_DUMP_MNGR - if (ret == BCME_OK) { - dhd_dump_file_manage_enqueue(dhd, memdump_path, fname); + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + if (dhd->wakelock_wd_counter == 0 && !dhd->waive_wakelock) { +#ifdef CONFIG_HAS_WAKELOCK + /* if wakelock_wd_counter was never used : lock it at once */ + wake_lock(&dhd->wl_wdwake); +#endif // endif + } + dhd->wakelock_wd_counter++; + ret = dhd->wakelock_wd_counter; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); } -#endif /* DHD_DUMP_MNGR */ - return ret; } -#endif /* DHD_DEBUG */ -int dhd_os_wake_lock_timeout(dhd_pub_t *pub) +int dhd_os_wd_wake_unlock(dhd_pub_t *pub) { dhd_info_t *dhd = (dhd_info_t *)(pub->info); unsigned long flags; int ret = 0; - if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + if (dhd) { spin_lock_irqsave(&dhd->wakelock_spinlock, flags); - ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ? 
- dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable; + if (dhd->wakelock_wd_counter > 0) { + dhd->wakelock_wd_counter = 0; + if (!dhd->waive_wakelock) { #ifdef CONFIG_HAS_WAKELOCK - if (dhd->wakelock_rx_timeout_enable) - wake_lock_timeout(&dhd->wl_rxwake, - msecs_to_jiffies(dhd->wakelock_rx_timeout_enable)); - if (dhd->wakelock_ctrl_timeout_enable) - wake_lock_timeout(&dhd->wl_ctrlwake, - msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable)); + wake_unlock(&dhd->wl_wdwake); #endif // endif - dhd->wakelock_rx_timeout_enable = 0; - dhd->wakelock_ctrl_timeout_enable = 0; + } + } spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); } return ret; } -int net_os_wake_lock_timeout(struct net_device *dev) +#ifdef BCMPCIE_OOB_HOST_WAKE +void +dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val) { - dhd_info_t *dhd = DHD_DEV_INFO(dev); - int ret = 0; +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); - if (dhd) - ret = dhd_os_wake_lock_timeout(&dhd->pub); - return ret; + if (dhd) { + wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val)); + } +#endif /* CONFIG_HAS_WAKELOCK */ } -int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val) +void +dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub) { +#ifdef CONFIG_HAS_WAKELOCK dhd_info_t *dhd = (dhd_info_t *)(pub->info); - unsigned long flags; - if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { - spin_lock_irqsave(&dhd->wakelock_spinlock, flags); - if (val > dhd->wakelock_rx_timeout_enable) - dhd->wakelock_rx_timeout_enable = val; - spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + if (dhd) { + /* if wl_intrwake is active, unlock it */ + if (wake_lock_active(&dhd->wl_intrwake)) { + wake_unlock(&dhd->wl_intrwake); + } } - return 0; +#endif /* CONFIG_HAS_WAKELOCK */ } +#endif /* BCMPCIE_OOB_HOST_WAKE */ -int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val) +#ifdef DHD_USE_SCAN_WAKELOCK +void +dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val) 
{ +#ifdef CONFIG_HAS_WAKELOCK dhd_info_t *dhd = (dhd_info_t *)(pub->info); - unsigned long flags; - if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { - spin_lock_irqsave(&dhd->wakelock_spinlock, flags); - if (val > dhd->wakelock_ctrl_timeout_enable) - dhd->wakelock_ctrl_timeout_enable = val; - spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + if (dhd) { + wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val)); } - return 0; +#endif /* CONFIG_HAS_WAKELOCK */ } -int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub) +void +dhd_os_scan_wake_unlock(dhd_pub_t *pub) { +#ifdef CONFIG_HAS_WAKELOCK dhd_info_t *dhd = (dhd_info_t *)(pub->info); - unsigned long flags; - if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { - spin_lock_irqsave(&dhd->wakelock_spinlock, flags); - dhd->wakelock_ctrl_timeout_enable = 0; -#ifdef CONFIG_HAS_WAKELOCK - if (wake_lock_active(&dhd->wl_ctrlwake)) - wake_unlock(&dhd->wl_ctrlwake); -#endif // endif - spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + if (dhd) { + /* if wl_scanwake is active, unlock it */ + if (wake_lock_active(&dhd->wl_scanwake)) { + wake_unlock(&dhd->wl_scanwake); + } } - return 0; +#endif /* CONFIG_HAS_WAKELOCK */ } +#endif /* DHD_USE_SCAN_WAKELOCK */ -int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val) +/* waive wakelocks for operations such as IOVARs in suspend function, must be closed + * by a paired function call to dhd_wakelock_restore. 
returns current wakelock counter + */ +int dhd_os_wake_lock_waive(dhd_pub_t *pub) { - dhd_info_t *dhd = DHD_DEV_INFO(dev); + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; int ret = 0; - if (dhd) - ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val); + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + + /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */ + if (dhd->waive_wakelock == FALSE) { +#ifdef DHD_TRACE_WAKE_LOCK + if (atomic_read(&trace_wklock_onoff)) { + STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ + /* record current lock status */ + dhd->wakelock_before_waive = dhd->wakelock_counter; + dhd->waive_wakelock = TRUE; + } + ret = dhd->wakelock_wd_counter; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } return ret; } -int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val) +int dhd_os_wake_lock_restore(dhd_pub_t *pub) { - dhd_info_t *dhd = DHD_DEV_INFO(dev); + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; int ret = 0; - if (dhd) - ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val); - return ret; -} - -#if defined(DHD_TRACE_WAKE_LOCK) -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) -#include -#else -#include -#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + if (!dhd) + return 0; + if ((dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) == 0) + return 0; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) -/* Define 2^5 = 32 bucket size hash table */ -DEFINE_HASHTABLE(wklock_history, 5); -#else -/* Define 2^5 = 32 bucket size hash table */ -struct hlist_head wklock_history[32] = { [0 ... 
31] = HLIST_HEAD_INIT }; -#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); -int trace_wklock_onoff = 1; -typedef enum dhd_wklock_type { - DHD_WAKE_LOCK, - DHD_WAKE_UNLOCK, - DHD_WAIVE_LOCK, - DHD_RESTORE_LOCK -} dhd_wklock_t; + /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */ + if (!dhd->waive_wakelock) + goto exit; -struct wk_trace_record { - unsigned long addr; /* Address of the instruction */ - dhd_wklock_t lock_type; /* lock_type */ - unsigned long long counter; /* counter information */ - struct hlist_node wklock_node; /* hash node */ -}; + dhd->waive_wakelock = FALSE; + /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore, + * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases + * the lock in between, do the same by calling wake_unlock or pm_relax + */ +#ifdef DHD_TRACE_WAKE_LOCK + if (atomic_read(&trace_wklock_onoff)) { + STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ -static struct wk_trace_record *find_wklock_entry(unsigned long addr) -{ - struct wk_trace_record *wklock_info; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) - hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr) -#else - struct hlist_node *entry; - int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history))); - hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node) -#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ - { - if (wklock_info->addr == addr) { - return wklock_info; - } + if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) { +#ifdef CONFIG_HAS_WAKELOCK + wake_lock(&dhd->wl_wifi); +#elif defined(BCMSDIO) + dhd_bus_dev_pm_stay_awake(&dhd->pub); +#endif // endif + } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) { +#ifdef CONFIG_HAS_WAKELOCK + wake_unlock(&dhd->wl_wifi); +#elif defined(BCMSDIO) + dhd_bus_dev_pm_relax(&dhd->pub); +#endif // 
endif } - return NULL; + dhd->wakelock_before_waive = 0; +exit: + ret = dhd->wakelock_wd_counter; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + return ret; } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) -#define HASH_ADD(hashtable, node, key) \ - do { \ - hash_add(hashtable, node, key); \ - } while (0); -#else -#define HASH_ADD(hashtable, node, key) \ - do { \ - int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \ - hlist_add_head(node, &hashtable[index]); \ - } while (0); -#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */ - -#define STORE_WKLOCK_RECORD(wklock_type) \ - do { \ - struct wk_trace_record *wklock_info = NULL; \ - unsigned long func_addr = (unsigned long)__builtin_return_address(0); \ - wklock_info = find_wklock_entry(func_addr); \ - if (wklock_info) { \ - if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \ - wklock_info->counter = dhd->wakelock_counter; \ - } else { \ - wklock_info->counter++; \ - } \ - } else { \ - wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \ - if (!wklock_info) {\ - printk("Can't allocate wk_trace_record \n"); \ - } else { \ - wklock_info->addr = func_addr; \ - wklock_info->lock_type = wklock_type; \ - if (wklock_type == DHD_WAIVE_LOCK || \ - wklock_type == DHD_RESTORE_LOCK) { \ - wklock_info->counter = dhd->wakelock_counter; \ - } else { \ - wklock_info->counter++; \ - } \ - HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \ - } \ - } \ - } while (0); +void dhd_os_wake_lock_init(struct dhd_info *dhd) +{ + DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__)); + dhd->wakelock_counter = 0; + dhd->wakelock_rx_timeout_enable = 0; + dhd->wakelock_ctrl_timeout_enable = 0; + /* wakelocks prevent a system from going into a low power state */ +#ifdef CONFIG_HAS_WAKELOCK + // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry + wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake"); + 
wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake"); + wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake"); + wake_lock_init(&dhd->wl_pmwake, WAKE_LOCK_SUSPEND, "wlan_pm_wake"); + wake_lock_init(&dhd->wl_txflwake, WAKE_LOCK_SUSPEND, "wlan_txfl_wake"); +#ifdef BCMPCIE_OOB_HOST_WAKE + wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake"); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK + wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake"); +#endif /* DHD_USE_SCAN_WAKELOCK */ +#endif /* CONFIG_HAS_WAKELOCK */ +#ifdef DHD_TRACE_WAKE_LOCK + dhd_wk_lock_trace_init(dhd); +#endif /* DHD_TRACE_WAKE_LOCK */ +} -static inline void dhd_wk_lock_rec_dump(void) +void dhd_os_wake_lock_destroy(struct dhd_info *dhd) { - int bkt; - struct wk_trace_record *wklock_info; + DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__)); +#ifdef CONFIG_HAS_WAKELOCK + dhd->wakelock_counter = 0; + dhd->wakelock_rx_timeout_enable = 0; + dhd->wakelock_ctrl_timeout_enable = 0; + // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry + wake_lock_destroy(&dhd->wl_rxwake); + wake_lock_destroy(&dhd->wl_ctrlwake); + wake_lock_destroy(&dhd->wl_evtwake); + wake_lock_destroy(&dhd->wl_pmwake); + wake_lock_destroy(&dhd->wl_txflwake); +#ifdef BCMPCIE_OOB_HOST_WAKE + wake_lock_destroy(&dhd->wl_intrwake); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK + wake_lock_destroy(&dhd->wl_scanwake); +#endif /* DHD_USE_SCAN_WAKELOCK */ +#ifdef DHD_TRACE_WAKE_LOCK + dhd_wk_lock_trace_deinit(dhd); +#endif /* DHD_TRACE_WAKE_LOCK */ +#endif /* CONFIG_HAS_WAKELOCK */ +} -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) - hash_for_each(wklock_history, bkt, wklock_info, wklock_node) -#else - struct hlist_node *entry = NULL; - int max_index = ARRAY_SIZE(wklock_history); - for (bkt = 0; bkt < max_index; bkt++) - hlist_for_each_entry(wklock_info, entry, 
&wklock_history[bkt], wklock_node) -#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ - { - switch (wklock_info->lock_type) { - case DHD_WAKE_LOCK: - printk("wakelock lock : %pS lock_counter : %llu \n", - (void *)wklock_info->addr, wklock_info->counter); - break; - case DHD_WAKE_UNLOCK: - printk("wakelock unlock : %pS, unlock_counter : %llu \n", - (void *)wklock_info->addr, wklock_info->counter); - break; - case DHD_WAIVE_LOCK: - printk("wakelock waive : %pS before_waive : %llu \n", - (void *)wklock_info->addr, wklock_info->counter); - break; - case DHD_RESTORE_LOCK: - printk("wakelock restore : %pS, after_waive : %llu \n", - (void *)wklock_info->addr, wklock_info->counter); - break; - } - } +bool dhd_os_check_if_up(dhd_pub_t *pub) +{ + if (!pub) + return FALSE; + return pub->up; } -static void dhd_wk_lock_trace_init(struct dhd_info *dhd) +/* function to collect firmware, chip id and chip version info */ +void dhd_set_version_info(dhd_pub_t *dhdp, char *fw) { - unsigned long flags; -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) int i; -#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ - spin_lock_irqsave(&dhd->wakelock_spinlock, flags); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) - hash_init(wklock_history); -#else - for (i = 0; i < ARRAY_SIZE(wklock_history); i++) - INIT_HLIST_HEAD(&wklock_history[i]); -#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ - spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + i = snprintf(info_string, sizeof(info_string), + " Driver: %s\n Firmware: %s\n CLM: %s ", EPI_VERSION_STR, fw, clm_version); + printf("%s\n", info_string); + + if (!dhdp) + return; + + i = snprintf(&info_string[i], sizeof(info_string) - i, + "\n Chip: %x Rev %x", dhd_conf_get_chip(dhdp), + dhd_conf_get_chiprev(dhdp)); } -static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd) +int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd) { - int bkt; - struct wk_trace_record *wklock_info; - struct hlist_node *tmp; - 
unsigned long flags; -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) - struct hlist_node *entry = NULL; - int max_index = ARRAY_SIZE(wklock_history); -#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + int ifidx; + int ret = 0; + dhd_info_t *dhd = NULL; - spin_lock_irqsave(&dhd->wakelock_spinlock, flags); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) - hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node) -#else - for (bkt = 0; bkt < max_index; bkt++) - hlist_for_each_entry_safe(wklock_info, entry, tmp, - &wklock_history[bkt], wklock_node) -#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */ - { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) - hash_del(&wklock_info->wklock_node); -#else - hlist_del_init(&wklock_info->wklock_node); -#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */ - kfree(wklock_info); - } - spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + if (!net || !DEV_PRIV(net)) { + DHD_ERROR(("%s invalid parameter net %p dev_priv %p\n", + __FUNCTION__, net, DEV_PRIV(net))); + return -EINVAL; + } + + dhd = DHD_DEV_INFO(net); + if (!dhd) + return -EINVAL; + + ifidx = dhd_net2idx(dhd, net); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s bad ifidx\n", __FUNCTION__)); + return -ENODEV; + } + + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + + ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len); + dhd_check_hang(net, &dhd->pub, ret); + + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + + return ret; } -void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp) +bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret) { - dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); - unsigned long flags; + struct net_device *net; - printk(KERN_ERR"DHD Printing wl_wake Lock/Unlock Record \r\n"); - spin_lock_irqsave(&dhd->wakelock_spinlock, flags); - dhd_wk_lock_rec_dump(); - spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + net = dhd_idx2net(dhdp, ifidx); + if (!net) { + DHD_ERROR(("%s : Invalid index : 
%d\n", __FUNCTION__, ifidx)); + return -EINVAL; + } + return dhd_check_hang(net, dhdp, ret); } -#else -#define STORE_WKLOCK_RECORD(wklock_type) -#endif /* ! DHD_TRACE_WAKE_LOCK */ -int dhd_os_wake_lock(dhd_pub_t *pub) +/* Return instance */ +int dhd_get_instance(dhd_pub_t *dhdp) { - dhd_info_t *dhd = (dhd_info_t *)(pub->info); - unsigned long flags; + return dhdp->info->unit; +} + +#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP) +#define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */ +int dhd_deepsleep(struct net_device *dev, int flag) +{ + char iovbuf[20]; + uint powervar = 0; + dhd_info_t *dhd; + dhd_pub_t *dhdp; + int cnt = 0; int ret = 0; - if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { - spin_lock_irqsave(&dhd->wakelock_spinlock, flags); - if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) { -#ifdef CONFIG_HAS_WAKELOCK - wake_lock(&dhd->wl_wifi); -#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) - dhd_bus_dev_pm_stay_awake(pub); -#endif // endif - } -#ifdef DHD_TRACE_WAKE_LOCK - if (trace_wklock_onoff) { - STORE_WKLOCK_RECORD(DHD_WAKE_LOCK); - } -#endif /* DHD_TRACE_WAKE_LOCK */ - dhd->wakelock_counter++; - ret = dhd->wakelock_counter; - spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + dhd = DHD_DEV_INFO(dev); + dhdp = &dhd->pub; + + switch (flag) { + case 1 : /* Deepsleep on */ + DHD_ERROR(("[WiFi] Deepsleep On\n")); + /* give some time to sysioc_work before deepsleep */ + OSL_SLEEP(200); +#ifdef PKT_FILTER_SUPPORT + /* disable pkt filter */ + dhd_enable_packet_filter(0, dhdp); +#endif /* PKT_FILTER_SUPPORT */ + /* Disable MPC */ + powervar = 0; + ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL, + 0, TRUE); + + /* Enable Deepsleep */ + powervar = 1; + ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar, sizeof(powervar), + NULL, 0, TRUE); + break; + + case 0: /* Deepsleep Off */ + DHD_ERROR(("[WiFi] Deepsleep Off\n")); + + /* Disable Deepsleep */ + for 
(cnt = 0; cnt < MAX_TRY_CNT; cnt++) { + powervar = 0; + ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar, + sizeof(powervar), NULL, 0, TRUE); + + ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar, + sizeof(powervar), iovbuf, sizeof(iovbuf), FALSE); + if (ret < 0) { + DHD_ERROR(("the error of dhd deepsleep status" + " ret value :%d\n", ret)); + } else { + if (!(*(int *)iovbuf)) { + DHD_ERROR(("deepsleep mode is 0," + " count: %d\n", cnt)); + break; + } + } + } + + /* Enable MPC */ + powervar = 1; + ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL, + 0, TRUE); + break; } - return ret; + return 0; } +#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */ -void dhd_event_wake_lock(dhd_pub_t *pub) -{ - dhd_info_t *dhd = (dhd_info_t *)(pub->info); +#ifdef PROP_TXSTATUS - if (dhd) { -#ifdef CONFIG_HAS_WAKELOCK - wake_lock(&dhd->wl_evtwake); -#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) - dhd_bus_dev_pm_stay_awake(pub); -#endif // endif - } +void dhd_wlfc_plat_init(void *dhd) +{ +#ifdef USE_DYNAMIC_F2_BLKSIZE + dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY); +#endif /* USE_DYNAMIC_F2_BLKSIZE */ + return; } -void -dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val) +void dhd_wlfc_plat_deinit(void *dhd) { -#ifdef CONFIG_HAS_WAKELOCK - dhd_info_t *dhd = (dhd_info_t *)(pub->info); - - if (dhd) { - wake_lock_timeout(&dhd->wl_pmwake, msecs_to_jiffies(val)); - } -#endif /* CONFIG_HAS_WAKE_LOCK */ +#ifdef USE_DYNAMIC_F2_BLKSIZE + dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize); +#endif /* USE_DYNAMIC_F2_BLKSIZE */ + return; } -void -dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val) +bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx) { -#ifdef CONFIG_HAS_WAKELOCK - dhd_info_t *dhd = (dhd_info_t *)(pub->info); +#ifdef SKIP_WLFC_ON_CONCURRENT - if (dhd) { - wake_lock_timeout(&dhd->wl_txflwake, msecs_to_jiffies(val)); - } -#endif /* CONFIG_HAS_WAKE_LOCK */ +#ifdef WL_CFG80211 + struct 
net_device * net = dhd_idx2net((dhd_pub_t *)dhdp, idx); + if (net) + /* enable flow control in vsdb mode */ + return !(wl_cfg80211_is_concurrent_mode(net)); +#else + return TRUE; /* skip flow control */ +#endif /* WL_CFG80211 */ + +#else + return FALSE; +#endif /* SKIP_WLFC_ON_CONCURRENT */ + return FALSE; } +#endif /* PROP_TXSTATUS */ -int net_os_wake_lock(struct net_device *dev) -{ - dhd_info_t *dhd = DHD_DEV_INFO(dev); - int ret = 0; +#ifdef BCMDBGFS +#include - if (dhd) - ret = dhd_os_wake_lock(&dhd->pub); - return ret; -} +typedef struct dhd_dbgfs { + struct dentry *debugfs_dir; + struct dentry *debugfs_mem; + dhd_pub_t *dhdp; + uint32 size; +} dhd_dbgfs_t; -int dhd_os_wake_unlock(dhd_pub_t *pub) -{ - dhd_info_t *dhd = (dhd_info_t *)(pub->info); - unsigned long flags; - int ret = 0; +dhd_dbgfs_t g_dbgfs; - dhd_os_wake_lock_timeout(pub); - if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { - spin_lock_irqsave(&dhd->wakelock_spinlock, flags); +extern uint32 dhd_readregl(void *bp, uint32 addr); +extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data); - if (dhd->wakelock_counter > 0) { - dhd->wakelock_counter--; -#ifdef DHD_TRACE_WAKE_LOCK - if (trace_wklock_onoff) { - STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK); - } -#endif /* DHD_TRACE_WAKE_LOCK */ - if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) { -#ifdef CONFIG_HAS_WAKELOCK - wake_unlock(&dhd->wl_wifi); -#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) - dhd_bus_dev_pm_relax(pub); -#endif // endif - } - ret = dhd->wakelock_counter; - } - spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); - } - return ret; +static int +dhd_dbg_state_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; } -void dhd_event_wake_unlock(dhd_pub_t *pub) +static ssize_t +dhd_dbg_state_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) { - dhd_info_t *dhd = (dhd_info_t *)(pub->info); + ssize_t rval; + uint32 tmp; 
+ loff_t pos = *ppos; + size_t ret; - if (dhd) { -#ifdef CONFIG_HAS_WAKELOCK - wake_unlock(&dhd->wl_evtwake); -#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) - dhd_bus_dev_pm_relax(pub); -#endif // endif - } -} + if (pos < 0) + return -EINVAL; + if (pos >= g_dbgfs.size || !count) + return 0; + if (count > g_dbgfs.size - pos) + count = g_dbgfs.size - pos; -void dhd_pm_wake_unlock(dhd_pub_t *pub) -{ -#ifdef CONFIG_HAS_WAKELOCK - dhd_info_t *dhd = (dhd_info_t *)(pub->info); + /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */ + tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3)); - if (dhd) { - /* if wl_pmwake is active, unlock it */ - if (wake_lock_active(&dhd->wl_pmwake)) { - wake_unlock(&dhd->wl_pmwake); - } - } -#endif /* CONFIG_HAS_WAKELOCK */ -} + ret = copy_to_user(ubuf, &tmp, 4); + if (ret == count) + return -EFAULT; -void dhd_txfl_wake_unlock(dhd_pub_t *pub) -{ -#ifdef CONFIG_HAS_WAKELOCK - dhd_info_t *dhd = (dhd_info_t *)(pub->info); + count -= ret; + *ppos = pos + count; + rval = count; - if (dhd) { - /* if wl_txflwake is active, unlock it */ - if (wake_lock_active(&dhd->wl_txflwake)) { - wake_unlock(&dhd->wl_txflwake); - } - } -#endif /* CONFIG_HAS_WAKELOCK */ + return rval; } -int dhd_os_check_wakelock(dhd_pub_t *pub) +static ssize_t +dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { -#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \ - KERNEL_VERSION(2, 6, 36))) - dhd_info_t *dhd; + loff_t pos = *ppos; + size_t ret; + uint32 buf; - if (!pub) + if (pos < 0) + return -EINVAL; + if (pos >= g_dbgfs.size || !count) return 0; - dhd = (dhd_info_t *)(pub->info); -#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */ + if (count > g_dbgfs.size - pos) + count = g_dbgfs.size - pos; -#ifdef CONFIG_HAS_WAKELOCK - /* Indicate to the SD Host to avoid going to suspend if internal locks are up */ - if (dhd && 
(wake_lock_active(&dhd->wl_wifi) || - (wake_lock_active(&dhd->wl_wdwake)))) - return 1; -#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) - if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) - return 1; -#endif // endif - return 0; + ret = copy_from_user(&buf, ubuf, sizeof(uint32)); + if (ret == count) + return -EFAULT; + + /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */ + dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf); + + return count; } -int -dhd_os_check_wakelock_all(dhd_pub_t *pub) +loff_t +dhd_debugfs_lseek(struct file *file, loff_t off, int whence) { -#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \ - KERNEL_VERSION(2, 6, 36))) -#if defined(CONFIG_HAS_WAKELOCK) - int l1, l2, l3, l4, l7, l8, l9; - int l5 = 0, l6 = 0; - int c, lock_active; -#endif /* CONFIG_HAS_WAKELOCK */ - dhd_info_t *dhd; + loff_t pos = -1; - if (!pub) { - return 0; - } - dhd = (dhd_info_t *)(pub->info); - if (!dhd) { - return 0; + switch (whence) { + case 0: + pos = off; + break; + case 1: + pos = file->f_pos + off; + break; + case 2: + pos = g_dbgfs.size - off; } -#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */ + return (pos < 0 || pos > g_dbgfs.size) ? 
-EINVAL : (file->f_pos = pos); +} -#ifdef CONFIG_HAS_WAKELOCK - c = dhd->wakelock_counter; - l1 = wake_lock_active(&dhd->wl_wifi); - l2 = wake_lock_active(&dhd->wl_wdwake); - l3 = wake_lock_active(&dhd->wl_rxwake); - l4 = wake_lock_active(&dhd->wl_ctrlwake); - l7 = wake_lock_active(&dhd->wl_evtwake); -#ifdef BCMPCIE_OOB_HOST_WAKE - l5 = wake_lock_active(&dhd->wl_intrwake); -#endif /* BCMPCIE_OOB_HOST_WAKE */ -#ifdef DHD_USE_SCAN_WAKELOCK - l6 = wake_lock_active(&dhd->wl_scanwake); -#endif /* DHD_USE_SCAN_WAKELOCK */ - l8 = wake_lock_active(&dhd->wl_pmwake); - l9 = wake_lock_active(&dhd->wl_txflwake); - lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7 || l8 || l9); +static const struct file_operations dhd_dbg_state_ops = { + .read = dhd_dbg_state_read, + .write = dhd_debugfs_write, + .open = dhd_dbg_state_open, + .llseek = dhd_debugfs_lseek +}; - /* Indicate to the Host to avoid going to suspend if internal locks are up */ - if (lock_active) { - DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d " - "ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d\n", - __FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7, l8, l9)); - return 1; - } -#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) - if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) { - return 1; +static void dhd_dbgfs_create(void) +{ + if (g_dbgfs.debugfs_dir) { + g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir, + NULL, &dhd_dbg_state_ops); } -#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */ - return 0; } -int net_os_wake_unlock(struct net_device *dev) +void dhd_dbgfs_init(dhd_pub_t *dhdp) { - dhd_info_t *dhd = DHD_DEV_INFO(dev); - int ret = 0; + g_dbgfs.dhdp = dhdp; + g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */ - if (dhd) - ret = dhd_os_wake_unlock(&dhd->pub); - return ret; + g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0); + if (IS_ERR(g_dbgfs.debugfs_dir)) { + g_dbgfs.debugfs_dir = NULL; + 
return; + } + + dhd_dbgfs_create(); + + return; } -int dhd_os_wd_wake_lock(dhd_pub_t *pub) +void dhd_dbgfs_remove(void) { - dhd_info_t *dhd = (dhd_info_t *)(pub->info); - unsigned long flags; - int ret = 0; + debugfs_remove(g_dbgfs.debugfs_mem); + debugfs_remove(g_dbgfs.debugfs_dir); - if (dhd) { - spin_lock_irqsave(&dhd->wakelock_spinlock, flags); - if (dhd->wakelock_wd_counter == 0 && !dhd->waive_wakelock) { -#ifdef CONFIG_HAS_WAKELOCK - /* if wakelock_wd_counter was never used : lock it at once */ - wake_lock(&dhd->wl_wdwake); -#endif // endif - } - dhd->wakelock_wd_counter++; - ret = dhd->wakelock_wd_counter; - spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); - } - return ret; + bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs)); } +#endif /* BCMDBGFS */ -int dhd_os_wd_wake_unlock(dhd_pub_t *pub) +#ifdef CUSTOM_SET_CPUCORE +void dhd_set_cpucore(dhd_pub_t *dhd, int set) { - dhd_info_t *dhd = (dhd_info_t *)(pub->info); - unsigned long flags; - int ret = 0; + int e_dpc = 0, e_rxf = 0, retry_set = 0; - if (dhd) { - spin_lock_irqsave(&dhd->wakelock_spinlock, flags); - if (dhd->wakelock_wd_counter > 0) { - dhd->wakelock_wd_counter = 0; - if (!dhd->waive_wakelock) { -#ifdef CONFIG_HAS_WAKELOCK - wake_unlock(&dhd->wl_wdwake); -#endif // endif + if (!(dhd->chan_isvht80)) { + DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80)); + return; + } + + if (DPC_CPUCORE) { + do { + if (set == TRUE) { + e_dpc = set_cpus_allowed_ptr(dhd->current_dpc, + cpumask_of(DPC_CPUCORE)); + } else { + e_dpc = set_cpus_allowed_ptr(dhd->current_dpc, + cpumask_of(PRIMARY_CPUCORE)); } - } - spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + if (retry_set++ > MAX_RETRY_SET_CPUCORE) { + DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc)); + return; + } + if (e_dpc < 0) + OSL_SLEEP(1); + } while (e_dpc < 0); } - return ret; + if (RXF_CPUCORE) { + do { + if (set == TRUE) { + e_rxf = set_cpus_allowed_ptr(dhd->current_rxf, + 
cpumask_of(RXF_CPUCORE)); + } else { + e_rxf = set_cpus_allowed_ptr(dhd->current_rxf, + cpumask_of(PRIMARY_CPUCORE)); + } + if (retry_set++ > MAX_RETRY_SET_CPUCORE) { + DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf)); + return; + } + if (e_rxf < 0) + OSL_SLEEP(1); + } while (e_rxf < 0); + } + DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set)); + + return; } +#endif /* CUSTOM_SET_CPUCORE */ -#ifdef BCMPCIE_OOB_HOST_WAKE -void -dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val) +#ifdef DHD_MCAST_REGEN +/* Get interface specific ap_isolate configuration */ +int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx) { -#ifdef CONFIG_HAS_WAKELOCK - dhd_info_t *dhd = (dhd_info_t *)(pub->info); + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; - if (dhd) { - wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val)); - } -#endif /* CONFIG_HAS_WAKELOCK */ + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + return ifp->mcast_regen_bss_enable; } -void -dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub) +/* Set interface specific mcast_regen configuration */ +int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val) { -#ifdef CONFIG_HAS_WAKELOCK - dhd_info_t *dhd = (dhd_info_t *)(pub->info); + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; - if (dhd) { - /* if wl_intrwake is active, unlock it */ - if (wake_lock_active(&dhd->wl_intrwake)) { - wake_unlock(&dhd->wl_intrwake); - } - } -#endif /* CONFIG_HAS_WAKELOCK */ + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ifp->mcast_regen_bss_enable = val; + + /* Disable rx_pkt_chain feature for interface, if mcast_regen feature + * is enabled + */ + dhd_update_rx_pkt_chainable_state(dhdp, idx); + return BCME_OK; } -#endif /* BCMPCIE_OOB_HOST_WAKE */ +#endif /* DHD_MCAST_REGEN */ -#ifdef DHD_USE_SCAN_WAKELOCK -void -dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val) +/* Get interface specific ap_isolate configuration */ +int dhd_get_ap_isolate(dhd_pub_t *dhdp, 
uint32 idx) { -#ifdef CONFIG_HAS_WAKELOCK - dhd_info_t *dhd = (dhd_info_t *)(pub->info); + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; - if (dhd) { - wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val)); - } -#endif /* CONFIG_HAS_WAKELOCK */ + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + return ifp->ap_isolate; } -void -dhd_os_scan_wake_unlock(dhd_pub_t *pub) +/* Set interface specific ap_isolate configuration */ +int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val) { -#ifdef CONFIG_HAS_WAKELOCK - dhd_info_t *dhd = (dhd_info_t *)(pub->info); + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; - if (dhd) { - /* if wl_scanwake is active, unlock it */ - if (wake_lock_active(&dhd->wl_scanwake)) { - wake_unlock(&dhd->wl_scanwake); - } - } -#endif /* CONFIG_HAS_WAKELOCK */ + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + if (ifp) + ifp->ap_isolate = val; + + return 0; } -#endif /* DHD_USE_SCAN_WAKELOCK */ -/* waive wakelocks for operations such as IOVARs in suspend function, must be closed - * by a paired function call to dhd_wakelock_restore. 
returns current wakelock counter - */ -int dhd_os_wake_lock_waive(dhd_pub_t *pub) +#ifdef DHD_FW_COREDUMP +void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size) { - dhd_info_t *dhd = (dhd_info_t *)(pub->info); - unsigned long flags; - int ret = 0; + unsigned long flags = 0; + dhd_dump_t *dump = NULL; + dhd_info_t *dhd_info = NULL; +#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) + log_dump_type_t type = DLD_BUF_TYPE_ALL; +#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */ - if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { - spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + dhd_info = (dhd_info_t *)dhdp->info; + dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t)); + if (dump == NULL) { + DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__)); + return; + } + dump->buf = buf; + dump->bufsize = size; +#ifdef BCMPCIE + dhd_get_hscb_info(dhdp, (void*)(&dump->hscb_buf), + (uint32 *)(&dump->hscb_bufsize)); +#else /* BCMPCIE */ + dump->hscb_bufsize = 0; +#endif /* BCMPCIE */ - /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */ - if (dhd->waive_wakelock == FALSE) { -#ifdef DHD_TRACE_WAKE_LOCK - if (trace_wklock_onoff) { - STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK); - } -#endif /* DHD_TRACE_WAKE_LOCK */ - /* record current lock status */ - dhd->wakelock_before_waive = dhd->wakelock_counter; - dhd->waive_wakelock = TRUE; +#ifdef DHD_LOG_DUMP + dhd_print_buf_addr(dhdp, "memdump", buf, size); +#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) + /* Print out buffer infomation */ + dhd_log_dump_buf_addr(dhdp, &type); +#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */ +#endif /* DHD_LOG_DUMP */ + + if (dhdp->memdump_enabled == DUMP_MEMONLY) { + BUG_ON(1); + } + +#if defined(DEBUG_DNGL_INIT_FAIL) || defined(DHD_ERPOM) || \ + defined(DNGL_AXI_ERROR_LOGGING) + if ( +#if defined(DEBUG_DNGL_INIT_FAIL) + (dhdp->memdump_type == DUMP_TYPE_DONGLE_INIT_FAILURE) || +#endif /* DEBUG_DNGL_INIT_FAIL */ +#ifdef DHD_ERPOM + (dhdp->memdump_type == 
DUMP_TYPE_DUE_TO_BT) || +#endif /* DHD_ERPOM */ +#ifdef DNGL_AXI_ERROR_LOGGING + (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT) || +#endif /* DNGL_AXI_ERROR_LOGGING */ + FALSE) + { +#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) && defined(DHD_LOG_DUMP) + log_dump_type_t *flush_type = NULL; +#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL && DHD_LOG_DUMP */ + dhd_info->scheduled_memdump = FALSE; + (void)dhd_mem_dump((void *)dhdp->info, (void *)dump, 0); +#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) && defined(DHD_LOG_DUMP) + /* for dongle init fail cases, 'dhd_mem_dump' does + * not call 'dhd_log_dump', so call it here. + */ + flush_type = MALLOCZ(dhdp->osh, + sizeof(log_dump_type_t)); + if (flush_type) { + *flush_type = DLD_BUF_TYPE_ALL; + DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__)); + dhd_log_dump(dhdp->info, flush_type, 0); } - ret = dhd->wakelock_wd_counter; - spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); +#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL && DHD_LOG_DUMP */ + return; } - return ret; +#endif /* DEBUG_DNGL_INIT_FAIL || DHD_ERPOM || DNGL_AXI_ERROR_LOGGING */ + + dhd_info->scheduled_memdump = TRUE; + /* bus busy bit for mem dump will be cleared in mem dump + * work item context, after mem dump file is written + */ + DHD_GENERAL_LOCK(dhdp, flags); + DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); + DHD_ERROR(("%s: scheduling mem dump.. 
\n", __FUNCTION__)); + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump, + DHD_WQ_WORK_SOC_RAM_DUMP, (void *)dhd_mem_dump, DHD_WQ_WORK_PRIORITY_HIGH); } -int dhd_os_wake_lock_restore(dhd_pub_t *pub) +static int +dhd_mem_dump(void *handle, void *event_info, u8 event) { - dhd_info_t *dhd = (dhd_info_t *)(pub->info); - unsigned long flags; + dhd_info_t *dhd = handle; + dhd_pub_t *dhdp = NULL; + unsigned long flags = 0; int ret = 0; + dhd_dump_t *dump = NULL; - if (!dhd) - return 0; - if ((dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) == 0) - return 0; + DHD_ERROR(("%s: ENTER, memdump type %u\n", __FUNCTION__, dhd->pub.memdump_type)); - spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return -ENODEV; + } - /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */ - if (!dhd->waive_wakelock) + dhdp = &dhd->pub; + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return -ENODEV; + } + + DHD_GENERAL_LOCK(dhdp, flags); + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) { + DHD_GENERAL_UNLOCK(dhdp, flags); + DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__)); + ret = -ENODEV; goto exit; + } + DHD_GENERAL_UNLOCK(dhdp, flags); - dhd->waive_wakelock = FALSE; - /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore, - * we need to make it up by calling wake_lock or pm_stay_awake. 
or if somebody releases - * the lock in between, do the same by calling wake_unlock or pm_relax +#ifdef DHD_SSSR_DUMP + if (dhdp->sssr_inited && dhdp->collect_sssr) { + dhdpcie_sssr_dump(dhdp); + } + dhdp->collect_sssr = FALSE; +#endif /* DHD_SSSR_DUMP */ +#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) + dhd_wait_for_file_dump(dhdp); +#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */ + + dump = (dhd_dump_t *)event_info; + if (!dump) { + DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__)); + ret = -EINVAL; + goto exit; + } + + /* + * If kernel does not have file write access enabled + * then skip writing dumps to files. + * The dumps will be pushed to HAL layer which will + * write into files */ -#ifdef DHD_TRACE_WAKE_LOCK - if (trace_wklock_onoff) { - STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK); +#ifdef DHD_DUMP_FILE_WRITE_FROM_KERNEL + + if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) { + DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__)); +#ifdef DHD_DEBUG_UART + dhd->pub.memdump_success = FALSE; +#endif /* DHD_DEBUG_UART */ } -#endif /* DHD_TRACE_WAKE_LOCK */ - if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) { -#ifdef CONFIG_HAS_WAKELOCK - wake_lock(&dhd->wl_wifi); -#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) - dhd_bus_dev_pm_stay_awake(&dhd->pub); -#endif // endif - } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) { -#ifdef CONFIG_HAS_WAKELOCK - wake_unlock(&dhd->wl_wifi); -#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) - dhd_bus_dev_pm_relax(&dhd->pub); -#endif // endif + /* directly call dhd_log_dump for debug_dump collection from the mem_dump work queue + * context, no need to schedule another work queue for log dump. In case of + * user initiated DEBUG_DUMP wpa_cli command (DUMP_TYPE_BY_SYSDUMP), + * cfg layer is itself scheduling the log_dump work queue. + * that path is not disturbed. 
If 'dhd_mem_dump' is called directly then we will not + * collect debug_dump as it may be called from non-sleepable context. + */ +#ifdef DHD_LOG_DUMP + if (dhd->scheduled_memdump && + dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) { + log_dump_type_t *flush_type = MALLOCZ(dhdp->osh, + sizeof(log_dump_type_t)); + if (flush_type) { + *flush_type = DLD_BUF_TYPE_ALL; + DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__)); + dhd_log_dump(dhd, flush_type, 0); + } } - dhd->wakelock_before_waive = 0; -exit: - ret = dhd->wakelock_wd_counter; - spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); - return ret; -} +#endif /* DHD_LOG_DUMP */ -void dhd_os_wake_lock_init(struct dhd_info *dhd) -{ - DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__)); - dhd->wakelock_counter = 0; - dhd->wakelock_rx_timeout_enable = 0; - dhd->wakelock_ctrl_timeout_enable = 0; - /* wakelocks prevent a system from going into a low power state */ -#ifdef CONFIG_HAS_WAKELOCK - // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry - wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake"); - wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake"); - wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake"); - wake_lock_init(&dhd->wl_pmwake, WAKE_LOCK_SUSPEND, "wlan_pm_wake"); - wake_lock_init(&dhd->wl_txflwake, WAKE_LOCK_SUSPEND, "wlan_txfl_wake"); -#ifdef BCMPCIE_OOB_HOST_WAKE - wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake"); -#endif /* BCMPCIE_OOB_HOST_WAKE */ -#ifdef DHD_USE_SCAN_WAKELOCK - wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake"); -#endif /* DHD_USE_SCAN_WAKELOCK */ -#endif /* CONFIG_HAS_WAKELOCK */ -#ifdef DHD_TRACE_WAKE_LOCK - dhd_wk_lock_trace_init(dhd); -#endif /* DHD_TRACE_WAKE_LOCK */ -} + clear_debug_dump_time(dhdp->debug_dump_time_str); -void dhd_os_wake_lock_destroy(struct dhd_info *dhd) -{ - DHD_TRACE(("%s: deinit 
wake_lock_counters\n", __FUNCTION__)); -#ifdef CONFIG_HAS_WAKELOCK - dhd->wakelock_counter = 0; - dhd->wakelock_rx_timeout_enable = 0; - dhd->wakelock_ctrl_timeout_enable = 0; - // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry - wake_lock_destroy(&dhd->wl_rxwake); - wake_lock_destroy(&dhd->wl_ctrlwake); - wake_lock_destroy(&dhd->wl_evtwake); - wake_lock_destroy(&dhd->wl_pmwake); - wake_lock_destroy(&dhd->wl_txflwake); -#ifdef BCMPCIE_OOB_HOST_WAKE - wake_lock_destroy(&dhd->wl_intrwake); -#endif /* BCMPCIE_OOB_HOST_WAKE */ -#ifdef DHD_USE_SCAN_WAKELOCK - wake_lock_destroy(&dhd->wl_scanwake); -#endif /* DHD_USE_SCAN_WAKELOCK */ -#ifdef DHD_TRACE_WAKE_LOCK - dhd_wk_lock_trace_deinit(dhd); -#endif /* DHD_TRACE_WAKE_LOCK */ -#endif /* CONFIG_HAS_WAKELOCK */ -} + /* before calling bug on, wait for other logs to be dumped. + * we cannot wait in case dhd_mem_dump is called directly + * as it may not be in a sleepable context + */ + if (dhd->scheduled_memdump) { + uint bitmask = 0; + int timeleft = 0; +#ifdef DHD_SSSR_DUMP + bitmask |= DHD_BUS_BUSY_IN_SSSRDUMP; +#endif // endif + if (bitmask != 0) { + DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n", + __FUNCTION__, dhdp->dhd_bus_busy_state)); + timeleft = dhd_os_busbusy_wait_bitmask(dhdp, + &dhdp->dhd_bus_busy_state, bitmask, 0); + if ((timeleft == 0) || (timeleft == 1)) { + DHD_ERROR(("%s: Timed out dhd_bus_busy_state=0x%x\n", + __FUNCTION__, dhdp->dhd_bus_busy_state)); + } + } + } -bool dhd_os_check_if_up(dhd_pub_t *pub) -{ - if (!pub) - return FALSE; - return pub->up; -} + if (dump->hscb_buf && dump->hscb_bufsize) { + DHD_ERROR(("%s: write HSCB dump... 
\n", __FUNCTION__)); + if (write_dump_to_file(&dhd->pub, dump->hscb_buf, + dump->hscb_bufsize, "mem_dump_hscb")) { + DHD_ERROR(("%s: writing HSCB dump to the file failed\n", __FUNCTION__)); +#ifdef DHD_DEBUG_UART + dhd->pub.memdump_success = FALSE; +#endif /* DHD_DEBUG_UART */ + } + } +#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */ -/* function to collect firmware, chip id and chip version info */ -void dhd_set_version_info(dhd_pub_t *dhdp, char *fw) -{ - int i; + DHD_ERROR(("%s: memdump type %u\n", __FUNCTION__, dhd->pub.memdump_type)); + if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON && +#ifdef DHD_LOG_DUMP + dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP && +#endif /* DHD_LOG_DUMP */ + dhd->pub.memdump_type != DUMP_TYPE_BY_USER && +#ifdef DHD_DEBUG_UART + dhd->pub.memdump_success == TRUE && +#endif /* DHD_DEBUG_UART */ +#ifdef DNGL_EVENT_SUPPORT + dhd->pub.memdump_type != DUMP_TYPE_DONGLE_HOST_EVENT && +#endif /* DNGL_EVENT_SUPPORT */ + dhd->pub.memdump_type != DUMP_TYPE_CFG_VENDOR_TRIGGERED) { - i = snprintf(info_string, sizeof(info_string), - " Driver: %s\n Firmware: %s\n CLM: %s ", EPI_VERSION_STR, fw, clm_version); - printf("%s\n", info_string); +#ifdef SHOW_LOGTRACE + /* Wait till logtrace context is flushed */ + dhd_flush_logtrace_process(dhd); +#endif /* SHOW_LOGTRACE */ - if (!dhdp) - return; + DHD_ERROR(("%s: call BUG_ON \n", __FUNCTION__)); + //BUG_ON(1); + } + DHD_ERROR(("%s: No BUG ON, memdump type %u \n", __FUNCTION__, dhd->pub.memdump_type)); - i = snprintf(&info_string[i], sizeof(info_string) - i, - "\n Chip: %x Rev %x", dhd_conf_get_chip(dhdp), - dhd_conf_get_chiprev(dhdp)); +exit: + if (dump) { + MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t)); + } + DHD_GENERAL_LOCK(dhdp, flags); + DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(&dhd->pub); + dhd_os_busbusy_wake(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); + dhd->scheduled_memdump = FALSE; + if (dhdp->hang_was_pending) { + DHD_ERROR(("%s: Send pending HANG event...\n", __FUNCTION__)); + 
dhd_os_send_hang_message(dhdp); + dhdp->hang_was_pending = 0; + } + DHD_ERROR(("%s: EXIT \n", __FUNCTION__)); + return ret; } +#endif /* DHD_FW_COREDUMP */ -int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd) +#ifdef DHD_SSSR_DUMP +int +dhd_sssr_dump_dig_buf_before(void *dev, const void *user_buf, uint32 len) { - int ifidx; - int ret = 0; - dhd_info_t *dhd = NULL; + dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhd_pub_t *dhdp = &dhd_info->pub; + int pos = 0, ret = BCME_ERROR; + uint dig_buf_size = 0; - if (!net || !DEV_PRIV(net)) { - DHD_ERROR(("%s invalid parameter net %p dev_priv %p\n", - __FUNCTION__, net, DEV_PRIV(net))); - return -EINVAL; + if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) { + dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size; + } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) && + dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) { + dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size; } - dhd = DHD_DEV_INFO(net); - if (!dhd) - return -EINVAL; - - ifidx = dhd_net2idx(dhd, net); - if (ifidx == DHD_BAD_IF) { - DHD_ERROR(("%s bad ifidx\n", __FUNCTION__)); - return -ENODEV; + if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) { + ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_before, + NULL, user_buf, dig_buf_size, &pos); } + return ret; +} - DHD_OS_WAKE_LOCK(&dhd->pub); - DHD_PERIM_LOCK(&dhd->pub); - - ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len); - dhd_check_hang(net, &dhd->pub, ret); - - DHD_PERIM_UNLOCK(&dhd->pub); - DHD_OS_WAKE_UNLOCK(&dhd->pub); +int +dhd_sssr_dump_dig_buf_after(void *dev, const void *user_buf, uint32 len) +{ + dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhd_pub_t *dhdp = &dhd_info->pub; + int pos = 0, ret = BCME_ERROR; + uint dig_buf_size = 0; + + if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) { + dig_buf_size = 
dhdp->sssr_reg_info.vasip_regs.vasip_sr_size; + } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) && + dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) { + dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size; + } + if (dhdp->sssr_dig_buf_after) { + ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_after, + NULL, user_buf, dig_buf_size, &pos); + } return ret; } -bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret) +int +dhd_sssr_dump_d11_buf_before(void *dev, const void *user_buf, uint32 len, int core) { - struct net_device *net; + dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhd_pub_t *dhdp = &dhd_info->pub; + int pos = 0, ret = BCME_ERROR; - net = dhd_idx2net(dhdp, ifidx); - if (!net) { - DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx)); - return -EINVAL; + if (dhdp->sssr_d11_before[core] && + dhdp->sssr_d11_outofreset[core] && + (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) { + ret = dhd_export_debug_data((char *)dhdp->sssr_d11_before[core], + NULL, user_buf, len, &pos); } - - return dhd_check_hang(net, dhdp, ret); + return ret; } -/* Return instance */ -int dhd_get_instance(dhd_pub_t *dhdp) +int +dhd_sssr_dump_d11_buf_after(void *dev, const void *user_buf, uint32 len, int core) { - return dhdp->info->unit; + dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhd_pub_t *dhdp = &dhd_info->pub; + int pos = 0, ret = BCME_ERROR; + + if (dhdp->sssr_d11_after[core] && + dhdp->sssr_d11_outofreset[core]) { + ret = dhd_export_debug_data((char *)dhdp->sssr_d11_after[core], + NULL, user_buf, len, &pos); + } + return ret; } -#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP) -#define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */ -int dhd_deepsleep(struct net_device *dev, int flag) +static void +dhd_sssr_dump_to_file(dhd_info_t* dhdinfo) { - char iovbuf[20]; - uint powervar = 0; - dhd_info_t *dhd; + dhd_info_t *dhd = dhdinfo; 
dhd_pub_t *dhdp; - int cnt = 0; - int ret = 0; + int i; + char before_sr_dump[128]; + char after_sr_dump[128]; + unsigned long flags = 0; + uint dig_buf_size = 0; - dhd = DHD_DEV_INFO(dev); - dhdp = &dhd->pub; + DHD_ERROR(("%s: ENTER \n", __FUNCTION__)); - switch (flag) { - case 1 : /* Deepsleep on */ - DHD_ERROR(("[WiFi] Deepsleep On\n")); - /* give some time to sysioc_work before deepsleep */ - OSL_SLEEP(200); -#ifdef PKT_FILTER_SUPPORT - /* disable pkt filter */ - dhd_enable_packet_filter(0, dhdp); -#endif /* PKT_FILTER_SUPPORT */ - /* Disable MPC */ - powervar = 0; - ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL, - 0, TRUE); + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } - /* Enable Deepsleep */ - powervar = 1; - ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar, sizeof(powervar), - NULL, 0, TRUE); - break; + dhdp = &dhd->pub; - case 0: /* Deepsleep Off */ - DHD_ERROR(("[WiFi] Deepsleep Off\n")); + DHD_GENERAL_LOCK(dhdp, flags); + DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp); + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) { + DHD_GENERAL_UNLOCK(dhdp, flags); + DHD_ERROR(("%s: bus is down! can't collect sssr dump. 
\n", __FUNCTION__)); + goto exit; + } + DHD_GENERAL_UNLOCK(dhdp, flags); - /* Disable Deepsleep */ - for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) { - powervar = 0; - ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar, - sizeof(powervar), NULL, 0, TRUE); + for (i = 0; i < MAX_NUM_D11CORES; i++) { + /* Init file name */ + memset(before_sr_dump, 0, sizeof(before_sr_dump)); + memset(after_sr_dump, 0, sizeof(after_sr_dump)); - ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar, - sizeof(powervar), iovbuf, sizeof(iovbuf), FALSE); - if (ret < 0) { - DHD_ERROR(("the error of dhd deepsleep status" - " ret value :%d\n", ret)); - } else { - if (!(*(int *)iovbuf)) { - DHD_ERROR(("deepsleep mode is 0," - " count: %d\n", cnt)); - break; - } - } + snprintf(before_sr_dump, sizeof(before_sr_dump), "%s_%d_%s", + "sssr_dump_core", i, "before_SR"); + snprintf(after_sr_dump, sizeof(after_sr_dump), "%s_%d_%s", + "sssr_dump_core", i, "after_SR"); + + if (dhdp->sssr_d11_before[i] && dhdp->sssr_d11_outofreset[i] && + (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) { + if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_before[i], + dhdp->sssr_reg_info.mac_regs[i].sr_size, before_sr_dump)) { + DHD_ERROR(("%s: writing SSSR MAIN dump before to the file failed\n", + __FUNCTION__)); + } + } + if (dhdp->sssr_d11_after[i] && dhdp->sssr_d11_outofreset[i]) { + if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_after[i], + dhdp->sssr_reg_info.mac_regs[i].sr_size, after_sr_dump)) { + DHD_ERROR(("%s: writing SSSR AUX dump after to the file failed\n", + __FUNCTION__)); } + } + } - /* Enable MPC */ - powervar = 1; - ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL, - 0, TRUE); - break; + if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) { + dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size; + } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) && + dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) { + dig_buf_size = 
dhdp->sssr_reg_info.dig_mem_info.dig_sr_size; } - return 0; -} -#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */ + if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) { + if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_before, + dig_buf_size, "sssr_dump_dig_before_SR")) { + DHD_ERROR(("%s: writing SSSR Dig dump before to the file failed\n", + __FUNCTION__)); + } + } -#ifdef PROP_TXSTATUS + if (dhdp->sssr_dig_buf_after) { + if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_after, + dig_buf_size, "sssr_dump_dig_after_SR")) { + DHD_ERROR(("%s: writing SSSR Dig VASIP dump after to the file failed\n", + __FUNCTION__)); + } + } -void dhd_wlfc_plat_init(void *dhd) -{ -#ifdef USE_DYNAMIC_F2_BLKSIZE - dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY); -#endif /* USE_DYNAMIC_F2_BLKSIZE */ - return; +exit: + DHD_GENERAL_LOCK(dhdp, flags); + DHD_BUS_BUSY_CLEAR_IN_SSSRDUMP(dhdp); + dhd_os_busbusy_wake(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); } -void dhd_wlfc_plat_deinit(void *dhd) +void +dhd_write_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode) { -#ifdef USE_DYNAMIC_F2_BLKSIZE - dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize); -#endif /* USE_DYNAMIC_F2_BLKSIZE */ + dhdp->sssr_dump_mode = dump_mode; + + /* + * If kernel does not have file write access enabled + * then skip writing dumps to files. + * The dumps will be pushed to HAL layer which will + * write into files + */ +#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) return; +#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */ + + /* + * dhd_mem_dump -> dhd_sssr_dump -> dhd_write_sssr_dump + * Without workqueue - + * DUMP_TYPE_DONGLE_INIT_FAILURE/DUMP_TYPE_DUE_TO_BT/DUMP_TYPE_SMMU_FAULT + * : These are called in own handler, not in the interrupt context + * With workqueue - all other DUMP_TYPEs : dhd_mem_dump is called in workqueue + * Thus, it doesn't neeed to dump SSSR in workqueue + */ + DHD_ERROR(("%s: writing sssr dump to file... 
\n", __FUNCTION__)); + dhd_sssr_dump_to_file(dhdp->info); + } +#endif /* DHD_SSSR_DUMP */ -bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx) +#ifdef DHD_LOG_DUMP +static void +dhd_log_dump(void *handle, void *event_info, u8 event) { -#ifdef SKIP_WLFC_ON_CONCURRENT + dhd_info_t *dhd = handle; + log_dump_type_t *type = (log_dump_type_t *)event_info; + + if (!dhd || !type) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } #ifdef WL_CFG80211 - struct net_device * net = dhd_idx2net((dhd_pub_t *)dhdp, idx); - if (net) - /* enable flow control in vsdb mode */ - return !(wl_cfg80211_is_concurrent_mode(net)); -#else - return TRUE; /* skip flow control */ -#endif /* WL_CFG80211 */ + /* flush the fw side logs */ + wl_flush_fw_log_buffer(dhd_linux_get_primary_netdev(&dhd->pub), + FW_LOGSET_MASK_ALL); +#endif // endif + /* there are currently 3 possible contexts from which + * log dump can be scheduled - + * 1.TRAP 2.supplicant DEBUG_DUMP pvt driver command + * 3.HEALTH CHECK event + * The concise debug info buffer is a shared resource + * and in case a trap is one of the contexts then both the + * scheduled work queues need to run because trap data is + * essential for debugging. Hence a mutex lock is acquired + * before calling do_dhd_log_dump(). + */ + DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__)); + dhd_os_logdump_lock(&dhd->pub); + DHD_OS_WAKE_LOCK(&dhd->pub); + if (do_dhd_log_dump(&dhd->pub, type) != BCME_OK) { + DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__)); + } + DHD_OS_WAKE_UNLOCK(&dhd->pub); + dhd_os_logdump_unlock(&dhd->pub); +} -#else - return FALSE; -#endif /* SKIP_WLFC_ON_CONCURRENT */ - return FALSE; +void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type) +{ + DHD_ERROR(("%s: scheduling log dump.. 
\n", __FUNCTION__)); + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, + type, DHD_WQ_WORK_DHD_LOG_DUMP, + dhd_log_dump, DHD_WQ_WORK_PRIORITY_HIGH); } -#endif /* PROP_TXSTATUS */ -#ifdef BCMDBGFS -#include +static void +dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size) +{ +#if 0 +#ifdef DHD_FW_COREDUMP + if ((dhdp->memdump_enabled == DUMP_MEMONLY) || + (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON) || + (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT)) +#else + if (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT) +#endif + { +#if defined(CONFIG_ARM64) + DHD_ERROR(("-------- %s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n", + name, (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size)); +#elif defined(__ARM_ARCH_7A__) + DHD_ERROR(("-------- %s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n", + name, (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size)); +#endif /* __ARM_ARCH_7A__ */ + } +#endif +} -typedef struct dhd_dbgfs { - struct dentry *debugfs_dir; - struct dentry *debugfs_mem; - dhd_pub_t *dhdp; - uint32 size; -} dhd_dbgfs_t; +static void +dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type) +{ + int i; + unsigned long wr_size = 0; + struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0]; + size_t log_size = 0; + char buf_name[DHD_PRINT_BUF_NAME_LEN]; + dhd_dbg_ring_t *ring = NULL; -dhd_dbgfs_t g_dbgfs; + BCM_REFERENCE(ring); -extern uint32 dhd_readregl(void *bp, uint32 addr); -extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data); + for (i = 0; i < DLD_BUFFER_NUM; i++) { + dld_buf = &g_dld_buf[i]; + log_size = (unsigned long)dld_buf->max - + (unsigned long)dld_buf->buffer; + if (dld_buf->wraparound) { + wr_size = log_size; + } else { + wr_size = (unsigned long)dld_buf->present - + (unsigned long)dld_buf->front; + } + scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d]", i); + dhd_print_buf_addr(dhdp, buf_name, dld_buf, dld_buf_size[i]); + scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] buffer", i); + dhd_print_buf_addr(dhdp, 
buf_name, dld_buf->buffer, wr_size); + scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] present", i); + dhd_print_buf_addr(dhdp, buf_name, dld_buf->present, wr_size); + scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] front", i); + dhd_print_buf_addr(dhdp, buf_name, dld_buf->front, wr_size); + } -static int -dhd_dbg_state_open(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - return 0; -} +#ifdef EWP_ECNTRS_LOGGING + /* periodic flushing of ecounters is NOT supported */ + if (*type == DLD_BUF_TYPE_ALL && + logdump_ecntr_enable && + dhdp->ecntr_dbg_ring) { -static ssize_t -dhd_dbg_state_read(struct file *file, char __user *ubuf, - size_t count, loff_t *ppos) -{ - ssize_t rval; - uint32 tmp; - loff_t pos = *ppos; - size_t ret; + ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring; + dhd_print_buf_addr(dhdp, "ecntr_dbg_ring", ring, LOG_DUMP_ECNTRS_MAX_BUFSIZE); + dhd_print_buf_addr(dhdp, "ecntr_dbg_ring ring_buf", ring->ring_buf, + LOG_DUMP_ECNTRS_MAX_BUFSIZE); + } +#endif /* EWP_ECNTRS_LOGGING */ - if (pos < 0) - return -EINVAL; - if (pos >= g_dbgfs.size || !count) - return 0; - if (count > g_dbgfs.size - pos) - count = g_dbgfs.size - pos; +#ifdef DHD_STATUS_LOGGING + if (dhdp->statlog) { + dhd_print_buf_addr(dhdp, "statlog_logbuf", dhd_statlog_get_logbuf(dhdp), + dhd_statlog_get_logbuf_len(dhdp)); + } +#endif /* DHD_STATUS_LOGGING */ - /* Basically enforce aligned 4 byte reads. 
It's up to the user to work out the details */ - tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3)); +#ifdef EWP_RTT_LOGGING + /* periodic flushing of ecounters is NOT supported */ + if (*type == DLD_BUF_TYPE_ALL && + logdump_rtt_enable && + dhdp->rtt_dbg_ring) { - ret = copy_to_user(ubuf, &tmp, 4); - if (ret == count) - return -EFAULT; + ring = (dhd_dbg_ring_t *)dhdp->rtt_dbg_ring; + dhd_print_buf_addr(dhdp, "rtt_dbg_ring", ring, LOG_DUMP_RTT_MAX_BUFSIZE); + dhd_print_buf_addr(dhdp, "rtt_dbg_ring ring_buf", ring->ring_buf, + LOG_DUMP_RTT_MAX_BUFSIZE); + } +#endif /* EWP_RTT_LOGGING */ - count -= ret; - *ppos = pos + count; - rval = count; +#ifdef BCMPCIE + if (dhdp->dongle_trap_occured && dhdp->extended_trap_data) { + dhd_print_buf_addr(dhdp, "extended_trap_data", dhdp->extended_trap_data, + BCMPCIE_EXT_TRAP_DATA_MAXLEN); + } +#endif /* BCMPCIE */ - return rval; +#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT) + /* if health check event was received */ + if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) { + dhd_print_buf_addr(dhdp, "health_chk_event_data", dhdp->health_chk_event_data, + HEALTH_CHK_BUF_SIZE); + } +#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */ + + /* append the concise debug information */ + if (dhdp->concise_dbg_buf) { + dhd_print_buf_addr(dhdp, "concise_dbg_buf", dhdp->concise_dbg_buf, + CONCISE_DUMP_BUFLEN); + } } -static ssize_t -dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) +#ifdef DHD_SSSR_DUMP +int +dhdpcie_sssr_dump_get_before_after_len(dhd_pub_t *dhd, uint32 *arr_len) { - loff_t pos = *ppos; - size_t ret; - uint32 buf; + int i = 0; - if (pos < 0) - return -EINVAL; - if (pos >= g_dbgfs.size || !count) - return 0; - if (count > g_dbgfs.size - pos) - count = g_dbgfs.size - pos; + DHD_ERROR(("%s\n", __FUNCTION__)); - ret = copy_from_user(&buf, ubuf, sizeof(uint32)); - if (ret == count) - return -EFAULT; + /* core 0 */ + i = 0; + if (dhd->sssr_d11_before[i] && 
dhd->sssr_d11_outofreset[i] && + (dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) { + arr_len[SSSR_C0_D11_BEFORE] = (dhd->sssr_reg_info.mac_regs[i].sr_size); + DHD_ERROR(("%s: arr_len[SSSR_C0_D11_BEFORE] : %d\n", __FUNCTION__, + arr_len[SSSR_C0_D11_BEFORE])); +#ifdef DHD_LOG_DUMP + dhd_print_buf_addr(dhd, "SSSR_C0_D11_BEFORE", + dhd->sssr_d11_before[i], arr_len[SSSR_C0_D11_BEFORE]); +#endif /* DHD_LOG_DUMP */ + } + if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) { + arr_len[SSSR_C0_D11_AFTER] = (dhd->sssr_reg_info.mac_regs[i].sr_size); + DHD_ERROR(("%s: arr_len[SSSR_C0_D11_AFTER] : %d\n", __FUNCTION__, + arr_len[SSSR_C0_D11_AFTER])); +#ifdef DHD_LOG_DUMP + dhd_print_buf_addr(dhd, "SSSR_C0_D11_AFTER", + dhd->sssr_d11_after[i], arr_len[SSSR_C0_D11_AFTER]); +#endif /* DHD_LOG_DUMP */ + } - /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */ - dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf); + /* core 1 */ + i = 1; + if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] && + (dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) { + arr_len[SSSR_C1_D11_BEFORE] = (dhd->sssr_reg_info.mac_regs[i].sr_size); + DHD_ERROR(("%s: arr_len[SSSR_C1_D11_BEFORE] : %d\n", __FUNCTION__, + arr_len[SSSR_C1_D11_BEFORE])); +#ifdef DHD_LOG_DUMP + dhd_print_buf_addr(dhd, "SSSR_C1_D11_BEFORE", + dhd->sssr_d11_before[i], arr_len[SSSR_C1_D11_BEFORE]); +#endif /* DHD_LOG_DUMP */ + } + if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) { + arr_len[SSSR_C1_D11_AFTER] = (dhd->sssr_reg_info.mac_regs[i].sr_size); + DHD_ERROR(("%s: arr_len[SSSR_C1_D11_AFTER] : %d\n", __FUNCTION__, + arr_len[SSSR_C1_D11_AFTER])); +#ifdef DHD_LOG_DUMP + dhd_print_buf_addr(dhd, "SSSR_C1_D11_AFTER", + dhd->sssr_d11_after[i], arr_len[SSSR_C1_D11_AFTER]); +#endif /* DHD_LOG_DUMP */ + } - return count; + if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) { + arr_len[SSSR_DIG_BEFORE] = (dhd->sssr_reg_info.vasip_regs.vasip_sr_size); + arr_len[SSSR_DIG_AFTER] = 
(dhd->sssr_reg_info.vasip_regs.vasip_sr_size); + DHD_ERROR(("%s: arr_len[SSSR_DIG_BEFORE] : %d\n", __FUNCTION__, + arr_len[SSSR_DIG_BEFORE])); + DHD_ERROR(("%s: arr_len[SSSR_DIG_AFTER] : %d\n", __FUNCTION__, + arr_len[SSSR_DIG_AFTER])); +#ifdef DHD_LOG_DUMP + if (dhd->sssr_dig_buf_before) { + dhd_print_buf_addr(dhd, "SSSR_DIG_BEFORE", + dhd->sssr_dig_buf_before, arr_len[SSSR_DIG_BEFORE]); + } + if (dhd->sssr_dig_buf_after) { + dhd_print_buf_addr(dhd, "SSSR_DIG_AFTER", + dhd->sssr_dig_buf_after, arr_len[SSSR_DIG_AFTER]); + } +#endif /* DHD_LOG_DUMP */ + } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) && + dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) { + arr_len[SSSR_DIG_BEFORE] = (dhd->sssr_reg_info.dig_mem_info.dig_sr_size); + arr_len[SSSR_DIG_AFTER] = (dhd->sssr_reg_info.dig_mem_info.dig_sr_size); + DHD_ERROR(("%s: arr_len[SSSR_DIG_BEFORE] : %d\n", __FUNCTION__, + arr_len[SSSR_DIG_BEFORE])); + DHD_ERROR(("%s: arr_len[SSSR_DIG_AFTER] : %d\n", __FUNCTION__, + arr_len[SSSR_DIG_AFTER])); +#ifdef DHD_LOG_DUMP + if (dhd->sssr_dig_buf_before) { + dhd_print_buf_addr(dhd, "SSSR_DIG_BEFORE", + dhd->sssr_dig_buf_before, arr_len[SSSR_DIG_BEFORE]); + } + if (dhd->sssr_dig_buf_after) { + dhd_print_buf_addr(dhd, "SSSR_DIG_AFTER", + dhd->sssr_dig_buf_after, arr_len[SSSR_DIG_AFTER]); + } +#endif /* DHD_LOG_DUMP */ + } + return BCME_OK; } -loff_t -dhd_debugfs_lseek(struct file *file, loff_t off, int whence) +void +dhd_nla_put_sssr_dump_len(void *ndev, uint32 *arr_len) { - loff_t pos = -1; + dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev); + dhd_pub_t *dhdp = &dhd_info->pub; - switch (whence) { - case 0: - pos = off; - break; - case 1: - pos = file->f_pos + off; - break; - case 2: - pos = g_dbgfs.size - off; + if (dhdp->sssr_dump_collected) { + dhdpcie_sssr_dump_get_before_after_len(dhdp, arr_len); } - return (pos < 0 || pos > g_dbgfs.size) ? 
-EINVAL : (file->f_pos = pos); } +#endif /* DHD_SSSR_DUMP */ -static const struct file_operations dhd_dbg_state_ops = { - .read = dhd_dbg_state_read, - .write = dhd_debugfs_write, - .open = dhd_dbg_state_open, - .llseek = dhd_debugfs_lseek -}; - -static void dhd_dbgfs_create(void) +uint32 +dhd_get_time_str_len() { - if (g_dbgfs.debugfs_dir) { - g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir, - NULL, &dhd_dbg_state_ops); - } + char *ts = NULL, time_str[128]; + + ts = dhd_log_dump_get_timestamp(); + snprintf(time_str, sizeof(time_str), + "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts); + return strlen(time_str); } -void dhd_dbgfs_init(dhd_pub_t *dhdp) +#if defined(BCMPCIE) +uint32 +dhd_get_ext_trap_len(void *ndev, dhd_pub_t *dhdp) { - g_dbgfs.dhdp = dhdp; - g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */ + int length = 0; + log_dump_section_hdr_t sec_hdr; + dhd_info_t *dhd_info; - g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0); - if (IS_ERR(g_dbgfs.debugfs_dir)) { - g_dbgfs.debugfs_dir = NULL; - return; + if (ndev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev); + dhdp = &dhd_info->pub; } - dhd_dbgfs_create(); + if (!dhdp) + return length; - return; + if (dhdp->extended_trap_data) { + length = (strlen(EXT_TRAP_LOG_HDR) + + sizeof(sec_hdr) + BCMPCIE_EXT_TRAP_DATA_MAXLEN); + } + return length; } +#endif -void dhd_dbgfs_remove(void) +#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT) +uint32 +dhd_get_health_chk_len(void *ndev, dhd_pub_t *dhdp) { - debugfs_remove(g_dbgfs.debugfs_mem); - debugfs_remove(g_dbgfs.debugfs_dir); + int length = 0; + log_dump_section_hdr_t sec_hdr; + dhd_info_t *dhd_info; - bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs)); + if (ndev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev); + dhdp = &dhd_info->pub; + } + + if (!dhdp) + return length; + + if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) { + length = 
(strlen(HEALTH_CHK_LOG_HDR) + + sizeof(sec_hdr) + HEALTH_CHK_BUF_SIZE); + } + return length; } -#endif /* BCMDBGFS */ +#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */ -#ifdef CUSTOM_SET_CPUCORE -void dhd_set_cpucore(dhd_pub_t *dhd, int set) +uint32 +dhd_get_dhd_dump_len(void *ndev, dhd_pub_t *dhdp) { - int e_dpc = 0, e_rxf = 0, retry_set = 0; + int length = 0; + log_dump_section_hdr_t sec_hdr; + dhd_info_t *dhd_info; + uint32 remain_len = 0; - if (!(dhd->chan_isvht80)) { - DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80)); - return; + if (ndev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev); + dhdp = &dhd_info->pub; } - if (DPC_CPUCORE) { - do { - if (set == TRUE) { - e_dpc = set_cpus_allowed_ptr(dhd->current_dpc, - cpumask_of(DPC_CPUCORE)); - } else { - e_dpc = set_cpus_allowed_ptr(dhd->current_dpc, - cpumask_of(PRIMARY_CPUCORE)); - } - if (retry_set++ > MAX_RETRY_SET_CPUCORE) { - DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc)); - return; - } - if (e_dpc < 0) - OSL_SLEEP(1); - } while (e_dpc < 0); - } - if (RXF_CPUCORE) { - do { - if (set == TRUE) { - e_rxf = set_cpus_allowed_ptr(dhd->current_rxf, - cpumask_of(RXF_CPUCORE)); - } else { - e_rxf = set_cpus_allowed_ptr(dhd->current_rxf, - cpumask_of(PRIMARY_CPUCORE)); - } - if (retry_set++ > MAX_RETRY_SET_CPUCORE) { - DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf)); - return; - } - if (e_rxf < 0) - OSL_SLEEP(1); - } while (e_rxf < 0); - } -#ifdef DHD_OF_SUPPORT - interrupt_set_cpucore(set, DPC_CPUCORE, PRIMARY_CPUCORE); -#endif /* DHD_OF_SUPPORT */ - DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set)); + if (!dhdp) + return length; - return; + if (dhdp->concise_dbg_buf) { + remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN); + if (remain_len <= 0) { + DHD_ERROR(("%s: error getting concise debug info !\n", + __FUNCTION__)); + return length; + } + length = (strlen(DHD_DUMP_LOG_HDR) + 
sizeof(sec_hdr) + + (CONCISE_DUMP_BUFLEN - remain_len)); + } + return length; } -#endif /* CUSTOM_SET_CPUCORE */ -#ifdef DHD_MCAST_REGEN -/* Get interface specific ap_isolate configuration */ -int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx) +uint32 +dhd_get_cookie_log_len(void *ndev, dhd_pub_t *dhdp) { - dhd_info_t *dhd = dhdp->info; - dhd_if_t *ifp; + int length = 0; + dhd_info_t *dhd_info; + + if (ndev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev); + dhdp = &dhd_info->pub; + } - ASSERT(idx < DHD_MAX_IFS); + if (!dhdp) + return length; - ifp = dhd->iflist[idx]; + if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) { + length = dhd_log_dump_cookie_len(dhdp); + } + return length; - return ifp->mcast_regen_bss_enable; } -/* Set interface specific mcast_regen configuration */ -int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val) +#ifdef DHD_DUMP_PCIE_RINGS +uint32 +dhd_get_flowring_len(void *ndev, dhd_pub_t *dhdp) { - dhd_info_t *dhd = dhdp->info; - dhd_if_t *ifp; + int length = 0; + log_dump_section_hdr_t sec_hdr; + dhd_info_t *dhd_info; + uint16 h2d_flowrings_total; + uint32 remain_len = 0; - ASSERT(idx < DHD_MAX_IFS); + if (ndev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev); + dhdp = &dhd_info->pub; + } - ifp = dhd->iflist[idx]; + if (!dhdp) + return length; - ifp->mcast_regen_bss_enable = val; + if (dhdp->concise_dbg_buf) { + remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN); + if (remain_len <= 0) { + DHD_ERROR(("%s: error getting concise debug info !\n", + __FUNCTION__)); + return length; + } + } - /* Disable rx_pkt_chain feature for interface, if mcast_regen feature - * is enabled - */ - dhd_update_rx_pkt_chainable_state(dhdp, idx); - return BCME_OK; + length += strlen(FLOWRING_DUMP_HDR); + length += CONCISE_DUMP_BUFLEN - remain_len; + length += sizeof(sec_hdr); + h2d_flowrings_total = dhd_get_max_flow_rings(dhdp); + length += 
((H2DRING_TXPOST_ITEMSIZE + * H2DRING_TXPOST_MAX_ITEM * h2d_flowrings_total) + + (D2HRING_TXCMPLT_ITEMSIZE * D2HRING_TXCMPLT_MAX_ITEM) + + (H2DRING_RXPOST_ITEMSIZE * H2DRING_RXPOST_MAX_ITEM) + + (D2HRING_RXCMPLT_ITEMSIZE * D2HRING_RXCMPLT_MAX_ITEM) + + (H2DRING_CTRL_SUB_ITEMSIZE * H2DRING_CTRL_SUB_MAX_ITEM) + + (D2HRING_CTRL_CMPLT_ITEMSIZE * D2HRING_CTRL_CMPLT_MAX_ITEM) +#ifdef EWP_EDL + + (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM)); +#else + + (H2DRING_INFO_BUFPOST_ITEMSIZE * H2DRING_DYNAMIC_INFO_MAX_ITEM) + + (D2HRING_INFO_BUFCMPLT_ITEMSIZE * D2HRING_DYNAMIC_INFO_MAX_ITEM)); +#endif /* EWP_EDL */ + return length; } -#endif /* DHD_MCAST_REGEN */ +#endif /* DHD_DUMP_PCIE_RINGS */ -/* Get interface specific ap_isolate configuration */ -int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx) +#ifdef EWP_ECNTRS_LOGGING +uint32 +dhd_get_ecntrs_len(void *ndev, dhd_pub_t *dhdp) { - dhd_info_t *dhd = dhdp->info; - dhd_if_t *ifp; + dhd_info_t *dhd_info; + log_dump_section_hdr_t sec_hdr; + int length = 0; + dhd_dbg_ring_t *ring; - ASSERT(idx < DHD_MAX_IFS); + if (ndev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev); + dhdp = &dhd_info->pub; + } - ifp = dhd->iflist[idx]; + if (!dhdp) + return length; - return ifp->ap_isolate; + if (logdump_ecntr_enable && dhdp->ecntr_dbg_ring) { + ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring; + length = ring->ring_size + strlen(ECNTRS_LOG_HDR) + sizeof(sec_hdr); + } + return length; } +#endif /* EWP_ECNTRS_LOGGING */ -/* Set interface specific ap_isolate configuration */ -int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val) +#ifdef EWP_RTT_LOGGING +uint32 +dhd_get_rtt_len(void *ndev, dhd_pub_t *dhdp) { - dhd_info_t *dhd = dhdp->info; - dhd_if_t *ifp; - - ASSERT(idx < DHD_MAX_IFS); + dhd_info_t *dhd_info; + log_dump_section_hdr_t sec_hdr; + int length = 0; + dhd_dbg_ring_t *ring; - ifp = dhd->iflist[idx]; + if (ndev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev); + dhdp = &dhd_info->pub; 
+ } - if (ifp) - ifp->ap_isolate = val; + if (!dhdp) + return length; - return 0; + if (logdump_rtt_enable && dhdp->rtt_dbg_ring) { + ring = (dhd_dbg_ring_t *)dhdp->rtt_dbg_ring; + length = ring->ring_size + strlen(RTT_LOG_HDR) + sizeof(sec_hdr); + } + return length; } +#endif /* EWP_RTT_LOGGING */ -#ifdef DHD_FW_COREDUMP -#if defined(CONFIG_X86) -#define MEMDUMPINFO_LIVE "/installmedia/.memdump.info" -#define MEMDUMPINFO_INST "/data/.memdump.info" -#endif /* CONFIG_X86 && OEM_ANDROID */ - -#define MEMDUMPINFO "/data/misc/wifi/.memdump.info" - -void dhd_get_memdump_info(dhd_pub_t *dhd) +int +dhd_get_dld_log_dump(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, int type, void *pos) { - struct file *fp = NULL; - uint32 mem_val = DUMP_MEMFILE_MAX; - int ret = 0; - char *filepath = MEMDUMPINFO; - -#if !defined(CONFIG_X86) - mem_val = DUMP_DISABLED; - goto done; -#endif + int ret = BCME_OK; + struct dhd_log_dump_buf *dld_buf; + log_dump_section_hdr_t sec_hdr; + dhd_info_t *dhd_info; - /* Read memdump info from the file */ - fp = filp_open(filepath, O_RDONLY, 0); - if (IS_ERR(fp)) { - DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); -#if defined(CONFIG_X86) - /* Check if it is Live Brix Image */ - if (strcmp(filepath, MEMDUMPINFO_LIVE) != 0) { - goto done; - } - /* Try if it is Installed Brix Image */ - filepath = MEMDUMPINFO_INST; - DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath)); - fp = filp_open(filepath, O_RDONLY, 0); - if (IS_ERR(fp)) { - DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); - goto done; - } -#else /* Non Brix Android platform */ - goto done; -#endif /* CONFIG_X86 && OEM_ANDROID */ - } + dld_buf = &g_dld_buf[type]; - /* Handle success case */ - ret = compat_kernel_read(fp, 0, (char *)&mem_val, 4); - if (ret < 0) { - DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret)); - filp_close(fp, NULL); - goto done; + if (dev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct 
net_device *)dev); + dhdp = &dhd_info->pub; + } else if (!dhdp) { + return BCME_ERROR; } - mem_val = bcm_atoi((char *)&mem_val); - - filp_close(fp, NULL); + DHD_ERROR(("%s: ENTER \n", __FUNCTION__)); -#ifdef DHD_INIT_DEFAULT_MEMDUMP - if (mem_val == 0 || mem_val == DUMP_MEMFILE_MAX) - mem_val = DUMP_MEMFILE_BUGON; -#endif /* DHD_INIT_DEFAULT_MEMDUMP */ + dhd_init_sec_hdr(&sec_hdr); -done: - dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_MEMFILE; + /* write the section header first */ + ret = dhd_export_debug_data(dld_hdrs[type].hdr_str, fp, user_buf, + strlen(dld_hdrs[type].hdr_str), pos); + if (ret < 0) + goto exit; + len -= (uint32)strlen(dld_hdrs[type].hdr_str); + len -= (uint32)sizeof(sec_hdr); + sec_hdr.type = dld_hdrs[type].sec_type; + sec_hdr.length = len; + ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos); + if (ret < 0) + goto exit; + ret = dhd_export_debug_data(dld_buf->buffer, fp, user_buf, len, pos); + if (ret < 0) + goto exit; - DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, dhd->memdump_enabled)); +exit: + return ret; } -void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size) +static int +dhd_log_flush(dhd_pub_t *dhdp, log_dump_type_t *type) { unsigned long flags = 0; - dhd_dump_t *dump = NULL; +#ifdef EWP_EDL + int i = 0; +#endif /* EWP_EDL */ dhd_info_t *dhd_info = NULL; - dhd_info = (dhd_info_t *)dhdp->info; - dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t)); - if (dump == NULL) { - DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__)); - return; + + /* if dhdp is null, its extremely unlikely that log dump will be scheduled + * so not freeing 'type' here is ok, even if we want to free 'type' + * we cannot do so, since 'dhdp->osh' is unavailable + * as dhdp is null + */ + if (!dhdp || !type) { + if (dhdp) { + DHD_GENERAL_LOCK(dhdp, flags); + DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp); + dhd_os_busbusy_wake(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); + } + 
return BCME_ERROR; } - dump->buf = buf; - dump->bufsize = size; -#ifdef BCMPCIE - if (dhdp->hscb_enable) { - dhd_get_hscb_info(dhdp->prot, (void*)(&dump->hscb_buf), - (uint32 *)(&dump->hscb_bufsize)); + + dhd_info = (dhd_info_t *)dhdp->info; + /* in case of trap get preserve logs from ETD */ +#if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS) + if (dhdp->dongle_trap_occured && + dhdp->extended_trap_data) { + dhdpcie_get_etd_preserve_logs(dhdp, (uint8 *)dhdp->extended_trap_data, + &dhd_info->event_data); } - else #endif /* BCMPCIE */ - { - dump->hscb_bufsize = 0; + + /* flush the event work items to get any fw events/logs + * flush_work is a blocking call + */ +#ifdef SHOW_LOGTRACE +#ifdef EWP_EDL + if (dhd_info->pub.dongle_edl_support) { + /* wait till existing edl items are processed */ + dhd_flush_logtrace_process(dhd_info); + /* dhd_flush_logtrace_process will ensure the work items in the ring + * (EDL ring) from rd to wr are processed. But if wr had + * wrapped around, only the work items from rd to ring-end are processed. 
+ * So to ensure that the work items at the + * beginning of ring are also processed in the wrap around case, call + * it twice + */ + for (i = 0; i < 2; i++) { + /* blocks till the edl items are processed */ + dhd_flush_logtrace_process(dhd_info); + } + } else { + dhd_flush_logtrace_process(dhd_info); } +#else + dhd_flush_logtrace_process(dhd_info); +#endif /* EWP_EDL */ +#endif /* SHOW_LOGTRACE */ -#ifdef DHD_LOG_DUMP - dhd_print_buf_addr(dhdp, "memdump", buf, size); -#endif /* DHD_LOG_DUMP */ + return BCME_OK; +} - if (dhdp->memdump_enabled == DUMP_MEMONLY) { - BUG_ON(1); +int +dhd_get_debug_dump_file_name(void *dev, dhd_pub_t *dhdp, char *dump_path, int size) +{ + dhd_info_t *dhd_info; + + if (dev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhdp = &dhd_info->pub; } -#if defined(DEBUG_DNGL_INIT_FAIL) || defined(DHD_ERPOM) - if ( -#if defined(DEBUG_DNGL_INIT_FAIL) - (dhdp->memdump_type == DUMP_TYPE_DONGLE_INIT_FAILURE) || -#endif /* DEBUG_DNGL_INIT_FAIL */ -#ifdef DHD_ERPOM - (dhdp->memdump_type == DUMP_TYPE_DUE_TO_BT) || -#endif /* DHD_ERPOM */ - FALSE) - { -#ifdef DHD_LOG_DUMP - log_dump_type_t *flush_type = NULL; -#endif // endif - dhd_info->scheduled_memdump = FALSE; - dhd_mem_dump((void *)dhdp->info, (void *)dump, 0); - /* for dongle init fail cases, 'dhd_mem_dump' does - * not call 'dhd_log_dump', so call it here. - */ -#ifdef DHD_LOG_DUMP - flush_type = MALLOCZ(dhdp->osh, - sizeof(log_dump_type_t)); - if (flush_type) { - *flush_type = DLD_BUF_TYPE_ALL; - DHD_ERROR(("%s: calling log dump.. 
\n", __FUNCTION__)); - dhd_log_dump(dhdp->info, flush_type, 0); - } -#endif /* DHD_LOG_DUMP */ - return; + if (!dhdp) + return BCME_ERROR; + + memset(dump_path, 0, size); + + switch (dhdp->debug_dump_subcmd) { + case CMD_UNWANTED: + snprintf(dump_path, size, "%s", + DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE + DHD_DUMP_SUBSTR_UNWANTED); + break; + case CMD_DISCONNECTED: + snprintf(dump_path, size, "%s", + DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE + DHD_DUMP_SUBSTR_DISCONNECTED); + break; + default: + snprintf(dump_path, size, "%s", + DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE); } -#endif /* DEBUG_DNGL_INIT_FAIL || DHD_ERPOM */ - dhd_info->scheduled_memdump = TRUE; - /* bus busy bit for mem dump will be cleared in mem dump - * work item context, after mem dump file is written - */ - DHD_GENERAL_LOCK(dhdp, flags); - DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp); - DHD_GENERAL_UNLOCK(dhdp, flags); - DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__)); - dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump, - DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WQ_WORK_PRIORITY_HIGH); + if (!dhdp->logdump_periodic_flush) { + get_debug_dump_time(dhdp->debug_dump_time_str); + snprintf(dump_path + strlen(dump_path), + size - strlen(dump_path), + "_%s", dhdp->debug_dump_time_str); + } + return BCME_OK; +} + +uint32 +dhd_get_dld_len(int log_type) +{ + unsigned long wr_size = 0; + unsigned long buf_size = 0; + unsigned long flags = 0; + struct dhd_log_dump_buf *dld_buf; + log_dump_section_hdr_t sec_hdr; + + /* calculate the length of the log */ + dld_buf = &g_dld_buf[log_type]; + buf_size = (unsigned long)dld_buf->max - + (unsigned long)dld_buf->buffer; + + if (dld_buf->wraparound) { + wr_size = buf_size; + } else { + /* need to hold the lock before accessing 'present' and 'remain' ptrs */ + spin_lock_irqsave(&dld_buf->lock, flags); + wr_size = (unsigned long)dld_buf->present - + (unsigned long)dld_buf->front; + spin_unlock_irqrestore(&dld_buf->lock, flags); + } + return 
(wr_size + sizeof(sec_hdr) + strlen(dld_hdrs[log_type].hdr_str)); +} + +static void +dhd_get_time_str(dhd_pub_t *dhdp, char *time_str, int size) +{ + char *ts = NULL; + memset(time_str, 0, size); + ts = dhd_log_dump_get_timestamp(); + snprintf(time_str, size, + "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts); } -static void -dhd_mem_dump(void *handle, void *event_info, u8 event) +int +dhd_print_time_str(const void *user_buf, void *fp, uint32 len, void *pos) { - dhd_info_t *dhd = handle; - dhd_pub_t *dhdp = NULL; - dhd_dump_t *dump = event_info; - unsigned long flags = 0; + char *ts = NULL; + int ret = 0; + char time_str[128]; - DHD_ERROR(("%s: ENTER, memdump type %u\n", __FUNCTION__, dhd->pub.memdump_type)); + memset_s(time_str, sizeof(time_str), 0, sizeof(time_str)); + ts = dhd_log_dump_get_timestamp(); + snprintf(time_str, sizeof(time_str), + "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts); - if (!dhd) { - DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); - return; + /* write the timestamp hdr to the file first */ + ret = dhd_export_debug_data(time_str, fp, user_buf, strlen(time_str), pos); + if (ret < 0) { + DHD_ERROR(("write file error, err = %d\n", ret)); } + return ret; +} - dhdp = &dhd->pub; +#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT) +int +dhd_print_health_chk_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos) +{ + int ret = BCME_OK; + log_dump_section_hdr_t sec_hdr; + dhd_info_t *dhd_info; - DHD_GENERAL_LOCK(dhdp, flags); - if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) { - DHD_GENERAL_UNLOCK(dhdp, flags); - DHD_ERROR(("%s: bus is down! can't collect mem dump. 
\n", __FUNCTION__)); - goto exit; + if (dev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhdp = &dhd_info->pub; } - DHD_GENERAL_UNLOCK(dhdp, flags); - if (!dump) { - DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__)); - goto exit; - } + if (!dhdp) + return BCME_ERROR; - if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) { - DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__)); -#ifdef DHD_DEBUG_UART - dhd->pub.memdump_success = FALSE; -#endif /* DHD_DEBUG_UART */ + dhd_init_sec_hdr(&sec_hdr); + + if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) { + /* write the section header first */ + ret = dhd_export_debug_data(HEALTH_CHK_LOG_HDR, fp, user_buf, + strlen(HEALTH_CHK_LOG_HDR), pos); + if (ret < 0) + goto exit; + + len -= (uint32)strlen(HEALTH_CHK_LOG_HDR); + sec_hdr.type = LOG_DUMP_SECTION_HEALTH_CHK; + sec_hdr.length = HEALTH_CHK_BUF_SIZE; + ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos); + if (ret < 0) + goto exit; + + len -= (uint32)sizeof(sec_hdr); + /* write the log */ + ret = dhd_export_debug_data((char *)dhdp->health_chk_event_data, fp, + user_buf, len, pos); + if (ret < 0) + goto exit; } +exit: + return ret; +} +#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */ - /* directly call dhd_log_dump for debug_dump collection from the mem_dump work queue - * context, no need to schedule another work queue for log dump. In case of - * user initiated DEBUG_DUMP wpa_cli command (DUMP_TYPE_BY_SYSDUMP), - * cfg layer is itself scheduling the log_dump work queue. - * that path is not disturbed. If 'dhd_mem_dump' is called directly then we will not - * collect debug_dump as it may be called from non-sleepable context. 
- */ -#ifdef DHD_LOG_DUMP - if (dhd->scheduled_memdump && - dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) { - log_dump_type_t *flush_type = MALLOCZ(dhdp->osh, - sizeof(log_dump_type_t)); - if (flush_type) { - *flush_type = DLD_BUF_TYPE_ALL; - DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__)); - dhd_log_dump(dhd, flush_type, 0); - } +#ifdef BCMPCIE +int +dhd_print_ext_trap_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos) +{ + int ret = BCME_OK; + log_dump_section_hdr_t sec_hdr; + dhd_info_t *dhd_info; + + if (dev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhdp = &dhd_info->pub; } -#endif /* DHD_LOG_DUMP */ - clear_debug_dump_time(dhdp->debug_dump_time_str); + if (!dhdp) + return BCME_ERROR; - /* before calling bug on, wait for other logs to be dumped. - * we cannot wait in case dhd_mem_dump is called directly - * as it may not be in a sleepable context - */ - if (dhd->scheduled_memdump) { - uint bitmask = 0; - int timeleft = 0; -#ifdef DHD_SSSR_DUMP - bitmask |= DHD_BUS_BUSY_IN_SSSRDUMP; -#endif // endif - if (bitmask != 0) { - DHD_ERROR(("%s: wait for SSSR dump..\n", __FUNCTION__)); - timeleft = dhd_os_busbusy_wait_bitmask(dhdp, - &dhdp->dhd_bus_busy_state, bitmask, 0); - if ((timeleft == 0) || (timeleft == 1)) { - DHD_ERROR(("%s:Timed out on sssr dump,dhd_bus_busy_state=0x%x\n", - __FUNCTION__, dhdp->dhd_bus_busy_state)); - } - } + dhd_init_sec_hdr(&sec_hdr); + + /* append extended trap data to the file in case of traps */ + if (dhdp->dongle_trap_occured && + dhdp->extended_trap_data) { + /* write the section header first */ + ret = dhd_export_debug_data(EXT_TRAP_LOG_HDR, fp, user_buf, + strlen(EXT_TRAP_LOG_HDR), pos); + if (ret < 0) + goto exit; + + len -= (uint32)strlen(EXT_TRAP_LOG_HDR); + sec_hdr.type = LOG_DUMP_SECTION_EXT_TRAP; + sec_hdr.length = BCMPCIE_EXT_TRAP_DATA_MAXLEN; + ret = dhd_export_debug_data((uint8 *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos); + if (ret < 0) + goto 
exit; + + len -= (uint32)sizeof(sec_hdr); + /* write the log */ + ret = dhd_export_debug_data((uint8 *)dhdp->extended_trap_data, fp, + user_buf, len, pos); + if (ret < 0) + goto exit; } +exit: + return ret; +} +#endif /* BCMPCIE */ - if (dump->hscb_buf && dump->hscb_bufsize) { - DHD_ERROR(("%s: write HSCB dump... \n", __FUNCTION__)); - if (write_dump_to_file(&dhd->pub, dump->hscb_buf, - dump->hscb_bufsize, "mem_dump_hscb")) { - DHD_ERROR(("%s: writing HSCB dump to the file failed\n", __FUNCTION__)); -#ifdef DHD_DEBUG_UART - dhd->pub.memdump_success = FALSE; -#endif /* DHD_DEBUG_UART */ - } +int +dhd_print_dump_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos) +{ + int ret = BCME_OK; + log_dump_section_hdr_t sec_hdr; + dhd_info_t *dhd_info; + + if (dev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhdp = &dhd_info->pub; } - DHD_ERROR(("%s: memdump type %u\n", __FUNCTION__, dhd->pub.memdump_type)); - if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON && -#ifdef DHD_LOG_DUMP - dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP && -#endif /* DHD_LOG_DUMP */ - dhd->pub.memdump_type != DUMP_TYPE_BY_USER && -#ifdef DHD_DEBUG_UART - dhd->pub.memdump_success == TRUE && -#endif /* DHD_DEBUG_UART */ -#ifdef DNGL_EVENT_SUPPORT - dhd->pub.memdump_type != DUMP_TYPE_DONGLE_HOST_EVENT && -#endif /* DNGL_EVENT_SUPPORT */ - dhd->pub.memdump_type != DUMP_TYPE_CFG_VENDOR_TRIGGERED) { + if (!dhdp) + return BCME_ERROR; -#ifdef SHOW_LOGTRACE - /* Wait till logtrace context is flushed */ - dhd_flush_logtrace_process(dhd); -#endif /* SHOW_LOGTRACE */ + dhd_init_sec_hdr(&sec_hdr); - DHD_ERROR(("%s: call BUG_ON \n", __FUNCTION__)); - BUG_ON(1); + ret = dhd_export_debug_data(DHD_DUMP_LOG_HDR, fp, user_buf, strlen(DHD_DUMP_LOG_HDR), pos); + if (ret < 0) + goto exit; + + len -= (uint32)strlen(DHD_DUMP_LOG_HDR); + sec_hdr.type = LOG_DUMP_SECTION_DHD_DUMP; + sec_hdr.length = len; + ret = dhd_export_debug_data((char *)&sec_hdr, fp, 
user_buf, sizeof(sec_hdr), pos); + if (ret < 0) + goto exit; + + len -= (uint32)sizeof(sec_hdr); + + if (dhdp->concise_dbg_buf) { + dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN); + ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf, len, pos); + if (ret < 0) + goto exit; } - DHD_ERROR(("%s: No BUG ON, memdump type %u \n", __FUNCTION__, dhd->pub.memdump_type)); exit: - if (dump) - MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t)); - DHD_GENERAL_LOCK(dhdp, flags); - DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(&dhd->pub); - dhd_os_busbusy_wake(dhdp); - DHD_GENERAL_UNLOCK(dhdp, flags); - dhd->scheduled_memdump = FALSE; - DHD_ERROR(("%s: EXIT \n", __FUNCTION__)); + return ret; } -#endif /* DHD_FW_COREDUMP */ - -#ifdef DHD_SSSR_DUMP -static void -dhd_sssr_dump(void *handle, void *event_info, u8 event) +int +dhd_print_cookie_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos) { - dhd_info_t *dhd = handle; - dhd_pub_t *dhdp; - int i; - char before_sr_dump[128]; - char after_sr_dump[128]; - unsigned long flags = 0; - uint dig_buf_size = 0; + int ret = BCME_OK; + dhd_info_t *dhd_info; - DHD_ERROR(("%s: ENTER \n", __FUNCTION__)); + if (dev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhdp = &dhd_info->pub; + } - if (!dhd) { - DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); - return; + if (!dhdp) + return BCME_ERROR; + + if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) { + ret = dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, (unsigned long *)pos); } + return ret; +} - dhdp = &dhd->pub; +#ifdef DHD_DUMP_PCIE_RINGS +int +dhd_print_flowring_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos) +{ + log_dump_section_hdr_t sec_hdr; + int ret = BCME_OK; + uint32 remain_len = 0; + dhd_info_t *dhd_info; - DHD_GENERAL_LOCK(dhdp, flags); - if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) { - DHD_GENERAL_UNLOCK(dhdp, flags); - DHD_ERROR(("%s: bus is down! 
can't collect sssr dump. \n", __FUNCTION__)); - goto exit; + if (dev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhdp = &dhd_info->pub; } - DHD_GENERAL_UNLOCK(dhdp, flags); - for (i = 0; i < MAX_NUM_D11CORES; i++) { - /* Init file name */ - memset(before_sr_dump, 0, sizeof(before_sr_dump)); - memset(after_sr_dump, 0, sizeof(after_sr_dump)); + if (!dhdp) + return BCME_ERROR; - snprintf(before_sr_dump, sizeof(before_sr_dump), "%s_%d_%s", - "sssr_core", i, "before_SR"); - snprintf(after_sr_dump, sizeof(after_sr_dump), "%s_%d_%s", - "sssr_core", i, "after_SR"); + dhd_init_sec_hdr(&sec_hdr); - if (dhdp->sssr_d11_before[i] && dhdp->sssr_d11_outofreset[i] && - (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) { - if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_before[i], - dhdp->sssr_reg_info.mac_regs[i].sr_size, before_sr_dump)) { - DHD_ERROR(("%s: writing SSSR MAIN dump before to the file failed\n", - __FUNCTION__)); - } - } - if (dhdp->sssr_d11_after[i] && dhdp->sssr_d11_outofreset[i]) { - if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_after[i], - dhdp->sssr_reg_info.mac_regs[i].sr_size, after_sr_dump)) { - DHD_ERROR(("%s: writing SSSR AUX dump after to the file failed\n", - __FUNCTION__)); - } - } - } + remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN); + memset(dhdp->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN); - if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) { - dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size; - } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) && - dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) { - dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size; - } + /* write the section header first */ + ret = dhd_export_debug_data(FLOWRING_DUMP_HDR, fp, user_buf, + strlen(FLOWRING_DUMP_HDR), pos); + if (ret < 0) + goto exit; - if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) { - if (write_dump_to_file(dhdp, 
(uint8 *)dhdp->sssr_dig_buf_before, - dig_buf_size, "sssr_dig_before_SR")) { - DHD_ERROR(("%s: writing SSSR Dig dump before to the file failed\n", - __FUNCTION__)); - } - } + /* Write the ring summary */ + ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf, + (CONCISE_DUMP_BUFLEN - remain_len), pos); + if (ret < 0) + goto exit; - if (dhdp->sssr_dig_buf_after) { - if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_after, - dig_buf_size, "sssr_dig_after_SR")) { - DHD_ERROR(("%s: writing SSSR Dig VASIP dump after to the file failed\n", - __FUNCTION__)); - } - } + sec_hdr.type = LOG_DUMP_SECTION_FLOWRING; + sec_hdr.length = len; + ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos); + if (ret < 0) + goto exit; + + /* write the log */ + ret = dhd_d2h_h2d_ring_dump(dhdp, fp, user_buf, (unsigned long *)pos, TRUE); + if (ret < 0) + goto exit; exit: - DHD_GENERAL_LOCK(dhdp, flags); - DHD_BUS_BUSY_CLEAR_IN_SSSRDUMP(dhdp); - dhd_os_busbusy_wake(dhdp); - DHD_GENERAL_UNLOCK(dhdp, flags); + return ret; } +#endif /* DHD_DUMP_PCIE_RINGS */ -void -dhd_schedule_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode) +#ifdef EWP_ECNTRS_LOGGING +int +dhd_print_ecntrs_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos) { - unsigned long flags = 0; + log_dump_section_hdr_t sec_hdr; + int ret = BCME_OK; + dhd_info_t *dhd_info; - dhdp->sssr_dump_mode = dump_mode; + if (dev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhdp = &dhd_info->pub; + } - /* bus busy bit for sssr dump will be cleared in sssr dump - * work item context, after sssr dump files are created - */ - DHD_GENERAL_LOCK(dhdp, flags); - DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp); - DHD_GENERAL_UNLOCK(dhdp, flags); + if (!dhdp) + return BCME_ERROR; - if (dhdp->info->no_wq_sssrdump) { - dhd_sssr_dump(dhdp->info, 0, 0); - return; + dhd_init_sec_hdr(&sec_hdr); + + if (logdump_ecntr_enable && + dhdp->ecntr_dbg_ring) { + sec_hdr.type = 
LOG_DUMP_SECTION_ECNTRS; + ret = dhd_dump_debug_ring(dhdp, dhdp->ecntr_dbg_ring, + user_buf, &sec_hdr, ECNTRS_LOG_HDR, len, LOG_DUMP_SECTION_ECNTRS); } + return ret; - DHD_ERROR(("%s: scheduling sssr dump.. \n", __FUNCTION__)); - dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL, - DHD_WQ_WORK_SSSR_DUMP, dhd_sssr_dump, DHD_WQ_WORK_PRIORITY_HIGH); } -#endif /* DHD_SSSR_DUMP */ +#endif /* EWP_ECNTRS_LOGGING */ -#ifdef DHD_LOG_DUMP -static void -dhd_log_dump(void *handle, void *event_info, u8 event) +#ifdef EWP_RTT_LOGGING +int +dhd_print_rtt_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos) { - dhd_info_t *dhd = handle; - log_dump_type_t *type = (log_dump_type_t *)event_info; + log_dump_section_hdr_t sec_hdr; + int ret = BCME_OK; + dhd_info_t *dhd_info; - if (!dhd || !type) { - DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); - return; + if (dev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhdp = &dhd_info->pub; } -#ifdef WL_CFG80211 - /* flush the fw side logs */ - wl_flush_fw_log_buffer(dhd_linux_get_primary_netdev(&dhd->pub), - FW_LOGSET_MASK_ALL); -#endif // endif - /* there are currently 3 possible contexts from which - * log dump can be scheduled - - * 1.TRAP 2.supplicant DEBUG_DUMP pvt driver command - * 3.HEALTH CHECK event - * The concise debug info buffer is a shared resource - * and in case a trap is one of the contexts then both the - * scheduled work queues need to run because trap data is - * essential for debugging. Hence a mutex lock is acquired - * before calling do_dhd_log_dump(). - */ - DHD_ERROR(("%s: calling log dump.. 
\n", __FUNCTION__)); - dhd_os_logdump_lock(&dhd->pub); - DHD_OS_WAKE_LOCK(&dhd->pub); - if (do_dhd_log_dump(&dhd->pub, type) != BCME_OK) { - DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__)); - } - DHD_OS_WAKE_UNLOCK(&dhd->pub); - dhd_os_logdump_unlock(&dhd->pub); -} + if (!dhdp) + return BCME_ERROR; -void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type) -{ - DHD_ERROR(("%s: scheduling log dump.. \n", __FUNCTION__)); - dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, - type, DHD_WQ_WORK_DHD_LOG_DUMP, - dhd_log_dump, DHD_WQ_WORK_PRIORITY_HIGH); -} + dhd_init_sec_hdr(&sec_hdr); -static void -dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size) -{ - if ((dhdp->memdump_enabled == DUMP_MEMONLY) || - (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON)) { -#if defined(CONFIG_ARM64) - DHD_ERROR(("-------- %s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n", - name, (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size)); -#elif defined(__ARM_ARCH_7A__) - DHD_ERROR(("-------- %s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n", - name, (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size)); -#endif /* __ARM_ARCH_7A__ */ + if (logdump_rtt_enable && dhdp->rtt_dbg_ring) { + ret = dhd_dump_debug_ring(dhdp, dhdp->rtt_dbg_ring, + user_buf, &sec_hdr, RTT_LOG_HDR, len, LOG_DUMP_SECTION_RTT); } + return ret; + } +#endif /* EWP_RTT_LOGGING */ -static void -dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type) +#ifdef DHD_STATUS_LOGGING +int +dhd_print_status_log_data(void *dev, dhd_pub_t *dhdp, const void *user_buf, + void *fp, uint32 len, void *pos) { - int i; - unsigned long wr_size = 0; - struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0]; - size_t log_size = 0; - char buf_name[DHD_PRINT_BUF_NAME_LEN]; - dhd_dbg_ring_t *ring = NULL; + dhd_info_t *dhd_info; - BCM_REFERENCE(ring); + if (dev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev); + dhdp = &dhd_info->pub; + } - for (i = 0; i < DLD_BUFFER_NUM; i++) { - dld_buf = 
&g_dld_buf[i]; - log_size = (unsigned long)dld_buf->max - - (unsigned long)dld_buf->buffer; - if (dld_buf->wraparound) { - wr_size = log_size; - } else { - wr_size = (unsigned long)dld_buf->present - - (unsigned long)dld_buf->front; - } - scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d]", i); - dhd_print_buf_addr(dhdp, buf_name, dld_buf, dld_buf_size[i]); - scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] buffer", i); - dhd_print_buf_addr(dhdp, buf_name, dld_buf->buffer, wr_size); - scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] present", i); - dhd_print_buf_addr(dhdp, buf_name, dld_buf->present, wr_size); - scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] front", i); - dhd_print_buf_addr(dhdp, buf_name, dld_buf->front, wr_size); + if (!dhdp) { + return BCME_ERROR; } -#ifdef EWP_ECNTRS_LOGGING - /* periodic flushing of ecounters is NOT supported */ - if (*type == DLD_BUF_TYPE_ALL && - logdump_ecntr_enable && - dhdp->ecntr_dbg_ring) { + return dhd_statlog_write_logdump(dhdp, user_buf, fp, len, pos); +} - ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring; - dhd_print_buf_addr(dhdp, "ecntr_dbg_ring", ring, LOG_DUMP_ECNTRS_MAX_BUFSIZE); - dhd_print_buf_addr(dhdp, "ecntr_dbg_ring ring_buf", ring->ring_buf, - LOG_DUMP_ECNTRS_MAX_BUFSIZE); - } -#endif /* EWP_ECNTRS_LOGGING */ +uint32 +dhd_get_status_log_len(void *ndev, dhd_pub_t *dhdp) +{ + dhd_info_t *dhd_info; + uint32 length = 0; -#ifdef BCMPCIE - if (dhdp->dongle_trap_occured && dhdp->extended_trap_data) { - dhd_print_buf_addr(dhdp, "extended_trap_data", dhdp->extended_trap_data, - BCMPCIE_EXT_TRAP_DATA_MAXLEN); + if (ndev) { + dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev); + dhdp = &dhd_info->pub; } -#endif /* BCMPCIE */ -#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT) - /* if health check event was received */ - if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) { - dhd_print_buf_addr(dhdp, "health_chk_event_data", dhdp->health_chk_event_data, - HEALTH_CHK_BUF_SIZE); + if 
(dhdp) { + length = dhd_statlog_get_logbuf_len(dhdp); } -#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */ - /* append the concise debug information */ - if (dhdp->concise_dbg_buf) { - dhd_print_buf_addr(dhdp, "concise_dbg_buf", dhdp->concise_dbg_buf, - CONCISE_DUMP_BUFLEN); - } + return length; +} +#endif /* DHD_STATUS_LOGGING */ + +void +dhd_init_sec_hdr(log_dump_section_hdr_t *sec_hdr) +{ + /* prep the section header */ + memset(sec_hdr, 0, sizeof(*sec_hdr)); + sec_hdr->magic = LOG_DUMP_MAGIC; + sec_hdr->timestamp = local_clock(); } /* Must hold 'dhd_os_logdump_lock' before calling this function ! */ @@ -18954,123 +18302,44 @@ do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type) struct file *fp = NULL; mm_segment_t old_fs; loff_t pos = 0; - unsigned int wr_size = 0; char dump_path[128]; uint32 file_mode; unsigned long flags = 0; - struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0]; size_t log_size = 0; size_t fspace_remain = 0; struct kstat stat; char time_str[128]; - char *ts = NULL; - uint32 remain_len = 0; + unsigned int len = 0; log_dump_section_hdr_t sec_hdr; - dhd_info_t *dhd_info = NULL; DHD_ERROR(("%s: ENTER \n", __FUNCTION__)); - /* if dhdp is null, its extremely unlikely that log dump will be scheduled - * so not freeing 'type' here is ok, even if we want to free 'type' - * we cannot do so, since 'dhdp->osh' is unavailable - * as dhdp is null - */ - if (!dhdp || !type) { - if (dhdp) { - DHD_GENERAL_LOCK(dhdp, flags); - DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp); - dhd_os_busbusy_wake(dhdp); - DHD_GENERAL_UNLOCK(dhdp, flags); - } - return BCME_ERROR; - } - - DHD_GENERAL_LOCK(dhdp, flags); - if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) { - DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp); - dhd_os_busbusy_wake(dhdp); - DHD_GENERAL_UNLOCK(dhdp, flags); - MFREE(dhdp->osh, type, sizeof(*type)); - DHD_ERROR(("%s: bus is down! can't collect log dump. 
\n", __FUNCTION__)); - return BCME_ERROR; - } - DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp); - DHD_GENERAL_UNLOCK(dhdp, flags); - - dhd_info = (dhd_info_t *)dhdp->info; - /* in case of trap get preserve logs from ETD */ -#if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS) - if (dhdp->dongle_trap_occured && - dhdp->extended_trap_data) { - dhdpcie_get_etd_preserve_logs(dhdp, (uint8 *)dhdp->extended_trap_data, - &dhd_info->event_data); - } -#endif /* BCMPCIE */ - - /* flush the event work items to get any fw events/logs - * flush_work is a blocking call - */ -#ifdef EWP_EDL - if (dhd_info->pub.dongle_edl_support) { - /* wait till existing edl items are processed */ - dhd_flush_logtrace_process(dhd_info); - /* dhd_flush_logtrace_process will ensure the work items in the ring - * (EDL ring) from rd to wr are processed. But if wr had - * wrapped around, only the work items from rd to ring-end are processed. - * So to ensure that the work items at the - * beginning of ring are also processed in the wrap around case, call - * it twice - */ - for (i = 0; i < 2; i++) { - /* blocks till the edl items are processed */ - dhd_flush_logtrace_process(dhd_info); - } - } else { - dhd_flush_logtrace_process(dhd_info); - } -#else - dhd_flush_logtrace_process(dhd_info); -#endif /* EWP_EDL */ - - /* change to KERNEL_DS address limit */ - old_fs = get_fs(); - set_fs(KERNEL_DS); - - /* Init file name */ - memset(dump_path, 0, sizeof(dump_path)); - switch (dhdp->debug_dump_subcmd) { - case CMD_UNWANTED: - snprintf(dump_path, sizeof(dump_path), "%s", - DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE - DHD_DUMP_SUBSTR_UNWANTED); - break; - case CMD_DISCONNECTED: - snprintf(dump_path, sizeof(dump_path), "%s", - DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE - DHD_DUMP_SUBSTR_DISCONNECTED); - break; - default: - snprintf(dump_path, sizeof(dump_path), "%s", - DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE); + DHD_GENERAL_LOCK(dhdp, flags); + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) { + DHD_GENERAL_UNLOCK(dhdp, 
flags); + DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__)); + goto exit1; } + DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); - if (!dhdp->logdump_periodic_flush) { - get_debug_dump_time(dhdp->debug_dump_time_str); - snprintf(dump_path, sizeof(dump_path), "%s_" "%s", - dump_path, dhdp->debug_dump_time_str); + if ((ret = dhd_log_flush(dhdp, type)) < 0) { + goto exit1; } + /* change to KERNEL_DS address limit */ + old_fs = get_fs(); + set_fs(KERNEL_DS); - memset(time_str, 0, sizeof(time_str)); - ts = dhd_log_dump_get_timestamp(); - snprintf(time_str, sizeof(time_str), - "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts); + dhd_get_debug_dump_file_name(NULL, dhdp, dump_path, sizeof(dump_path)); + DHD_ERROR(("debug_dump_path = %s\n", dump_path)); DHD_ERROR(("DHD version: %s\n", dhd_version)); DHD_ERROR(("F/W version: %s\n", fw_version)); - DHD_ERROR(("debug_dump_path = %s\n", dump_path)); dhd_log_dump_buf_addr(dhdp, type); + dhd_get_time_str(dhdp, time_str, 128); + /* if this is the first time after dhd is loaded, * or, if periodic flush is disabled, clear the log file */ @@ -19095,20 +18364,20 @@ do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type) if (IS_ERR(fp)) { ret = PTR_ERR(fp); DHD_ERROR(("open file error, err = %d\n", ret)); - goto exit; + goto exit2; } DHD_ERROR(("debug_dump_path = %s\n", dump_path)); #else ret = PTR_ERR(fp); DHD_ERROR(("open file error, err = %d\n", ret)); - goto exit; + goto exit2; #endif /* CONFIG_X86 && OEM_ANDROID */ } ret = vfs_stat(dump_path, &stat); if (ret < 0) { DHD_ERROR(("file stat error, err = %d\n", ret)); - goto exit; + goto exit2; } /* if some one else has changed the file */ @@ -19142,7 +18411,7 @@ do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type) ret = generic_file_llseek(fp, dhdp->last_file_posn, SEEK_CUR); if (ret < 0) { DHD_ERROR(("file seek last posn error ! 
err = %d \n", ret)); - goto exit; + goto exit2; } pos = fp->f_pos; @@ -19155,58 +18424,16 @@ do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type) pos = fp->f_pos; } } - /* write the timestamp hdr to the file first */ - ret = vfs_write(fp, time_str, strlen(time_str), &pos); - if (ret < 0) { - DHD_ERROR(("write file error, err = %d\n", ret)); - goto exit; - } - /* prep the section header */ - memset(&sec_hdr, 0, sizeof(sec_hdr)); - sec_hdr.magic = LOG_DUMP_MAGIC; - sec_hdr.timestamp = local_clock(); + dhd_print_time_str(0, fp, len, &pos); for (i = 0; i < DLD_BUFFER_NUM; ++i) { - unsigned int buf_size = 0; if (*type != DLD_BUF_TYPE_ALL && i != *type) continue; - /* calculate the length of the log */ - dld_buf = &g_dld_buf[i]; - buf_size = (unsigned long)dld_buf->max - - (unsigned long)dld_buf->buffer; - if (dld_buf->wraparound) { - wr_size = buf_size; - } else { - /* need to hold the lock before accessing 'present' and 'remain' ptrs */ - spin_lock_irqsave(&dld_buf->lock, flags); - wr_size = (unsigned long)dld_buf->present - - (unsigned long)dld_buf->front; - spin_unlock_irqrestore(&dld_buf->lock, flags); - } - - /* write the section header first */ - sec_hdr.type = dld_hdrs[i].sec_type; - sec_hdr.length = wr_size; - vfs_write(fp, dld_hdrs[i].hdr_str, strlen(dld_hdrs[i].hdr_str), &pos); - vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos); - /* write the log */ - ret = vfs_write(fp, dld_buf->buffer, wr_size, &pos); - if (ret < 0) { - DHD_ERROR(("write file error, err = %d\n", ret)); - goto exit; - } - - /* re-init dhd_log_dump_buf structure */ - spin_lock_irqsave(&dld_buf->lock, flags); - dld_buf->wraparound = 0; - dld_buf->present = dld_buf->front; - dld_buf->remain = buf_size; - bzero(dld_buf->buffer, buf_size); - spin_unlock_irqrestore(&dld_buf->lock, flags); - + len = dhd_get_dld_len(i); + dhd_get_dld_log_dump(NULL, dhdp, 0, fp, len, i, &pos); if (*type != DLD_BUF_TYPE_ALL) break; } @@ -19217,134 +18444,87 @@ do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t 
*type) logdump_ecntr_enable && dhdp->ecntr_dbg_ring) { dhd_log_dump_ring_to_file(dhdp, dhdp->ecntr_dbg_ring, - fp, (unsigned long *)&pos, &sec_hdr); + fp, (unsigned long *)&pos, + &sec_hdr, ECNTRS_LOG_HDR, LOG_DUMP_SECTION_ECNTRS); } #endif /* EWP_ECNTRS_LOGGING */ -#ifdef BCMPCIE - /* append extended trap data to the file in case of traps */ - if (dhdp->dongle_trap_occured && - dhdp->extended_trap_data) { - /* write the section header first */ - vfs_write(fp, EXT_TRAP_LOG_HDR, strlen(EXT_TRAP_LOG_HDR), &pos); - sec_hdr.type = LOG_DUMP_SECTION_EXT_TRAP; - sec_hdr.length = BCMPCIE_EXT_TRAP_DATA_MAXLEN; - vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos); - /* write the log */ - ret = vfs_write(fp, (char *)dhdp->extended_trap_data, - BCMPCIE_EXT_TRAP_DATA_MAXLEN, &pos); - if (ret < 0) { - DHD_ERROR(("write file error of ext trap info," - " err = %d\n", ret)); - goto exit; +#ifdef DHD_STATUS_LOGGING + if (dhdp->statlog) { + /* write the statlog */ + len = dhd_get_status_log_len(NULL, dhdp); + if (len) { + if (dhd_print_status_log_data(NULL, dhdp, 0, fp, + len, &pos) < 0) { + goto exit2; + } } } +#endif /* DHD_STATUS_LOGGING */ + +#ifdef EWP_RTT_LOGGING + /* periodic flushing of ecounters is NOT supported */ + if (*type == DLD_BUF_TYPE_ALL && + logdump_rtt_enable && + dhdp->rtt_dbg_ring) { + dhd_log_dump_ring_to_file(dhdp, dhdp->rtt_dbg_ring, + fp, (unsigned long *)&pos, + &sec_hdr, RTT_LOG_HDR, LOG_DUMP_SECTION_RTT); + } +#endif /* EWP_RTT_LOGGING */ + +#ifdef BCMPCIE + len = dhd_get_ext_trap_len(NULL, dhdp); + if (len) { + if (dhd_print_ext_trap_data(NULL, dhdp, 0, fp, len, &pos) < 0) + goto exit2; + } #endif /* BCMPCIE */ #if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT) - /* if health check event was received, dump to file */ - if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) { - /* write the section header first */ - vfs_write(fp, HEALTH_CHK_LOG_HDR, strlen(HEALTH_CHK_LOG_HDR), &pos); - sec_hdr.type = LOG_DUMP_SECTION_HEALTH_CHK; - 
sec_hdr.length = HEALTH_CHK_BUF_SIZE; - vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos); - /* write the log */ - ret = vfs_write(fp, (char *)dhdp->health_chk_event_data, - HEALTH_CHK_BUF_SIZE, &pos); - if (ret < 0) { - DHD_ERROR(("write file error of health chk info," - " err = %d\n", ret)); - goto exit; - } + len = dhd_get_health_chk_len(NULL, dhdp); + if (len) { + if (dhd_print_ext_trap_data(NULL, dhdp, 0, fp, len, &pos) < 0) + goto exit2; } #endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */ -#ifdef DHD_DUMP_PCIE_RINGS - /* write the section header first */ - vfs_write(fp, FLOWRING_DUMP_HDR, strlen(FLOWRING_DUMP_HDR), &pos); - /* Write the ring summary */ - ret = vfs_write(fp, dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN - remain_len, &pos); - if (ret < 0) { - DHD_ERROR(("write file error of concise debug info," - " err = %d\n", ret)); - goto exit; - } - sec_hdr.type = LOG_DUMP_SECTION_FLOWRING; - sec_hdr.length = ((H2DRING_TXPOST_ITEMSIZE - * H2DRING_TXPOST_MAX_ITEM) - + (D2HRING_TXCMPLT_ITEMSIZE - * D2HRING_TXCMPLT_MAX_ITEM) - + (H2DRING_RXPOST_ITEMSIZE - * H2DRING_RXPOST_MAX_ITEM) - + (D2HRING_RXCMPLT_ITEMSIZE - * D2HRING_RXCMPLT_MAX_ITEM) - + (H2DRING_CTRL_SUB_ITEMSIZE - * H2DRING_CTRL_SUB_MAX_ITEM) - + (D2HRING_CTRL_CMPLT_ITEMSIZE - * D2HRING_CTRL_CMPLT_MAX_ITEM) - + (H2DRING_INFO_BUFPOST_ITEMSIZE - * H2DRING_DYNAMIC_INFO_MAX_ITEM) - + (D2HRING_INFO_BUFCMPLT_ITEMSIZE - * D2HRING_DYNAMIC_INFO_MAX_ITEM)); - vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos); - /* write the log */ - ret = dhd_d2h_h2d_ring_dump(dhdp, fp, (unsigned long *)&pos); - if (ret < 0) { - DHD_ERROR(("%s: error dumping ring data!\n", - __FUNCTION__)); - goto exit; + len = dhd_get_dhd_dump_len(NULL, dhdp); + if (len) { + if (dhd_print_dump_data(NULL, dhdp, 0, fp, len, &pos) < 0) + goto exit2; } -#endif /* DHD_DUMP_PCIE_RINGS */ - /* append the concise debug information to the file. 
- * This is the information which is seen - * when a 'dhd dump' iovar is fired - */ - if (dhdp->concise_dbg_buf) { - remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN); - if (remain_len <= 0) { - DHD_ERROR(("%s: error getting concise debug info !\n", - __FUNCTION__)); - goto exit; - } else { - /* write the section header first */ - vfs_write(fp, DHD_DUMP_LOG_HDR, strlen(DHD_DUMP_LOG_HDR), &pos); - sec_hdr.type = LOG_DUMP_SECTION_DHD_DUMP; - sec_hdr.length = CONCISE_DUMP_BUFLEN - remain_len; - vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos); - /* write the log */ - ret = vfs_write(fp, dhdp->concise_dbg_buf, - CONCISE_DUMP_BUFLEN - remain_len, &pos); - if (ret < 0) { - DHD_ERROR(("write file error of concise debug info," - " err = %d\n", ret)); - goto exit; - } - } + len = dhd_get_cookie_log_len(NULL, dhdp); + if (len) { + if (dhd_print_cookie_data(NULL, dhdp, 0, fp, len, &pos) < 0) + goto exit2; } - if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) { - ret = dhd_log_dump_cookie_to_file(dhdp, fp, (unsigned long *)&pos); - if (ret < 0) { - DHD_ERROR(("write file error of cooke info, err = %d\n", ret)); - goto exit; - } +#ifdef DHD_DUMP_PCIE_RINGS + len = dhd_get_flowring_len(NULL, dhdp); + if (len) { + if (dhd_print_flowring_data(NULL, dhdp, 0, fp, len, &pos) < 0) + goto exit2; } +#endif // endif if (dhdp->logdump_periodic_flush) { /* store the last position written to in the file for future use */ dhdp->last_file_posn = pos; } -exit: - MFREE(dhdp->osh, type, sizeof(*type)); +exit2: if (!IS_ERR(fp) && fp != NULL) { filp_close(fp, NULL); DHD_ERROR(("%s: Finished writing log dump to file - '%s' \n", __FUNCTION__, dump_path)); } set_fs(old_fs); +exit1: + if (type) { + MFREE(dhdp->osh, type, sizeof(*type)); + } DHD_GENERAL_LOCK(dhdp, flags); DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp); dhd_os_busbusy_wake(dhdp); @@ -19360,38 +18540,50 @@ exit: } #endif /* DHD_LOG_DUMP */ -#ifdef BCMASSERT_LOG -#define ASSERTINFO 
"/data/misc/wifi/.assert.info" -void dhd_get_assert_info(dhd_pub_t *dhd) +/* This function writes data to the file pointed by fp, OR + * copies data to the user buffer sent by upper layer(HAL). + */ +int +dhd_export_debug_data(void *mem_buf, void *fp, const void *user_buf, int buf_len, void *pos) { - struct file *fp = NULL; - char *filepath = ASSERTINFO; - int mem_val = -1; + int ret = BCME_OK; - /* - * Read assert info from the file - * 0: Trigger Kernel crash by panic() - * 1: Print out the logs and don't trigger Kernel panic. (default) - * 2: Trigger Kernel crash by BUG() - * File doesn't exist: Keep default value (1). - */ - fp = filp_open(filepath, O_RDONLY, 0); - if (IS_ERR(fp)) { - DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); - } else { - int ret = compat_kernel_read(fp, 0, (char *)&mem_val, 4); + if (fp) { + ret = compat_vfs_write(fp, mem_buf, buf_len, (loff_t *)pos); if (ret < 0) { - DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret)); - } else { - mem_val = bcm_atoi((char *)&mem_val); - DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val)); + DHD_ERROR(("write file error, err = %d\n", ret)); + goto exit; } - filp_close(fp, NULL); + } else { +#ifdef CONFIG_COMPAT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)) + if (in_compat_syscall()) { +#else + if (is_compat_task()) { +#endif /* LINUX_VER >= 4.6 */ + void * usr_ptr = compat_ptr((uintptr_t) user_buf); + ret = copy_to_user((void *)((uintptr_t)usr_ptr + (*(int *)pos)), + mem_buf, buf_len); + if (ret) { + DHD_ERROR(("failed to copy into user buffer : %d\n", ret)); + goto exit; + } + } + else +#endif /* CONFIG_COMPAT */ + { + ret = copy_to_user((void *)((uintptr_t)user_buf + (*(int *)pos)), + mem_buf, buf_len); + if (ret) { + DHD_ERROR(("failed to copy into user buffer : %d\n", ret)); + goto exit; + } + } + (*(int *)pos) += buf_len; } - /* By default. set to 0, Kernel Panic */ - g_assert_type = (mem_val >= 0) ? 
mem_val : 0; +exit: + return ret; } -#endif /* BCMASSERT_LOG */ /* * This call is to get the memdump size so that, @@ -19436,9 +18628,7 @@ dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size) orig_len = *size; if (dhdp->soc_ram) { if (orig_len >= dhdp->soc_ram_length) { - memcpy(*buf, dhdp->soc_ram, dhdp->soc_ram_length); - /* reset the storage of dump */ - memset(dhdp->soc_ram, 0, dhdp->soc_ram_length); + *buf = dhdp->soc_ram; *size = dhdp->soc_ram_length; } else { ret = BCME_BUFTOOSHORT; @@ -19474,6 +18664,49 @@ dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size return BCME_OK; } +#ifdef DNGL_AXI_ERROR_LOGGING +int +dhd_os_get_axi_error_dump(void *dev, const void *user_buf, uint32 len) +{ + int ret = BCME_OK; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + dhd_pub_t *dhdp = &dhd->pub; + loff_t pos = 0; + if (user_buf == NULL) { + DHD_ERROR(("%s(): user buffer is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + ret = dhd_export_debug_data((char *)dhdp->axi_err_dump, + NULL, user_buf, sizeof(dhd_axi_error_dump_t), &pos); + + if (ret < 0) { + DHD_ERROR(("%s(): fail to dump pktlog, err = %d\n", __FUNCTION__, ret)); + return ret; + } + return ret; +} + +int +dhd_os_get_axi_error_dump_size(struct net_device *dev) +{ + int size = -1; + + size = sizeof(dhd_axi_error_dump_t); + if (size < 0) { + DHD_ERROR(("%s(): fail to get axi error size, err = %d\n", __FUNCTION__, size)); + } + return size; +} + +void +dhd_os_get_axi_error_filename(struct net_device *dev, char *dump_path, int len) +{ + snprintf(dump_path, len, "%s", + DHD_COMMON_DUMP_PATH DHD_DUMP_AXI_ERROR_FILENAME); +} +#endif /* DNGL_AXI_ERROR_LOGGING */ + bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac) { return dhd_find_sta(dhdp, bssidx, mac) ? 
TRUE : FALSE; @@ -19801,6 +19034,12 @@ void custom_rps_map_clear(struct netdev_rx_queue *queue) #endif // endif #if defined(ARGOS_NOTIFY_CB) + +static int argos_status_notifier_wifi_cb(struct notifier_block *notifier, + unsigned long speed, void *v); +static int argos_status_notifier_p2p_cb(struct notifier_block *notifier, + unsigned long speed, void *v); + int argos_register_notifier_init(struct net_device *net) { @@ -19896,19 +19135,6 @@ argos_status_notifier_wifi_cb(struct notifier_block *notifier, if (dhdp == NULL || !dhdp->up) { goto exit; } -#if defined(ARGOS_NOTIFY_CB) - if (speed > PCIE_IRQ_AFFINITY_THRESHOLD) { - if (dhdpcie_get_pcieirq(dhd->pub.bus, &pcie_irq)) { - DHD_ERROR(("%s, Failed to get PCIe IRQ\n", __FUNCTION__)); - } else { - DHD_ERROR(("%s, PCIe IRQ:%u set Core %d\n", - __FUNCTION__, pcie_irq, PCIE_IRQ_CPU_CORE)); - irq_set_affinity(pcie_irq, cpumask_of(PCIE_IRQ_CPU_CORE)); - } - } else { - dhd_irq_set_affinity(dhdp); - } -#endif /* ARGOS_NOTIFY_CB */ /* Check if reported TPut value is more than threshold value */ if (speed > RPS_TPUT_THRESHOLD) { if (argos_rps_ctrl_data.argos_rps_cpus_enabled == 0) { @@ -20039,125 +19265,13 @@ dhd_linux_get_primary_netdev(dhd_pub_t *dhdp) return NULL; } -#ifdef DHD_DHCP_DUMP -static void -dhd_dhcp_dump(char *ifname, uint8 *pktdata, bool tx) -{ - struct bootp_fmt *b = (struct bootp_fmt *) &pktdata[ETHER_HDR_LEN]; - struct iphdr *h = &b->ip_header; - uint8 *ptr, *opt, *end = (uint8 *) b + ntohs(b->ip_header.tot_len); - int dhcp_type = 0, len, opt_len; - - /* check IP header */ - if (h->ihl != 5 || h->version != 4 || h->protocol != IPPROTO_UDP) { - return; - } - - /* check UDP port for bootp (67, 68) */ - if (b->udp_header.source != htons(67) && b->udp_header.source != htons(68) && - b->udp_header.dest != htons(67) && b->udp_header.dest != htons(68)) { - return; - } - - /* check header length */ - if (ntohs(h->tot_len) < ntohs(b->udp_header.len) + sizeof(struct iphdr)) { - return; - } - - len = 
ntohs(b->udp_header.len) - sizeof(struct udphdr); - opt_len = len - - (sizeof(*b) - sizeof(struct iphdr) - sizeof(struct udphdr) - sizeof(b->options)); - - /* parse bootp options */ - if (opt_len >= 4 && !memcmp(b->options, bootp_magic_cookie, 4)) { - ptr = &b->options[4]; - while (ptr < end && *ptr != 0xff) { - opt = ptr++; - if (*opt == 0) { - continue; - } - ptr += *ptr + 1; - if (ptr >= end) { - break; - } - /* 53 is dhcp type */ - if (*opt == 53) { - if (opt[1]) { - dhcp_type = opt[2]; - DHD_ERROR(("DHCP[%s] - %s [%s] [%s]\n", - ifname, dhcp_types[dhcp_type], - tx ? "TX" : "RX", dhcp_ops[b->op])); - break; - } - } - } - } -} -#endif /* DHD_DHCP_DUMP */ - -#ifdef DHD_ICMP_DUMP -static void -dhd_icmp_dump(char *ifname, uint8 *pktdata, bool tx) -{ - uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN]; - struct iphdr *iph = (struct iphdr *)pkt; - struct icmphdr *icmph; - - /* check IP header */ - if (iph->ihl != 5 || iph->version != 4 || iph->protocol != IP_PROT_ICMP) { - return; - } - - icmph = (struct icmphdr *)((uint8 *)pkt + sizeof(struct iphdr)); - if (icmph->type == ICMP_ECHO) { - DHD_ERROR_MEM(("PING REQUEST[%s] [%s] : SEQNUM=%d\n", - ifname, tx ? "TX" : "RX", ntoh16(icmph->un.echo.sequence))); - } else if (icmph->type == ICMP_ECHOREPLY) { - DHD_ERROR_MEM(("PING REPLY[%s] [%s] : SEQNUM=%d\n", - ifname, tx ? "TX" : "RX", ntoh16(icmph->un.echo.sequence))); - } else { - DHD_ERROR_MEM(("ICMP [%s] [%s] : TYPE=%d, CODE=%d\n", - ifname, tx ? 
"TX" : "RX", icmph->type, icmph->code)); - } -} -#endif /* DHD_ICMP_DUMP */ - -#ifdef SHOW_LOGTRACE -void -dhd_get_read_buf_ptr(dhd_pub_t *dhd_pub, trace_buf_info_t *trace_buf_info) -{ - dhd_dbg_ring_status_t ring_status; - uint32 rlen = 0; -#if defined(DEBUGABILITY) - rlen = dhd_dbg_pull_single_from_ring(dhd_pub, FW_VERBOSE_RING_ID, trace_buf_info->buf, - TRACE_LOG_BUF_MAX_SIZE, TRUE); -#elif defined(EWP_ECNTRS_LOGGING) - rlen = dhd_dbg_ring_pull_single(dhd_pub->ecntr_dbg_ring, trace_buf_info->buf, - TRACE_LOG_BUF_MAX_SIZE, TRUE); -#else - ASSERT(0); -#endif /* DEBUGABILITY */ - - trace_buf_info->size = rlen; - trace_buf_info->availability = NEXT_BUF_NOT_AVAIL; - if (rlen == 0) { - trace_buf_info->availability = BUF_NOT_AVAILABLE; - return; - } - dhd_dbg_get_ring_status(dhd_pub, FW_VERBOSE_RING_ID, &ring_status); - if (ring_status.written_bytes != ring_status.read_bytes) { - trace_buf_info->availability = NEXT_BUF_AVAIL; - } -} -#endif /* SHOW_LOGTRACE */ - -bool +fw_download_status_t dhd_fw_download_status(dhd_pub_t * dhd_pub) { - return dhd_pub->fw_download_done; + return dhd_pub->fw_download_status; } -int +static int dhd_create_to_notifier_skt(void) { #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) @@ -20207,84 +19321,41 @@ dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size) { struct nlmsghdr *nlh; struct sk_buff *skb_out; + int ret = BCME_ERROR; BCM_REFERENCE(skb); if (sender_pid == 0) { DHD_INFO(("Invalid PID 0\n")); - return -1; + skb_out = NULL; + goto err; } if ((skb_out = nlmsg_new(size, 0)) == NULL) { DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__)); - return -1; + ret = BCME_NOMEM; + goto err; } nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, size, 0); - NETLINK_CB(skb_out).dst_group = 0; /* Unicast */ - memcpy(nlmsg_data(nlh), (char *)data, size); - - if ((nlmsg_unicast(nl_to_event_sk, skb_out, sender_pid)) < 0) { - DHD_INFO(("Error sending message\n")); - } - return 0; -} - -static ssize_t -show_enable_ecounter(struct dhd_info *dev, 
char *buf) -{ - ssize_t ret = 0; - unsigned long onoff; - - onoff = enable_ecounter; - ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", - onoff); - return ret; -} - -static ssize_t -ecounter_onoff(struct dhd_info *dev, const char *buf, size_t count) -{ - unsigned long onoff; - dhd_info_t *dhd = (dhd_info_t *)dev; - dhd_pub_t *dhdp; - - if (!dhd) { - DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); - return count; - } - dhdp = &dhd->pub; - if (!FW_SUPPORTED(dhdp, ecounters)) { - DHD_ERROR(("%s: ecounters not supported by FW\n", __FUNCTION__)); - return count; - } - - onoff = bcm_strtoul(buf, NULL, 10); - - sscanf(buf, "%lu", &onoff); - if (onoff != 0 && onoff != 1) { - return -EINVAL; + if (nlh == NULL) { + DHD_ERROR(("%s: nlmsg_put failed\n", __FUNCTION__)); + goto err; } + NETLINK_CB(skb_out).dst_group = 0; /* Unicast */ + (void)memcpy_s(nlmsg_data(nlh), size, (char *)data, size); - if (enable_ecounter == onoff) { - DHD_ERROR(("%s: ecounters already %d\n", __FUNCTION__, enable_ecounter)); - return count; + if ((ret = nlmsg_unicast(nl_to_event_sk, skb_out, sender_pid)) < 0) { + DHD_ERROR(("Error sending message, ret:%d\n", ret)); + /* skb is already freed inside nlmsg_unicast() on error case */ + /* explicitly making skb_out to NULL to avoid double free */ + skb_out = NULL; + goto err; } - - enable_ecounter = onoff; - if (enable_ecounter) { - if (dhd_start_ecounters(dhdp) != BCME_OK) { - DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__)); - } else if (dhd_start_event_ecounters(dhdp) != BCME_OK) { - DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__)); - } - } else { - if (dhd_stop_ecounters(dhdp) != BCME_OK) { - DHD_ERROR(("%s Ecounters stop failed\n", __FUNCTION__)); - } else if (dhd_stop_event_ecounters(dhdp) != BCME_OK) { - DHD_ERROR(("%s Event_Ecounters stop failed\n", __FUNCTION__)); - } + return BCME_OK; +err: + if (skb_out) { + nlmsg_free(skb_out); } - - return count; + return ret; } static void @@ -20307,6 +19378,12 @@ 
dhd_log_dump_ecntr_enabled(void) return (bool)logdump_ecntr_enable; } +bool +dhd_log_dump_rtt_enabled(void) +{ + return (bool)logdump_rtt_enable; +} + void dhd_log_dump_init(dhd_pub_t *dhd) { @@ -20408,7 +19485,7 @@ dhd_log_dump_init(dhd_pub_t *dhd) ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring; ret = dhd_dbg_ring_init(dhd, ring, ECNTR_RING_ID, ECNTR_RING_NAME, LOG_DUMP_ECNTRS_MAX_BUFSIZE, - bufptr); + bufptr, TRUE); if (ret != BCME_OK) { DHD_ERROR(("%s: unable to init ecntr ring !\n", __FUNCTION__)); @@ -20422,6 +19499,29 @@ dhd_log_dump_init(dhd_pub_t *dhd) bufptr += LOG_DUMP_ECNTRS_MAX_BUFSIZE; #endif /* EWP_ECNTRS_LOGGING */ +#ifdef EWP_RTT_LOGGING + /* now use the rest of the pre-alloc'd memory for filter and ecounter log */ + dhd->rtt_dbg_ring = MALLOCZ(dhd->osh, sizeof(dhd_dbg_ring_t)); + if (!dhd->rtt_dbg_ring) + goto fail; + + ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring; + ret = dhd_dbg_ring_init(dhd, ring, RTT_RING_ID, + RTT_RING_NAME, LOG_DUMP_RTT_MAX_BUFSIZE, + bufptr, TRUE); + if (ret != BCME_OK) { + DHD_ERROR(("%s: unable to init ecntr ring !\n", + __FUNCTION__)); + goto fail; + } + DHD_DBG_RING_LOCK(ring->lock, flags); + ring->state = RING_ACTIVE; + ring->threshold = 0; + DHD_DBG_RING_UNLOCK(ring->lock, flags); + + bufptr += LOG_DUMP_RTT_MAX_BUFSIZE; +#endif /* EWP_RTT_LOGGING */ + /* Concise buffer is used as intermediate buffer for following purposes * a) pull ecounters records temporarily before * writing it to file @@ -20472,6 +19572,17 @@ fail: } #endif /* EWP_ECNTRS_LOGGING */ +#ifdef EWP_RTT_LOGGING + if (dhd->rtt_dbg_ring) { + ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring; + dhd_dbg_ring_deinit(dhd, ring); + ring->ring_buf = NULL; + ring->ring_size = 0; + MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t)); + dhd->rtt_dbg_ring = NULL; + } +#endif /* EWP_RTT_LOGGING */ + #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) if (prealloc_buf) { DHD_OS_PREFREE(dhd, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE); @@ -20530,6 +19641,17 @@ 
dhd_log_dump_deinit(dhd_pub_t *dhd) } #endif /* EWP_ECNTRS_LOGGING */ +#ifdef EWP_RTT_LOGGING + if (dhd->rtt_dbg_ring) { + ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring; + dhd_dbg_ring_deinit(dhd, ring); + ring->ring_buf = NULL; + ring->ring_size = 0; + MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t)); + dhd->rtt_dbg_ring = NULL; + } +#endif /* EWP_RTT_LOGGING */ + /* 'general' buffer points to start of the pre-alloc'd memory */ dld_buf = &g_dld_buf[DLD_BUF_TYPE_GENERAL]; dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL]; @@ -20664,380 +19786,6 @@ dhd_flush_rx_tx_wq(dhd_pub_t *dhdp) } #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ -#ifdef DHD_LB_TXP -#define DHD_LB_TXBOUND 64 -/* - * Function that performs the TX processing on a given CPU - */ -bool -dhd_lb_tx_process(dhd_info_t *dhd) -{ - struct sk_buff *skb; - int cnt = 0; - struct net_device *net; - int ifidx; - bool resched = FALSE; - - DHD_TRACE(("%s(): TX Processing \r\n", __FUNCTION__)); - if (dhd == NULL) { - DHD_ERROR((" Null pointer DHD \r\n")); - return resched; - } - - BCM_REFERENCE(net); - - DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt); - - /* Base Loop to perform the actual Tx */ - do { - skb = skb_dequeue(&dhd->tx_pend_queue); - if (skb == NULL) { - DHD_TRACE(("Dequeued a Null Packet \r\n")); - break; - } - cnt++; - - net = DHD_LB_TX_PKTTAG_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb)); - ifidx = DHD_LB_TX_PKTTAG_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb)); - - DHD_TRACE(("Processing skb %p for net %p index %d \r\n", skb, - net, ifidx)); - - __dhd_sendpkt(&dhd->pub, ifidx, skb); - - if (cnt >= DHD_LB_TXBOUND) { - resched = TRUE; - break; - } - - } while (1); - - DHD_INFO(("%s(): Processed %d packets \r\n", __FUNCTION__, cnt)); - - return resched; -} - -void -dhd_lb_tx_handler(unsigned long data) -{ - dhd_info_t *dhd = (dhd_info_t *)data; - - if (dhd_lb_tx_process(dhd)) { - dhd_tasklet_schedule(&dhd->tx_tasklet); - } -} - -#endif /* DHD_LB_TXP */ - -/* 
---------------------------------------------------------------------------- - * Infrastructure code for sysfs interface support for DHD - * - * What is sysfs interface? - * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt - * - * Why sysfs interface? - * This is the Linux standard way of changing/configuring Run Time parameters - * for a driver. We can use this interface to control "linux" specific driver - * parameters. - * - * ----------------------------------------------------------------------------- - */ - -#include -#include - -#if defined(DHD_TRACE_WAKE_LOCK) - -/* Function to show the history buffer */ -static ssize_t -show_wklock_trace(struct dhd_info *dev, char *buf) -{ - ssize_t ret = 0; - dhd_info_t *dhd = (dhd_info_t *)dev; - - buf[ret] = '\n'; - buf[ret+1] = 0; - - dhd_wk_lock_stats_dump(&dhd->pub); - return ret+1; -} - -/* Function to enable/disable wakelock trace */ -static ssize_t -wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count) -{ - unsigned long onoff; - unsigned long flags; - dhd_info_t *dhd = (dhd_info_t *)dev; - - onoff = bcm_strtoul(buf, NULL, 10); - if (onoff != 0 && onoff != 1) { - return -EINVAL; - } - - spin_lock_irqsave(&dhd->wakelock_spinlock, flags); - trace_wklock_onoff = onoff; - spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); - if (trace_wklock_onoff) { - printk("ENABLE WAKLOCK TRACE\n"); - } else { - printk("DISABLE WAKELOCK TRACE\n"); - } - - return (ssize_t)(onoff+1); -} -#endif /* DHD_TRACE_WAKE_LOCK */ - -#if defined(DHD_LB_TXP) -static ssize_t -show_lbtxp(struct dhd_info *dev, char *buf) -{ - ssize_t ret = 0; - unsigned long onoff; - dhd_info_t *dhd = (dhd_info_t *)dev; - - onoff = atomic_read(&dhd->lb_txp_active); - ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", - onoff); - return ret; -} - -static ssize_t -lbtxp_onoff(struct dhd_info *dev, const char *buf, size_t count) -{ - unsigned long onoff; - dhd_info_t *dhd = (dhd_info_t *)dev; - int i; - - onoff = bcm_strtoul(buf, NULL, 
10); - - sscanf(buf, "%lu", &onoff); - if (onoff != 0 && onoff != 1) { - return -EINVAL; - } - atomic_set(&dhd->lb_txp_active, onoff); - - /* Since the scheme is changed clear the counters */ - for (i = 0; i < NR_CPUS; i++) { - DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]); - DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]); - } - - return count; -} - -#endif /* DHD_LB_TXP */ - -#ifdef DHD_LOG_DUMP -static ssize_t -show_logdump_periodic_flush(struct dhd_info *dev, char *buf) -{ - ssize_t ret = 0; - unsigned long val; - - val = logdump_periodic_flush; - ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", val); - return ret; -} - -static ssize_t -logdump_periodic_flush_onoff(struct dhd_info *dev, const char *buf, size_t count) -{ - unsigned long val; - - val = bcm_strtoul(buf, NULL, 10); - - sscanf(buf, "%lu", &val); - if (val != 0 && val != 1) { - return -EINVAL; - } - logdump_periodic_flush = val; - return count; -} -static ssize_t -show_logdump_ecntr(struct dhd_info *dev, char *buf) -{ - ssize_t ret = 0; - unsigned long val; - - val = logdump_ecntr_enable; - ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", val); - return ret; -} - -static ssize_t -logdump_ecntr_onoff(struct dhd_info *dev, const char *buf, size_t count) -{ - unsigned long val; - - val = bcm_strtoul(buf, NULL, 10); - - sscanf(buf, "%lu", &val); - if (val != 0 && val != 1) { - return -EINVAL; - } - logdump_ecntr_enable = val; - return count; -} - -#endif /* DHD_LOG_DUMP */ -/* - * Generic Attribute Structure for DHD. - * If we have to add a new sysfs entry under /sys/bcm-dhd/, we have - * to instantiate an object of type dhd_attr, populate it with - * the required show/store functions (ex:- dhd_attr_cpumask_primary) - * and add the object to default_attrs[] array, that gets registered - * to the kobject of dhd (named bcm-dhd). 
- */ - -struct dhd_attr { - struct attribute attr; - ssize_t(*show)(struct dhd_info *, char *); - ssize_t(*store)(struct dhd_info *, const char *, size_t count); -}; - -#if defined(DHD_TRACE_WAKE_LOCK) -static struct dhd_attr dhd_attr_wklock = - __ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff); -#endif /* defined(DHD_TRACE_WAKE_LOCK */ - -#if defined(DHD_LB_TXP) -static struct dhd_attr dhd_attr_lbtxp = - __ATTR(lbtxp, 0660, show_lbtxp, lbtxp_onoff); -#endif /* DHD_LB_TXP */ -#ifdef DHD_LOG_DUMP -static struct dhd_attr dhd_attr_logdump_periodic_flush = - __ATTR(logdump_periodic_flush, 0660, show_logdump_periodic_flush, - logdump_periodic_flush_onoff); -static struct dhd_attr dhd_attr_logdump_ecntr = - __ATTR(logdump_ecntr_enable, 0660, show_logdump_ecntr, - logdump_ecntr_onoff); -#endif /* DHD_LOG_DUMP */ - -static struct dhd_attr dhd_attr_ecounters = - __ATTR(ecounters, 0660, show_enable_ecounter, ecounter_onoff); - -/* Attribute object that gets registered with "bcm-dhd" kobject tree */ -static struct attribute *default_attrs[] = { -#if defined(DHD_TRACE_WAKE_LOCK) - &dhd_attr_wklock.attr, -#endif // endif -#if defined(DHD_LB_TXP) - &dhd_attr_lbtxp.attr, -#endif /* DHD_LB_TXP */ -#ifdef DHD_LOG_DUMP - &dhd_attr_logdump_periodic_flush.attr, - &dhd_attr_logdump_ecntr.attr, -#endif // endif - &dhd_attr_ecounters.attr, - NULL -}; - -#define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj) -#define to_attr(a) container_of(a, struct dhd_attr, attr) - -/* - * bcm-dhd kobject show function, the "attr" attribute specifices to which - * node under "bcm-dhd" the show function is called. 
- */ -static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf) -{ -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif - dhd_info_t *dhd = to_dhd(kobj); - struct dhd_attr *d_attr = to_attr(attr); -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif - int ret; - - if (d_attr->show) - ret = d_attr->show(dhd, buf); - else - ret = -EIO; - - return ret; -} - -/* - * bcm-dhd kobject show function, the "attr" attribute specifices to which - * node under "bcm-dhd" the store function is called. - */ -static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr, - const char *buf, size_t count) -{ -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif - dhd_info_t *dhd = to_dhd(kobj); - struct dhd_attr *d_attr = to_attr(attr); -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif - int ret; - - if (d_attr->store) - ret = d_attr->store(dhd, buf, count); - else - ret = -EIO; - - return ret; - -} - -static struct sysfs_ops dhd_sysfs_ops = { - .show = dhd_show, - .store = dhd_store, -}; - -static struct kobj_type dhd_ktype = { - .sysfs_ops = &dhd_sysfs_ops, - .default_attrs = default_attrs, -}; - -/* Create a kobject and attach to sysfs interface */ -static int dhd_sysfs_init(dhd_info_t *dhd) -{ - int ret = -1; - - if (dhd == NULL) { - DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__)); - return ret; - } - - /* Initialize the kobject */ - ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "bcm-dhd"); - if (ret) { - kobject_put(&dhd->dhd_kobj); - DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__)); - return ret; - } - - /* - * We are always responsible for sending the uevent that the kobject - * was added to the system. 
- */ - kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD); - - return ret; -} - -/* Done with the kobject and detach the sysfs interface */ -static void dhd_sysfs_exit(dhd_info_t *dhd) -{ - if (dhd == NULL) { - DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__)); - return; - } - - /* Releae the kobject */ - if (dhd->dhd_kobj.state_initialized) - kobject_put(&dhd->dhd_kobj); -} - #ifdef DHD_DEBUG_UART bool dhd_debug_uart_is_running(struct net_device *dev) @@ -21070,7 +19818,8 @@ dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd) if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON) #endif // endif { - if (dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN || + if (dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_RC_DETECT || + dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_EP_DETECT || #ifdef DHD_FW_COREDUMP dhdp->memdump_success == FALSE || #endif // endif @@ -21151,11 +19900,13 @@ dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap) } #endif /* PCIE_FULL_DONGLE */ /* ---------------------------- End of sysfs implementation ------------------------------------- */ + #ifdef SET_PCIE_IRQ_CPU_CORE void -dhd_set_irq_cpucore(dhd_pub_t *dhdp, int set) +dhd_set_irq_cpucore(dhd_pub_t *dhdp, int affinity_cmd) { - unsigned int irq; + unsigned int pcie_irq = 0; + if (!dhdp) { DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__)); return; @@ -21166,11 +19917,40 @@ dhd_set_irq_cpucore(dhd_pub_t *dhdp, int set) return; } - if (dhdpcie_get_pcieirq(dhdp->bus, &irq)) { + DHD_ERROR(("Enter %s, PCIe affinity cmd=0x%x\n", __FUNCTION__, affinity_cmd)); + + if (dhdpcie_get_pcieirq(dhdp->bus, &pcie_irq)) { + DHD_ERROR(("%s : Can't get interrupt number\n", __FUNCTION__)); return; } - - set_irq_cpucore(irq, set); + + /* + irq_set_affinity() assign dedicated CPU core PCIe interrupt + If dedicated CPU core is not on-line, + PCIe interrupt scheduled on CPU core 0 + */ + switch (affinity_cmd) { + case PCIE_IRQ_AFFINITY_OFF: + break; + case PCIE_IRQ_AFFINITY_BIG_CORE_ANY: +#if defined(CONFIG_ARCH_SM8150) 
+ irq_set_affinity_hint(pcie_irq, dhdp->info->cpumask_primary); + irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary); +#else /* Exynos and Others */ + irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary); +#endif /* CONFIG_ARCH_SM8150 */ + break; +#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) + case PCIE_IRQ_AFFINITY_BIG_CORE_EXYNOS: + DHD_ERROR(("%s, PCIe IRQ:%u set Core %d\n", + __FUNCTION__, pcie_irq, PCIE_IRQ_CPU_CORE)); + irq_set_affinity(pcie_irq, cpumask_of(PCIE_IRQ_CPU_CORE)); + break; +#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 */ + default: + DHD_ERROR(("%s, Unknown PCIe affinity cmd=0x%x\n", + __FUNCTION__, affinity_cmd)); + } } #endif /* SET_PCIE_IRQ_CPU_CORE */ @@ -21193,7 +19973,7 @@ dhd_write_file(const char *filepath, char *buf, int buf_len) ret = BCME_ERROR; } else { if (fp->f_mode & FMODE_WRITE) { - ret = vfs_write(fp, buf, buf_len, &fp->f_pos); + ret = compat_vfs_write(fp, buf, buf_len, &fp->f_pos); if (ret < 0) { DHD_ERROR(("%s: Couldn't write file '%s'\n", __FUNCTION__, filepath)); @@ -21537,7 +20317,15 @@ int dhd_get_random_bytes(uint8 *buf, uint len) { #ifdef BCMPCIE +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)) + int rndlen = get_random_bytes_arch(buf, len); + if (rndlen != len) { + bzero(buf, len); + get_random_bytes(buf, len); + } +#else get_random_bytes_arch(buf, len); +#endif // endif #endif /* BCMPCIE */ return BCME_OK; } @@ -21563,7 +20351,7 @@ dhd_error_recovery(void *handle, void *event_info, u8 event) return; } - ret = dhd_bus_perform_flr_with_quiesce(dhdp); + ret = dhd_bus_perform_flr_with_quiesce(dhdp, dhdp->bus, FALSE); if (ret != BCME_DNGL_DEVRESET) { DHD_ERROR(("%s: dhd_bus_perform_flr_with_quiesce failed with ret: %d," "toggle REG_ON\n", __FUNCTION__, ret)); @@ -21586,12 +20374,12 @@ dhd_schedule_reset(dhd_pub_t *dhdp) void get_debug_dump_time(char *str) { - struct timeval curtime; + struct osl_timespec curtime; unsigned long local_time; struct rtc_time tm; if (!strlen(str)) 
{ - do_gettimeofday(&curtime); + osl_do_gettimeofday(&curtime); local_time = (u32)(curtime.tv_sec - (sys_tz.tz_minuteswest * DHD_LOG_DUMP_TS_MULTIPLIER_VALUE)); rtc_time_to_tm(local_time, &tm); @@ -21608,81 +20396,6 @@ clear_debug_dump_time(char *str) memset(str, 0, DEBUG_DUMP_TIME_BUF_LEN); } -#define KIRQ_PRINT_BUF_LEN 256 - -void -dhd_print_kirqstats(dhd_pub_t *dhd, unsigned int irq_num) -{ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) - unsigned long flags = 0; - struct irq_desc *desc; - int i; /* cpu iterator */ - struct bcmstrbuf strbuf; - char tmp_buf[KIRQ_PRINT_BUF_LEN]; - - desc = irq_to_desc(irq_num); - if (!desc) { - DHD_ERROR(("%s : irqdesc is not found \n", __FUNCTION__)); - return; - } - bcm_binit(&strbuf, tmp_buf, KIRQ_PRINT_BUF_LEN); - raw_spin_lock_irqsave(&desc->lock, flags); - bcm_bprintf(&strbuf, "dhd irq %u:", irq_num); - for_each_online_cpu(i) - bcm_bprintf(&strbuf, "%10u ", - desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, i) : 0); - if (desc->irq_data.chip) { - if (desc->irq_data.chip->name) - bcm_bprintf(&strbuf, " %8s", desc->irq_data.chip->name); - else - bcm_bprintf(&strbuf, " %8s", "-"); - } else { - bcm_bprintf(&strbuf, " %8s", "None"); - } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)) - if (desc->irq_data.domain) - bcm_bprintf(&strbuf, " %d", (int)desc->irq_data.hwirq); -#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL - bcm_bprintf(&strbuf, " %-8s", irqd_is_level_type(&desc->irq_data) ? 
"Level" : "Edge"); -#endif // endif -#endif /* LINUX VERSION > 3.1.0 */ - - if (desc->name) - bcm_bprintf(&strbuf, "-%-8s", desc->name); - - DHD_ERROR(("%s\n", strbuf.origbuf)); - raw_spin_unlock_irqrestore(&desc->lock, flags); -#endif /* LINUX VERSION > 2.6.28 */ -} - -void -dhd_show_kirqstats(dhd_pub_t *dhd) -{ - unsigned int irq = -1; -#ifdef BCMPCIE - dhdpcie_get_pcieirq(dhd->bus, &irq); -#endif /* BCMPCIE */ -#ifdef BCMSDIO - irq = ((wifi_adapter_info_t *)dhd->info->adapter)->irq_num; -#endif /* BCMSDIO */ - if (irq != -1) { -#ifdef BCMPCIE - DHD_ERROR(("DUMP data kernel irq stats : \n")); -#endif /* BCMPCIE */ -#ifdef BCMSDIO - DHD_ERROR(("DUMP data/host wakeup kernel irq stats : \n")); -#endif /* BCMSDIO */ - dhd_print_kirqstats(dhd, irq); - } -#ifdef BCMPCIE_OOB_HOST_WAKE - irq = dhdpcie_get_oob_irq_num(dhd->bus); - if (irq) { - DHD_ERROR(("DUMP PCIE host wakeup kernel irq stats : \n")); - dhd_print_kirqstats(dhd, irq); - } -#endif /* BCMPCIE_OOB_HOST_WAKE */ -} - void dhd_print_tasklet_status(dhd_pub_t *dhd) { @@ -21715,6 +20428,11 @@ dhd_print_tasklet_status(dhd_pub_t *dhd) #define DHD_RING_MAGIC 0x20170910 #define DHD_RING_IDX_INVALID 0xffffffff +#define DHD_RING_SYNC_LOCK_INIT(osh) dhd_os_spin_lock_init(osh) +#define DHD_RING_SYNC_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock) +#define DHD_RING_SYNC_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_RING_SYNC_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags) + typedef struct { uint32 elem_size; uint32 elem_cnt; @@ -21729,12 +20447,28 @@ typedef struct { void *elem; } dhd_fixed_ring_info_t; +typedef struct { + uint32 elem_size; + uint32 elem_cnt; + uint32 idx; /* -1 : not started */ + uint32 rsvd; /* reserved for future use */ + + /* protected elements during serialization */ + atomic_t ring_locked; + /* check the overwriting */ + uint32 ring_overwrited; + + /* saved data elements */ + void *elem; +} dhd_singleidx_ring_info_t; + typedef struct { uint32 magic; uint32 type; - 
struct mutex ring_sync; /* pointer to mutex */ + void *ring_sync; /* spinlock for sync */ union { dhd_fixed_ring_info_t fixed; + dhd_singleidx_ring_info_t single; }; } dhd_ring_info_t; @@ -21745,7 +20479,8 @@ dhd_ring_get_hdr_size(void) } void * -dhd_ring_init(uint8 *buf, uint32 buf_size, uint32 elem_size, uint32 elem_cnt) +dhd_ring_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size, uint32 elem_size, + uint32 elem_cnt, uint32 type) { dhd_ring_info_t *ret_ring; @@ -21753,29 +20488,46 @@ dhd_ring_init(uint8 *buf, uint32 buf_size, uint32 elem_size, uint32 elem_cnt) DHD_RING_ERR(("NO RING BUFFER\n")); return NULL; } + if (buf_size < dhd_ring_get_hdr_size() + elem_size * elem_cnt) { DHD_RING_ERR(("RING SIZE IS TOO SMALL\n")); return NULL; } + if (type != DHD_RING_TYPE_FIXED && type != DHD_RING_TYPE_SINGLE_IDX) { + DHD_RING_ERR(("UNSUPPORTED RING TYPE\n")); + return NULL; + } + ret_ring = (dhd_ring_info_t *)buf; - ret_ring->type = DHD_RING_TYPE_FIXED; - mutex_init(&ret_ring->ring_sync); - ret_ring->fixed.read_idx = DHD_RING_IDX_INVALID; - ret_ring->fixed.write_idx = DHD_RING_IDX_INVALID; - ret_ring->fixed.lock_idx = DHD_RING_IDX_INVALID; - ret_ring->fixed.elem = buf + sizeof(dhd_ring_info_t); - ret_ring->fixed.elem_size = elem_size; - ret_ring->fixed.elem_cnt = elem_cnt; + ret_ring->type = type; + ret_ring->ring_sync = DHD_RING_SYNC_LOCK_INIT(dhdp->osh); ret_ring->magic = DHD_RING_MAGIC; + + if (type == DHD_RING_TYPE_FIXED) { + ret_ring->fixed.read_idx = DHD_RING_IDX_INVALID; + ret_ring->fixed.write_idx = DHD_RING_IDX_INVALID; + ret_ring->fixed.lock_idx = DHD_RING_IDX_INVALID; + ret_ring->fixed.elem = buf + sizeof(dhd_ring_info_t); + ret_ring->fixed.elem_size = elem_size; + ret_ring->fixed.elem_cnt = elem_cnt; + } else { + ret_ring->single.idx = DHD_RING_IDX_INVALID; + atomic_set(&ret_ring->single.ring_locked, 0); + ret_ring->single.ring_overwrited = 0; + ret_ring->single.rsvd = 0; + ret_ring->single.elem = buf + sizeof(dhd_ring_info_t); + ret_ring->single.elem_size = 
elem_size; + ret_ring->single.elem_cnt = elem_cnt; + } + return ret_ring; } void -dhd_ring_deinit(void *_ring) +dhd_ring_deinit(dhd_pub_t *dhdp, void *_ring) { dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; - dhd_fixed_ring_info_t *fixed; if (!ring) { return; } @@ -21784,15 +20536,66 @@ dhd_ring_deinit(void *_ring) return; } - mutex_destroy(&ring->ring_sync); - fixed = &ring->fixed; - memset(fixed->elem, 0, fixed->elem_size * fixed->elem_cnt); - fixed->elem_size = fixed->elem_cnt = 0; + if (ring->type != DHD_RING_TYPE_FIXED && + ring->type != DHD_RING_TYPE_SINGLE_IDX) { + return; + } + + DHD_RING_SYNC_LOCK_DEINIT(dhdp->osh, ring->ring_sync); + ring->ring_sync = NULL; + if (ring->type == DHD_RING_TYPE_FIXED) { + dhd_fixed_ring_info_t *fixed = &ring->fixed; + memset(fixed->elem, 0, fixed->elem_size * fixed->elem_cnt); + fixed->elem_size = fixed->elem_cnt = 0; + } else { + dhd_singleidx_ring_info_t *single = &ring->single; + memset(single->elem, 0, single->elem_size * single->elem_cnt); + single->elem_size = single->elem_cnt = 0; + } ring->type = 0; ring->magic = 0; - return; } +static inline uint32 +__dhd_ring_ptr2idx(void *ring, void *ptr, char *sig, uint32 type) +{ + uint32 diff; + uint32 ret_idx = (uint32)DHD_RING_IDX_INVALID; + uint32 elem_size, elem_cnt; + void *elem; + + if (type == DHD_RING_TYPE_FIXED) { + dhd_fixed_ring_info_t *fixed = (dhd_fixed_ring_info_t *)ring; + elem_size = fixed->elem_size; + elem_cnt = fixed->elem_cnt; + elem = fixed->elem; + } else if (type == DHD_RING_TYPE_SINGLE_IDX) { + dhd_singleidx_ring_info_t *single = (dhd_singleidx_ring_info_t *)ring; + elem_size = single->elem_size; + elem_cnt = single->elem_cnt; + elem = single->elem; + } else { + DHD_RING_ERR(("UNSUPPORTED RING TYPE %d\n", type)); + return ret_idx; + } + + if (ptr < elem) { + DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, elem)); + return ret_idx; + } + diff = (uint32)((uint8 *)ptr - (uint8 *)elem); + if (diff % elem_size != 0) { + 
DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, elem)); + return ret_idx; + } + ret_idx = diff / elem_size; + if (ret_idx >= elem_cnt) { + DHD_RING_ERR(("INVALID POINTER max:%d cur:%d\n", elem_cnt, ret_idx)); + } + return ret_idx; +} + +/* Sub functions for fixed ring */ /* get counts between two indexes of ring buffer (internal only) */ static inline int __dhd_fixed_ring_get_count(dhd_fixed_ring_info_t *ring, int start, int end) @@ -21874,30 +20677,8 @@ __dhd_fixed_ring_get_empty(dhd_fixed_ring_info_t *ring) return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx); } -static inline uint32 -__dhd_fixed_ring_ptr2idx(dhd_fixed_ring_info_t *ring, void *ptr, char *sig) -{ - uint32 diff; - uint32 ret_idx = (uint32)DHD_RING_IDX_INVALID; - - if (ptr < ring->elem) { - DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, ring->elem)); - return ret_idx; - } - diff = (uint32)((uint8 *)ptr - (uint8 *)ring->elem); - if (diff % ring->elem_size != 0) { - DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, ring->elem)); - return ret_idx; - } - ret_idx = diff / ring->elem_size; - if (ret_idx >= ring->elem_cnt) { - DHD_RING_ERR(("INVALID POINTER max:%d cur:%d\n", ring->elem_cnt, ret_idx)); - } - return ret_idx; -} - static inline void * -__dhd_fixed_ring_get_next(dhd_fixed_ring_info_t *ring, void *prev) +__dhd_fixed_ring_get_next(dhd_fixed_ring_info_t *ring, void *prev, uint32 type) { uint32 cur_idx; @@ -21906,7 +20687,7 @@ __dhd_fixed_ring_get_next(dhd_fixed_ring_info_t *ring, void *prev) return NULL; } - cur_idx = __dhd_fixed_ring_ptr2idx(ring, prev, "NEXT"); + cur_idx = __dhd_ring_ptr2idx(ring, prev, "NEXT", type); if (cur_idx >= ring->elem_cnt) { return NULL; } @@ -21921,7 +20702,7 @@ __dhd_fixed_ring_get_next(dhd_fixed_ring_info_t *ring, void *prev) } static inline void * -__dhd_fixed_ring_get_prev(dhd_fixed_ring_info_t *ring, void *prev) +__dhd_fixed_ring_get_prev(dhd_fixed_ring_info_t *ring, void *prev, uint32 type) { 
uint32 cur_idx; @@ -21929,7 +20710,7 @@ __dhd_fixed_ring_get_prev(dhd_fixed_ring_info_t *ring, void *prev) DHD_RING_ERR(("EMPTY RING\n")); return NULL; } - cur_idx = __dhd_fixed_ring_ptr2idx(ring, prev, "PREV"); + cur_idx = __dhd_ring_ptr2idx(ring, prev, "PREV", type); if (cur_idx >= ring->elem_cnt) { return NULL; } @@ -21943,7 +20724,7 @@ __dhd_fixed_ring_get_prev(dhd_fixed_ring_info_t *ring, void *prev) } static inline void -__dhd_fixed_ring_lock(dhd_fixed_ring_info_t *ring, void *first_ptr, void *last_ptr) +__dhd_fixed_ring_lock(dhd_fixed_ring_info_t *ring, void *first_ptr, void *last_ptr, uint32 type) { uint32 first_idx; uint32 last_idx; @@ -21956,7 +20737,7 @@ __dhd_fixed_ring_lock(dhd_fixed_ring_info_t *ring, void *first_ptr, void *last_p } if (first_ptr) { - first_idx = __dhd_fixed_ring_ptr2idx(ring, first_ptr, "LCK FIRST"); + first_idx = __dhd_ring_ptr2idx(ring, first_ptr, "LCK FIRST", type); if (first_idx >= ring->elem_cnt) { return; } @@ -21965,7 +20746,7 @@ __dhd_fixed_ring_lock(dhd_fixed_ring_info_t *ring, void *first_ptr, void *last_p } if (last_ptr) { - last_idx = __dhd_fixed_ring_ptr2idx(ring, last_ptr, "LCK LAST"); + last_idx = __dhd_ring_ptr2idx(ring, last_ptr, "LCK LAST", type); if (last_idx >= ring->elem_cnt) { return; } @@ -22071,23 +20852,170 @@ __dhd_fixed_ring_lock_free_first(dhd_fixed_ring_info_t *ring) return; } +static inline void +__dhd_fixed_ring_set_read_idx(dhd_fixed_ring_info_t *ring, uint32 idx) +{ + ring->read_idx = idx; +} + +static inline void +__dhd_fixed_ring_set_write_idx(dhd_fixed_ring_info_t *ring, uint32 idx) +{ + ring->write_idx = idx; +} + +static inline uint32 +__dhd_fixed_ring_get_read_idx(dhd_fixed_ring_info_t *ring) +{ + return ring->read_idx; +} + +static inline uint32 +__dhd_fixed_ring_get_write_idx(dhd_fixed_ring_info_t *ring) +{ + return ring->write_idx; +} + +/* Sub functions for single index ring */ +static inline void * +__dhd_singleidx_ring_get_first(dhd_singleidx_ring_info_t *ring) +{ + uint32 tmp_idx = 0; + + 
if (ring->idx == DHD_RING_IDX_INVALID) { + return NULL; + } + + if (ring->ring_overwrited) { + tmp_idx = (ring->idx + 1) % ring->elem_cnt; + } + + return (uint8 *)ring->elem + (ring->elem_size * tmp_idx); +} + +static inline void * +__dhd_singleidx_ring_get_last(dhd_singleidx_ring_info_t *ring) +{ + if (ring->idx == DHD_RING_IDX_INVALID) { + return NULL; + } + + return (uint8 *)ring->elem + (ring->elem_size * ring->idx); +} + +static inline void * +__dhd_singleidx_ring_get_empty(dhd_singleidx_ring_info_t *ring) +{ + if (ring->idx == DHD_RING_IDX_INVALID) { + ring->idx = 0; + return (uint8 *)ring->elem; + } + + /* check the lock is held */ + if (atomic_read(&ring->ring_locked)) { + return NULL; + } + + /* check the index rollover */ + if (!ring->ring_overwrited && ring->idx == (ring->elem_cnt - 1)) { + ring->ring_overwrited = 1; + } + + ring->idx = (ring->idx + 1) % ring->elem_cnt; + + return (uint8 *)ring->elem + (ring->elem_size * ring->idx); +} + +static inline void * +__dhd_singleidx_ring_get_next(dhd_singleidx_ring_info_t *ring, void *prev, uint32 type) +{ + uint32 cur_idx; + + if (ring->idx == DHD_RING_IDX_INVALID) { + DHD_RING_ERR(("EMPTY RING\n")); + return NULL; + } + + cur_idx = __dhd_ring_ptr2idx(ring, prev, "NEXT", type); + if (cur_idx >= ring->elem_cnt) { + return NULL; + } + + if (cur_idx == ring->idx) { + /* no more new record */ + return NULL; + } + + cur_idx = (cur_idx + 1) % ring->elem_cnt; + + return (uint8 *)ring->elem + ring->elem_size * cur_idx; +} + +static inline void * +__dhd_singleidx_ring_get_prev(dhd_singleidx_ring_info_t *ring, void *prev, uint32 type) +{ + uint32 cur_idx; + + if (ring->idx == DHD_RING_IDX_INVALID) { + DHD_RING_ERR(("EMPTY RING\n")); + return NULL; + } + cur_idx = __dhd_ring_ptr2idx(ring, prev, "PREV", type); + if (cur_idx >= ring->elem_cnt) { + return NULL; + } + + if (!ring->ring_overwrited && cur_idx == 0) { + /* no more new record */ + return NULL; + } + + cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt; + 
if (ring->ring_overwrited && cur_idx == ring->idx) { + /* no more new record */ + return NULL; + } + + return (uint8 *)ring->elem + ring->elem_size * cur_idx; +} + +static inline void +__dhd_singleidx_ring_whole_lock(dhd_singleidx_ring_info_t *ring) +{ + if (!atomic_read(&ring->ring_locked)) { + atomic_set(&ring->ring_locked, 1); + } +} + +static inline void +__dhd_singleidx_ring_whole_unlock(dhd_singleidx_ring_info_t *ring) +{ + if (atomic_read(&ring->ring_locked)) { + atomic_set(&ring->ring_locked, 0); + } +} + /* Get first element : oldest element */ void * dhd_ring_get_first(void *_ring) { dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; void *ret = NULL; + unsigned long flags; if (!ring || ring->magic != DHD_RING_MAGIC) { DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); return NULL; } - mutex_lock(&ring->ring_sync); + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); if (ring->type == DHD_RING_TYPE_FIXED) { ret = __dhd_fixed_ring_get_first(&ring->fixed); } - mutex_unlock(&ring->ring_sync); + if (ring->type == DHD_RING_TYPE_SINGLE_IDX) { + ret = __dhd_singleidx_ring_get_first(&ring->single); + } + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); return ret; } @@ -22096,18 +21024,96 @@ void dhd_ring_free_first(void *_ring) { dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + unsigned long flags; if (!ring || ring->magic != DHD_RING_MAGIC) { DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); return; } - mutex_lock(&ring->ring_sync); + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); if (ring->type == DHD_RING_TYPE_FIXED) { __dhd_fixed_ring_free_first(&ring->fixed); } - mutex_unlock(&ring->ring_sync); - return; + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); +} + +void +dhd_ring_set_read_idx(void *_ring, uint32 read_idx) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + unsigned long flags; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return; + } + + DHD_RING_SYNC_LOCK(ring->ring_sync, 
flags); + if (ring->type == DHD_RING_TYPE_FIXED) { + __dhd_fixed_ring_set_read_idx(&ring->fixed, read_idx); + } + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); +} + +void +dhd_ring_set_write_idx(void *_ring, uint32 write_idx) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + unsigned long flags; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return; + } + + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); + if (ring->type == DHD_RING_TYPE_FIXED) { + __dhd_fixed_ring_set_write_idx(&ring->fixed, write_idx); + } + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); +} + +uint32 +dhd_ring_get_read_idx(void *_ring) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + uint32 read_idx = DHD_RING_IDX_INVALID; + unsigned long flags; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return read_idx; + } + + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); + if (ring->type == DHD_RING_TYPE_FIXED) { + read_idx = __dhd_fixed_ring_get_read_idx(&ring->fixed); + } + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); + + return read_idx; +} + +uint32 +dhd_ring_get_write_idx(void *_ring) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + uint32 write_idx = DHD_RING_IDX_INVALID; + unsigned long flags; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return write_idx; + } + + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); + if (ring->type == DHD_RING_TYPE_FIXED) { + write_idx = __dhd_fixed_ring_get_write_idx(&ring->fixed); + } + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); + + return write_idx; } /* Get latest element */ @@ -22116,17 +21122,21 @@ dhd_ring_get_last(void *_ring) { dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; void *ret = NULL; + unsigned long flags; if (!ring || ring->magic != DHD_RING_MAGIC) { DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); return NULL; } - 
mutex_lock(&ring->ring_sync); + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); if (ring->type == DHD_RING_TYPE_FIXED) { ret = __dhd_fixed_ring_get_last(&ring->fixed); } - mutex_unlock(&ring->ring_sync); + if (ring->type == DHD_RING_TYPE_SINGLE_IDX) { + ret = __dhd_singleidx_ring_get_last(&ring->single); + } + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); return ret; } @@ -22139,17 +21149,21 @@ dhd_ring_get_empty(void *_ring) { dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; void *ret = NULL; + unsigned long flags; if (!ring || ring->magic != DHD_RING_MAGIC) { DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); return NULL; } - mutex_lock(&ring->ring_sync); + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); if (ring->type == DHD_RING_TYPE_FIXED) { ret = __dhd_fixed_ring_get_empty(&ring->fixed); } - mutex_unlock(&ring->ring_sync); + if (ring->type == DHD_RING_TYPE_SINGLE_IDX) { + ret = __dhd_singleidx_ring_get_empty(&ring->single); + } + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); return ret; } @@ -22158,17 +21172,21 @@ dhd_ring_get_next(void *_ring, void *cur) { dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; void *ret = NULL; + unsigned long flags; if (!ring || ring->magic != DHD_RING_MAGIC) { DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); return NULL; } - mutex_lock(&ring->ring_sync); + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); if (ring->type == DHD_RING_TYPE_FIXED) { - ret = __dhd_fixed_ring_get_next(&ring->fixed, cur); + ret = __dhd_fixed_ring_get_next(&ring->fixed, cur, ring->type); + } + if (ring->type == DHD_RING_TYPE_SINGLE_IDX) { + ret = __dhd_singleidx_ring_get_next(&ring->single, cur, ring->type); } - mutex_unlock(&ring->ring_sync); + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); return ret; } @@ -22177,17 +21195,21 @@ dhd_ring_get_prev(void *_ring, void *cur) { dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; void *ret = NULL; + unsigned long flags; if (!ring || ring->magic != DHD_RING_MAGIC) { DHD_RING_ERR(("%s :INVALID RING INFO\n", 
__FUNCTION__)); return NULL; } - mutex_lock(&ring->ring_sync); + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); if (ring->type == DHD_RING_TYPE_FIXED) { - ret = __dhd_fixed_ring_get_prev(&ring->fixed, cur); + ret = __dhd_fixed_ring_get_prev(&ring->fixed, cur, ring->type); + } + if (ring->type == DHD_RING_TYPE_SINGLE_IDX) { + ret = __dhd_singleidx_ring_get_prev(&ring->single, cur, ring->type); } - mutex_unlock(&ring->ring_sync); + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); return ret; } @@ -22196,17 +21218,18 @@ dhd_ring_get_cur_size(void *_ring) { dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; int cnt = 0; + unsigned long flags; if (!ring || ring->magic != DHD_RING_MAGIC) { DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); return cnt; } - mutex_lock(&ring->ring_sync); + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); if (ring->type == DHD_RING_TYPE_FIXED) { cnt = __dhd_fixed_ring_get_cur_size(&ring->fixed); } - mutex_unlock(&ring->ring_sync); + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); return cnt; } @@ -22215,18 +21238,18 @@ void dhd_ring_lock(void *_ring, void *first_ptr, void *last_ptr) { dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + unsigned long flags; if (!ring || ring->magic != DHD_RING_MAGIC) { DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); return; } - mutex_lock(&ring->ring_sync); + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); if (ring->type == DHD_RING_TYPE_FIXED) { - __dhd_fixed_ring_lock(&ring->fixed, first_ptr, last_ptr); + __dhd_fixed_ring_lock(&ring->fixed, first_ptr, last_ptr, ring->type); } - mutex_unlock(&ring->ring_sync); - return; + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); } /* free all lock */ @@ -22234,18 +21257,18 @@ void dhd_ring_lock_free(void *_ring) { dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + unsigned long flags; if (!ring || ring->magic != DHD_RING_MAGIC) { DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); return; } - mutex_lock(&ring->ring_sync); + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); 
if (ring->type == DHD_RING_TYPE_FIXED) { __dhd_fixed_ring_lock_free(&ring->fixed); } - mutex_unlock(&ring->ring_sync); - return; + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); } void * @@ -22253,17 +21276,18 @@ dhd_ring_lock_get_first(void *_ring) { dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; void *ret = NULL; + unsigned long flags; if (!ring || ring->magic != DHD_RING_MAGIC) { DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); return NULL; } - mutex_lock(&ring->ring_sync); + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); if (ring->type == DHD_RING_TYPE_FIXED) { ret = __dhd_fixed_ring_lock_get_first(&ring->fixed); } - mutex_unlock(&ring->ring_sync); + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); return ret; } @@ -22272,17 +21296,18 @@ dhd_ring_lock_get_last(void *_ring) { dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; void *ret = NULL; + unsigned long flags; if (!ring || ring->magic != DHD_RING_MAGIC) { DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); return NULL; } - mutex_lock(&ring->ring_sync); + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); if (ring->type == DHD_RING_TYPE_FIXED) { ret = __dhd_fixed_ring_lock_get_last(&ring->fixed); } - mutex_unlock(&ring->ring_sync); + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); return ret; } @@ -22291,17 +21316,18 @@ dhd_ring_lock_get_count(void *_ring) { dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; int ret = BCME_ERROR; + unsigned long flags; if (!ring || ring->magic != DHD_RING_MAGIC) { DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); return ret; } - mutex_lock(&ring->ring_sync); + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); if (ring->type == DHD_RING_TYPE_FIXED) { ret = __dhd_fixed_ring_lock_get_count(&ring->fixed); } - mutex_unlock(&ring->ring_sync); + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); return ret; } @@ -22310,21 +21336,56 @@ void dhd_ring_lock_free_first(void *_ring) { dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + unsigned long flags; if (!ring || ring->magic != 
DHD_RING_MAGIC) { DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); return; } - mutex_lock(&ring->ring_sync); + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); if (ring->type == DHD_RING_TYPE_FIXED) { __dhd_fixed_ring_lock_free_first(&ring->fixed); } - mutex_unlock(&ring->ring_sync); - return; + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); +} + +void +dhd_ring_whole_lock(void *_ring) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + unsigned long flags; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return; + } + + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); + if (ring->type == DHD_RING_TYPE_SINGLE_IDX) { + __dhd_singleidx_ring_whole_lock(&ring->single); + } + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); +} + +void +dhd_ring_whole_unlock(void *_ring) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + unsigned long flags; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return; + } + + DHD_RING_SYNC_LOCK(ring->ring_sync, flags); + if (ring->type == DHD_RING_TYPE_SINGLE_IDX) { + __dhd_singleidx_ring_whole_unlock(&ring->single); + } + DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags); } -#ifdef DHD_DUMP_MNGR #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) #define DHD_VFS_INODE(dir) (dir->d_inode) #else @@ -22336,8 +21397,7 @@ dhd_ring_lock_free_first(void *_ring) #else #define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b, c) #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */ - -static int +int dhd_file_delete(char *path) { struct path file_path; @@ -22347,16 +21407,17 @@ dhd_file_delete(char *path) err = kern_path(path, 0, &file_path); if (err < 0) { + DHD_ERROR(("Failed to get kern-path delete file: %s error: %d\n", path, err)); return err; } - if (FALSE || + if ( #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) !d_is_file(file_path.dentry) || -#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0)) - 
d_really_is_negative(file_path.dentry) -#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0) */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 1, 0)) + d_really_is_negative(file_path.dentry) || +#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(4, 1, 0) */ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */ -) + FALSE) { err = -EINVAL; } else { @@ -22378,7 +21439,7 @@ dhd_file_delete(char *path) return err; } - +#ifdef DHD_DUMP_MNGR static int dhd_dump_file_manage_idx(dhd_dump_file_manage_t *fm_ptr, char *fname) { @@ -22402,6 +21463,7 @@ dhd_dump_file_manage_idx(dhd_dump_file_manage_t *fm_ptr, char *fname) if (strlen(fm_ptr->elems[fm_idx].type_name) == 0) { strncpy(fm_ptr->elems[fm_idx].type_name, fname, DHD_DUMP_TYPE_NAME_SIZE); + fm_ptr->elems[fm_idx].type_name[DHD_DUMP_TYPE_NAME_SIZE - 1] = '\0'; fm_ptr->elems[fm_idx].file_idx = 0; } @@ -22454,6 +21516,7 @@ dhd_dump_file_manage_enqueue(dhd_pub_t *dhd, char *dump_path, char *fname) /* save dump file path */ strncpy(elem->file_path[fp_idx], dump_path, DHD_DUMP_FILE_PATH_SIZE); + elem->file_path[fp_idx][DHD_DUMP_FILE_PATH_SIZE - 1] = '\0'; /* change file index to next file index */ elem->file_idx = (elem->file_idx + 1) % DHD_DUMP_FILE_COUNT_MAX; @@ -22463,13 +21526,19 @@ dhd_dump_file_manage_enqueue(dhd_pub_t *dhd, char *dump_path, char *fname) #ifdef DHD_MAP_LOGGING /* Will be called from SMMU fault handler */ void -dhd_debug_info_dump(void) +dhd_smmu_fault_handler(uint32 axid, ulong fault_addr) { dhd_pub_t *dhdp = (dhd_pub_t *)g_dhd_pub; uint32 irq = (uint32)-1; DHD_ERROR(("%s: Trigger SMMU Fault\n", __FUNCTION__)); + DHD_ERROR(("%s: axid:0x%x, fault_addr:0x%lx", __FUNCTION__, axid, fault_addr)); dhdp->smmu_fault_occurred = TRUE; +#ifdef DNGL_AXI_ERROR_LOGGING + dhdp->axi_error = TRUE; + dhdp->axi_err_dump->axid = axid; + dhdp->axi_err_dump->fault_address = fault_addr; +#endif /* DNGL_AXI_ERROR_LOGGING */ /* Disable PCIe IRQ */ dhdpcie_get_pcieirq(dhdp->bus, &irq); @@ -22477,18 +21546,21 @@ 
dhd_debug_info_dump(void) disable_irq_nosync(irq); } + /* Take debug information first */ DHD_OS_WAKE_LOCK(dhdp); - dhd_prot_debug_info_print(dhdp); - osl_dma_map_dump(); -#ifdef DHD_FW_COREDUMP - /* Load the dongle side dump to host memory */ - dhdp->memdump_enabled = DUMP_MEMONLY; - dhdp->memdump_type = DUMP_TYPE_SMMU_FAULT; - dhd_bus_mem_dump(dhdp); -#endif /* DHD_FW_COREDUMP */ + dhd_prot_smmu_fault_dump(dhdp); DHD_OS_WAKE_UNLOCK(dhdp); + + /* Take AXI information if possible */ +#ifdef DNGL_AXI_ERROR_LOGGING +#ifdef DHD_USE_WQ_FOR_DNGL_AXI_ERROR + dhd_axi_error_dispatch(dhdp); +#else + dhd_axi_error(dhdp); +#endif /* DHD_USE_WQ_FOR_DNGL_AXI_ERROR */ +#endif /* DNGL_AXI_ERROR_LOGGING */ } -EXPORT_SYMBOL(dhd_debug_info_dump); +EXPORT_SYMBOL(dhd_smmu_fault_handler); #endif /* DHD_MAP_LOGGING */ #ifdef DHD_WIFI_SHUTDOWN @@ -22519,14 +21591,309 @@ compat_kernel_read(struct file *file, loff_t offset, char *addr, unsigned long c { return (int)kernel_read(file, addr, (size_t)count, &offset); } +int +compat_vfs_write(struct file *file, char *addr, int count, loff_t *offset) +{ + return (int)kernel_write(file, addr, count, offset); +} #else int compat_kernel_read(struct file *file, loff_t offset, char *addr, unsigned long count) { return kernel_read(file, offset, addr, count); } +int +compat_vfs_write(struct file *file, char *addr, int count, loff_t *offset) +{ + return (int)vfs_write(file, addr, count, offset); +} #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */ +#ifdef DHDTCPSYNC_FLOOD_BLK +static void dhd_blk_tsfl_handler(struct work_struct * work) +{ + dhd_if_t *ifp = NULL; + dhd_pub_t *dhdp = NULL; + /* Ignore compiler warnings due to -Werror=cast-qual */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif /* STRICT_GCC_WARNINGS && __GNUC__ */ + ifp = container_of(work, dhd_if_t, blk_tsfl_work); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC 
diagnostic pop +#endif /* STRICT_GCC_WARNINGS && __GNUC__ */ + if (ifp) { + dhdp = &ifp->info->pub; + if (dhdp) { + if ((dhdp->op_mode & DHD_FLAG_P2P_GO_MODE)|| + (dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) { + DHD_ERROR(("Disassoc due to TCP SYNC FLOOD ATTACK\n")); + wl_cfg80211_del_all_sta(ifp->net, WLAN_REASON_UNSPECIFIED); + } else if ((dhdp->op_mode & DHD_FLAG_P2P_GC_MODE)|| + (dhdp->op_mode & DHD_FLAG_STA_MODE)) { + DHD_ERROR(("Diconnect due to TCP SYNC FLOOD ATTACK\n")); + wl_cfg80211_disassoc(ifp->net, WLAN_REASON_UNSPECIFIED); + } + } + } +} +void dhd_reset_tcpsync_info_by_ifp(dhd_if_t *ifp) +{ + ifp->tsync_rcvd = 0; + ifp->tsyncack_txed = 0; + ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC); +} +void dhd_reset_tcpsync_info_by_dev(struct net_device *dev) +{ + dhd_if_t *ifp = NULL; + if (dev) { + ifp = DHD_DEV_IFP(dev); + } + if (ifp) { + ifp->tsync_rcvd = 0; + ifp->tsyncack_txed = 0; + ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC); + } +} +#endif /* DHDTCPSYNC_FLOOD_BLK */ + +#ifdef DHD_4WAYM4_FAIL_DISCONNECT +static void dhd_m4_state_handler(struct work_struct *work) +{ + dhd_if_t *ifp = NULL; + /* Ignore compiler warnings due to -Werror=cast-qual */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + struct delayed_work *dw = to_delayed_work(work); + ifp = container_of(dw, dhd_if_t, m4state_work); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + + if (ifp && ifp->net && + (OSL_ATOMIC_READ(ifp->info->pub->osh, &ifp->m4state) == M4_TXFAILED)) { + DHD_ERROR(("Disassoc for 4WAY_HANDSHAKE_TIMEOUT at %s\n", + ifp->net->name)); + wl_cfg80211_disassoc(ifp->net, WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT); + } +} + +void +dhd_eap_txcomplete(dhd_pub_t *dhdp, void *txp, bool success, int ifidx) +{ + dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); + struct ether_header *eh; + uint16 type; + + if 
(!success) { + dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL); + + eh = (struct ether_header *)PKTDATA(dhdp->osh, txp); + type = ntoh16(eh->ether_type); + if (type == ETHER_TYPE_802_1X) { + if (dhd_is_4way_msg((uint8 *)eh) == EAPOL_4WAY_M4) { + dhd_if_t *ifp = NULL; + ifp = dhd->iflist[ifidx]; + if (!ifp || !ifp->net) { + return; + } + + DHD_INFO(("%s: M4 TX failed on %d.\n", + __FUNCTION__, ifidx)); + + OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M4_TXFAILED); + schedule_delayed_work(&ifp->m4state_work, + msecs_to_jiffies(MAX_4WAY_TIMEOUT_MS)); + } + } + } +} + +void +dhd_cleanup_m4_state_work(dhd_pub_t *dhdp, int ifidx) +{ + dhd_info_t *dhdinfo; + dhd_if_t *ifp; + + if ((ifidx < 0) || (ifidx >= DHD_MAX_IFS)) { + DHD_ERROR(("%s: invalid ifidx %d\n", __FUNCTION__, ifidx)); + return; + } + + dhdinfo = (dhd_info_t *)(dhdp->info); + if (!dhdinfo) { + DHD_ERROR(("%s: dhdinfo is NULL\n", __FUNCTION__)); + return; + } + + ifp = dhdinfo->iflist[ifidx]; + if (ifp) { + cancel_delayed_work_sync(&ifp->m4state_work); + } +} +#endif /* DHD_4WAYM4_FAIL_DISCONNECT */ + +#ifdef DHD_HP2P +unsigned long +dhd_os_hp2plock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + unsigned long flags = 0; + + dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + spin_lock_irqsave(&dhd->hp2p_lock, flags); + } + + return flags; +} + +void +dhd_os_hp2punlock(dhd_pub_t *pub, unsigned long flags) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + spin_unlock_irqrestore(&dhd->hp2p_lock, flags); + } +} +#endif /* DHD_HP2P */ +#ifdef DNGL_AXI_ERROR_LOGGING +static void +dhd_axi_error_dump(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = (dhd_info_t *)handle; + dhd_pub_t *dhdp = NULL; + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + goto exit; + } + + dhdp = &dhd->pub; + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + goto exit; + } + + /** + * First save axi error information to a file + * because panic should happen right after this. 
+ * After dhd reset, dhd reads the file, and do hang event process + * to send axi error stored on the file to Bigdata server + */ + if (dhdp->axi_err_dump->etd_axi_error_v1.version != HND_EXT_TRAP_AXIERROR_VERSION_1) { + DHD_ERROR(("%s: Invalid AXI version: 0x%x\n", + __FUNCTION__, dhdp->axi_err_dump->etd_axi_error_v1.version)); + } + + DHD_OS_WAKE_LOCK(dhdp); +#ifdef DHD_FW_COREDUMP +#ifdef DHD_SSSR_DUMP + dhdp->collect_sssr = TRUE; +#endif /* DHD_SSSR_DUMP */ + DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__)); + dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length); +#endif /* DHD_FW_COREDUMP */ + DHD_OS_WAKE_UNLOCK(dhdp); + +exit: + /* Trigger kernel panic after taking necessary dumps */ + BUG_ON(1); +} + +void dhd_schedule_axi_error_dump(dhd_pub_t *dhdp, void *type) +{ + DHD_ERROR(("%s: scheduling axi_error_dump.. \n", __FUNCTION__)); + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, + type, DHD_WQ_WORK_AXI_ERROR_DUMP, + dhd_axi_error_dump, DHD_WQ_WORK_PRIORITY_HIGH); +} +#endif /* DNGL_AXI_ERROR_LOGGING */ + +#ifdef BCMPCIE +static void +dhd_cto_recovery_handler(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + dhd_pub_t *dhdp = NULL; + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + BUG_ON(1); + return; + } + + dhdp = &dhd->pub; + dhdpcie_cto_recovery_handler(dhdp); +} + +void +dhd_schedule_cto_recovery(dhd_pub_t *dhdp) +{ + DHD_ERROR(("%s: scheduling cto recovery.. \n", __FUNCTION__)); + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, + NULL, DHD_WQ_WORK_CTO_RECOVERY, + dhd_cto_recovery_handler, DHD_WQ_WORK_PRIORITY_HIGH); +} +#endif /* BCMPCIE */ + +#ifdef SUPPORT_SET_TID +/* + * Set custom TID value for UDP frame based on UID value. + * This will be triggered by android private command below. 
+ * DRIVER SET_TID + * Mode 0(SET_TID_OFF) : Disable changing TID + * Mode 1(SET_TID_ALL_UDP) : Change TID for all UDP frames + * Mode 2(SET_TID_BASED_ON_UID) : Change TID for UDP frames based on target UID +*/ +void +dhd_set_tid_based_on_uid(dhd_pub_t *dhdp, void *pkt) +{ + struct ether_header *eh = NULL; + struct sock *sk = NULL; + uint8 *pktdata = NULL; + uint8 *ip_hdr = NULL; + uint8 cur_prio; + uint8 prio; + uint32 uid; + + if (dhdp->tid_mode == SET_TID_OFF) { + return; + } + + pktdata = (uint8 *)PKTDATA(dhdp->osh, pkt); + eh = (struct ether_header *) pktdata; + ip_hdr = (uint8 *)eh + ETHER_HDR_LEN; + + if (IPV4_PROT(ip_hdr) != IP_PROT_UDP) { + return; + } + + cur_prio = PKTPRIO(pkt); + prio = dhdp->target_tid; + uid = dhdp->target_uid; + + if ((cur_prio == prio) || + (cur_prio != PRIO_8021D_BE)) { + return; + } + + sk = ((struct sk_buff*)(pkt))->sk; + + if ((dhdp->tid_mode == SET_TID_ALL_UDP) || + (sk && (uid == __kuid_val(sock_i_uid(sk))))) { + PKTSETPRIO(pkt, prio); + } +} +#endif /* SUPPORT_SET_TID */ + void *dhd_get_pub(struct net_device *dev) { dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev); @@ -22561,3 +21928,68 @@ bool dhd_os_wd_timer_enabled(void *bus) } return dhd->wd_timer_valid; } + +#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) +/* This function is to automatically add/del interface to the bridged dev that priamy dev is in */ +static void dhd_bridge_dev_set(dhd_info_t *dhd, int ifidx, struct net_device *dev) +{ + struct net_device *primary_ndev = NULL, *br_dev = NULL; + int cmd; + struct ifreq ifr; + + /* add new interface to bridge dev */ + if (dev) { + int found = 0, i; + DHD_ERROR(("bssidx %d\n", dhd->pub.info->iflist[ifidx]->bssidx)); + for (i = 0 ; i < ifidx; i++) { + DHD_ERROR(("bssidx %d %d\n", i, dhd->pub.info->iflist[i]->bssidx)); + /* search the primary interface */ + if (dhd->pub.info->iflist[i]->bssidx == dhd->pub.info->iflist[ifidx]->bssidx) { + primary_ndev = dhd->pub.info->iflist[i]->net; + DHD_ERROR(("%dst is primary 
dev %s\n", i, primary_ndev->name)); + found = 1; + break; + } + } + if (found == 0) { + DHD_ERROR(("Can not find primary dev %s\n", dev->name)); + return; + } + cmd = SIOCBRADDIF; + ifr.ifr_ifindex = dev->ifindex; + } else { /* del interface from bridge dev */ + primary_ndev = dhd->pub.info->iflist[ifidx]->net; + cmd = SIOCBRDELIF; + ifr.ifr_ifindex = primary_ndev->ifindex; + } + /* if primary net device is bridged */ + if (primary_ndev->priv_flags & IFF_BRIDGE_PORT) { + rtnl_lock(); + /* get bridge device */ + br_dev = netdev_master_upper_dev_get(primary_ndev); + if (br_dev) { + const struct net_device_ops *ops = br_dev->netdev_ops; + DHD_ERROR(("br %s pri %s\n", br_dev->name, primary_ndev->name)); + if (ops) { + if (cmd == SIOCBRADDIF) { + DHD_ERROR(("br call ndo_add_slave\n")); + ops->ndo_add_slave(br_dev, dev); + /* Also bring wds0.x interface up automatically */ + dev_change_flags(dev, dev->flags | IFF_UP); + } + else { + DHD_ERROR(("br call ndo_del_slave\n")); + ops->ndo_del_slave(br_dev, primary_ndev); + } + } + } + else { + DHD_ERROR(("no br dev\n")); + } + rtnl_unlock(); + } + else { + DHD_ERROR(("device %s is not bridged\n", primary_ndev->name)); + } +} +#endif /* defiend(WLDWDS) && defined(FOURADDR_AUTO_BRG) */ diff --git a/bcmdhd.100.10.315.x/dhd_linux.h b/bcmdhd.100.10.315.x/dhd_linux.h index 3c9b4ab..ae2b4e2 100644 --- a/bcmdhd.100.10.315.x/dhd_linux.h +++ b/bcmdhd.100.10.315.x/dhd_linux.h @@ -1,7 +1,7 @@ /* * DHD Linux header file (dhd_linux exports for cfg80211 and other components) * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: dhd_linux.h 767863 2018-06-15 10:19:19Z $ + * $Id: dhd_linux.h 816392 2019-04-24 14:39:02Z $ */ /* wifi platform functions for power, interrupt and pre-alloc, either @@ -47,6 +47,186 @@ #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) #include #endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */ +#ifdef PCIE_FULL_DONGLE +#include +#endif /* PCIE_FULL_DONGLE */ +#ifdef WL_MONITOR +#include +#define MAX_RADIOTAP_SIZE 256 /* Maximum size to hold HE Radiotap header format */ +#define MAX_MON_PKT_SIZE (4096 + MAX_RADIOTAP_SIZE) +#endif /* WL_MONITOR */ + +#define FILE_DUMP_MAX_WAIT_TIME 4000 + +#define htod32(i) (i) +#define htod16(i) (i) +#define dtoh32(i) (i) +#define dtoh16(i) (i) +#define htodchanspec(i) (i) +#define dtohchanspec(i) (i) + +#ifdef BLOCK_IPV6_PACKET +#define HEX_PREF_STR "0x" +#define UNI_FILTER_STR "010000000000" +#define ZERO_ADDR_STR "000000000000" +#define ETHER_TYPE_STR "0000" +#define IPV6_FILTER_STR "20" +#define ZERO_TYPE_STR "00" +#endif /* BLOCK_IPV6_PACKET */ + +typedef struct dhd_if_event { + struct list_head list; + wl_event_data_if_t event; + char name[IFNAMSIZ+1]; + uint8 mac[ETHER_ADDR_LEN]; +} dhd_if_event_t; + +/* Interface control information */ +typedef struct dhd_if { + struct dhd_info *info; /* back pointer to dhd_info */ + /* OS/stack specifics */ + struct net_device *net; + int idx; /* iface idx in dongle */ + uint subunit; /* subunit */ + uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */ + bool set_macaddress; + bool set_multicast; + uint8 bssidx; /* bsscfg index for the interface */ + bool attached; /* Delayed attachment when unset */ + bool txflowcontrol; /* Per interface flow control indicator */ + char name[IFNAMSIZ+1]; /* linux interface name */ + char dngl_name[IFNAMSIZ+1]; /* 
corresponding dongle interface name */ + struct net_device_stats stats; +#ifdef PCIE_FULL_DONGLE + struct list_head sta_list; /* sll of associated stations */ + spinlock_t sta_list_lock; /* lock for manipulating sll */ +#endif /* PCIE_FULL_DONGLE */ + uint32 ap_isolate; /* ap-isolation settings */ +#ifdef DHD_L2_FILTER + bool parp_enable; + bool parp_discard; + bool parp_allnode; + arp_table_t *phnd_arp_table; + /* for Per BSS modification */ + bool dhcp_unicast; + bool block_ping; + bool grat_arp; + bool block_tdls; +#endif /* DHD_L2_FILTER */ +#ifdef DHD_MCAST_REGEN + bool mcast_regen_bss_enable; +#endif // endif + bool rx_pkt_chainable; /* set all rx packet to chainable config by default */ + cumm_ctr_t cumm_ctr; /* cummulative queue length of child flowrings */ + uint8 tx_paths_active; + bool del_in_progress; + bool static_if; /* used to avoid some operations on static_if */ +#ifdef DHD_4WAYM4_FAIL_DISCONNECT + struct delayed_work m4state_work; + atomic_t m4state; +#endif /* DHD_4WAYM4_FAIL_DISCONNECT */ +#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT + bool recv_reassoc_evt; + bool post_roam_evt; +#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */ +#ifdef DHDTCPSYNC_FLOOD_BLK + uint32 tsync_rcvd; + uint32 tsyncack_txed; + u64 last_sync; + struct work_struct blk_tsfl_work; +#endif /* DHDTCPSYNC_FLOOD_BLK */ +} dhd_if_t; + +struct ipv6_work_info_t { + uint8 if_idx; + char ipv6_addr[IPV6_ADDR_LEN]; + unsigned long event; +}; + +typedef struct dhd_dump { + uint8 *buf; + int bufsize; + uint8 *hscb_buf; + int hscb_bufsize; +} dhd_dump_t; +#ifdef DNGL_AXI_ERROR_LOGGING +typedef struct dhd_axi_error_dump { + ulong fault_address; + uint32 axid; + struct hnd_ext_trap_axi_error_v1 etd_axi_error_v1; +} dhd_axi_error_dump_t; +#endif /* DNGL_AXI_ERROR_LOGGING */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +struct dhd_rx_tx_work { + struct work_struct work; + struct sk_buff *skb; + struct net_device *net; + struct dhd_pub *pub; +}; +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#if defined(DHD_LB) 
+#if !defined(PCIE_FULL_DONGLE) +#error "DHD Loadbalancing only supported on PCIE_FULL_DONGLE" +#endif /* !PCIE_FULL_DONGLE */ +#endif /* DHD_LB */ + +#if defined(DHD_LB_RXP) || defined(DHD_LB_RXC) || defined(DHD_LB_TXC) || \ + defined(DHD_LB_STATS) +#if !defined(DHD_LB) +#error "DHD loadbalance derivatives are supported only if DHD_LB is defined" +#endif /* !DHD_LB */ +#endif /* DHD_LB_RXP || DHD_LB_RXC || DHD_LB_TXC || DHD_LB_STATS */ + +#if defined(DHD_LB) +/* Dynamic CPU selection for load balancing */ +#include +#include +#include +#include +#include + +#if !defined(DHD_LB_PRIMARY_CPUS) +#define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */ +#endif // endif +#if !defined(DHD_LB_SECONDARY_CPUS) +#define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */ +#endif // endif + +#define HIST_BIN_SIZE 9 + +#if defined(DHD_LB_TXP) +/* Pkttag not compatible with PROP_TXSTATUS or WLFC */ +typedef struct dhd_tx_lb_pkttag_fr { + struct net_device *net; + int ifidx; +} dhd_tx_lb_pkttag_fr_t; + +#define DHD_LB_TX_PKTTAG_SET_NETDEV(tag, netdevp) ((tag)->net = netdevp) +#define DHD_LB_TX_PKTTAG_NETDEV(tag) ((tag)->net) + +#define DHD_LB_TX_PKTTAG_SET_IFIDX(tag, ifidx) ((tag)->ifidx = ifidx) +#define DHD_LB_TX_PKTTAG_IFIDX(tag) ((tag)->ifidx) +#endif /* DHD_LB_TXP */ + +#endif /* DHD_LB */ + +#ifdef FILTER_IE +#define FILTER_IE_PATH "/etc/wifi/filter_ie" +#define FILTER_IE_BUFSZ 1024 /* ioc buffsize for FILTER_IE */ +#define FILE_BLOCK_READ_SIZE 256 +#define WL_FILTER_IE_IOV_HDR_SIZE OFFSETOF(wl_filter_ie_iov_v1_t, tlvs) +#endif /* FILTER_IE */ + +#define NULL_CHECK(p, s, err) \ + do { \ + if (!(p)) { \ + printk("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \ + err = BCME_ERROR; \ + return err; \ + } \ + } while (0) /* dongle status */ enum wifi_adapter_status { @@ -107,9 +287,9 @@ struct wifi_platform_data { #ifdef BCMSDIO int (*get_wake_irq)(void); #endif // endif -#if defined(CUSTOM_COUNTRY_CODE) +#ifdef CUSTOM_FORCE_NODFS_FLAG void *(*get_country_code)(char 
*ccode, u32 flags); -#else /* defined (CUSTOM_COUNTRY_CODE) */ +#else /* defined (CUSTOM_FORCE_NODFS_FLAG) */ void *(*get_country_code)(char *ccode); #endif }; @@ -131,6 +311,95 @@ typedef struct dhd_sta { } dhd_sta_t; typedef dhd_sta_t dhd_sta_pool_t; +#ifdef DHD_4WAYM4_FAIL_DISCONNECT +typedef enum { + M3_RXED, + M4_TXFAILED +} msg_4way_state_t; +#define MAX_4WAY_TIMEOUT_MS 2000 +#endif /* DHD_4WAYM4_FAIL_DISCONNECT */ + +#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS +extern uint32 report_hang_privcmd_err; +#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */ + +#if defined(ARGOS_NOTIFY_CB) +int argos_register_notifier_init(struct net_device *net); +int argos_register_notifier_deinit(void); + +extern int sec_argos_register_notifier(struct notifier_block *n, char *label); +extern int sec_argos_unregister_notifier(struct notifier_block *n, char *label); + +typedef struct { + struct net_device *wlan_primary_netdev; + int argos_rps_cpus_enabled; +} argos_rps_ctrl; + +#define RPS_TPUT_THRESHOLD 300 +#define DELAY_TO_CLEAR_RPS_CPUS 300 +#endif // endif + +#if defined(BT_OVER_SDIO) +extern void wl_android_set_wifi_on_flag(bool enable); +#endif /* BT_OVER_SDIO */ + +#ifdef DHD_LOG_DUMP +/* 0: DLD_BUF_TYPE_GENERAL, 1: DLD_BUF_TYPE_PRESERVE +* 2: DLD_BUF_TYPE_SPECIAL +*/ +#define DLD_BUFFER_NUM 3 + +#ifndef CUSTOM_LOG_DUMP_BUFSIZE_MB +#define CUSTOM_LOG_DUMP_BUFSIZE_MB 4 /* DHD_LOG_DUMP_BUF_SIZE 4 MB static memory in kernel */ +#endif /* CUSTOM_LOG_DUMP_BUFSIZE_MB */ + +#define LOG_DUMP_TOTAL_BUFSIZE (1024 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB) + +/* + * Below are different sections that use the prealloced buffer + * and sum of the sizes of these should not cross LOG_DUMP_TOTAL_BUFSIZE + */ +#define LOG_DUMP_GENERAL_MAX_BUFSIZE (256 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB) +#define LOG_DUMP_PRESERVE_MAX_BUFSIZE (128 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB) +#define LOG_DUMP_ECNTRS_MAX_BUFSIZE (256 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB) +#define LOG_DUMP_RTT_MAX_BUFSIZE (256 * 1024 * 
CUSTOM_LOG_DUMP_BUFSIZE_MB) +#define LOG_DUMP_FILTER_MAX_BUFSIZE (128 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB) + +#if LOG_DUMP_TOTAL_BUFSIZE < (LOG_DUMP_GENERAL_MAX_BUFSIZE + \ + LOG_DUMP_PRESERVE_MAX_BUFSIZE + LOG_DUMP_ECNTRS_MAX_BUFSIZE + LOG_DUMP_RTT_MAX_BUFSIZE \ + + LOG_DUMP_FILTER_MAX_BUFSIZE) +#error "LOG_DUMP_TOTAL_BUFSIZE is lesser than sum of all rings" +#endif // endif + +/* Special buffer is allocated as separately in prealloc */ +#define LOG_DUMP_SPECIAL_MAX_BUFSIZE (8 * 1024) + +#define LOG_DUMP_MAX_FILESIZE (8 *1024 * 1024) /* 8 MB default */ +#ifdef CONFIG_LOG_BUF_SHIFT +/* 15% of kernel log buf size, if for example klog buf size is 512KB +* 15% of 512KB ~= 80KB +*/ +#define LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE \ + (15 * ((1 << CONFIG_LOG_BUF_SHIFT)/100)) +#endif /* CONFIG_LOG_BUF_SHIFT */ + +#define LOG_DUMP_COOKIE_BUFSIZE 1024u + +typedef struct { + char *hdr_str; + log_dump_section_type_t sec_type; +} dld_hdr_t; + +typedef struct { + int attr; + char *hdr_str; + log_dump_section_type_t sec_type; + int log_type; +} dld_log_hdr_t; + +#define DHD_PRINT_BUF_NAME_LEN 30 +#endif /* DHD_LOG_DUMP */ + int dhd_wifi_platform_register_drv(void); void dhd_wifi_platform_unregister_drv(void); wifi_adapter_info_t* dhd_wifi_platform_attach_adapter(uint32 bus_type, @@ -163,7 +432,12 @@ int dhd_net_bus_put(struct net_device *dev); int dhd_enable_adps(dhd_pub_t *dhd, uint8 on); #endif // endif +#ifdef DHDTCPSYNC_FLOOD_BLK +extern void dhd_reset_tcpsync_info_by_ifp(dhd_if_t *ifp); +extern void dhd_reset_tcpsync_info_by_dev(struct net_device *dev); +#endif /* DHDTCPSYNC_FLOOD_BLK */ int compat_kernel_read(struct file *file, loff_t offset, char *addr, unsigned long count); +int compat_vfs_write(struct file *file, char *addr, int count, loff_t *offset); #endif /* __DHD_LINUX_H__ */ diff --git a/bcmdhd.100.10.315.x/dhd_linux_exportfs.c b/bcmdhd.100.10.315.x/dhd_linux_exportfs.c new file mode 100644 index 0000000..6631cd7 --- /dev/null +++ 
b/bcmdhd.100.10.315.x/dhd_linux_exportfs.c @@ -0,0 +1,1384 @@ +/* + * Broadcom Dongle Host Driver (DHD), Linux-specific network interface + * Basically selected code segments from usb-cdc.c and usb-rndis.c + * + * Copyright (C) 1999-2019, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_linux_exportfs.c 808905 2019-03-11 10:32:39Z $ + */ +#include +#include +#include +#include +#include +#include +#ifdef DHD_ADPS_BAM_EXPORT +#include +#endif // endif + +#ifdef SHOW_LOGTRACE +extern dhd_pub_t* g_dhd_pub; +static int dhd_ring_proc_open(struct inode *inode, struct file *file); +ssize_t dhd_ring_proc_read(struct file *file, char *buffer, size_t tt, loff_t *loff); + +static const struct file_operations dhd_ring_proc_fops = { + .open = dhd_ring_proc_open, + .read = dhd_ring_proc_read, + .release = single_release, +}; + +static int +dhd_ring_proc_open(struct inode *inode, struct file *file) +{ + int ret = BCME_ERROR; + if (inode) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) + ret = single_open(file, 0, PDE_DATA(inode)); +#else + /* This feature is not supported for lower kernel versions */ + ret = single_open(file, 0, NULL); +#endif // endif + } else { + DHD_ERROR(("%s: inode is NULL\n", __FUNCTION__)); + } + return ret; +} + +ssize_t +dhd_ring_proc_read(struct file *file, char __user *buffer, size_t tt, loff_t *loff) +{ + trace_buf_info_t *trace_buf_info; + int ret = BCME_ERROR; + dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)((struct seq_file *)(file->private_data))->private; + + if (ring == NULL) { + DHD_ERROR(("%s: ring is NULL\n", __FUNCTION__)); + return ret; + } + + ASSERT(g_dhd_pub); + + trace_buf_info = (trace_buf_info_t *)MALLOCZ(g_dhd_pub->osh, sizeof(trace_buf_info_t)); + if (trace_buf_info) { + dhd_dbg_read_ring_into_trace_buf(ring, trace_buf_info); + if (copy_to_user(buffer, (void*)trace_buf_info->buf, MIN(trace_buf_info->size, tt))) + { + ret = -EFAULT; + goto exit; + } + if (trace_buf_info->availability == BUF_NOT_AVAILABLE) + ret = BUF_NOT_AVAILABLE; + else + ret = trace_buf_info->size; + } else + DHD_ERROR(("Memory allocation Failed\n")); + +exit: + if (trace_buf_info) { + MFREE(g_dhd_pub->osh, trace_buf_info, sizeof(trace_buf_info_t)); + } + return ret; +} + +void 
+dhd_dbg_ring_proc_create(dhd_pub_t *dhdp) +{ +#ifdef DEBUGABILITY + dhd_dbg_ring_t *dbg_verbose_ring = NULL; + + dbg_verbose_ring = dhd_dbg_get_ring_from_ring_id(dhdp, FW_VERBOSE_RING_ID); + if (dbg_verbose_ring) { + if (!proc_create_data("dhd_trace", S_IRUSR, NULL, &dhd_ring_proc_fops, + dbg_verbose_ring)) { + DHD_ERROR(("Failed to create /proc/dhd_trace procfs interface\n")); + } else { + DHD_ERROR(("Created /proc/dhd_trace procfs interface\n")); + } + } else { + DHD_ERROR(("dbg_verbose_ring is NULL, /proc/dhd_trace not created\n")); + } +#endif /* DEBUGABILITY */ + +#ifdef EWP_ECNTRS_LOGGING + if (!proc_create_data("dhd_ecounters", S_IRUSR, NULL, &dhd_ring_proc_fops, + dhdp->ecntr_dbg_ring)) { + DHD_ERROR(("Failed to create /proc/dhd_ecounters procfs interface\n")); + } else { + DHD_ERROR(("Created /proc/dhd_ecounters procfs interface\n")); + } +#endif /* EWP_ECNTRS_LOGGING */ + +#ifdef EWP_RTT_LOGGING + if (!proc_create_data("dhd_rtt", S_IRUSR, NULL, &dhd_ring_proc_fops, + dhdp->rtt_dbg_ring)) { + DHD_ERROR(("Failed to create /proc/dhd_rtt procfs interface\n")); + } else { + DHD_ERROR(("Created /proc/dhd_rtt procfs interface\n")); + } +#endif /* EWP_RTT_LOGGING */ +} + +void +dhd_dbg_ring_proc_destroy(dhd_pub_t *dhdp) +{ +#ifdef DEBUGABILITY + remove_proc_entry("dhd_trace", NULL); +#endif /* DEBUGABILITY */ + +#ifdef EWP_ECNTRS_LOGGING + remove_proc_entry("dhd_ecounters", NULL); +#endif /* EWP_ECNTRS_LOGGING */ + +#ifdef EWP_RTT_LOGGING + remove_proc_entry("dhd_rtt", NULL); +#endif /* EWP_RTT_LOGGING */ + +} +#endif /* SHOW_LOGTRACE */ + +/* ---------------------------------------------------------------------------- + * Infrastructure code for sysfs interface support for DHD + * + * What is sysfs interface? + * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt + * + * Why sysfs interface? + * This is the Linux standard way of changing/configuring Run Time parameters + * for a driver. 
We can use this interface to control "linux" specific driver + * parameters. + * + * ----------------------------------------------------------------------------- + */ + +#if defined(DHD_TRACE_WAKE_LOCK) +extern atomic_t trace_wklock_onoff; + +/* Function to show the history buffer */ +static ssize_t +show_wklock_trace(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + dhd_info_t *dhd = (dhd_info_t *)dev; + + buf[ret] = '\n'; + buf[ret+1] = 0; + + dhd_wk_lock_stats_dump(&dhd->pub); + return ret+1; +} + +/* Function to enable/disable wakelock trace */ +static ssize_t +wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long onoff; + dhd_info_t *dhd = (dhd_info_t *)dev; + BCM_REFERENCE(dhd); + + onoff = bcm_strtoul(buf, NULL, 10); + if (onoff != 0 && onoff != 1) { + return -EINVAL; + } + + atomic_set(&trace_wklock_onoff, onoff); + if (atomic_read(&trace_wklock_onoff)) { + printk("ENABLE WAKLOCK TRACE\n"); + } else { + printk("DISABLE WAKELOCK TRACE\n"); + } + + return (ssize_t)(onoff+1); +} +#endif /* DHD_TRACE_WAKE_LOCK */ + +#if defined(DHD_LB_TXP) +static ssize_t +show_lbtxp(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + unsigned long onoff; + dhd_info_t *dhd = (dhd_info_t *)dev; + + onoff = atomic_read(&dhd->lb_txp_active); + ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", + onoff); + return ret; +} + +static ssize_t +lbtxp_onoff(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long onoff; + dhd_info_t *dhd = (dhd_info_t *)dev; + int i; + + onoff = bcm_strtoul(buf, NULL, 10); + + sscanf(buf, "%lu", &onoff); + if (onoff != 0 && onoff != 1) { + return -EINVAL; + } + atomic_set(&dhd->lb_txp_active, onoff); + + /* Since the scheme is changed clear the counters */ + for (i = 0; i < NR_CPUS; i++) { + DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]); + DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]); + } + + return count; +} + +#endif /* DHD_LB_TXP */ + +#if defined(DHD_LB_RXP) +static ssize_t 
+show_lbrxp(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + unsigned long onoff; + dhd_info_t *dhd = (dhd_info_t *)dev; + + onoff = atomic_read(&dhd->lb_rxp_active); + ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", + onoff); + return ret; +} + +static ssize_t +lbrxp_onoff(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long onoff; + dhd_info_t *dhd = (dhd_info_t *)dev; + int i, j; + + onoff = bcm_strtoul(buf, NULL, 10); + + sscanf(buf, "%lu", &onoff); + if (onoff != 0 && onoff != 1) { + return -EINVAL; + } + atomic_set(&dhd->lb_rxp_active, onoff); + + /* Since the scheme is changed clear the counters */ + for (i = 0; i < NR_CPUS; i++) { + DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]); + for (j = 0; j < HIST_BIN_SIZE; j++) { + DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]); + } + } + + return count; +} +#endif /* DHD_LB_RXP */ + +#ifdef DHD_LOG_DUMP +extern int logdump_periodic_flush; +extern int logdump_ecntr_enable; +static ssize_t +show_logdump_periodic_flush(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + unsigned long val; + + val = logdump_periodic_flush; + ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", val); + return ret; +} + +static ssize_t +logdump_periodic_flush_onoff(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long val; + + val = bcm_strtoul(buf, NULL, 10); + + sscanf(buf, "%lu", &val); + if (val != 0 && val != 1) { + return -EINVAL; + } + logdump_periodic_flush = val; + return count; +} + +static ssize_t +show_logdump_ecntr(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + unsigned long val; + + val = logdump_ecntr_enable; + ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", val); + return ret; +} + +static ssize_t +logdump_ecntr_onoff(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long val; + + val = bcm_strtoul(buf, NULL, 10); + + sscanf(buf, "%lu", &val); + if (val != 0 && val != 1) { + return -EINVAL; + } + logdump_ecntr_enable = val; + return count; +} + +#endif /* 
DHD_LOG_DUMP */ + +extern uint enable_ecounter; +static ssize_t +show_enable_ecounter(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + unsigned long onoff; + + onoff = enable_ecounter; + ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", + onoff); + return ret; +} + +static ssize_t +ecounter_onoff(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long onoff; + dhd_info_t *dhd = (dhd_info_t *)dev; + dhd_pub_t *dhdp; + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return count; + } + dhdp = &dhd->pub; + if (!FW_SUPPORTED(dhdp, ecounters)) { + DHD_ERROR(("%s: ecounters not supported by FW\n", __FUNCTION__)); + return count; + } + + onoff = bcm_strtoul(buf, NULL, 10); + + sscanf(buf, "%lu", &onoff); + if (onoff != 0 && onoff != 1) { + return -EINVAL; + } + + if (enable_ecounter == onoff) { + DHD_ERROR(("%s: ecounters already %d\n", __FUNCTION__, enable_ecounter)); + return count; + } + + enable_ecounter = onoff; + dhd_ecounter_configure(dhdp, enable_ecounter); + + return count; +} + +/* + * Generic Attribute Structure for DHD. + * If we have to add a new sysfs entry under /sys/bcm-dhd/, we have + * to instantiate an object of type dhd_attr, populate it with + * the required show/store functions (ex:- dhd_attr_cpumask_primary) + * and add the object to default_attrs[] array, that gets registered + * to the kobject of dhd (named bcm-dhd). 
+ */ + +struct dhd_attr { + struct attribute attr; + ssize_t(*show)(struct dhd_info *, char *); + ssize_t(*store)(struct dhd_info *, const char *, size_t count); +}; + +#if defined(DHD_TRACE_WAKE_LOCK) +static struct dhd_attr dhd_attr_wklock = + __ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff); +#endif /* defined(DHD_TRACE_WAKE_LOCK */ + +#if defined(DHD_LB_TXP) +static struct dhd_attr dhd_attr_lbtxp = + __ATTR(lbtxp, 0660, show_lbtxp, lbtxp_onoff); +#endif /* DHD_LB_TXP */ + +#if defined(DHD_LB_RXP) +static struct dhd_attr dhd_attr_lbrxp = + __ATTR(lbrxp, 0660, show_lbrxp, lbrxp_onoff); +#endif /* DHD_LB_RXP */ + +#ifdef DHD_LOG_DUMP +static struct dhd_attr dhd_attr_logdump_periodic_flush = + __ATTR(logdump_periodic_flush, 0660, show_logdump_periodic_flush, + logdump_periodic_flush_onoff); +static struct dhd_attr dhd_attr_logdump_ecntr = + __ATTR(logdump_ecntr_enable, 0660, show_logdump_ecntr, + logdump_ecntr_onoff); +#endif /* DHD_LOG_DUMP */ + +static struct dhd_attr dhd_attr_ecounters = + __ATTR(ecounters, 0660, show_enable_ecounter, ecounter_onoff); + +/* Attribute object that gets registered with "bcm-dhd" kobject tree */ +static struct attribute *default_attrs[] = { +#if defined(DHD_TRACE_WAKE_LOCK) + &dhd_attr_wklock.attr, +#endif // endif +#if defined(DHD_LB_TXP) + &dhd_attr_lbtxp.attr, +#endif /* DHD_LB_TXP */ +#if defined(DHD_LB_RXP) + &dhd_attr_lbrxp.attr, +#endif /* DHD_LB_RXP */ +#ifdef DHD_LOG_DUMP + &dhd_attr_logdump_periodic_flush.attr, + &dhd_attr_logdump_ecntr.attr, +#endif // endif + &dhd_attr_ecounters.attr, + NULL +}; + +#define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj) +#define to_attr(a) container_of(a, struct dhd_attr, attr) + +/* + * bcm-dhd kobject show function, the "attr" attribute specifices to which + * node under "bcm-dhd" the show function is called. 
+ */ +static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf) +{ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + dhd_info_t *dhd = to_dhd(kobj); + struct dhd_attr *d_attr = to_attr(attr); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + int ret; + + if (d_attr->show) + ret = d_attr->show(dhd, buf); + else + ret = -EIO; + + return ret; +} + +/* + * bcm-dhd kobject show function, the "attr" attribute specifices to which + * node under "bcm-dhd" the store function is called. + */ +static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + dhd_info_t *dhd = to_dhd(kobj); + struct dhd_attr *d_attr = to_attr(attr); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + int ret; + + if (d_attr->store) + ret = d_attr->store(dhd, buf, count); + else + ret = -EIO; + + return ret; + +} + +static struct sysfs_ops dhd_sysfs_ops = { + .show = dhd_show, + .store = dhd_store, +}; + +static struct kobj_type dhd_ktype = { + .sysfs_ops = &dhd_sysfs_ops, + .default_attrs = default_attrs, +}; + +#ifdef DHD_MAC_ADDR_EXPORT +struct ether_addr sysfs_mac_addr; +static ssize_t +show_mac_addr(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + ret = scnprintf(buf, PAGE_SIZE - 1, MACF, + (uint32)sysfs_mac_addr.octet[0], (uint32)sysfs_mac_addr.octet[1], + (uint32)sysfs_mac_addr.octet[2], (uint32)sysfs_mac_addr.octet[3], + (uint32)sysfs_mac_addr.octet[4], (uint32)sysfs_mac_addr.octet[5]); + + return ret; +} + +static ssize_t +set_mac_addr(struct dhd_info *dev, const char *buf, size_t count) +{ + if (!bcm_ether_atoe(buf, &sysfs_mac_addr)) { + DHD_ERROR(("Invalid Mac Address 
\n")); + return -EINVAL; + } + + DHD_ERROR(("Mac Address set with "MACDBG"\n", MAC2STRDBG(&sysfs_mac_addr))); + + return count; +} + +static struct dhd_attr dhd_attr_cntl_macaddr = + __ATTR(mac_addr, 0660, show_mac_addr, set_mac_addr); +#endif /* DHD_MAC_ADDR_EXPORT */ + +#ifdef DHD_FW_COREDUMP + +#define MEMDUMPINFO "/data/misc/wifi/.memdump.info" + +uint32 +get_mem_val_from_file(void) +{ + struct file *fp = NULL; + uint32 mem_val = DUMP_MEMFILE_MAX; + char *p_mem_val = NULL; + char *filepath = MEMDUMPINFO; + int ret = 0; + + /* Read memdump info from the file */ + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); +#if defined(CONFIG_X86) + /* Check if it is Live Brix Image */ + if (strcmp(filepath, MEMDUMPINFO_LIVE) != 0) { + goto done; + } + /* Try if it is Installed Brix Image */ + filepath = MEMDUMPINFO_INST; + DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath)); + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); + goto done; + } +#else /* Non Brix Android platform */ + goto done; +#endif /* CONFIG_X86 && OEM_ANDROID */ + } + + /* Handle success case */ + ret = compat_kernel_read(fp, 0, (char *)&mem_val, sizeof(uint32)); + if (ret < 0) { + DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret)); + filp_close(fp, NULL); + goto done; + } + + p_mem_val = (char*)&mem_val; + p_mem_val[sizeof(uint32) - 1] = '\0'; + mem_val = bcm_atoi(p_mem_val); + + filp_close(fp, NULL); + +done: + return mem_val; +} + +void dhd_get_memdump_info(dhd_pub_t *dhd) +{ +#ifndef DHD_EXPORT_CNTL_FILE + uint32 mem_val = DUMP_MEMFILE_MAX; + + mem_val = get_mem_val_from_file(); + if (mem_val != DUMP_MEMFILE_MAX) + dhd->memdump_enabled = mem_val; +#ifdef DHD_INIT_DEFAULT_MEMDUMP + if (mem_val == 0 || mem_val == DUMP_MEMFILE_MAX) + mem_val = DUMP_MEMFILE_BUGON; +#endif /* DHD_INIT_DEFAULT_MEMDUMP */ +#else +#ifdef 
DHD_INIT_DEFAULT_MEMDUMP + if (dhd->memdump_enabled == 0 || dhd->memdump_enabled == DUMP_MEMFILE_MAX) + dhd->memdump_enabled = DUMP_MEMFILE_BUGON; +#endif /* DHD_INIT_DEFAULT_MEMDUMP */ +#endif /* !DHD_EXPORT_CNTL_FILE */ + DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, dhd->memdump_enabled)); +} + +#ifdef DHD_EXPORT_CNTL_FILE +static ssize_t +show_memdump_info(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + dhd_pub_t *dhdp; + + if (!dev) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return ret; + } + + dhdp = &dev->pub; + ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", dhdp->memdump_enabled); + return ret; +} + +static ssize_t +set_memdump_info(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long memval; + dhd_pub_t *dhdp; + + if (!dev) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return count; + } + dhdp = &dev->pub; + + memval = bcm_strtoul(buf, NULL, 10); + sscanf(buf, "%lu", &memval); + + dhdp->memdump_enabled = (uint32)memval; + + DHD_ERROR(("%s: MEMDUMP ENABLED = %iu\n", __FUNCTION__, dhdp->memdump_enabled)); + return count; +} + +static struct dhd_attr dhd_attr_cntl_memdump = + __ATTR(memdump, 0660, show_memdump_info, set_memdump_info); +#endif /* DHD_EXPORT_CNTL_FILE */ +#endif /* DHD_FW_COREDUMP */ + +#ifdef BCMASSERT_LOG +#define ASSERTINFO "/data/misc/wifi/.assert.info" +int +get_assert_val_from_file(void) +{ + struct file *fp = NULL; + char *filepath = ASSERTINFO; + char *p_mem_val = NULL; + int mem_val = -1; + + /* + * Read assert info from the file + * 0: Trigger Kernel crash by panic() + * 1: Print out the logs and don't trigger Kernel panic. (default) + * 2: Trigger Kernel crash by BUG() + * File doesn't exist: Keep default value (1). 
+ */ + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); + } else { + int ret = compat_kernel_read(fp, 0, (char *)&mem_val, sizeof(uint32)); + if (ret < 0) { + DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret)); + } else { + p_mem_val = (char *)&mem_val; + p_mem_val[sizeof(uint32) - 1] = '\0'; + mem_val = bcm_atoi(p_mem_val); + DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val)); + } + filp_close(fp, NULL); + } + + mem_val = (mem_val >= 0) ? mem_val : 0; + return mem_val; +} + +void dhd_get_assert_info(dhd_pub_t *dhd) +{ +#ifndef DHD_EXPORT_CNTL_FILE + int mem_val = -1; + + mem_val = get_assert_val_from_file(); + + g_assert_type = mem_val; +#endif /* !DHD_EXPORT_CNTL_FILE */ +} + +#ifdef DHD_EXPORT_CNTL_FILE +static ssize_t +show_assert_info(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + if (!dev) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return ret; + } + + ret = scnprintf(buf, PAGE_SIZE -1, "%d\n", g_assert_type); + return ret; + +} + +static ssize_t +set_assert_info(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long assert_val; + + assert_val = bcm_strtoul(buf, NULL, 10); + sscanf(buf, "%lu", &assert_val); + + g_assert_type = (uint32)assert_val; + + DHD_ERROR(("%s: ASSERT ENABLED = %lu\n", __FUNCTION__, assert_val)); + return count; + +} + +static struct dhd_attr dhd_attr_cntl_assert = + __ATTR(assert, 0660, show_assert_info, set_assert_info); +#endif /* DHD_EXPORT_CNTL_FILE */ +#endif /* BCMASSERT_LOG */ + +#ifdef DHD_EXPORT_CNTL_FILE +#if defined(WRITE_WLANINFO) +static ssize_t +show_wifiver_info(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + ret = scnprintf(buf, PAGE_SIZE -1, "%s", version_info); + return ret; +} + +static ssize_t +set_wifiver_info(struct dhd_info *dev, const char *buf, size_t count) +{ + DHD_ERROR(("Do not set version info\n")); + return -EINVAL; +} + +static struct dhd_attr 
dhd_attr_cntl_wifiver = + __ATTR(wifiver, 0660, show_wifiver_info, set_wifiver_info); +#endif /* WRITE_WLANINFO */ + +#if defined(USE_CID_CHECK) +char cidinfostr[MAX_VNAME_LEN]; + +static ssize_t +show_cid_info(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + ret = scnprintf(buf, PAGE_SIZE -1, "%s", cidinfostr); + return ret; +} + +static ssize_t +set_cid_info(struct dhd_info *dev, const char *buf, size_t count) +{ + int len = strlen(buf) + 1; + int maxstrsz; + maxstrsz = MAX_VNAME_LEN; + + scnprintf(cidinfostr, ((len > maxstrsz) ? maxstrsz : len), "%s", buf); + DHD_INFO(("%s : CID info string\n", cidinfostr)); + return count; +} + +static struct dhd_attr dhd_attr_cntl_cidinfo = + __ATTR(cid, 0660, show_cid_info, set_cid_info); +#endif /* USE_CID_CHECK */ + +#if defined(GEN_SOFTAP_INFO_FILE) +char softapinfostr[SOFTAP_INFO_BUF_SZ]; +static ssize_t +show_softap_info(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + ret = scnprintf(buf, PAGE_SIZE -1, "%s", softapinfostr); + return ret; +} + +static ssize_t +set_softap_info(struct dhd_info *dev, const char *buf, size_t count) +{ + DHD_ERROR(("Do not set sofap related info\n")); + return -EINVAL; +} + +static struct dhd_attr dhd_attr_cntl_softapinfo = + __ATTR(softap, 0660, show_softap_info, set_softap_info); +#endif /* GEN_SOFTAP_INFO_FILE */ + +#if defined(MIMO_ANT_SETTING) +unsigned long antsel; + +static ssize_t +show_ant_info(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + ret = scnprintf(buf, PAGE_SIZE -1, "%lu\n", antsel); + return ret; +} + +static ssize_t +set_ant_info(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long ant_val; + + ant_val = bcm_strtoul(buf, NULL, 10); + sscanf(buf, "%lu", &ant_val); + + /* + * Check value + * 0 - Not set, handle same as file not exist + */ + if (ant_val > 3) { + DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %lu \n", + __FUNCTION__, ant_val)); + return -EINVAL; + } + + antsel = ant_val; + DHD_ERROR(("[WIFI_SEC] %s: Set Antinfo 
val = %lu \n", __FUNCTION__, antsel)); + return count; +} + +static struct dhd_attr dhd_attr_cntl_antinfo = + __ATTR(ant, 0660, show_ant_info, set_ant_info); +#endif /* MIMO_ANT_SETTING */ + +#ifdef DHD_PM_CONTROL_FROM_FILE +extern bool g_pm_control; +extern uint32 pmmode_val; +static ssize_t +show_pm_info(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + if (!g_pm_control) { + ret = scnprintf(buf, PAGE_SIZE -1, "PM mode is not set\n"); + } else { + ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", pmmode_val); + } + return ret; +} + +static ssize_t +set_pm_info(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long pm_val; + + pm_val = bcm_strtoul(buf, NULL, 10); + sscanf(buf, "%lu", &pm_val); + + if (pm_val > 2) { + DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %lu \n", + __FUNCTION__, pm_val)); + return -EINVAL; + } + + if (!pm_val) { + g_pm_control = TRUE; + } else { + g_pm_control = FALSE; + } + + pmmode_val = (uint32)pm_val; + DHD_ERROR(("[WIFI_SEC] %s: Set pminfo val = %u\n", __FUNCTION__, pmmode_val)); + return count; +} + +static struct dhd_attr dhd_attr_cntl_pminfo = + __ATTR(pm, 0660, show_pm_info, set_pm_info); +#endif /* DHD_PM_CONTROL_FROM_FILE */ + +#ifdef LOGTRACE_FROM_FILE +unsigned long logtrace_val = 1; + +static ssize_t +show_logtrace_info(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + ret = scnprintf(buf, PAGE_SIZE -1, "%lu\n", logtrace_val); + return ret; +} + +static ssize_t +set_logtrace_info(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long onoff; + + onoff = bcm_strtoul(buf, NULL, 10); + sscanf(buf, "%lu", &onoff); + + if (onoff > 2) { + DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %lu \n", + __FUNCTION__, onoff)); + return -EINVAL; + } + + logtrace_val = onoff; + DHD_ERROR(("[WIFI_SEC] %s: LOGTRACE On/Off from sysfs = %lu\n", + __FUNCTION__, logtrace_val)); + return count; +} + +static struct dhd_attr dhd_attr_cntl_logtraceinfo = + __ATTR(logtrace, 0660, show_logtrace_info, 
set_logtrace_info); +#endif /* LOGTRACE_FROM_FILE */ + +#ifdef USE_WFA_CERT_CONF +#ifdef BCMSDIO +uint32 bus_txglom = VALUENOTSET; + +static ssize_t +show_bustxglom(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + if (bus_txglom == VALUENOTSET) { + ret = scnprintf(buf, PAGE_SIZE - 1, "%s\n", "bustxglom not set from sysfs"); + } else { + ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", bus_txglom); + } + return ret; +} + +static ssize_t +set_bustxglom(struct dhd_info *dev, const char *buf, size_t count) +{ + uint32 onoff; + + onoff = (uint32)bcm_atoi(buf); + sscanf(buf, "%u", &onoff); + + if (onoff > 2) { + DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n", + __FUNCTION__, onoff)); + return -EINVAL; + } + + bus_txglom = onoff; + DHD_ERROR(("[WIFI_SEC] %s: BUS TXGLOM On/Off from sysfs = %u\n", + __FUNCTION__, bus_txglom)); + return count; +} + +static struct dhd_attr dhd_attr_cntl_bustxglom = + __ATTR(bustxglom, 0660, show_bustxglom, set_bustxglom); +#endif /* BCMSDIO */ + +#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM) +uint32 roam_off = VALUENOTSET; + +static ssize_t +show_roamoff(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + if (roam_off == VALUENOTSET) { + ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "roam_off not set from sysfs"); + } else { + ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", roam_off); + } + return ret; +} + +static ssize_t +set_roamoff(struct dhd_info *dev, const char *buf, size_t count) +{ + uint32 onoff; + + onoff = bcm_atoi(buf); + sscanf(buf, "%u", &onoff); + + if (onoff > 2) { + DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n", + __FUNCTION__, onoff)); + return -EINVAL; + } + + roam_off = onoff; + DHD_ERROR(("[WIFI_SEC] %s: ROAM On/Off from sysfs = %u\n", + __FUNCTION__, roam_off)); + return count; +} + +static struct dhd_attr dhd_attr_cntl_roamoff = + __ATTR(roamoff, 0660, show_roamoff, set_roamoff); +#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */ + +#ifdef USE_WL_FRAMEBURST +uint32 frameburst = VALUENOTSET; + 
+static ssize_t +show_frameburst(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + if (frameburst == VALUENOTSET) { + ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "frameburst not set from sysfs"); + } else { + ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", frameburst); + } + return ret; +} + +static ssize_t +set_frameburst(struct dhd_info *dev, const char *buf, size_t count) +{ + uint32 onoff; + + onoff = bcm_atoi(buf); + sscanf(buf, "%u", &onoff); + + if (onoff > 2) { + DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n", + __FUNCTION__, onoff)); + return -EINVAL; + } + + frameburst = onoff; + DHD_ERROR(("[WIFI_SEC] %s: FRAMEBURST On/Off from sysfs = %u\n", + __FUNCTION__, frameburst)); + return count; +} + +static struct dhd_attr dhd_attr_cntl_frameburst = + __ATTR(frameburst, 0660, show_frameburst, set_frameburst); +#endif /* USE_WL_FRAMEBURST */ + +#ifdef USE_WL_TXBF +uint32 txbf = VALUENOTSET; + +static ssize_t +show_txbf(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + if (txbf == VALUENOTSET) { + ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "txbf not set from sysfs"); + } else { + ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", txbf); + } + return ret; +} + +static ssize_t +set_txbf(struct dhd_info *dev, const char *buf, size_t count) +{ + uint32 onoff; + + onoff = bcm_atoi(buf); + sscanf(buf, "%u", &onoff); + + if (onoff > 2) { + DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n", + __FUNCTION__, onoff)); + return -EINVAL; + } + + txbf = onoff; + DHD_ERROR(("[WIFI_SEC] %s: FRAMEBURST On/Off from sysfs = %u\n", + __FUNCTION__, txbf)); + return count; +} + +static struct dhd_attr dhd_attr_cntl_txbf = + __ATTR(txbf, 0660, show_txbf, set_txbf); +#endif /* USE_WL_TXBF */ + +#ifdef PROP_TXSTATUS +uint32 proptx = VALUENOTSET; + +static ssize_t +show_proptx(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + if (proptx == VALUENOTSET) { + ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "proptx not set from sysfs"); + } else { + ret = scnprintf(buf, 
PAGE_SIZE -1, "%u\n", proptx); + } + return ret; +} + +static ssize_t +set_proptx(struct dhd_info *dev, const char *buf, size_t count) +{ + uint32 onoff; + + onoff = bcm_strtoul(buf, NULL, 10); + sscanf(buf, "%u", &onoff); + + if (onoff > 2) { + DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n", + __FUNCTION__, onoff)); + return -EINVAL; + } + + proptx = onoff; + DHD_ERROR(("[WIFI_SEC] %s: FRAMEBURST On/Off from sysfs = %u\n", + __FUNCTION__, txbf)); + return count; +} + +static struct dhd_attr dhd_attr_cntl_proptx = + __ATTR(proptx, 0660, show_proptx, set_proptx); + +#endif /* PROP_TXSTATUS */ +#endif /* USE_WFA_CERT_CONF */ +#endif /* DHD_EXPORT_CNTL_FILE */ + +#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS +uint32 report_hang_privcmd_err = 1; + +static ssize_t +show_hang_privcmd_err(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + ret = scnprintf(buf, PAGE_SIZE - 1, "%u\n", report_hang_privcmd_err); + return ret; +} + +static ssize_t +set_hang_privcmd_err(struct dhd_info *dev, const char *buf, size_t count) +{ + uint32 val; + + val = bcm_atoi(buf); + sscanf(buf, "%u", &val); + + report_hang_privcmd_err = val ? 1 : 0; + DHD_INFO(("%s: Set report HANG for private cmd error: %d\n", + __FUNCTION__, report_hang_privcmd_err)); + return count; +} + +static struct dhd_attr dhd_attr_hang_privcmd_err = + __ATTR(hang_privcmd_err, 0660, show_hang_privcmd_err, set_hang_privcmd_err); +#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */ + +#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB) +uint8 control_he_enab = 1; +#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */ + +#if defined(CUSTOM_CONTROL_HE_ENAB) +static ssize_t +show_control_he_enab(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + + ret = scnprintf(buf, PAGE_SIZE - 1, "%d\n", control_he_enab); + return ret; +} + +static ssize_t +set_control_he_enab(struct dhd_info *dev, const char *buf, size_t count) +{ + uint32 val; + + val = bcm_atoi(buf); + + control_he_enab = val ? 
1 : 0; + DHD_ERROR(("%s: Set control he enab: %d\n", __FUNCTION__, control_he_enab)); + return count; +} + +static struct dhd_attr dhd_attr_control_he_enab= +__ATTR(control_he_enab, 0660, show_control_he_enab, set_control_he_enab); +#endif /* CUSTOM_CONTROL_HE_ENAB */ +/* Attribute object that gets registered with "wifi" kobject tree */ +static struct attribute *control_file_attrs[] = { +#ifdef DHD_MAC_ADDR_EXPORT + &dhd_attr_cntl_macaddr.attr, +#endif /* DHD_MAC_ADDR_EXPORT */ +#ifdef DHD_EXPORT_CNTL_FILE +#ifdef DHD_FW_COREDUMP + &dhd_attr_cntl_memdump.attr, +#endif /* DHD_FW_COREDUMP */ +#ifdef BCMASSERT_LOG + &dhd_attr_cntl_assert.attr, +#endif /* BCMASSERT_LOG */ +#ifdef WRITE_WLANINFO + &dhd_attr_cntl_wifiver.attr, +#endif /* WRITE_WLANINFO */ +#ifdef USE_CID_CHECK + &dhd_attr_cntl_cidinfo.attr, +#endif /* USE_CID_CHECK */ +#ifdef GEN_SOFTAP_INFO_FILE + &dhd_attr_cntl_softapinfo.attr, +#endif /* GEN_SOFTAP_INFO_FILE */ +#ifdef MIMO_ANT_SETTING + &dhd_attr_cntl_antinfo.attr, +#endif /* MIMO_ANT_SETTING */ +#ifdef DHD_PM_CONTROL_FROM_FILE + &dhd_attr_cntl_pminfo.attr, +#endif /* DHD_PM_CONTROL_FROM_FILE */ +#ifdef LOGTRACE_FROM_FILE + &dhd_attr_cntl_logtraceinfo.attr, +#endif /* LOGTRACE_FROM_FILE */ +#ifdef USE_WFA_CERT_CONF +#ifdef BCMSDIO + &dhd_attr_cntl_bustxglom.attr, +#endif /* BCMSDIO */ + &dhd_attr_cntl_roamoff.attr, +#ifdef USE_WL_FRAMEBURST + &dhd_attr_cntl_frameburst.attr, +#endif /* USE_WL_FRAMEBURST */ +#ifdef USE_WL_TXBF + &dhd_attr_cntl_txbf.attr, +#endif /* USE_WL_TXBF */ +#ifdef PROP_TXSTATUS + &dhd_attr_cntl_proptx.attr, +#endif /* PROP_TXSTATUS */ +#endif /* USE_WFA_CERT_CONF */ +#endif /* DHD_EXPORT_CNTL_FILE */ +#ifdef DHD_ADPS_BAM_EXPORT + &dhd_attr_adps_bam.attr, +#endif /* DHD_ADPS_BAM_EXPORT */ +#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS + &dhd_attr_hang_privcmd_err.attr, +#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */ +#if defined(CUSTOM_CONTROL_HE_ENAB) + &dhd_attr_control_he_enab.attr, +#endif /* CUSTOM_CONTROL_HE_ENAB */ + NULL +}; + +#define 
to_cntl_dhd(k) container_of(k, struct dhd_info, dhd_conf_file_kobj) + +/* + * wifi kobject show function, the "attr" attribute specifices to which + * node under "sys/wifi" the show function is called. + */ +static ssize_t dhd_cntl_show(struct kobject *kobj, struct attribute *attr, char *buf) +{ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + dhd_info_t *dhd = to_cntl_dhd(kobj); + struct dhd_attr *d_attr = to_attr(attr); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + int ret; + + if (d_attr->show) + ret = d_attr->show(dhd, buf); + else + ret = -EIO; + + return ret; +} + +/* + * wifi kobject show function, the "attr" attribute specifices to which + * node under "sys/wifi" the store function is called. + */ +static ssize_t dhd_cntl_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + dhd_info_t *dhd = to_cntl_dhd(kobj); + struct dhd_attr *d_attr = to_attr(attr); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + int ret; + + if (d_attr->store) + ret = d_attr->store(dhd, buf, count); + else + ret = -EIO; + + return ret; + +} + +static struct sysfs_ops dhd_sysfs_cntl_ops = { + .show = dhd_cntl_show, + .store = dhd_cntl_store, +}; + +static struct kobj_type dhd_cntl_file_ktype = { + .sysfs_ops = &dhd_sysfs_cntl_ops, + .default_attrs = control_file_attrs, +}; + +/* Create a kobject and attach to sysfs interface */ +int dhd_sysfs_init(dhd_info_t *dhd) +{ + int ret = -1; + + if (dhd == NULL) { + DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__)); + return ret; + } + + /* Initialize the kobject */ + ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "bcm-dhd"); + if (ret) { + 
kobject_put(&dhd->dhd_kobj); + DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__)); + return ret; + } + ret = kobject_init_and_add(&dhd->dhd_conf_file_kobj, &dhd_cntl_file_ktype, NULL, "wifi"); + if (ret) { + kobject_put(&dhd->dhd_conf_file_kobj); + DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__)); + return ret; + } + + /* + * We are always responsible for sending the uevent that the kobject + * was added to the system. + */ + kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD); + kobject_uevent(&dhd->dhd_conf_file_kobj, KOBJ_ADD); + + return ret; +} + +/* Done with the kobject and detach the sysfs interface */ +void dhd_sysfs_exit(dhd_info_t *dhd) +{ + if (dhd == NULL) { + DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__)); + return; + } + + /* Releae the kobject */ + if (dhd->dhd_kobj.state_initialized) + kobject_put(&dhd->dhd_kobj); + if (dhd->dhd_conf_file_kobj.state_initialized) + kobject_put(&dhd->dhd_conf_file_kobj); +} diff --git a/bcmdhd.100.10.315.x/dhd_linux_lb.c b/bcmdhd.100.10.315.x/dhd_linux_lb.c new file mode 100644 index 0000000..103fd70 --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_linux_lb.c @@ -0,0 +1,1323 @@ +/* + * Broadcom Dongle Host Driver (DHD), Linux-specific network interface + * Basically selected code segments from usb-cdc.c and usb-rndis.c + * + * Copyright (C) 1999-2019, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_linux_lb.c 805819 2019-02-20 10:49:35Z $ + */ + +#include + +extern dhd_pub_t* g_dhd_pub; + +#if defined(DHD_LB) + +void +dhd_lb_set_default_cpus(dhd_info_t *dhd) +{ + /* Default CPU allocation for the jobs */ + atomic_set(&dhd->rx_napi_cpu, 1); + atomic_set(&dhd->rx_compl_cpu, 2); + atomic_set(&dhd->tx_compl_cpu, 2); + atomic_set(&dhd->tx_cpu, 2); + atomic_set(&dhd->net_tx_cpu, 0); +} + +void +dhd_cpumasks_deinit(dhd_info_t *dhd) +{ + free_cpumask_var(dhd->cpumask_curr_avail); + free_cpumask_var(dhd->cpumask_primary); + free_cpumask_var(dhd->cpumask_primary_new); + free_cpumask_var(dhd->cpumask_secondary); + free_cpumask_var(dhd->cpumask_secondary_new); +} + +int +dhd_cpumasks_init(dhd_info_t *dhd) +{ + int id; + uint32 cpus, num_cpus = num_possible_cpus(); + int ret = 0; + + DHD_ERROR(("%s CPU masks primary(big)=0x%x secondary(little)=0x%x\n", __FUNCTION__, + DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS)); + + if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) || + !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) || + !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) || + !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) || + !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) { + DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__)); + ret = -ENOMEM; + goto fail; + } + + cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask); + cpumask_clear(dhd->cpumask_primary); + cpumask_clear(dhd->cpumask_secondary); + + if (num_cpus > 32) { + DHD_ERROR(("%s max cpus must be 32, %d too big\n", __FUNCTION__, num_cpus)); + ASSERT(0); + } + + cpus = DHD_LB_PRIMARY_CPUS; + for (id = 0; id < num_cpus; id++) { + if (isset(&cpus, id)) + cpumask_set_cpu(id, dhd->cpumask_primary); + } + + cpus = DHD_LB_SECONDARY_CPUS; + for (id = 0; id < num_cpus; id++) { + if (isset(&cpus, id)) + cpumask_set_cpu(id, dhd->cpumask_secondary); + } + + return ret; +fail: + dhd_cpumasks_deinit(dhd); + return ret; +} + +/* + * The CPU 
Candidacy Algorithm + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * The available CPUs for selection are divided into two groups + * Primary Set - A CPU mask that carries the First Choice CPUs + * Secondary Set - A CPU mask that carries the Second Choice CPUs. + * + * There are two types of Job, that needs to be assigned to + * the CPUs, from one of the above mentioned CPU group. The Jobs are + * 1) Rx Packet Processing - napi_cpu + * 2) Completion Processiong (Tx, RX) - compl_cpu + * + * To begin with both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes + * on-line/off-line the CPU candidacy algorithm is triggerd. The candidacy + * algo tries to pickup the first available non boot CPU (CPU0) for napi_cpu. + * If there are more processors free, it assigns one to compl_cpu. + * It also tries to ensure that both napi_cpu and compl_cpu are not on the same + * CPU, as much as possible. + * + * By design, both Tx and Rx completion jobs are run on the same CPU core, as it + * would allow Tx completion skb's to be released into a local free pool from + * which the rx buffer posts could have been serviced. it is important to note + * that a Tx packet may not have a large enough buffer for rx posting. + */ +void dhd_select_cpu_candidacy(dhd_info_t *dhd) +{ + uint32 primary_available_cpus; /* count of primary available cpus */ + uint32 secondary_available_cpus; /* count of secondary available cpus */ + uint32 napi_cpu = 0; /* cpu selected for napi rx processing */ + uint32 compl_cpu = 0; /* cpu selected for completion jobs */ + uint32 tx_cpu = 0; /* cpu selected for tx processing job */ + + cpumask_clear(dhd->cpumask_primary_new); + cpumask_clear(dhd->cpumask_secondary_new); + + /* + * Now select from the primary mask. Even if a Job is + * already running on a CPU in secondary group, we still move + * to primary CPU. So no conditional checks. 
+ */ + cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary, + dhd->cpumask_curr_avail); + + cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary, + dhd->cpumask_curr_avail); + + primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new); + + if (primary_available_cpus > 0) { + napi_cpu = cpumask_first(dhd->cpumask_primary_new); + + /* If no further CPU is available, + * cpumask_next returns >= nr_cpu_ids + */ + tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new); + if (tx_cpu >= nr_cpu_ids) + tx_cpu = 0; + + /* In case there are no more CPUs, do completions & Tx in same CPU */ + compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_primary_new); + if (compl_cpu >= nr_cpu_ids) + compl_cpu = tx_cpu; + } + + DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n", + __FUNCTION__, napi_cpu, compl_cpu, tx_cpu)); + + /* -- Now check for the CPUs from the secondary mask -- */ + secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new); + + DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n", + __FUNCTION__, secondary_available_cpus, nr_cpu_ids)); + + if (secondary_available_cpus > 0) { + /* At this point if napi_cpu is unassigned it means no CPU + * is online from Primary Group + */ + if (napi_cpu == 0) { + napi_cpu = cpumask_first(dhd->cpumask_secondary_new); + tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new); + compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new); + } else if (tx_cpu == 0) { + tx_cpu = cpumask_first(dhd->cpumask_secondary_new); + compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new); + } else if (compl_cpu == 0) { + compl_cpu = cpumask_first(dhd->cpumask_secondary_new); + } + + /* If no CPU was available for tx processing, choose CPU 0 */ + if (tx_cpu >= nr_cpu_ids) + tx_cpu = 0; + + /* If no CPU was available for completion, choose CPU 0 */ + if (compl_cpu >= nr_cpu_ids) + compl_cpu = 0; + } + if ((primary_available_cpus == 0) && + (secondary_available_cpus == 0)) { 
+ /* No CPUs available from primary or secondary mask */ + napi_cpu = 1; + compl_cpu = 0; + tx_cpu = 2; + } + + DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n", + __FUNCTION__, napi_cpu, compl_cpu, tx_cpu)); + + ASSERT(napi_cpu < nr_cpu_ids); + ASSERT(compl_cpu < nr_cpu_ids); + ASSERT(tx_cpu < nr_cpu_ids); + + atomic_set(&dhd->rx_napi_cpu, napi_cpu); + atomic_set(&dhd->tx_compl_cpu, compl_cpu); + atomic_set(&dhd->rx_compl_cpu, compl_cpu); + atomic_set(&dhd->tx_cpu, tx_cpu); + + return; +} + +/* + * Function to handle CPU Hotplug notifications. + * One of the task it does is to trigger the CPU Candidacy algorithm + * for load balancing. + */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) + +int dhd_cpu_startup_callback(unsigned int cpu) +{ + dhd_info_t *dhd = g_dhd_pub->info; + + DHD_INFO(("%s(): \r\n cpu:%d", __FUNCTION__, cpu)); + DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]); + cpumask_set_cpu(cpu, dhd->cpumask_curr_avail); + dhd_select_cpu_candidacy(dhd); + + return 0; +} + +int dhd_cpu_teardown_callback(unsigned int cpu) +{ + dhd_info_t *dhd = g_dhd_pub->info; + + DHD_INFO(("%s(): \r\n cpu:%d", __FUNCTION__, cpu)); + DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]); + cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail); + dhd_select_cpu_candidacy(dhd); + + return 0; +} +#else +int +dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) +{ + unsigned long int cpu = (unsigned long int)hcpu; + +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + + if (!dhd || !(dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE)) { + DHD_INFO(("%s(): LB data is not initialized yet.\n", + __FUNCTION__)); + return NOTIFY_BAD; + } + + switch (action) + { + case CPU_ONLINE: + 
case CPU_ONLINE_FROZEN: + DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]); + cpumask_set_cpu(cpu, dhd->cpumask_curr_avail); + dhd_select_cpu_candidacy(dhd); + break; + + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]); + cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail); + dhd_select_cpu_candidacy(dhd); + break; + default: + break; + } + + return NOTIFY_OK; +} +#endif /* LINUX_VERSION_CODE < 4.10.0 */ + +int dhd_register_cpuhp_callback(dhd_info_t *dhd) +{ + int cpuhp_ret = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) + cpuhp_ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "dhd", + dhd_cpu_startup_callback, dhd_cpu_teardown_callback); + + if (cpuhp_ret < 0) { + DHD_ERROR(("%s(): cpuhp_setup_state failed %d RX LB won't happen \r\n", + __FUNCTION__, cpuhp_ret)); + } +#else + /* + * If we are able to initialize CPU masks, lets register to the + * CPU Hotplug framework to change the CPU for each job dynamically + * using candidacy algorithm. 
+ */ + dhd->cpu_notifier.notifier_call = dhd_cpu_callback; + register_hotcpu_notifier(&dhd->cpu_notifier); /* Register a callback */ +#endif /* LINUX_VERSION_CODE < 4.10.0 */ + return cpuhp_ret; +} + +int dhd_unregister_cpuhp_callback(dhd_info_t *dhd) +{ + int ret = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) + /* Don't want to call tear down while unregistering */ + cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN); +#else + if (dhd->cpu_notifier.notifier_call != NULL) { + unregister_cpu_notifier(&dhd->cpu_notifier); + } +#endif // endif + return ret; +} + +#if defined(DHD_LB_STATS) +void dhd_lb_stats_init(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + int i, j, num_cpus = num_possible_cpus(); + int alloc_size = sizeof(uint32) * num_cpus; + + if (dhdp == NULL) { + DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n", + __FUNCTION__)); + return; + } + + dhd = dhdp->info; + if (dhd == NULL) { + DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__)); + return; + } + + DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt); + DHD_LB_STATS_CLR(dhd->napi_sched_cnt); + + dhd->napi_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->napi_percpu_run_cnt) { + DHD_ERROR(("%s(): napi_percpu_run_cnt malloc failed \n", + __FUNCTION__)); + return; + } + for (i = 0; i < num_cpus; i++) + DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]); + + DHD_LB_STATS_CLR(dhd->rxc_sched_cnt); + + dhd->rxc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->rxc_percpu_run_cnt) { + DHD_ERROR(("%s(): rxc_percpu_run_cnt malloc failed \n", + __FUNCTION__)); + return; + } + for (i = 0; i < num_cpus; i++) + DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]); + + DHD_LB_STATS_CLR(dhd->txc_sched_cnt); + + dhd->txc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->txc_percpu_run_cnt) { + DHD_ERROR(("%s(): txc_percpu_run_cnt malloc failed \n", + __FUNCTION__)); + return; + } + for (i = 0; i < num_cpus; i++) + DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]); + + 
dhd->cpu_online_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->cpu_online_cnt) { + DHD_ERROR(("%s(): cpu_online_cnt malloc failed \n", + __FUNCTION__)); + return; + } + for (i = 0; i < num_cpus; i++) + DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]); + + dhd->cpu_offline_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->cpu_offline_cnt) { + DHD_ERROR(("%s(): cpu_offline_cnt malloc failed \n", + __FUNCTION__)); + return; + } + for (i = 0; i < num_cpus; i++) + DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]); + + dhd->txp_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->txp_percpu_run_cnt) { + DHD_ERROR(("%s(): txp_percpu_run_cnt malloc failed \n", + __FUNCTION__)); + return; + } + for (i = 0; i < num_cpus; i++) + DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]); + + dhd->tx_start_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->tx_start_percpu_run_cnt) { + DHD_ERROR(("%s(): tx_start_percpu_run_cnt malloc failed \n", + __FUNCTION__)); + return; + } + for (i = 0; i < num_cpus; i++) + DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]); + + for (j = 0; j < HIST_BIN_SIZE; j++) { + dhd->napi_rx_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->napi_rx_hist[j]) { + DHD_ERROR(("%s(): dhd->napi_rx_hist[%d] malloc failed \n", + __FUNCTION__, j)); + return; + } + for (i = 0; i < num_cpus; i++) { + DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]); + } + } +#ifdef DHD_LB_TXC + for (j = 0; j < HIST_BIN_SIZE; j++) { + dhd->txc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->txc_hist[j]) { + DHD_ERROR(("%s(): dhd->txc_hist[%d] malloc failed \n", + __FUNCTION__, j)); + return; + } + for (i = 0; i < num_cpus; i++) { + DHD_LB_STATS_CLR(dhd->txc_hist[j][i]); + } + } +#endif /* DHD_LB_TXC */ +#ifdef DHD_LB_RXC + for (j = 0; j < HIST_BIN_SIZE; j++) { + dhd->rxc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->rxc_hist[j]) { + DHD_ERROR(("%s(): dhd->rxc_hist[%d] malloc failed \n", + __FUNCTION__, j)); + return; 
+ } + for (i = 0; i < num_cpus; i++) { + DHD_LB_STATS_CLR(dhd->rxc_hist[j][i]); + } + } +#endif /* DHD_LB_RXC */ + return; +} + +void dhd_lb_stats_deinit(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + int j, num_cpus = num_possible_cpus(); + int alloc_size = sizeof(uint32) * num_cpus; + + if (dhdp == NULL) { + DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n", + __FUNCTION__)); + return; + } + + dhd = dhdp->info; + if (dhd == NULL) { + DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__)); + return; + } + + if (dhd->napi_percpu_run_cnt) { + MFREE(dhdp->osh, dhd->napi_percpu_run_cnt, alloc_size); + dhd->napi_percpu_run_cnt = NULL; + } + if (dhd->rxc_percpu_run_cnt) { + MFREE(dhdp->osh, dhd->rxc_percpu_run_cnt, alloc_size); + dhd->rxc_percpu_run_cnt = NULL; + } + if (dhd->txc_percpu_run_cnt) { + MFREE(dhdp->osh, dhd->txc_percpu_run_cnt, alloc_size); + dhd->txc_percpu_run_cnt = NULL; + } + if (dhd->cpu_online_cnt) { + MFREE(dhdp->osh, dhd->cpu_online_cnt, alloc_size); + dhd->cpu_online_cnt = NULL; + } + if (dhd->cpu_offline_cnt) { + MFREE(dhdp->osh, dhd->cpu_offline_cnt, alloc_size); + dhd->cpu_offline_cnt = NULL; + } + + if (dhd->txp_percpu_run_cnt) { + MFREE(dhdp->osh, dhd->txp_percpu_run_cnt, alloc_size); + dhd->txp_percpu_run_cnt = NULL; + } + if (dhd->tx_start_percpu_run_cnt) { + MFREE(dhdp->osh, dhd->tx_start_percpu_run_cnt, alloc_size); + dhd->tx_start_percpu_run_cnt = NULL; + } + + for (j = 0; j < HIST_BIN_SIZE; j++) { + if (dhd->napi_rx_hist[j]) { + MFREE(dhdp->osh, dhd->napi_rx_hist[j], alloc_size); + dhd->napi_rx_hist[j] = NULL; + } +#ifdef DHD_LB_TXC + if (dhd->txc_hist[j]) { + MFREE(dhdp->osh, dhd->txc_hist[j], alloc_size); + dhd->txc_hist[j] = NULL; + } +#endif /* DHD_LB_TXC */ +#ifdef DHD_LB_RXC + if (dhd->rxc_hist[j]) { + MFREE(dhdp->osh, dhd->rxc_hist[j], alloc_size); + dhd->rxc_hist[j] = NULL; + } +#endif /* DHD_LB_RXC */ + } + + return; +} + +void dhd_lb_stats_dump_histo(dhd_pub_t *dhdp, + struct bcmstrbuf *strbuf, uint32 **hist) +{ + 
int i, j;
+	uint32 *per_cpu_total;
+	uint32 total = 0;
+	uint32 num_cpus = num_possible_cpus();
+
+	per_cpu_total = (uint32 *)MALLOC(dhdp->osh, sizeof(uint32) * num_cpus);
+	if (!per_cpu_total) {
+		DHD_ERROR(("%s(): dhd->per_cpu_total malloc failed \n", __FUNCTION__));
+		return;
+	}
+	bzero(per_cpu_total, sizeof(uint32) * num_cpus);
+
+	bcm_bprintf(strbuf, "CPU: \t\t");
+	for (i = 0; i < num_cpus; i++)
+		bcm_bprintf(strbuf, "%d\t", i);
+	bcm_bprintf(strbuf, "\nBin\n");
+
+	for (i = 0; i < HIST_BIN_SIZE; i++) {
+		bcm_bprintf(strbuf, "%d:\t\t", 1<<i);
+		for (j = 0; j < num_cpus; j++) {
+			per_cpu_total[j] += *(hist[i] + j);
+			bcm_bprintf(strbuf, "%d\t", *(hist[i] + j));
+		}
+		bcm_bprintf(strbuf, "\n");
+	}
+	bcm_bprintf(strbuf, "Per CPU Total \t");
+	for (i = 0; i < num_cpus; i++) {
+		bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]);
+		total += per_cpu_total[i];
+	}
+	bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total);
+
+	if (per_cpu_total) {
+		MFREE(dhdp->osh, per_cpu_total, sizeof(uint32) * num_cpus);
+		per_cpu_total = NULL;
+	}
+	return;
+}
+
+void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
+{
+	int i, num_cpus = num_possible_cpus();
+
+	bcm_bprintf(strbuf, "CPU: \t");
+	for (i = 0; i < num_cpus; i++)
+		bcm_bprintf(strbuf, "%d\t", i);
+	bcm_bprintf(strbuf, "\n");
+
+	bcm_bprintf(strbuf, "Val: \t");
+	for (i = 0; i < num_cpus; i++)
+		bcm_bprintf(strbuf, "%u\t", *(p+i));
+	bcm_bprintf(strbuf, "\n");
+	return;
+}
+
+void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	dhd_info_t *dhd;
+
+	if (dhdp == NULL || strbuf == NULL) {
+		DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
+			__FUNCTION__, dhdp, strbuf));
+		return;
+	}
+
+	dhd = dhdp->info;
+	if (dhd == NULL) {
+		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+		return;
+	}
+
+	bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
+	dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);
+
+	bcm_bprintf(strbuf, "\ncpu_offline_cnt:\n");
+	dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);
+
+	bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
+		dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
+		dhd->txc_sched_cnt);
+
+#ifdef DHD_LB_RXP
+	bcm_bprintf(strbuf, "\nnapi_percpu_run_cnt:\n");
+	dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
+	bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
+	
dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->napi_rx_hist); +#endif /* DHD_LB_RXP */ + +#ifdef DHD_LB_RXC + bcm_bprintf(strbuf, "\nrxc_percpu_run_cnt:\n"); + dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt); + bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n"); + dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->rxc_hist); +#endif /* DHD_LB_RXC */ + +#ifdef DHD_LB_TXC + bcm_bprintf(strbuf, "\ntxc_percpu_run_cnt:\n"); + dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt); + bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n"); + dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->txc_hist); +#endif /* DHD_LB_TXC */ + +#ifdef DHD_LB_TXP + bcm_bprintf(strbuf, "\ntxp_percpu_run_cnt:\n"); + dhd_lb_stats_dump_cpu_array(strbuf, dhd->txp_percpu_run_cnt); + + bcm_bprintf(strbuf, "\ntx_start_percpu_run_cnt:\n"); + dhd_lb_stats_dump_cpu_array(strbuf, dhd->tx_start_percpu_run_cnt); +#endif /* DHD_LB_TXP */ +} + +/* Given a number 'n' returns 'm' that is next larger power of 2 after n */ +static inline uint32 next_larger_power2(uint32 num) +{ + num--; + num |= (num >> 1); + num |= (num >> 2); + num |= (num >> 4); + num |= (num >> 8); + num |= (num >> 16); + + return (num + 1); +} + +void dhd_lb_stats_update_histo(uint32 **bin, uint32 count, uint32 cpu) +{ + uint32 bin_power; + uint32 *p; + bin_power = next_larger_power2(count); + + switch (bin_power) { + case 1: p = bin[0] + cpu; break; + case 2: p = bin[1] + cpu; break; + case 4: p = bin[2] + cpu; break; + case 8: p = bin[3] + cpu; break; + case 16: p = bin[4] + cpu; break; + case 32: p = bin[5] + cpu; break; + case 64: p = bin[6] + cpu; break; + case 128: p = bin[7] + cpu; break; + default : p = bin[8] + cpu; break; + } + + *p = *p + 1; + return; +} + +void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count) +{ + int cpu; + dhd_info_t *dhd = dhdp->info; + + cpu = get_cpu(); + put_cpu(); + dhd_lb_stats_update_histo(dhd->napi_rx_hist, count, cpu); + + return; +} + +void 
dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count) +{ + int cpu; + dhd_info_t *dhd = dhdp->info; + + cpu = get_cpu(); + put_cpu(); + dhd_lb_stats_update_histo(dhd->txc_hist, count, cpu); + + return; +} + +void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count) +{ + int cpu; + dhd_info_t *dhd = dhdp->info; + + cpu = get_cpu(); + put_cpu(); + dhd_lb_stats_update_histo(dhd->rxc_hist, count, cpu); + + return; +} + +void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt); +} + +void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt); +} +#endif /* DHD_LB_STATS */ + +#endif /* DHD_LB */ +#if defined(DHD_LB) +/** + * dhd_tasklet_schedule - Function that runs in IPI context of the destination + * CPU and schedules a tasklet. + * @tasklet: opaque pointer to the tasklet + */ +INLINE void +dhd_tasklet_schedule(void *tasklet) +{ + tasklet_schedule((struct tasklet_struct *)tasklet); +} +/** + * dhd_tasklet_schedule_on - Executes the passed takslet in a given CPU + * @tasklet: tasklet to be scheduled + * @on_cpu: cpu core id + * + * If the requested cpu is online, then an IPI is sent to this cpu via the + * smp_call_function_single with no wait and the tasklet_schedule function + * will be invoked to schedule the specified tasklet on the requested CPU. + */ +INLINE void +dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu) +{ + const int wait = 0; + smp_call_function_single(on_cpu, + dhd_tasklet_schedule, (void *)tasklet, wait); +} + +/** + * dhd_work_schedule_on - Executes the passed work in a given CPU + * @work: work to be scheduled + * @on_cpu: cpu core id + * + * If the requested cpu is online, then an IPI is sent to this cpu via the + * schedule_work_on and the work function + * will be invoked to schedule the specified work on the requested CPU. 
+ */ + +INLINE void +dhd_work_schedule_on(struct work_struct *work, int on_cpu) +{ + schedule_work_on(on_cpu, work); +} + +#if defined(DHD_LB_TXC) +/** + * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet + * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and + * freeing the packets placed in the tx_compl workq + */ +void +dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + int curr_cpu, on_cpu; + + if (dhd->rx_napi_netdev == NULL) { + DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__)); + return; + } + + DHD_LB_STATS_INCR(dhd->txc_sched_cnt); + /* + * If the destination CPU is NOT online or is same as current CPU + * no need to schedule the work + */ + curr_cpu = get_cpu(); + put_cpu(); + + on_cpu = atomic_read(&dhd->tx_compl_cpu); + + if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) { + dhd_tasklet_schedule(&dhd->tx_compl_tasklet); + } else { + schedule_work(&dhd->tx_compl_dispatcher_work); + } +} + +static void dhd_tx_compl_dispatcher_fn(struct work_struct * work) +{ + struct dhd_info *dhd = + container_of(work, struct dhd_info, tx_compl_dispatcher_work); + int cpu; + + get_online_cpus(); + cpu = atomic_read(&dhd->tx_compl_cpu); + if (!cpu_online(cpu)) + dhd_tasklet_schedule(&dhd->tx_compl_tasklet); + else + dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu); + put_online_cpus(); +} +#endif /* DHD_LB_TXC */ + +#if defined(DHD_LB_RXC) +/** + * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet + * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers + * in the H2D RxBuffer Post common ring, by using the recycled pktids that were + * placed in the rx_compl workq. 
+ * + * @dhdp: pointer to dhd_pub object + */ +void +dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + int curr_cpu, on_cpu; + + if (dhd->rx_napi_netdev == NULL) { + DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__)); + return; + } + + DHD_LB_STATS_INCR(dhd->rxc_sched_cnt); + /* + * If the destination CPU is NOT online or is same as current CPU + * no need to schedule the work + */ + curr_cpu = get_cpu(); + put_cpu(); + on_cpu = atomic_read(&dhd->rx_compl_cpu); + + if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) { + dhd_tasklet_schedule(&dhd->rx_compl_tasklet); + } else { + schedule_work(&dhd->rx_compl_dispatcher_work); + } +} + +void dhd_rx_compl_dispatcher_fn(struct work_struct * work) +{ + struct dhd_info *dhd = + container_of(work, struct dhd_info, rx_compl_dispatcher_work); + int cpu; + + get_online_cpus(); + cpu = atomic_read(&dhd->rx_compl_cpu); + if (!cpu_online(cpu)) + dhd_tasklet_schedule(&dhd->rx_compl_tasklet); + else { + dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu); + } + put_online_cpus(); +} +#endif /* DHD_LB_RXC */ + +#if defined(DHD_LB_TXP) +void dhd_tx_dispatcher_work(struct work_struct * work) +{ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + struct dhd_info *dhd = + container_of(work, struct dhd_info, tx_dispatcher_work); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + dhd_tasklet_schedule(&dhd->tx_tasklet); +} + +void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp) +{ + int cpu; + int net_tx_cpu; + dhd_info_t *dhd = dhdp->info; + + preempt_disable(); + cpu = atomic_read(&dhd->tx_cpu); + net_tx_cpu = atomic_read(&dhd->net_tx_cpu); + + /* + * Now if the NET_TX has pushed the packet in the same + * CPU that is chosen for Tx processing, seperate it out + * i.e run the TX processing tasklet in compl_cpu + */ + if (net_tx_cpu == cpu) + cpu = 
atomic_read(&dhd->tx_compl_cpu); + + if (!cpu_online(cpu)) { + /* + * Ooohh... but the Chosen CPU is not online, + * Do the job in the current CPU itself. + */ + dhd_tasklet_schedule(&dhd->tx_tasklet); + } else { + /* + * Schedule tx_dispatcher_work to on the cpu which + * in turn will schedule tx_tasklet. + */ + dhd_work_schedule_on(&dhd->tx_dispatcher_work, cpu); + } + preempt_enable(); +} + +/** + * dhd_lb_tx_dispatch - load balance by dispatching the tx_tasklet + * on another cpu. The tx_tasklet will take care of actually putting + * the skbs into appropriate flow ring and ringing H2D interrupt + * + * @dhdp: pointer to dhd_pub object + */ +void +dhd_lb_tx_dispatch(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + int curr_cpu; + + curr_cpu = get_cpu(); + put_cpu(); + + /* Record the CPU in which the TX request from Network stack came */ + atomic_set(&dhd->net_tx_cpu, curr_cpu); + + /* Schedule the work to dispatch ... */ + dhd_tx_dispatcher_fn(dhdp); +} +#endif /* DHD_LB_TXP */ + +#if defined(DHD_LB_RXP) +/** + * dhd_napi_poll - Load balance napi poll function to process received + * packets and send up the network stack using netif_receive_skb() + * + * @napi: napi object in which context this poll function is invoked + * @budget: number of packets to be processed. + * + * Fetch the dhd_info given the rx_napi_struct. Move all packets from the + * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock). + * Dequeue each packet from head of rx_process_queue, fetch the ifid from the + * packet tag and sendup. 
+ */ +int +dhd_napi_poll(struct napi_struct *napi, int budget) +{ + int ifid; + const int pkt_count = 1; + const int chan = 0; + struct sk_buff * skb; + unsigned long flags; + struct dhd_info *dhd; + int processed = 0; + struct sk_buff_head rx_process_queue; + +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + dhd = container_of(napi, struct dhd_info, rx_napi_struct); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + + DHD_INFO(("%s napi_queue<%d> budget<%d>\n", + __FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget)); + __skb_queue_head_init(&rx_process_queue); + + /* extract the entire rx_napi_queue into local rx_process_queue */ + spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags); + skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue); + spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags); + + while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) { + OSL_PREFETCH(skb->data); + + ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb)); + + DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n", + __FUNCTION__, skb, ifid)); + + dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan); + processed++; + } + + DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed); + + DHD_INFO(("%s processed %d\n", __FUNCTION__, processed)); + napi_complete(napi); + + return budget - 1; +} + +/** + * dhd_napi_schedule - Place the napi struct into the current cpus softnet napi + * poll list. This function may be invoked via the smp_call_function_single + * from a remote CPU. 
+ * + * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ) + * after the napi_struct is added to the softnet data's poll_list + * + * @info: pointer to a dhd_info struct + */ +static void +dhd_napi_schedule(void *info) +{ + dhd_info_t *dhd = (dhd_info_t *)info; + + DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n", + __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu))); + + /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */ + if (napi_schedule_prep(&dhd->rx_napi_struct)) { + __napi_schedule(&dhd->rx_napi_struct); +#ifdef WAKEUP_KSOFTIRQD_POST_NAPI_SCHEDULE + raise_softirq(NET_RX_SOFTIRQ); +#endif /* WAKEUP_KSOFTIRQD_POST_NAPI_SCHEDULE */ + } + + /* + * If the rx_napi_struct was already running, then we let it complete + * processing all its packets. The rx_napi_struct may only run on one + * core at a time, to avoid out-of-order handling. + */ +} + +/** + * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ + * action after placing the dhd's rx_process napi object in the the remote CPU's + * softnet data's poll_list. + * + * @dhd: dhd_info which has the rx_process napi object + * @on_cpu: desired remote CPU id + */ +static INLINE int +dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu) +{ + int wait = 0; /* asynchronous IPI */ + DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n", + __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu)); + + if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) { + DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n", + __FUNCTION__, on_cpu)); + } + + DHD_LB_STATS_INCR(dhd->napi_sched_cnt); + + return 0; +} + +/* + * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on + * Why should we do this? + * The candidacy algorithm is run from the call back function + * registered to CPU hotplug notifier. This call back happens from Worker + * context. The dhd_napi_schedule_on is also from worker context. 
+ * Note that both of this can run on two different CPUs at the same time. + * So we can possibly have a window where a given CPUn is being brought + * down from CPUm while we try to run a function on CPUn. + * To prevent this its better have the whole code to execute an SMP + * function under get_online_cpus. + * This function call ensures that hotplug mechanism does not kick-in + * until we are done dealing with online CPUs + * If the hotplug worker is already running, no worries because the + * candidacy algo would then reflect the same in dhd->rx_napi_cpu. + * + * The below mentioned code structure is proposed in + * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt + * for the question + * Q: I need to ensure that a particular cpu is not removed when there is some + * work specific to this cpu is in progress + * + * According to the documentation calling get_online_cpus is NOT required, if + * we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn can + * run from Work Queue context we have to call these functions + */ +void dhd_rx_napi_dispatcher_fn(struct work_struct * work) +{ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + struct dhd_info *dhd = + container_of(work, struct dhd_info, rx_napi_dispatcher_work); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + + dhd_napi_schedule(dhd); +} + +/** + * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct + * to run on another CPU. The rx_napi_struct's poll function will retrieve all + * the packets enqueued into the rx_napi_queue and sendup. + * The producer's rx packet queue is appended to the rx_napi_queue before + * dispatching the rx_napi_struct. 
+ */ +void +dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp) +{ + unsigned long flags; + dhd_info_t *dhd = dhdp->info; + int curr_cpu; + int on_cpu; +#ifdef DHD_LB_IRQSET + cpumask_t cpus; +#endif /* DHD_LB_IRQSET */ + + if (dhd->rx_napi_netdev == NULL) { + DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__)); + return; + } + + DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__, + skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue))); + + /* append the producer's queue of packets to the napi's rx process queue */ + spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags); + skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue); + spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags); + + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt); + + /* if LB RXP is disabled directly schedule NAPI */ + if (atomic_read(&dhd->lb_rxp_active) == 0) { + dhd_napi_schedule(dhd); + return; + } + + /* + * If the destination CPU is NOT online or is same as current CPU + * no need to schedule the work + */ + curr_cpu = get_cpu(); + put_cpu(); + + preempt_disable(); + on_cpu = atomic_read(&dhd->rx_napi_cpu); +#ifdef DHD_LB_IRQSET + if (cpumask_and(&cpus, cpumask_of(curr_cpu), dhd->cpumask_primary) || + (!cpu_online(on_cpu))) +#else + if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) +#endif /* DHD_LB_IRQSET */ + { + DHD_INFO(("%s : curr_cpu : %d, cpumask : 0x%lx\n", __FUNCTION__, + curr_cpu, *cpumask_bits(dhd->cpumask_primary))); + dhd_napi_schedule(dhd); + } else { + DHD_INFO(("%s : schedule to curr_cpu : %d, cpumask : 0x%lx\n", + __FUNCTION__, curr_cpu, *cpumask_bits(dhd->cpumask_primary))); + dhd_work_schedule_on(&dhd->rx_napi_dispatcher_work, on_cpu); + DHD_LB_STATS_INCR(dhd->napi_sched_cnt); + } + preempt_enable(); +} + +/** + * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue + */ +void +dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx) +{ + dhd_info_t *dhd = dhdp->info; + + DHD_INFO(("%s 
enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__, + pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue))); + DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx); + __skb_queue_tail(&dhd->rx_pend_queue, pkt); +} +#endif /* DHD_LB_RXP */ +#endif /* DHD_LB */ + +#if defined(DHD_LB_IRQSET) || defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON) +void +dhd_irq_set_affinity(dhd_pub_t *dhdp, const struct cpumask *cpumask) +{ + unsigned int irq = (unsigned int)-1; + int err = BCME_OK; + + if (!dhdp) { + DHD_ERROR(("%s : dhdp is NULL\n", __FUNCTION__)); + return; + } + + if (!dhdp->bus) { + DHD_ERROR(("%s : bus is NULL\n", __FUNCTION__)); + return; + } + + DHD_ERROR(("%s : irq set affinity cpu:0x%lx\n", + __FUNCTION__, *cpumask_bits(cpumask))); + + dhdpcie_get_pcieirq(dhdp->bus, &irq); + err = irq_set_affinity(irq, cpumask); + if (err) + DHD_ERROR(("%s : irq set affinity is failed cpu:0x%lx\n", + __FUNCTION__, *cpumask_bits(cpumask))); +} +#endif /* DHD_LB_IRQSET || DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */ + +#if defined(DHD_LB_TXP) + +int BCMFASTPATH +dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net, + int ifidx, void *skb) +{ + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->tx_start_percpu_run_cnt); + + /* If the feature is disabled run-time do TX from here */ + if (atomic_read(&dhd->lb_txp_active) == 0) { + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt); + return __dhd_sendpkt(&dhd->pub, ifidx, skb); + } + + /* Store the address of net device and interface index in the Packet tag */ + DHD_LB_TX_PKTTAG_SET_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), net); + DHD_LB_TX_PKTTAG_SET_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), ifidx); + + /* Enqueue the skb into tx_pend_queue */ + skb_queue_tail(&dhd->tx_pend_queue, skb); + + DHD_TRACE(("%s(): Added skb %p for netdev %p \r\n", __FUNCTION__, skb, net)); + + /* Dispatch the Tx job to be processed by the tx_tasklet */ + dhd_lb_tx_dispatch(&dhd->pub); + + return NETDEV_TX_OK; +} +#endif /* DHD_LB_TXP */ + +#ifdef DHD_LB_TXP 
+#define DHD_LB_TXBOUND 64 +/* + * Function that performs the TX processing on a given CPU + */ +bool +dhd_lb_tx_process(dhd_info_t *dhd) +{ + struct sk_buff *skb; + int cnt = 0; + struct net_device *net; + int ifidx; + bool resched = FALSE; + + DHD_TRACE(("%s(): TX Processing \r\n", __FUNCTION__)); + if (dhd == NULL) { + DHD_ERROR((" Null pointer DHD \r\n")); + return resched; + } + + BCM_REFERENCE(net); + + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt); + + /* Base Loop to perform the actual Tx */ + do { + skb = skb_dequeue(&dhd->tx_pend_queue); + if (skb == NULL) { + DHD_TRACE(("Dequeued a Null Packet \r\n")); + break; + } + cnt++; + + net = DHD_LB_TX_PKTTAG_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb)); + ifidx = DHD_LB_TX_PKTTAG_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb)); + + DHD_TRACE(("Processing skb %p for net %p index %d \r\n", skb, + net, ifidx)); + + __dhd_sendpkt(&dhd->pub, ifidx, skb); + + if (cnt >= DHD_LB_TXBOUND) { + resched = TRUE; + break; + } + + } while (1); + + DHD_INFO(("%s(): Processed %d packets \r\n", __FUNCTION__, cnt)); + + return resched; +} + +void +dhd_lb_tx_handler(unsigned long data) +{ + dhd_info_t *dhd = (dhd_info_t *)data; + + if (dhd_lb_tx_process(dhd)) { + dhd_tasklet_schedule(&dhd->tx_tasklet); + } +} + +#endif /* DHD_LB_TXP */ diff --git a/bcmdhd.100.10.315.x/dhd_linux_pktdump.c b/bcmdhd.100.10.315.x/dhd_linux_pktdump.c new file mode 100644 index 0000000..254d696 --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_linux_pktdump.c @@ -0,0 +1,1356 @@ +/* + * Packet dump helper functions + * + * Copyright (C) 1999-2019, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_linux_pktdump.c 820929 2019-05-21 14:09:11Z $
+ */
+
+#include <typedefs.h>
+#include <ethernet.h>
+#include <bcmutils.h>
+#include <bcmevent.h>
+#include <bcmendian.h>
+#include <bcmtlv.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_linux_pktdump.h>
+#include <bcmip.h>
+#include <bcmudp.h>
+#include <bcmdhcp.h>
+#include <bcmarp.h>
+#include <bcmicmp.h>
+#include <dhd_config.h>
+
+#define DHD_PKTDUMP(arg)	DHD_ERROR(arg)
+#define DHD_PKTDUMP_MEM(arg)	DHD_ERROR(arg)
+#define PACKED_STRUCT __attribute__ ((packed))
+
+#define EAPOL_HDR_LEN		4
+
+/* EAPOL types */
+#define EAP_PACKET		0
+#define EAPOL_START		1
+#define EAPOL_LOGOFF		2
+#define EAPOL_KEY		3
+#define EAPOL_ASF		4
+
+/* EAPOL-Key types */
+#define EAPOL_RC4_KEY		1
+#define EAPOL_WPA2_KEY		2	/* 802.11i/WPA2 */
+#define EAPOL_WPA_KEY		254	/* WPA */
+
+/* EAPOL-Key header field size */
+#define AKW_BLOCK_LEN		8
+#define WPA_KEY_REPLAY_LEN	8
+#define WPA_KEY_NONCE_LEN	32
+#define WPA_KEY_IV_LEN		16
+#define WPA_KEY_RSC_LEN		8
+#define WPA_KEY_ID_LEN		8
+#define WPA_KEY_MIC_LEN		16
+#define WPA_MAX_KEY_SIZE	32
+#define WPA_KEY_DATA_LEN	(WPA_MAX_KEY_SIZE + AKW_BLOCK_LEN)
+
+/* Key information bit */
+#define KEYINFO_TYPE_MASK	(1 << 3)
+#define KEYINFO_INSTALL_MASK	(1 << 6)
+#define KEYINFO_KEYACK_MASK	(1 << 7)
+#define KEYINFO_KEYMIC_MASK	(1 << 8)
+#define KEYINFO_SECURE_MASK	(1 << 9)
+#define KEYINFO_ERROR_MASK	(1 << 10)
+#define KEYINFO_REQ_MASK	(1 << 11)
+
+/* EAP Code */
+#define EAP_CODE_REQUEST	1	/* Request */
+#define EAP_CODE_RESPONSE	2	/* Response */
+#define EAP_CODE_SUCCESS	3	/* Success */
+#define EAP_CODE_FAILURE	4	/* Failure */
+
+/* EAP Type */
+#define EAP_TYPE_RSVD		0	/* Reserved */
+#define EAP_TYPE_IDENT		1	/* Identify */
+#define EAP_TYPE_NOTI		2	/* Notification */
+#define EAP_TYPE_TLS		13	/* EAP-TLS */
+#define EAP_TYPE_LEAP		17	/* Cisco-LEAP */
+#define EAP_TYPE_TTLS		21	/* EAP-TTLS */
+#define EAP_TYPE_AKA		23	/* EAP-AKA */
+#define EAP_TYPE_PEAP		25	/* EAP-PEAP */
+#define EAP_TYPE_FAST		43	/* EAP-FAST */
+#define EAP_TYPE_PSK		47	/* EAP-PSK */
+#define EAP_TYPE_AKAP		50	/* EAP-AKA' */
+#define EAP_TYPE_EXP		254	/* Reserved for Expended Type */
+
+/* WSC */ +#define EAP_HDR_LEN 5 +#define EAP_WSC_NONCE_OFFSET 10 +#define EAP_WSC_DATA_OFFSET (OFFSETOF(eap_wsc_fmt_t, data)) +#define EAP_WSC_MIN_DATA_LEN ((EAP_HDR_LEN) + (EAP_WSC_DATA_OFFSET)) +#define WFA_VID "\x00\x37\x2A" /* WFA SMI code */ +#define WFA_VID_LEN 3 /* WFA VID length */ +#define WFA_VTYPE 1u /* WFA Vendor type */ + +/* WSC opcode */ +#define WSC_OPCODE_UPNP 0 +#define WSC_OPCODE_START 1 +#define WSC_OPCODE_ACK 2 +#define WSC_OPCODE_NACK 3 +#define WSC_OPCODE_MSG 4 +#define WSC_OPCODE_DONE 5 +#define WSC_OPCODE_FRAG_ACK 6 + +/* WSC flag */ +#define WSC_FLAG_MF 1 /* more fragements */ +#define WSC_FLAG_LF 2 /* length field */ + +/* WSC message code */ +#define WSC_ATTR_MSG 0x1022 +#define WSC_MSG_M1 0x04 +#define WSC_MSG_M2 0x05 +#define WSC_MSG_M3 0x07 +#define WSC_MSG_M4 0x08 +#define WSC_MSG_M5 0x09 +#define WSC_MSG_M6 0x0A +#define WSC_MSG_M7 0x0B +#define WSC_MSG_M8 0x0C + +/* Debug prints */ +typedef enum pkt_cnt_type { + PKT_CNT_TYPE_INVALID = 0, + PKT_CNT_TYPE_ARP = 1, + PKT_CNT_TYPE_DNS = 2, + PKT_CNT_TYPE_MAX = 3 +} pkt_cnt_type_t; + +typedef struct pkt_cnt { + uint32 tx_cnt; + uint32 tx_err_cnt; + uint32 rx_cnt; +} pkt_cnt_t; + +typedef struct pkt_cnt_log { + bool enabled; + uint16 reason; + timer_list_compat_t pktcnt_timer; + pkt_cnt_t arp_cnt; + pkt_cnt_t dns_cnt; +} pkt_cnts_log_t; + +#define PKT_CNT_TIMER_INTERNVAL_MS 5000 /* packet count timeout(ms) */ +#define PKT_CNT_RSN_VALID(rsn) \ + (((rsn) > (PKT_CNT_RSN_INVALID)) && ((rsn) < (PKT_CNT_RSN_MAX))) + +static const char pkt_cnt_msg[][20] = { + "INVALID", + "ROAM_SUCCESS", + "GROUP_KEY_UPDATE", + "CONNECT_SUCCESS", + "INVALID" +}; + +static const char tx_pktfate[][30] = { + "TX_PKT_FATE_ACKED", /* 0: WLFC_CTL_PKTFLAG_DISCARD */ + "TX_PKT_FATE_FW_QUEUED", /* 1: WLFC_CTL_PKTFLAG_D11SUPPRESS */ + "TX_PKT_FATE_FW_QUEUED", /* 2: WLFC_CTL_PKTFLAG_WLSUPPRESS */ + "TX_PKT_FATE_FW_DROP_INVALID", /* 3: WLFC_CTL_PKTFLAG_TOSSED_BYWLC */ + "TX_PKT_FATE_SENT", /* 4: 
WLFC_CTL_PKTFLAG_DISCARD_NOACK */ + "TX_PKT_FATE_FW_DROP_OTHER", /* 5: WLFC_CTL_PKTFLAG_SUPPRESS_ACKED */ + "TX_PKT_FATE_FW_DROP_EXPTIME", /* 6: WLFC_CTL_PKTFLAG_EXPIRED */ + "TX_PKT_FATE_FW_DROP_OTHER", /* 7: WLFC_CTL_PKTFLAG_DROPPED */ + "TX_PKT_FATE_FW_PKT_FREE", /* 8: WLFC_CTL_PKTFLAG_MKTFREE */ +}; + +#define DBGREPLAY " Replay Counter: %02x%02x%02x%02x%02x%02x%02x%02x" +#define REPLAY_FMT(key) ((const eapol_key_hdr_t *)(key))->replay[0], \ + ((const eapol_key_hdr_t *)(key))->replay[1], \ + ((const eapol_key_hdr_t *)(key))->replay[2], \ + ((const eapol_key_hdr_t *)(key))->replay[3], \ + ((const eapol_key_hdr_t *)(key))->replay[4], \ + ((const eapol_key_hdr_t *)(key))->replay[5], \ + ((const eapol_key_hdr_t *)(key))->replay[6], \ + ((const eapol_key_hdr_t *)(key))->replay[7] +#define TXFATE_FMT " TX_PKTHASH:0x%X TX_PKT_FATE:%s" +#define TX_PKTHASH(pkthash) ((pkthash) ? (*pkthash) : (0)) +#define TX_FATE_STR(fate) (((*fate) <= (WLFC_CTL_PKTFLAG_MKTFREE)) ? \ + (tx_pktfate[(*fate)]) : "TX_PKT_FATE_FW_DROP_OTHER") +#define TX_FATE(fate) ((fate) ? (TX_FATE_STR(fate)) : "N/A") +#define TX_FATE_ACKED(fate) ((fate) ? 
((*fate) == (WLFC_CTL_PKTFLAG_DISCARD)) : (0)) + +#define EAP_PRINT(str) \ + do { \ + if (tx) { \ + DHD_PKTDUMP(("[dhd-%s] ETHER_TYPE_802_1X [TX]: " \ + str TXFATE_FMT "\n", ifname, \ + TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } else { \ + DHD_PKTDUMP(("[dhd-%s] ETHER_TYPE_802_1X [RX]: " \ + str "\n", ifname)); \ + } \ + } while (0) + +#define EAP_PRINT_REPLAY(str) \ + do { \ + if (tx) { \ + DHD_PKTDUMP(("[dhd-%s] ETHER_TYPE_802_1X [TX]: " \ + str DBGREPLAY TXFATE_FMT "\n", ifname, \ + REPLAY_FMT(eap_key), TX_PKTHASH(pkthash), \ + TX_FATE(pktfate))); \ + } else { \ + DHD_PKTDUMP(("[dhd-%s] ETHER_TYPE_802_1X [RX]: " \ + str DBGREPLAY "\n", ifname, \ + REPLAY_FMT(eap_key))); \ + } \ + } while (0) + +#define EAP_PRINT_OTHER(str) \ + do { \ + if (tx) { \ + DHD_PKTDUMP(("[dhd-%s] ETHER_TYPE_802_1X [TX]: " \ + str "ver %d, type %d" TXFATE_FMT "\n", ifname, \ + eapol_hdr->version, eapol_hdr->type, \ + TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } else { \ + DHD_PKTDUMP(("[dhd-%s] ETHER_TYPE_802_1X [RX]: " \ + str "ver %d, type %d\n", ifname, \ + eapol_hdr->version, eapol_hdr->type)); \ + } \ + } while (0) + +#define EAP_PRINT_OTHER_4WAY(str) \ + do { \ + if (tx) { \ + DHD_PKTDUMP(("[dhd-%s] ETHER_TYPE_802_1X [TX]: " str \ + "ver %d type %d keytype %d keyinfo 0x%02X" \ + TXFATE_FMT "\n", ifname, eapol_hdr->version, \ + eapol_hdr->type, eap_key->type, \ + (uint32)hton16(eap_key->key_info), \ + TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } else { \ + DHD_PKTDUMP(("[dhd-%s] ETHER_TYPE_802_1X [RX]: " str \ + "ver %d type %d keytype %d keyinfo 0x%02X\n", \ + ifname, eapol_hdr->version, eapol_hdr->type, \ + eap_key->type, (uint32)hton16(eap_key->key_info))); \ + } \ + } while (0) + +/* EAPOL header */ +typedef struct eapol_header { + struct ether_header eth; /* 802.3/Ethernet header */ + uint8 version; /* EAPOL protocol version */ + uint8 type; /* EAPOL type */ + uint16 length; /* Length of body */ + uint8 body[1]; /* Body (optional) */ +} PACKED_STRUCT eapol_header_t; + 
+/* EAP header */ +typedef struct eap_header_fmt { + uint8 code; + uint8 id; + uint16 len; + uint8 type; + uint8 data[1]; +} PACKED_STRUCT eap_header_fmt_t; + +/* WSC EAP format */ +typedef struct eap_wsc_fmt { + uint8 oui[3]; + uint32 ouitype; + uint8 opcode; + uint8 flags; + uint8 data[1]; +} PACKED_STRUCT eap_wsc_fmt_t; + +/* EAPOL-Key */ +typedef struct eapol_key_hdr { + uint8 type; /* Key Descriptor Type */ + uint16 key_info; /* Key Information (unaligned) */ + uint16 key_len; /* Key Length (unaligned) */ + uint8 replay[WPA_KEY_REPLAY_LEN]; /* Replay Counter */ + uint8 nonce[WPA_KEY_NONCE_LEN]; /* Nonce */ + uint8 iv[WPA_KEY_IV_LEN]; /* Key IV */ + uint8 rsc[WPA_KEY_RSC_LEN]; /* Key RSC */ + uint8 id[WPA_KEY_ID_LEN]; /* WPA:Key ID, 802.11i/WPA2: Reserved */ + uint8 mic[WPA_KEY_MIC_LEN]; /* Key MIC */ + uint16 data_len; /* Key Data Length */ + uint8 data[WPA_KEY_DATA_LEN]; /* Key data */ +} PACKED_STRUCT eapol_key_hdr_t; + +msg_eapol_t +dhd_is_4way_msg(uint8 *pktdata) +{ + eapol_header_t *eapol_hdr; + eapol_key_hdr_t *eap_key; + msg_eapol_t type = EAPOL_OTHER; + bool pair, ack, mic, kerr, req, sec, install; + uint16 key_info; + + if (!pktdata) { + DHD_PKTDUMP(("%s: pktdata is NULL\n", __FUNCTION__)); + return type; + } + + eapol_hdr = (eapol_header_t *)pktdata; + eap_key = (eapol_key_hdr_t *)(eapol_hdr->body); + if (eap_key->type != EAPOL_WPA2_KEY) { + return type; + } + + key_info = hton16(eap_key->key_info); + pair = !!(key_info & KEYINFO_TYPE_MASK); + ack = !!(key_info & KEYINFO_KEYACK_MASK); + mic = !!(key_info & KEYINFO_KEYMIC_MASK); + kerr = !!(key_info & KEYINFO_ERROR_MASK); + req = !!(key_info & KEYINFO_REQ_MASK); + sec = !!(key_info & KEYINFO_SECURE_MASK); + install = !!(key_info & KEYINFO_INSTALL_MASK); + + if (pair && !install && ack && !mic && !sec && !kerr && !req) { + type = EAPOL_4WAY_M1; + } else if (pair && !install && !ack && mic && !sec && !kerr && !req) { + type = EAPOL_4WAY_M2; + } else if (pair && ack && mic && sec && !kerr && !req) { + 
type = EAPOL_4WAY_M3; + } else if (pair && !install && !ack && mic && sec && !req && !kerr) { + type = EAPOL_4WAY_M4; + } else if (!pair && !install && ack && mic && sec && !req && !kerr) { + type = EAPOL_GROUPKEY_M1; + } else if (!pair && !install && !ack && mic && sec && !req && !kerr) { + type = EAPOL_GROUPKEY_M2; + } else { + type = EAPOL_OTHER; + } + + return type; +} + +void +dhd_dump_pkt(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, uint32 pktlen, + bool tx, uint32 *pkthash, uint16 *pktfate) +{ + struct ether_header *eh; + uint16 ether_type; + + if (!pktdata || pktlen < ETHER_HDR_LEN) { + return; + } + + eh = (struct ether_header *)pktdata; + ether_type = ntoh16(eh->ether_type); + if (ether_type == ETHER_TYPE_802_1X) { + dhd_dump_eapol_message(dhdp, ifidx, pktdata, pktlen, + tx, pkthash, pktfate); + } + if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) { + dhd_dhcp_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate); + dhd_icmp_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate); + dhd_dns_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate); + } + if (ntoh16(eh->ether_type) == ETHER_TYPE_ARP) { + dhd_arp_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate); + } + dhd_trx_pkt_dump(dhdp, ifidx, pktdata, pktlen, tx); +} + +#ifdef DHD_PKTDUMP_ROAM +static void +dhd_dump_pkt_cnts_inc(dhd_pub_t *dhdp, bool tx, uint16 *pktfate, uint16 pkttype) +{ + pkt_cnts_log_t *pktcnts; + pkt_cnt_t *cnt; + + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return; + } + + pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts); + if (!pktcnts) { + DHD_ERROR(("%s: pktcnts is NULL\n", __FUNCTION__)); + return; + } + + if (!pktcnts->enabled || (tx && !pktfate)) { + return; + } + + if (pkttype == PKT_CNT_TYPE_ARP) { + cnt = (pkt_cnt_t *)&pktcnts->arp_cnt; + } else if (pkttype == PKT_CNT_TYPE_DNS) { + cnt = (pkt_cnt_t *)&pktcnts->dns_cnt; + } else { + /* invalid packet type */ + return; + } + + if (tx) { + TX_FATE_ACKED(pktfate) ? 
cnt->tx_cnt++ : cnt->tx_err_cnt++; + } else { + cnt->rx_cnt++; + } +} + +static void +dhd_dump_pkt_timer(unsigned long data) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)data; + pkt_cnts_log_t *pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts); + + pktcnts->enabled = FALSE; + + /* print out the packet counter value */ + DHD_PKTDUMP(("============= PACKET COUNT SUMMARY ============\n")); + DHD_PKTDUMP(("- Reason: %s\n", pkt_cnt_msg[pktcnts->reason])); + DHD_PKTDUMP(("- Duration: %d msec(s)\n", PKT_CNT_TIMER_INTERNVAL_MS)); + DHD_PKTDUMP(("- ARP PACKETS: tx_success:%d tx_fail:%d rx_cnt:%d\n", + pktcnts->arp_cnt.tx_cnt, pktcnts->arp_cnt.tx_err_cnt, + pktcnts->arp_cnt.rx_cnt)); + DHD_PKTDUMP(("- DNS PACKETS: tx_success:%d tx_fail:%d rx_cnt:%d\n", + pktcnts->dns_cnt.tx_cnt, pktcnts->dns_cnt.tx_err_cnt, + pktcnts->dns_cnt.rx_cnt)); + DHD_PKTDUMP(("============= END OF COUNT SUMMARY ============\n")); +} + +void +dhd_dump_mod_pkt_timer(dhd_pub_t *dhdp, uint16 rsn) +{ + pkt_cnts_log_t *pktcnts; + + if (!dhdp || !dhdp->pktcnts) { + DHD_ERROR(("%s: dhdp or dhdp->pktcnts is NULL\n", + __FUNCTION__)); + return; + } + + if (!PKT_CNT_RSN_VALID(rsn)) { + DHD_ERROR(("%s: invalid reason code %d\n", + __FUNCTION__, rsn)); + return; + } + + pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts); + if (timer_pending(&pktcnts->pktcnt_timer)) { + del_timer_sync(&pktcnts->pktcnt_timer); + } + + bzero(&pktcnts->arp_cnt, sizeof(pkt_cnt_t)); + bzero(&pktcnts->dns_cnt, sizeof(pkt_cnt_t)); + pktcnts->reason = rsn; + pktcnts->enabled = TRUE; + mod_timer(&pktcnts->pktcnt_timer, + jiffies + msecs_to_jiffies(PKT_CNT_TIMER_INTERNVAL_MS)); + DHD_PKTDUMP(("%s: Arm the pktcnt timer. 
reason=%d\n", + __FUNCTION__, rsn)); +} + +void +dhd_dump_pkt_init(dhd_pub_t *dhdp) +{ + pkt_cnts_log_t *pktcnts; + + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return; + } + + pktcnts = (pkt_cnts_log_t *)MALLOCZ(dhdp->osh, sizeof(pkt_cnts_log_t)); + if (!pktcnts) { + DHD_ERROR(("%s: failed to allocate memory for pktcnts\n", + __FUNCTION__)); + return; + } + + /* init timers */ + init_timer_compat(&pktcnts->pktcnt_timer, dhd_dump_pkt_timer, dhdp); + dhdp->pktcnts = pktcnts; +} + +void +dhd_dump_pkt_deinit(dhd_pub_t *dhdp) +{ + pkt_cnts_log_t *pktcnts; + + if (!dhdp || !dhdp->pktcnts) { + DHD_ERROR(("%s: dhdp or pktcnts is NULL\n", __FUNCTION__)); + return; + } + + pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts); + pktcnts->enabled = FALSE; + del_timer_sync(&pktcnts->pktcnt_timer); + MFREE(dhdp->osh, dhdp->pktcnts, sizeof(pkt_cnts_log_t)); + dhdp->pktcnts = NULL; +} + +void +dhd_dump_pkt_clear(dhd_pub_t *dhdp) +{ + pkt_cnts_log_t *pktcnts; + + if (!dhdp || !dhdp->pktcnts) { + DHD_ERROR(("%s: dhdp or pktcnts is NULL\n", __FUNCTION__)); + return; + } + + pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts); + pktcnts->enabled = FALSE; + del_timer_sync(&pktcnts->pktcnt_timer); + pktcnts->reason = 0; + bzero(&pktcnts->arp_cnt, sizeof(pkt_cnt_t)); + bzero(&pktcnts->dns_cnt, sizeof(pkt_cnt_t)); +} + +bool +dhd_dump_pkt_enabled(dhd_pub_t *dhdp) +{ + pkt_cnts_log_t *pktcnts; + + if (!dhdp || !dhdp->pktcnts) { + return FALSE; + } + + pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts); + + return pktcnts->enabled; +} +#else +static INLINE void +dhd_dump_pkt_cnts_inc(dhd_pub_t *dhdp, bool tx, uint16 *pktfate, uint16 pkttype) { } +static INLINE bool +dhd_dump_pkt_enabled(dhd_pub_t *dhdp) { return FALSE; } +#endif /* DHD_PKTDUMP_ROAM */ + +#ifdef DHD_8021X_DUMP +static void +dhd_dump_wsc_message(dhd_pub_t *dhd, int ifidx, uint8 *pktdata, + uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate) +{ + eapol_header_t *eapol_hdr; + eap_header_fmt_t *eap_hdr; + eap_wsc_fmt_t 
*eap_wsc; + char *ifname; + uint16 eap_len; + bool cond; + + if (!pktdata) { + DHD_ERROR(("%s: pktdata is NULL\n", __FUNCTION__)); + return; + } + + if (pktlen < (ETHER_HDR_LEN + EAPOL_HDR_LEN)) { + DHD_ERROR(("%s: invalid pkt length\n", __FUNCTION__)); + return; + } + + eapol_hdr = (eapol_header_t *)pktdata; + eap_hdr = (eap_header_fmt_t *)(eapol_hdr->body); + if (eap_hdr->type != EAP_TYPE_EXP) { + return; + } + + eap_len = ntoh16(eap_hdr->len); + if (eap_len < EAP_WSC_MIN_DATA_LEN) { + return; + } + + eap_wsc = (eap_wsc_fmt_t *)(eap_hdr->data); + if (bcmp(eap_wsc->oui, (const uint8 *)WFA_VID, WFA_VID_LEN) || + (ntoh32(eap_wsc->ouitype) != WFA_VTYPE)) { + return; + } + + if (eap_wsc->flags) { + return; + } + + ifname = dhd_ifname(dhd, ifidx); + cond = (tx && pktfate) ? FALSE : TRUE; + + if (eap_wsc->opcode == WSC_OPCODE_MSG) { + const uint8 *tlv_buf = (const uint8 *)(eap_wsc->data); + const uint8 *msg; + uint16 msglen; + uint16 wsc_data_len = (uint16)(eap_len - EAP_HDR_LEN - EAP_WSC_DATA_OFFSET); + bcm_xtlv_opts_t opt = BCM_XTLV_OPTION_IDBE | BCM_XTLV_OPTION_LENBE; + + msg = bcm_get_data_from_xtlv_buf(tlv_buf, wsc_data_len, + WSC_ATTR_MSG, &msglen, opt); + if (msg && msglen) { + switch (*msg) { + case WSC_MSG_M1: + dhd->conf->eapol_status = EAPOL_STATUS_WPS_M1; + DHD_STATLOG_DATA(dhd, ST(WPS_M1), ifidx, tx, cond); + EAP_PRINT("EAP Packet, WPS M1"); + break; + case WSC_MSG_M2: + dhd->conf->eapol_status = EAPOL_STATUS_WPS_M2; + DHD_STATLOG_DATA(dhd, ST(WPS_M2), ifidx, tx, cond); + EAP_PRINT("EAP Packet, WPS M2"); + break; + case WSC_MSG_M3: + dhd->conf->eapol_status = EAPOL_STATUS_WPS_M3; + DHD_STATLOG_DATA(dhd, ST(WPS_M3), ifidx, tx, cond); + EAP_PRINT("EAP Packet, WPS M3"); + break; + case WSC_MSG_M4: + dhd->conf->eapol_status = EAPOL_STATUS_WPS_M4; + DHD_STATLOG_DATA(dhd, ST(WPS_M4), ifidx, tx, cond); + EAP_PRINT("EAP Packet, WPS M4"); + break; + case WSC_MSG_M5: + dhd->conf->eapol_status = EAPOL_STATUS_WPS_M5; + DHD_STATLOG_DATA(dhd, ST(WPS_M5), ifidx, tx, 
cond); + EAP_PRINT("EAP Packet, WPS M5"); + break; + case WSC_MSG_M6: + dhd->conf->eapol_status = EAPOL_STATUS_WPS_M6; + DHD_STATLOG_DATA(dhd, ST(WPS_M6), ifidx, tx, cond); + EAP_PRINT("EAP Packet, WPS M6"); + break; + case WSC_MSG_M7: + dhd->conf->eapol_status = EAPOL_STATUS_WPS_M7; + DHD_STATLOG_DATA(dhd, ST(WPS_M7), ifidx, tx, cond); + EAP_PRINT("EAP Packet, WPS M7"); + break; + case WSC_MSG_M8: + dhd->conf->eapol_status = EAPOL_STATUS_WPS_M8; + DHD_STATLOG_DATA(dhd, ST(WPS_M8), ifidx, tx, cond); + EAP_PRINT("EAP Packet, WPS M8"); + break; + default: + break; + } + } + } else if (eap_wsc->opcode == WSC_OPCODE_START) { + dhd->conf->eapol_status = EAPOL_STATUS_WSC_START; + DHD_STATLOG_DATA(dhd, ST(WSC_START), ifidx, tx, cond); + EAP_PRINT("EAP Packet, WSC Start"); + } else if (eap_wsc->opcode == WSC_OPCODE_DONE) { + dhd->conf->eapol_status = EAPOL_STATUS_WSC_DONE; + DHD_STATLOG_DATA(dhd, ST(WSC_DONE), ifidx, tx, cond); + EAP_PRINT("EAP Packet, WSC Done"); + } +} + +static void +dhd_dump_eap_packet(dhd_pub_t *dhd, int ifidx, uint8 *pktdata, + uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate) +{ + eapol_header_t *eapol_hdr; + eap_header_fmt_t *eap_hdr; + char *ifname; + bool cond; + + if (!pktdata) { + DHD_PKTDUMP(("%s: pktdata is NULL\n", __FUNCTION__)); + return; + } + + eapol_hdr = (eapol_header_t *)pktdata; + eap_hdr = (eap_header_fmt_t *)(eapol_hdr->body); + ifname = dhd_ifname(dhd, ifidx); + cond = (tx && pktfate) ? 
FALSE : TRUE; + + if (eap_hdr->code == EAP_CODE_REQUEST || + eap_hdr->code == EAP_CODE_RESPONSE) { + bool isreq = (eap_hdr->code == EAP_CODE_REQUEST); + switch (eap_hdr->type) { + case EAP_TYPE_IDENT: + if (isreq) { + dhd->conf->eapol_status = EAPOL_STATUS_REQID; + DHD_STATLOG_DATA(dhd, ST(EAP_REQ_IDENTITY), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Request, Identity"); + } else { + dhd->conf->eapol_status = EAPOL_STATUS_RSPID; + DHD_STATLOG_DATA(dhd, ST(EAP_RESP_IDENTITY), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Response, Identity"); + } + break; + case EAP_TYPE_TLS: + if (isreq) { + DHD_STATLOG_DATA(dhd, ST(EAP_REQ_TLS), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Request, TLS"); + } else { + DHD_STATLOG_DATA(dhd, ST(EAP_RESP_TLS), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Response, TLS"); + } + break; + case EAP_TYPE_LEAP: + if (isreq) { + DHD_STATLOG_DATA(dhd, ST(EAP_REQ_LEAP), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Request, LEAP"); + } else { + DHD_STATLOG_DATA(dhd, ST(EAP_RESP_LEAP), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Response, LEAP"); + } + break; + case EAP_TYPE_TTLS: + if (isreq) { + DHD_STATLOG_DATA(dhd, ST(EAP_REQ_TTLS), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Request, TTLS"); + } else { + DHD_STATLOG_DATA(dhd, ST(EAP_RESP_TTLS), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Response, TTLS"); + } + break; + case EAP_TYPE_AKA: + if (isreq) { + DHD_STATLOG_DATA(dhd, ST(EAP_REQ_AKA), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Request, AKA"); + } else { + DHD_STATLOG_DATA(dhd, ST(EAP_RESP_AKA), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Response, AKA"); + } + break; + case EAP_TYPE_PEAP: + if (isreq) { + DHD_STATLOG_DATA(dhd, ST(EAP_REQ_PEAP), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Request, PEAP"); + } else { + DHD_STATLOG_DATA(dhd, ST(EAP_RESP_PEAP), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Response, PEAP"); + } + break; + case EAP_TYPE_FAST: + if (isreq) { + DHD_STATLOG_DATA(dhd, ST(EAP_REQ_FAST), ifidx, tx, cond); + EAP_PRINT("EAP 
Packet, Request, FAST"); + } else { + DHD_STATLOG_DATA(dhd, ST(EAP_RESP_FAST), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Response, FAST"); + } + break; + case EAP_TYPE_PSK: + if (isreq) { + DHD_STATLOG_DATA(dhd, ST(EAP_REQ_PSK), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Request, PSK"); + } else { + DHD_STATLOG_DATA(dhd, ST(EAP_RESP_PSK), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Response, PSK"); + } + break; + case EAP_TYPE_AKAP: + if (isreq) { + DHD_STATLOG_DATA(dhd, ST(EAP_REQ_AKAP), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Request, AKAP"); + } else { + DHD_STATLOG_DATA(dhd, ST(EAP_RESP_AKAP), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Response, AKAP"); + } + break; + case EAP_TYPE_EXP: + dhd_dump_wsc_message(dhd, ifidx, pktdata, pktlen, tx, + pkthash, pktfate); + break; + default: + break; + } + } else if (eap_hdr->code == EAP_CODE_SUCCESS) { + DHD_STATLOG_DATA(dhd, ST(EAP_SUCCESS), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Success"); + } else if (eap_hdr->code == EAP_CODE_FAILURE) { + DHD_STATLOG_DATA(dhd, ST(EAP_FAILURE), ifidx, tx, cond); + EAP_PRINT("EAP Packet, Failure"); + } +} + +static void +dhd_dump_eapol_4way_message(dhd_pub_t *dhd, int ifidx, uint8 *pktdata, bool tx, + uint32 *pkthash, uint16 *pktfate) +{ + eapol_header_t *eapol_hdr; + eapol_key_hdr_t *eap_key; + msg_eapol_t type; + char *ifname; + bool cond; + + if (!pktdata) { + DHD_PKTDUMP(("%s: pktdata is NULL\n", __FUNCTION__)); + return; + } + + type = dhd_is_4way_msg(pktdata); + ifname = dhd_ifname(dhd, ifidx); + eapol_hdr = (eapol_header_t *)pktdata; + eap_key = (eapol_key_hdr_t *)(eapol_hdr->body); + cond = (tx && pktfate) ? 
FALSE : TRUE; + + if (eap_key->type != EAPOL_WPA2_KEY) { + EAP_PRINT_OTHER("NON EAPOL_WPA2_KEY"); + return; + } + + switch (type) { + case EAPOL_4WAY_M1: + dhd->conf->eapol_status = EAPOL_STATUS_4WAY_M1; + DHD_STATLOG_DATA(dhd, ST(EAPOL_M1), ifidx, tx, cond); + EAP_PRINT("EAPOL Packet, 4-way handshake, M1"); + break; + case EAPOL_4WAY_M2: + dhd->conf->eapol_status = EAPOL_STATUS_4WAY_M2; + DHD_STATLOG_DATA(dhd, ST(EAPOL_M2), ifidx, tx, cond); + EAP_PRINT("EAPOL Packet, 4-way handshake, M2"); + break; + case EAPOL_4WAY_M3: + dhd->conf->eapol_status = EAPOL_STATUS_4WAY_M3; + DHD_STATLOG_DATA(dhd, ST(EAPOL_M3), ifidx, tx, cond); + EAP_PRINT("EAPOL Packet, 4-way handshake, M3"); + break; + case EAPOL_4WAY_M4: + dhd->conf->eapol_status = EAPOL_STATUS_4WAY_M4; + DHD_STATLOG_DATA(dhd, ST(EAPOL_M4), ifidx, tx, cond); + EAP_PRINT("EAPOL Packet, 4-way handshake, M4"); + break; + case EAPOL_GROUPKEY_M1: + DHD_STATLOG_DATA(dhd, ST(EAPOL_GROUPKEY_M1), ifidx, tx, cond); + EAP_PRINT_REPLAY("EAPOL Packet, GROUP Key handshake, M1"); + break; + case EAPOL_GROUPKEY_M2: + DHD_STATLOG_DATA(dhd, ST(EAPOL_GROUPKEY_M2), ifidx, tx, cond); + EAP_PRINT_REPLAY("EAPOL Packet, GROUP Key handshake, M2"); + if (ifidx == 0 && tx && pktfate) { + dhd_dump_mod_pkt_timer(dhd, PKT_CNT_RSN_GRPKEY_UP); + } + break; + default: + DHD_STATLOG_DATA(dhd, ST(8021X_OTHER), ifidx, tx, cond); + EAP_PRINT_OTHER("OTHER 4WAY"); + break; + } +} + +void +dhd_dump_eapol_message(dhd_pub_t *dhd, int ifidx, uint8 *pktdata, + uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate) +{ + char *ifname; + eapol_header_t *eapol_hdr = (eapol_header_t *)pktdata; + bool cond; + + if (!pktdata) { + DHD_ERROR(("%s: pktdata is NULL\n", __FUNCTION__)); + return; + } + + eapol_hdr = (eapol_header_t *)pktdata; + ifname = dhd_ifname(dhd, ifidx); + cond = (tx && pktfate) ? 
FALSE : TRUE; + + if (eapol_hdr->type == EAP_PACKET) { + dhd_dump_eap_packet(dhd, ifidx, pktdata, pktlen, tx, + pkthash, pktfate); + } else if (eapol_hdr->type == EAPOL_START) { + DHD_STATLOG_DATA(dhd, ST(EAPOL_START), ifidx, tx, cond); + EAP_PRINT("EAP Packet, EAPOL-Start"); + } else if (eapol_hdr->type == EAPOL_KEY) { + dhd_dump_eapol_4way_message(dhd, ifidx, pktdata, tx, + pkthash, pktfate); + } else { + DHD_STATLOG_DATA(dhd, ST(8021X_OTHER), ifidx, tx, cond); + EAP_PRINT_OTHER("OTHER 8021X"); + } +} +#endif /* DHD_8021X_DUMP */ + +#ifdef DHD_DHCP_DUMP +#define BOOTP_CHADDR_LEN 16 +#define BOOTP_SNAME_LEN 64 +#define BOOTP_FILE_LEN 128 +#define BOOTP_MIN_DHCP_OPT_LEN 312 +#define BOOTP_MAGIC_COOKIE_LEN 4 + +#define DHCP_MSGTYPE_DISCOVER 1 +#define DHCP_MSGTYPE_OFFER 2 +#define DHCP_MSGTYPE_REQUEST 3 +#define DHCP_MSGTYPE_DECLINE 4 +#define DHCP_MSGTYPE_ACK 5 +#define DHCP_MSGTYPE_NAK 6 +#define DHCP_MSGTYPE_RELEASE 7 +#define DHCP_MSGTYPE_INFORM 8 + +#define DHCP_PRINT(str) \ + do { \ + if (tx) { \ + DHD_PKTDUMP(("[dhd-%s] "str " %s[%s] [TX] -" TXFATE_FMT "\n", \ + ifname, typestr, opstr, \ + TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } else { \ + DHD_PKTDUMP(("[dhd-%s] "str" %s[%s] [RX]\n", \ + ifname, typestr, opstr)); \ + } \ + } while (0) + +typedef struct bootp_fmt { + struct ipv4_hdr iph; + struct bcmudp_hdr udph; + uint8 op; + uint8 htype; + uint8 hlen; + uint8 hops; + uint32 transaction_id; + uint16 secs; + uint16 flags; + uint32 client_ip; + uint32 assigned_ip; + uint32 server_ip; + uint32 relay_ip; + uint8 hw_address[BOOTP_CHADDR_LEN]; + uint8 server_name[BOOTP_SNAME_LEN]; + uint8 file_name[BOOTP_FILE_LEN]; + uint8 options[BOOTP_MIN_DHCP_OPT_LEN]; +} PACKED_STRUCT bootp_fmt_t; + +static const uint8 bootp_magic_cookie[4] = { 99, 130, 83, 99 }; +static char dhcp_ops[][10] = { + "NA", "REQUEST", "REPLY" +}; +static char dhcp_types[][10] = { + "NA", "DISCOVER", "OFFER", "REQUEST", "DECLINE", "ACK", "NAK", "RELEASE", "INFORM" +}; + +static const int 
dhcp_types_stat[9] = { + ST(INVALID), ST(DHCP_DISCOVER), ST(DHCP_OFFER), ST(DHCP_REQUEST), + ST(DHCP_DECLINE), ST(DHCP_ACK), ST(DHCP_NAK), ST(DHCP_RELEASE), + ST(DHCP_INFORM) +}; + +void +dhd_dhcp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx, + uint32 *pkthash, uint16 *pktfate) +{ + bootp_fmt_t *b = (bootp_fmt_t *)&pktdata[ETHER_HDR_LEN]; + struct ipv4_hdr *iph = &b->iph; + uint8 *ptr, *opt, *end = (uint8 *) b + ntohs(b->iph.tot_len); + int dhcp_type = 0, len, opt_len; + char *ifname = NULL, *typestr = NULL, *opstr = NULL; + bool cond; + + /* check IP header */ + if ((IPV4_HLEN(iph) < IPV4_HLEN_MIN) || + IP_VER(iph) != IP_VER_4 || + IPV4_PROT(iph) != IP_PROT_UDP) { + return; + } + + /* check UDP port for bootp (67, 68) */ + if (b->udph.src_port != htons(DHCP_PORT_SERVER) && + b->udph.src_port != htons(DHCP_PORT_CLIENT) && + b->udph.dst_port != htons(DHCP_PORT_SERVER) && + b->udph.dst_port != htons(DHCP_PORT_CLIENT)) { + return; + } + + /* check header length */ + if (ntohs(iph->tot_len) < ntohs(b->udph.len) + sizeof(struct bcmudp_hdr)) { + return; + } + + ifname = dhd_ifname(dhdp, ifidx); + cond = (tx && pktfate) ? 
FALSE : TRUE; + len = ntohs(b->udph.len) - sizeof(struct bcmudp_hdr); + opt_len = len - (sizeof(*b) - sizeof(struct ipv4_hdr) - + sizeof(struct bcmudp_hdr) - sizeof(b->options)); + + /* parse bootp options */ + if (opt_len >= BOOTP_MAGIC_COOKIE_LEN && + !memcmp(b->options, bootp_magic_cookie, BOOTP_MAGIC_COOKIE_LEN)) { + ptr = &b->options[BOOTP_MAGIC_COOKIE_LEN]; + while (ptr < end && *ptr != 0xff) { + opt = ptr++; + if (*opt == 0) { + continue; + } + ptr += *ptr + 1; + if (ptr >= end) { + break; + } + if (*opt == DHCP_OPT_MSGTYPE) { + if (opt[1]) { + dhcp_type = opt[2]; + typestr = dhcp_types[dhcp_type]; + opstr = dhcp_ops[b->op]; + DHD_STATLOG_DATA(dhdp, dhcp_types_stat[dhcp_type], + ifidx, tx, cond); + DHCP_PRINT("DHCP"); + break; + } + } + } + } +} +#endif /* DHD_DHCP_DUMP */ + +#ifdef DHD_ICMP_DUMP +#define ICMP_TYPE_DEST_UNREACH 3 +#define ICMP_ECHO_SEQ_OFFSET 6 +#define ICMP_ECHO_SEQ(h) (*(uint16 *)((uint8 *)(h) + (ICMP_ECHO_SEQ_OFFSET))) +#define ICMP_PING_PRINT(str) \ + do { \ + if (tx) { \ + DHD_PKTDUMP_MEM(("[dhd-%s] "str " [TX] : %s(%s) %s %s(%s) SEQNUM=%d" \ + TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, seqnum, \ + TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } else { \ + DHD_PKTDUMP_MEM(("[dhd-%s] "str " [RX] : %s(%s) %s %s(%s) SEQNUM=%d\n", \ + ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, seqnum)); \ + } \ + } while (0) + +#define ICMP_PRINT(str) \ + do { \ + if (tx) { \ + DHD_PKTDUMP_MEM(("[dhd-%s] "str " [TX] : %s(%s) %s %s(%s) TYPE=%d, CODE=%d" \ + TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, type, code, \ + TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } else { \ + DHD_PKTDUMP_MEM(("[dhd-%s] "str " [RX] : %s(%s) %s %s(%s) TYPE=%d," \ + " CODE=%d\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, type, code)); \ + } \ + } 
while (0) + +void +dhd_icmp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx, + uint32 *pkthash, uint16 *pktfate) +{ + uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN]; + struct ipv4_hdr *iph = (struct ipv4_hdr *)pkt; + struct bcmicmp_hdr *icmph; + char *ifname; + bool cond; + uint16 seqnum, type, code; + char sabuf[20]="", dabuf[20]=""; + char seabuf[ETHER_ADDR_STR_LEN]=""; + char deabuf[ETHER_ADDR_STR_LEN]=""; + + /* check IP header */ + if ((IPV4_HLEN(iph) < IPV4_HLEN_MIN) || + IP_VER(iph) != IP_VER_4 || + IPV4_PROT(iph) != IP_PROT_ICMP) { + return; + } + + /* check header length */ + if (ntohs(iph->tot_len) - IPV4_HLEN(iph) < sizeof(struct bcmicmp_hdr)) { + return; + } + + ifname = dhd_ifname(dhdp, ifidx); + cond = (tx && pktfate) ? FALSE : TRUE; + icmph = (struct bcmicmp_hdr *)((uint8 *)pkt + sizeof(struct ipv4_hdr)); + seqnum = 0; + type = icmph->type; + code = icmph->code; + bcm_ip_ntoa((struct ipv4_addr *)iph->src_ip, sabuf); + bcm_ip_ntoa((struct ipv4_addr *)iph->dst_ip, dabuf); + bcm_ether_ntoa((struct ether_addr *)pktdata, deabuf); + bcm_ether_ntoa((struct ether_addr *)(pktdata+6), seabuf); + if (type == ICMP_TYPE_ECHO_REQUEST) { + seqnum = ntoh16(ICMP_ECHO_SEQ(icmph)); + DHD_STATLOG_DATA(dhdp, ST(ICMP_PING_REQ), ifidx, tx, cond); + ICMP_PING_PRINT("PING REQUEST"); + } else if (type == ICMP_TYPE_ECHO_REPLY) { + seqnum = ntoh16(ICMP_ECHO_SEQ(icmph)); + DHD_STATLOG_DATA(dhdp, ST(ICMP_PING_RESP), ifidx, tx, cond); + ICMP_PING_PRINT("PING REPLY "); + } else if (type == ICMP_TYPE_DEST_UNREACH) { + DHD_STATLOG_DATA(dhdp, ST(ICMP_DEST_UNREACH), ifidx, tx, cond); + ICMP_PRINT("ICMP DEST UNREACH"); + } else { + DHD_STATLOG_DATA(dhdp, ST(ICMP_OTHER), ifidx, tx, cond); + ICMP_PRINT("ICMP OTHER"); + } +} +#endif /* DHD_ICMP_DUMP */ + +#ifdef DHD_ARP_DUMP +#define ARP_PRINT(str) \ + do { \ + if (tx) { \ + if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \ + DHD_PKTDUMP(("[dhd-%s] "str " [TX] : %s(%s) %s %s(%s)" TXFATE_FMT "\n", \ + ifname, 
tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, \ + TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } else { \ + DHD_PKTDUMP_MEM(("[dhd-%s] "str " [TX] : %s(%s) %s %s(%s)" TXFATE_FMT "\n", \ + ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, \ + TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } \ + } else { \ + DHD_PKTDUMP_MEM(("[dhd-%s] "str " [RX] : %s(%s) %s %s(%s)\n", \ + ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \ + tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf)); \ + } \ + } while (0) \ + +#define ARP_PRINT_OTHER(str) \ + do { \ + if (tx) { \ + if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \ + DHD_PKTDUMP(("[dhd-%s] "str " [TX] op_code=%d" \ + TXFATE_FMT "\n", ifname, opcode, \ + TX_PKTHASH(pkthash), \ + TX_FATE(pktfate))); \ + } else { \ + DHD_PKTDUMP_MEM(("[dhd-%s] "str " [TX] op_code=%d" \ + TXFATE_FMT "\n", ifname, opcode, \ + TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } \ + } else { \ + DHD_PKTDUMP_MEM(("[dhd-%s] "str " [RX] op_code=%d\n", \ + ifname, opcode)); \ + } \ + } while (0) + +void +dhd_arp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx, + uint32 *pkthash, uint16 *pktfate) +{ + uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN]; + struct bcmarp *arph = (struct bcmarp *)pkt; + char *ifname; + uint16 opcode; + bool cond, dump_enabled; + char sabuf[20]="", dabuf[20]=""; + char seabuf[ETHER_ADDR_STR_LEN]=""; + char deabuf[ETHER_ADDR_STR_LEN]=""; + + /* validation check */ + if (arph->htype != hton16(HTYPE_ETHERNET) || + arph->hlen != ETHER_ADDR_LEN || + arph->plen != 4) { + return; + } + + ifname = dhd_ifname(dhdp, ifidx); + opcode = ntoh16(arph->oper); + cond = (tx && pktfate) ? 
FALSE : TRUE; + dump_enabled = dhd_dump_pkt_enabled(dhdp); + bcm_ip_ntoa((struct ipv4_addr *)arph->src_ip, sabuf); + bcm_ip_ntoa((struct ipv4_addr *)arph->dst_ip, dabuf); + bcm_ether_ntoa((struct ether_addr *)arph->dst_eth, deabuf); + bcm_ether_ntoa((struct ether_addr *)arph->src_eth, seabuf); + if (opcode == ARP_OPC_REQUEST) { + DHD_STATLOG_DATA(dhdp, ST(ARP_REQ), ifidx, tx, cond); + ARP_PRINT("ARP REQUEST "); + } else if (opcode == ARP_OPC_REPLY) { + DHD_STATLOG_DATA(dhdp, ST(ARP_RESP), ifidx, tx, cond); + ARP_PRINT("ARP RESPONSE"); + } else { + ARP_PRINT_OTHER("ARP OTHER"); + } + + if (ifidx == 0) { + dhd_dump_pkt_cnts_inc(dhdp, tx, pktfate, PKT_CNT_TYPE_ARP); + } +} +#endif /* DHD_ARP_DUMP */ + +#ifdef DHD_DNS_DUMP +typedef struct dns_fmt { + struct ipv4_hdr iph; + struct bcmudp_hdr udph; + uint16 id; + uint16 flags; + uint16 qdcount; + uint16 ancount; + uint16 nscount; + uint16 arcount; +} PACKED_STRUCT dns_fmt_t; + +#define UDP_PORT_DNS 53 +#define DNS_QR_LOC 15 +#define DNS_OPCODE_LOC 11 +#define DNS_RCODE_LOC 0 +#define DNS_QR_MASK ((0x1) << (DNS_QR_LOC)) +#define DNS_OPCODE_MASK ((0xF) << (DNS_OPCODE_LOC)) +#define DNS_RCODE_MASK ((0xF) << (DNS_RCODE_LOC)) +#define GET_DNS_QR(flags) (((flags) & (DNS_QR_MASK)) >> (DNS_QR_LOC)) +#define GET_DNS_OPCODE(flags) (((flags) & (DNS_OPCODE_MASK)) >> (DNS_OPCODE_LOC)) +#define GET_DNS_RCODE(flags) (((flags) & (DNS_RCODE_MASK)) >> (DNS_RCODE_LOC)) +#define DNS_UNASSIGNED_OPCODE(flags) ((GET_DNS_OPCODE(flags) >= (6))) + +static const char dns_opcode_types[][11] = { + "QUERY", "IQUERY", "STATUS", "UNASSIGNED", "NOTIFY", "UPDATE" +}; + +#define DNSOPCODE(op) \ + (DNS_UNASSIGNED_OPCODE(flags) ? 
"UNASSIGNED" : dns_opcode_types[op]) + +#define DNS_REQ_PRINT(str) \ + do { \ + if (tx) { \ + if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \ + DHD_PKTDUMP(("[dhd-%s] "str " [TX] ID:0x%04X OPCODE:%s" \ + TXFATE_FMT "\n", ifname, id, DNSOPCODE(opcode), \ + TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } else { \ + DHD_PKTDUMP_MEM(("[dhd-%s] "str " [TX] ID:0x%04X OPCODE:%s" \ + TXFATE_FMT "\n", ifname, id, DNSOPCODE(opcode), \ + TX_PKTHASH(pkthash), TX_FATE(pktfate))); \ + } \ + } else { \ + DHD_PKTDUMP_MEM(("[dhd-%s] "str " [RX] ID:0x%04X OPCODE:%s\n", \ + ifname, id, DNSOPCODE(opcode))); \ + } \ + } while (0) + +#define DNS_RESP_PRINT(str) \ + do { \ + if (tx) { \ + if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \ + DHD_PKTDUMP(("[dhd-%s] "str " [TX] ID:0x%04X OPCODE:%s RCODE:%d" \ + TXFATE_FMT "\n", ifname, id, DNSOPCODE(opcode), \ + GET_DNS_RCODE(flags), TX_PKTHASH(pkthash), \ + TX_FATE(pktfate))); \ + } else { \ + DHD_PKTDUMP_MEM(("[dhd-%s] "str " [TX] ID:0x%04X OPCODE:%s RCODE:%d" \ + TXFATE_FMT "\n", ifname, id, DNSOPCODE(opcode), \ + GET_DNS_RCODE(flags), TX_PKTHASH(pkthash), \ + TX_FATE(pktfate))); \ + } \ + } else { \ + DHD_PKTDUMP_MEM(("[dhd-%s] "str " [RX] ID:0x%04X OPCODE:%s RCODE:%d\n", \ + ifname, id, DNSOPCODE(opcode), GET_DNS_RCODE(flags))); \ + } \ + } while (0) + +void +dhd_dns_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx, + uint32 *pkthash, uint16 *pktfate) +{ + dns_fmt_t *dnsh = (dns_fmt_t *)&pktdata[ETHER_HDR_LEN]; + struct ipv4_hdr *iph = &dnsh->iph; + uint16 flags, opcode, id; + char *ifname; + bool cond, dump_enabled; + + /* check IP header */ + if ((IPV4_HLEN(iph) < IPV4_HLEN_MIN) || + IP_VER(iph) != IP_VER_4 || + IPV4_PROT(iph) != IP_PROT_UDP) { + return; + } + + /* check UDP port for DNS */ + if (dnsh->udph.src_port != hton16(UDP_PORT_DNS) && + dnsh->udph.dst_port != hton16(UDP_PORT_DNS)) { + return; + } + + /* check header length */ + if (ntoh16(iph->tot_len) < (ntoh16(dnsh->udph.len) + + sizeof(struct 
bcmudp_hdr))) { + return; + } + + ifname = dhd_ifname(dhdp, ifidx); + cond = (tx && pktfate) ? FALSE : TRUE; + dump_enabled = dhd_dump_pkt_enabled(dhdp); + flags = hton16(dnsh->flags); + opcode = GET_DNS_OPCODE(flags); + id = hton16(dnsh->id); + if (GET_DNS_QR(flags)) { + /* Response */ + DHD_STATLOG_DATA(dhdp, ST(DNS_RESP), ifidx, tx, cond); + DNS_RESP_PRINT("DNS RESPONSE"); + } else { + /* Request */ + DHD_STATLOG_DATA(dhdp, ST(DNS_QUERY), ifidx, tx, cond); + DNS_REQ_PRINT("DNS REQUEST"); + } + + if (ifidx == 0) { + dhd_dump_pkt_cnts_inc(dhdp, tx, pktfate, PKT_CNT_TYPE_DNS); + } +} +#endif /* DHD_DNS_DUMP */ + +#ifdef DHD_TRX_DUMP +void +dhd_trx_pkt_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, uint32 pktlen, bool tx) +{ + struct ether_header *eh; + uint16 protocol; + char *pkttype = "UNKNOWN"; + + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return; + } + + if (!pktdata) { + DHD_ERROR(("%s: pktdata is NULL\n", __FUNCTION__)); + return; + } + + eh = (struct ether_header *)pktdata; + protocol = hton16(eh->ether_type); + BCM_REFERENCE(pktlen); + + switch (protocol) { + case ETHER_TYPE_IP: + pkttype = "IP"; + break; + case ETHER_TYPE_ARP: + pkttype = "ARP"; + break; + case ETHER_TYPE_BRCM: + pkttype = "BRCM"; + break; + case ETHER_TYPE_802_1X: + pkttype = "802.1X"; + break; + case ETHER_TYPE_WAI: + pkttype = "WAPI"; + break; + default: + break; + } + + if (protocol != ETHER_TYPE_BRCM) { + if (pktdata[0] == 0xFF) { + DHD_PKTDUMP(("[dhd-%s] %s BROADCAST DUMP - %s\n", + dhd_ifname(dhdp, ifidx), tx?"TX":"RX", pkttype)); + } else if (pktdata[0] & 1) { + DHD_PKTDUMP(("[dhd-%s] %s MULTICAST DUMP " MACDBG " - %s\n", + dhd_ifname(dhdp, ifidx), tx?"TX":"RX", MAC2STRDBG(pktdata), pkttype)); + } else { + DHD_PKTDUMP(("[dhd-%s] %s DUMP - %s\n", + dhd_ifname(dhdp, ifidx), tx?"TX":"RX", pkttype)); + } +#ifdef DHD_RX_FULL_DUMP + prhex("Data", pktdata, pktlen); +#endif /* DHD_RX_FULL_DUMP */ + } + else { + DHD_PKTDUMP(("[dhd-%s] %s DUMP - %s\n", + 
dhd_ifname(dhdp, ifidx), tx?"TX":"RX", pkttype)); + } +} +#endif /* DHD_TRX_DUMP */ diff --git a/bcmdhd.100.10.315.x/dhd_linux_pktdump.h b/bcmdhd.100.10.315.x/dhd_linux_pktdump.h new file mode 100644 index 0000000..cac2af2 --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_linux_pktdump.h @@ -0,0 +1,124 @@ +/* + * Header file for the Packet dump helper functions + * + * Copyright (C) 1999-2019, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_linux_pktdump.h 820929 2019-05-21 14:09:11Z $ + */ + +#ifndef __DHD_LINUX_PKTDUMP_H_ +#define __DHD_LINUX_PKTDUMP_H_ + +#include +#include + +typedef enum { + EAPOL_OTHER = 0, + EAPOL_4WAY_M1, + EAPOL_4WAY_M2, + EAPOL_4WAY_M3, + EAPOL_4WAY_M4, + EAPOL_GROUPKEY_M1, + EAPOL_GROUPKEY_M2 +} msg_eapol_t; + +typedef enum pkt_cnt_rsn { + PKT_CNT_RSN_INVALID = 0, + PKT_CNT_RSN_ROAM = 1, + PKT_CNT_RSN_GRPKEY_UP = 2, + PKT_CNT_RSN_CONNECT = 3, + PKT_CNT_RSN_MAX = 4 +} pkt_cnt_rsn_t; + +extern msg_eapol_t dhd_is_4way_msg(uint8 *pktdata); +extern void dhd_dump_pkt(dhd_pub_t *dhd, int ifidx, uint8 *pktdata, + uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate); + +#ifdef DHD_PKTDUMP_ROAM +extern void dhd_dump_mod_pkt_timer(dhd_pub_t *dhdp, uint16 rsn); +extern void dhd_dump_pkt_init(dhd_pub_t *dhdp); +extern void dhd_dump_pkt_deinit(dhd_pub_t *dhdp); +extern void dhd_dump_pkt_clear(dhd_pub_t *dhdp); +#else +static INLINE void dhd_dump_mod_pkt_timer(dhd_pub_t *dhdp, uint16 rsn) { } +static INLINE void dhd_dump_pkt_init(dhd_pub_t *dhdp) { } +static INLINE void dhd_dump_pkt_deinit(dhd_pub_t *dhdp) { } +static INLINE void dhd_dump_pkt_clear(dhd_pub_t *dhdp) { } +#endif /* DHD_PKTDUMP_ROAM */ + +/* Rx packet dump */ +#ifdef DHD_TRX_DUMP +extern void dhd_trx_pkt_dump(dhd_pub_t *dhdp, int ifidx, + uint8 *pktdata, uint32 pktlen, bool tx); +#else +static INLINE void dhd_trx_pkt_dump(dhd_pub_t *dhdp, int ifidx, + uint8 *pktdata, uint32 pktlen, bool tx) { } +#endif /* DHD_TRX_DUMP */ + +/* DHCP packet dump */ +#ifdef DHD_DHCP_DUMP +extern void dhd_dhcp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx, + uint32 *pkthash, uint16 *pktfate); +#else +static INLINE void dhd_dhcp_dump(dhd_pub_t *dhdp, int ifidx, + uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { } +#endif /* DHD_DHCP_DUMP */ + +/* DNS packet dump */ +#ifdef DHD_DNS_DUMP +extern void dhd_dns_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx, + uint32 *pkthash, 
uint16 *pktfate); +#else +static INLINE void dhd_dns_dump(dhd_pub_t *dhdp, int ifidx, + uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { } +#endif /* DHD_DNS_DUMP */ + +/* ICMP packet dump */ +#ifdef DHD_ICMP_DUMP +extern void dhd_icmp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx, + uint32 *pkthash, uint16 *pktfate); +#else +static INLINE void dhd_icmp_dump(dhd_pub_t *dhdp, int ifidx, + uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { } +#endif /* DHD_ICMP_DUMP */ + +/* ARP packet dump */ +#ifdef DHD_ARP_DUMP +extern void dhd_arp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx, + uint32 *pkthash, uint16 *pktfate); +#else +static INLINE void dhd_arp_dump(dhd_pub_t *dhdp, int ifidx, + uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { } +#endif /* DHD_ARP_DUMP */ + +/* 802.1X packet dump */ +#ifdef DHD_8021X_DUMP +extern void dhd_dump_eapol_message(dhd_pub_t *dhd, int ifidx, + uint8 *pktdata, uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate); +#else +static INLINE void dhd_dump_eapol_message(dhd_pub_t *dhd, int ifidx, + uint8 *pktdata, uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate) { } +#endif /* DHD_8021X_DUMP */ + +#endif /* __DHD_LINUX_PKTDUMP_H_ */ diff --git a/bcmdhd.100.10.315.x/dhd_linux_platdev.c b/bcmdhd.100.10.315.x/dhd_linux_platdev.c index af392fc..e68732b 100644 --- a/bcmdhd.100.10.315.x/dhd_linux_platdev.c +++ b/bcmdhd.100.10.315.x/dhd_linux_platdev.c @@ -1,7 +1,7 @@ /* * Linux platform device for DHD WLAN adapter * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: dhd_linux_platdev.c 765775 2018-06-05 10:10:56Z $ + * $Id: dhd_linux_platdev.c 805835 2019-02-20 12:35:44Z $ */ #include #include @@ -223,7 +223,7 @@ int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long } plat_data = adapter->wifi_plat_data; - DHD_ERROR(("%s = %d\n", __FUNCTION__, on)); + DHD_ERROR(("%s = %d, delay: %lu msec\n", __FUNCTION__, on, msec)); if (plat_data->set_power) { #ifdef ENABLE_4335BT_WAR if (on) { @@ -313,11 +313,11 @@ wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode) DHD_TRACE(("%s\n", __FUNCTION__)); if (plat_data->get_country_code) { -#ifdef CUSTOM_COUNTRY_CODE +#ifdef CUSTOM_FORCE_NODFS_FLAG return plat_data->get_country_code(ccode, flags); #else return plat_data->get_country_code(ccode); -#endif /* CUSTOM_COUNTRY_CODE */ +#endif /* CUSTOM_FORCE_NODFS_FLAG */ } #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */ @@ -922,6 +922,9 @@ fail: wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); wifi_platform_bus_enumerate(adapter, FALSE); } +#else + /* x86 bring-up PC needs no power-up operations */ + err = dhd_bus_register(); #endif // endif return err; @@ -936,11 +939,13 @@ static int dhd_wifi_platform_load_sdio(void) #ifdef BCMDBUS static int dhd_wifi_platform_load_usb(void) { + int err = 0; +#if !defined(DHD_PRELOAD) wifi_adapter_info_t *adapter; s32 timeout = -1; int i; - int err = 0; enum wifi_adapter_status wait_status; +#endif err = dhd_bus_register(); if (err) { @@ -948,6 +953,7 @@ static int dhd_wifi_platform_load_usb(void) goto exit; } +#if !defined(DHD_PRELOAD) /* power up all adapters */ for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { adapter = &dhd_wifi_platdata->adapters[i]; @@ -974,10 +980,12 @@ static int dhd_wifi_platform_load_usb(void) goto fail; } } +#endif exit: return 
err; +#if !defined(DHD_PRELOAD) fail: dhd_bus_unregister(); /* power down all adapters */ @@ -987,6 +995,7 @@ fail: } return err; +#endif } #else /* BCMDBUS */ static int dhd_wifi_platform_load_usb(void) diff --git a/bcmdhd.100.10.315.x/dhd_linux_priv.h b/bcmdhd.100.10.315.x/dhd_linux_priv.h new file mode 100644 index 0000000..c317fca --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_linux_priv.h @@ -0,0 +1,431 @@ +/* + * DHD Linux header file - contains private structure definition of the Linux specific layer + * + * Copyright (C) 1999-2019, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_linux_priv.h 815919 2019-04-22 09:06:50Z $ + */ + +#ifndef __DHD_LINUX_PRIV_H__ +#define __DHD_LINUX_PRIV_H__ + +#include + +#ifdef SHOW_LOGTRACE +#include +#include +#endif /* SHOW_LOGTRACE */ +#include +#include +#ifdef CONFIG_COMPAT +#include +#endif /* CONFIG COMPAT */ +#include +#include +#include +#include +#include +#include + +#ifdef PCIE_FULL_DONGLE +#include +#include +#endif /* PCIE_FULL_DONGLE */ + +/* + * Do not include this header except for the dhd_linux.c dhd_linux_sysfs.c + * Local private structure (extension of pub) + */ +typedef struct dhd_info { +#if defined(WL_WIRELESS_EXT) + wl_iw_t iw; /* wireless extensions state (must be first) */ +#endif /* defined(WL_WIRELESS_EXT) */ + dhd_pub_t pub; + /* for supporting multiple interfaces. + * static_ifs hold the net ifaces without valid FW IF + */ + dhd_if_t *iflist[DHD_MAX_IFS + DHD_MAX_STATIC_IFS]; + + wifi_adapter_info_t *adapter; /* adapter information, interrupt, fw path etc. */ + char fw_path[PATH_MAX]; /* path to firmware image */ + char nv_path[PATH_MAX]; /* path to nvram vars file */ + char clm_path[PATH_MAX]; /* path to clm vars file */ + char conf_path[PATH_MAX]; /* path to config vars file */ +#ifdef DHD_UCODE_DOWNLOAD + char uc_path[PATH_MAX]; /* path to ucode image */ +#endif /* DHD_UCODE_DOWNLOAD */ + + /* serialize dhd iovars */ + struct mutex dhd_iovar_mutex; + + struct semaphore proto_sem; +#ifdef PROP_TXSTATUS + spinlock_t wlfc_spinlock; + +#ifdef BCMDBUS + ulong wlfc_lock_flags; + ulong wlfc_pub_lock_flags; +#endif /* BCMDBUS */ +#endif /* PROP_TXSTATUS */ + wait_queue_head_t ioctl_resp_wait; + wait_queue_head_t d3ack_wait; + wait_queue_head_t dhd_bus_busy_state_wait; + wait_queue_head_t dmaxfer_wait; + uint32 default_wd_interval; + + timer_list_compat_t timer; + bool wd_timer_valid; + struct tasklet_struct tasklet; + spinlock_t sdlock; + spinlock_t txqlock; + spinlock_t dhd_lock; +#ifdef BCMDBUS + ulong txqlock_flags; +#else + + struct semaphore 
sdsem; + tsk_ctl_t thr_dpc_ctl; + tsk_ctl_t thr_wdt_ctl; +#endif /* BCMDBUS */ + + tsk_ctl_t thr_rxf_ctl; + spinlock_t rxf_lock; + bool rxthread_enabled; + + /* Wakelocks */ +#if defined(CONFIG_HAS_WAKELOCK) + struct wake_lock wl_wifi; /* Wifi wakelock */ + struct wake_lock wl_rxwake; /* Wifi rx wakelock */ + struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */ + struct wake_lock wl_wdwake; /* Wifi wd wakelock */ + struct wake_lock wl_evtwake; /* Wifi event wakelock */ + struct wake_lock wl_pmwake; /* Wifi pm handler wakelock */ + struct wake_lock wl_txflwake; /* Wifi tx flow wakelock */ +#ifdef BCMPCIE_OOB_HOST_WAKE + struct wake_lock wl_intrwake; /* Host wakeup wakelock */ +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK + struct wake_lock wl_scanwake; /* Wifi scan wakelock */ +#endif /* DHD_USE_SCAN_WAKELOCK */ +#endif /* CONFIG_HAS_WAKELOCK */ + + /* net_device interface lock, prevent race conditions among net_dev interface + * calls and wifi_on or wifi_off + */ + struct mutex dhd_net_if_mutex; + struct mutex dhd_suspend_mutex; +#if defined(PKT_FILTER_SUPPORT) && defined(APF) + struct mutex dhd_apf_mutex; +#endif /* PKT_FILTER_SUPPORT && APF */ + spinlock_t wakelock_spinlock; + spinlock_t wakelock_evt_spinlock; + uint32 wakelock_counter; + int wakelock_wd_counter; + int wakelock_rx_timeout_enable; + int wakelock_ctrl_timeout_enable; + bool waive_wakelock; + uint32 wakelock_before_waive; + + /* Thread to issue ioctl for multicast */ + wait_queue_head_t ctrl_wait; + atomic_t pend_8021x_cnt; + dhd_attach_states_t dhd_state; +#ifdef SHOW_LOGTRACE + dhd_event_log_t event_data; +#endif /* SHOW_LOGTRACE */ + +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) + struct early_suspend early_suspend; +#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */ + +#ifdef ARP_OFFLOAD_SUPPORT + u32 pend_ipaddr; +#endif /* ARP_OFFLOAD_SUPPORT */ +#ifdef DHDTCPACK_SUPPRESS + spinlock_t tcpack_lock; +#endif /* DHDTCPACK_SUPPRESS */ +#ifdef 
FIX_CPU_MIN_CLOCK + bool cpufreq_fix_status; + struct mutex cpufreq_fix; + struct pm_qos_request dhd_cpu_qos; +#ifdef FIX_BUS_MIN_CLOCK + struct pm_qos_request dhd_bus_qos; +#endif /* FIX_BUS_MIN_CLOCK */ +#endif /* FIX_CPU_MIN_CLOCK */ + void *dhd_deferred_wq; +#ifdef DEBUG_CPU_FREQ + struct notifier_block freq_trans; + int __percpu *new_freq; +#endif // endif + unsigned int unit; + struct notifier_block pm_notifier; +#ifdef DHD_PSTA + uint32 psta_mode; /* PSTA or PSR */ +#endif /* DHD_PSTA */ +#ifdef DHD_WET + uint32 wet_mode; +#endif /* DHD_WET */ +#ifdef DHD_DEBUG + dhd_dump_t *dump; + struct timer_list join_timer; + u32 join_timeout_val; + bool join_timer_active; + uint scan_time_count; + struct timer_list scan_timer; + bool scan_timer_active; +#endif // endif +#if defined(DHD_LB) + /* CPU Load Balance dynamic CPU selection */ + + /* Variable that tracks the currect CPUs available for candidacy */ + cpumask_var_t cpumask_curr_avail; + + /* Primary and secondary CPU mask */ + cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */ + cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */ + + struct notifier_block cpu_notifier; + + /* Tasklet to handle Tx Completion packet freeing */ + struct tasklet_struct tx_compl_tasklet; + atomic_t tx_compl_cpu; + + /* Tasklet to handle RxBuf Post during Rx completion */ + struct tasklet_struct rx_compl_tasklet; + atomic_t rx_compl_cpu; + + /* Napi struct for handling rx packet sendup. Packets are removed from + * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then + * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled + * to run to rx_napi_cpu. 
+ */ + struct sk_buff_head rx_pend_queue ____cacheline_aligned; + struct sk_buff_head rx_napi_queue ____cacheline_aligned; + struct napi_struct rx_napi_struct ____cacheline_aligned; + atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */ + struct net_device *rx_napi_netdev; /* netdev of primary interface */ + + struct work_struct rx_napi_dispatcher_work; + struct work_struct tx_compl_dispatcher_work; + struct work_struct tx_dispatcher_work; + struct work_struct rx_compl_dispatcher_work; + + /* Number of times DPC Tasklet ran */ + uint32 dhd_dpc_cnt; + /* Number of times NAPI processing got scheduled */ + uint32 napi_sched_cnt; + /* Number of times NAPI processing ran on each available core */ + uint32 *napi_percpu_run_cnt; + /* Number of times RX Completions got scheduled */ + uint32 rxc_sched_cnt; + /* Number of times RX Completion ran on each available core */ + uint32 *rxc_percpu_run_cnt; + /* Number of times TX Completions got scheduled */ + uint32 txc_sched_cnt; + /* Number of times TX Completions ran on each available core */ + uint32 *txc_percpu_run_cnt; + /* CPU status */ + /* Number of times each CPU came online */ + uint32 *cpu_online_cnt; + /* Number of times each CPU went offline */ + uint32 *cpu_offline_cnt; + + /* Number of times TX processing run on each core */ + uint32 *txp_percpu_run_cnt; + /* Number of times TX start run on each core */ + uint32 *tx_start_percpu_run_cnt; + + /* Tx load balancing */ + + /* TODO: Need to see if batch processing is really required in case of TX + * processing. In case of RX the Dongle can send a bunch of rx completions, + * hence we took a 3 queue approach + * enque - adds the skbs to rx_pend_queue + * dispatch - uses a lock and adds the list of skbs from pend queue to + * napi queue + * napi processing - copies the pend_queue into a local queue and works + * on it. 
+ * But for TX its going to be 1 skb at a time, so we are just thinking + * of using only one queue and use the lock supported skb queue functions + * to add and process it. If its in-efficient we'll re-visit the queue + * design. + */ + + /* When the NET_TX tries to send a TX packet put it into tx_pend_queue */ + /* struct sk_buff_head tx_pend_queue ____cacheline_aligned; */ + /* + * From the Tasklet that actually sends out data + * copy the list tx_pend_queue into tx_active_queue. There by we need + * to spinlock to only perform the copy the rest of the code ie to + * construct the tx_pend_queue and the code to process tx_active_queue + * can be lockless. The concept is borrowed as is from RX processing + */ + /* struct sk_buff_head tx_active_queue ____cacheline_aligned; */ + + /* Control TXP in runtime, enable by default */ + atomic_t lb_txp_active; + + /* Control RXP in runtime, enable by default */ + atomic_t lb_rxp_active; + + /* + * When the NET_TX tries to send a TX packet put it into tx_pend_queue + * For now, the processing tasklet will also direcly operate on this + * queue + */ + struct sk_buff_head tx_pend_queue ____cacheline_aligned; + + /* Control RXP in runtime, enable by default */ + /* cpu on which the DHD Tx is happenning */ + atomic_t tx_cpu; + + /* CPU on which the Network stack is calling the DHD's xmit function */ + atomic_t net_tx_cpu; + + /* Tasklet context from which the DHD's TX processing happens */ + struct tasklet_struct tx_tasklet; + + /* + * Consumer Histogram - NAPI RX Packet processing + * ----------------------------------------------- + * On Each CPU, when the NAPI RX Packet processing call back was invoked + * how many packets were processed is captured in this data structure. + * Now its difficult to capture the "exact" number of packets processed. + * So considering the packet counter to be a 32 bit one, we have a + * bucket with 8 bins (2^1, 2^2 ... 2^8). 
The "number" of packets + * processed is rounded off to the next power of 2 and put in the + * approriate "bin" the value in the bin gets incremented. + * For example, assume that in CPU 1 if NAPI Rx runs 3 times + * and the packet count processed is as follows (assume the bin counters are 0) + * iteration 1 - 10 (the bin counter 2^4 increments to 1) + * iteration 2 - 30 (the bin counter 2^5 increments to 1) + * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2) + */ + uint32 *napi_rx_hist[HIST_BIN_SIZE]; + uint32 *txc_hist[HIST_BIN_SIZE]; + uint32 *rxc_hist[HIST_BIN_SIZE]; +#endif /* DHD_LB */ +#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR) + struct work_struct axi_error_dispatcher_work; +#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */ +#ifdef SHOW_LOGTRACE +#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE + tsk_ctl_t thr_logtrace_ctl; +#else + struct delayed_work event_log_dispatcher_work; +#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */ +#endif /* SHOW_LOGTRACE */ + +#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) +#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */ + struct kobject dhd_kobj; + struct kobject dhd_conf_file_kobj; + struct timer_list timesync_timer; +#if defined(BT_OVER_SDIO) + char btfw_path[PATH_MAX]; +#endif /* defined (BT_OVER_SDIO) */ +#ifdef WL_MONITOR + struct net_device *monitor_dev; /* monitor pseudo device */ + struct sk_buff *monitor_skb; + uint monitor_len; + uint monitor_type; /* monitor pseudo device */ +#endif /* WL_MONITOR */ +#if defined(BT_OVER_SDIO) + struct mutex bus_user_lock; /* lock for sdio bus apis shared between WLAN & BT */ + int bus_user_count; /* User counts of sdio bus shared between WLAN & BT */ +#endif /* BT_OVER_SDIO */ +#ifdef SHOW_LOGTRACE + struct sk_buff_head evt_trace_queue ____cacheline_aligned; +#endif // endif +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + struct workqueue_struct *tx_wq; + struct workqueue_struct *rx_wq; +#endif /* 
DHD_PCIE_NATIVE_RUNTIMEPM */ +#ifdef DHD_DEBUG_UART + bool duart_execute; +#endif /* DHD_DEBUG_UART */ + struct mutex logdump_lock; + /* indicates mem_dump was scheduled as work queue or called directly */ + bool scheduled_memdump; + struct work_struct dhd_hang_process_work; +#ifdef DHD_HP2P + spinlock_t hp2p_lock; +#endif /* DHD_HP2P */ +} dhd_info_t; + +extern int dhd_sysfs_init(dhd_info_t *dhd); +extern void dhd_sysfs_exit(dhd_info_t *dhd); +extern void dhd_dbg_ring_proc_create(dhd_pub_t *dhdp); +extern void dhd_dbg_ring_proc_destroy(dhd_pub_t *dhdp); + +int __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf); + +#if defined(DHD_LB) +#if defined(DHD_LB_TXP) +int dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net, int ifidx, void *skb); +void dhd_tx_dispatcher_work(struct work_struct * work); +void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp); +void dhd_lb_tx_dispatch(dhd_pub_t *dhdp); +void dhd_lb_tx_handler(unsigned long data); +#endif /* DHD_LB_TXP */ + +#if defined(DHD_LB_RXP) +int dhd_napi_poll(struct napi_struct *napi, int budget); +void dhd_rx_napi_dispatcher_fn(struct work_struct * work); +void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp); +void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx); +#endif /* DHD_LB_RXP */ + +void dhd_lb_set_default_cpus(dhd_info_t *dhd); +void dhd_cpumasks_deinit(dhd_info_t *dhd); +int dhd_cpumasks_init(dhd_info_t *dhd); + +void dhd_select_cpu_candidacy(dhd_info_t *dhd); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) +int dhd_cpu_startup_callback(unsigned int cpu); +int dhd_cpu_teardown_callback(unsigned int cpu); +#else +int dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu); +#endif /* LINUX_VERSION_CODE < 4.10.0 */ + +int dhd_register_cpuhp_callback(dhd_info_t *dhd); +int dhd_unregister_cpuhp_callback(dhd_info_t *dhd); + +#if defined(DHD_LB_TXC) +void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp); +#endif /* DHD_LB_TXC */ + +#if defined(DHD_LB_RXC) +void 
dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp); +void dhd_rx_compl_dispatcher_fn(struct work_struct * work); +#endif /* DHD_LB_RXC */ + +#endif /* DHD_LB */ + +#if defined(DHD_LB_IRQSET) || defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON) +void dhd_irq_set_affinity(dhd_pub_t *dhdp, const struct cpumask *cpumask); +#endif /* DHD_LB_IRQSET || DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */ + +#endif /* __DHD_LINUX_PRIV_H__ */ diff --git a/bcmdhd.100.10.315.x/dhd_linux_sched.c b/bcmdhd.100.10.315.x/dhd_linux_sched.c index d1268c7..fb76b3f 100644 --- a/bcmdhd.100.10.315.x/dhd_linux_sched.c +++ b/bcmdhd.100.10.315.x/dhd_linux_sched.c @@ -1,7 +1,7 @@ /* * Expose some of the kernel scheduler routines * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: dhd_linux_sched.c 514727 2014-11-12 03:02:48Z $ + * $Id: dhd_linux_sched.c 815919 2019-04-22 09:06:50Z $ */ #include #include @@ -35,17 +35,13 @@ int setScheduler(struct task_struct *p, int policy, struct sched_param *param) { int rc = 0; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) rc = sched_setscheduler(p, policy, param); -#endif /* LinuxVer */ return rc; } int get_scheduler_policy(struct task_struct *p) { int rc = SCHED_NORMAL; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) rc = p->policy; -#endif /* LinuxVer */ return rc; } diff --git a/bcmdhd.100.10.315.x/dhd_linux_wq.c b/bcmdhd.100.10.315.x/dhd_linux_wq.c index 54b13bd..c5f57bb 100644 --- a/bcmdhd.100.10.315.x/dhd_linux_wq.c +++ b/bcmdhd.100.10.315.x/dhd_linux_wq.c @@ -2,7 +2,7 @@ * Broadcom Dongle Host Driver (DHD), Generic work queue framework * Generic interface to handle dhd deferred work events * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: dhd_linux_wq.c 675839 2016-12-19 03:07:26Z $ + * $Id: dhd_linux_wq.c 815919 2019-04-22 09:06:50Z $ */ #include @@ -85,15 +85,11 @@ dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock) struct kfifo *fifo; gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC; -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) - fifo = kfifo_init(buf, size, flags, lock); -#else fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags); if (!fifo) { return NULL; } kfifo_init(fifo, buf, size); -#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */ return fifo; } @@ -101,10 +97,6 @@ static inline void dhd_kfifo_free(struct kfifo *fifo) { kfifo_free(fifo); -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31)) - /* FC11 releases the fifo memory */ - kfree(fifo); -#endif // endif } /* deferred work functions */ diff --git a/bcmdhd.100.10.315.x/dhd_linux_wq.h b/bcmdhd.100.10.315.x/dhd_linux_wq.h index d373f04..add9416 100644 --- a/bcmdhd.100.10.315.x/dhd_linux_wq.h +++ b/bcmdhd.100.10.315.x/dhd_linux_wq.h @@ -2,7 +2,7 @@ * Broadcom Dongle Host Driver (DHD), Generic work queue framework * Generic interface to handle dhd deferred work events * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: dhd_linux_wq.h 769906 2018-06-28 11:52:31Z $ + * $Id: dhd_linux_wq.h 814378 2019-04-11 02:21:31Z $ */ #ifndef _dhd_linux_wq_h_ #define _dhd_linux_wq_h_ @@ -39,7 +39,6 @@ enum _wq_event { DHD_WQ_WORK_SET_MCAST_LIST, DHD_WQ_WORK_IPV6_NDO, DHD_WQ_WORK_HANG_MSG, - DHD_WQ_WORK_SSSR_DUMP, DHD_WQ_WORK_DHD_LOG_DUMP, DHD_WQ_WORK_PKTLOG_DUMP, DHD_WQ_WORK_INFORM_DHD_MON, @@ -55,6 +54,8 @@ enum _wq_event { DHD_WQ_WORK_ERROR_RECOVERY, #endif /* DHD_ERPOM */ DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH, + DHD_WQ_WORK_AXI_ERROR_DUMP, + DHD_WQ_WORK_CTO_RECOVERY, #ifdef DHD_UPDATE_INTF_MAC DHD_WQ_WORK_IF_UPDATE, #endif /* DHD_UPDATE_INTF_MAC */ diff --git a/bcmdhd.100.10.315.x/dhd_mschdbg.c b/bcmdhd.100.10.315.x/dhd_mschdbg.c index 31645c3..bef05b6 100644 --- a/bcmdhd.100.10.315.x/dhd_mschdbg.c +++ b/bcmdhd.100.10.315.x/dhd_mschdbg.c @@ -3,7 +3,7 @@ * * <> * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -662,10 +662,15 @@ static void dhd_mschdbg_dump_data(dhd_pub_t *dhdp, void *raw_event_ptr, int type dhd_mschdbg_us_to_sec(p->time_hi, p->time_lo, &s, &ss); MSCH_EVENT_HEAD(0); MSCH_EVENT(("%06d.%06d [wl%d]: ", s, ss, p->hdr.tag)); + bzero(&hdr, sizeof(hdr)); hdr.tag = EVENT_LOG_TAG_MSCHPROFILE; hdr.count = p->hdr.count + 1; /* exclude LSB 2 bits which indicate binary/non-binary data */ hdr.fmt_num = ntoh16(p->hdr.fmt_num) >> 2; + hdr.fmt_num_raw = ntoh16(p->hdr.fmt_num); + if (ntoh16(p->hdr.fmt_num) == DHD_OW_BI_RAW_EVENT_LOG_FMT) { + hdr.binary_payload = TRUE; + } dhd_dbg_verboselog_printf(dhdp, &hdr, raw_event_ptr, p->data, 0, 0); } lastMessages = TRUE; @@ -758,10 +763,15 @@ wl_mschdbg_verboselog_handler(dhd_pub_t *dhdp, void *raw_event_ptr, prcd_event_l dhd_mschdbg_us_to_sec(p->time_hi, p->time_lo, &s, &ss); MSCH_EVENT_HEAD(0); MSCH_EVENT(("%06d.%06d [wl%d]: ", s, ss, p->hdr.tag)); + bzero(&hdr, sizeof(hdr)); hdr.tag = EVENT_LOG_TAG_MSCHPROFILE; hdr.count = p->hdr.count + 1; /* exclude LSB 2 bits which indicate binary/non-binary data */ hdr.fmt_num = ntoh16(p->hdr.fmt_num) >> 2; + hdr.fmt_num_raw = ntoh16(p->hdr.fmt_num); + if (ntoh16(p->hdr.fmt_num) == DHD_OW_BI_RAW_EVENT_LOG_FMT) { + hdr.binary_payload = TRUE; + } dhd_dbg_verboselog_printf(dhdp, &hdr, raw_event_ptr, p->data, 0, 0); } else { msch_collect_tlv_t *p = (msch_collect_tlv_t *)log_ptr; diff --git a/bcmdhd.100.10.315.x/dhd_mschdbg.h b/bcmdhd.100.10.315.x/dhd_mschdbg.h index 1e711da..8cb28c4 100644 --- a/bcmdhd.100.10.315.x/dhd_mschdbg.h +++ b/bcmdhd.100.10.315.x/dhd_mschdbg.h @@ -3,7 +3,7 @@ * * <> * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/dhd_msgbuf.c b/bcmdhd.100.10.315.x/dhd_msgbuf.c index 8eb0b8a..33ceb5a 100644 --- a/bcmdhd.100.10.315.x/dhd_msgbuf.c +++ b/bcmdhd.100.10.315.x/dhd_msgbuf.c @@ -3,7 +3,7 @@ * Provides type definitions and function prototypes used to link the * DHD OS, bus, and protocol modules. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -26,7 +26,7 @@ * * <> * - * $Id: dhd_msgbuf.c 769906 2018-06-28 11:52:31Z $ + * $Id: dhd_msgbuf.c 825801 2019-06-17 10:51:10Z $ */ #include @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -67,6 +68,10 @@ #include #include +#ifdef DHD_EWPR_VER2 +#include +#endif /* DHD_EWPR_VER2 */ + extern char dhd_version[]; extern char fw_version[]; @@ -166,10 +171,19 @@ struct msgbuf_ring; /* ring context for common and flow rings */ * Dongle advertizes host side sync mechanism requirements. */ -#define PCIE_D2H_SYNC_WAIT_TRIES (512UL) -#define PCIE_D2H_SYNC_NUM_OF_STEPS (5UL) +#define PCIE_D2H_SYNC_WAIT_TRIES (512U) +#define PCIE_D2H_SYNC_NUM_OF_STEPS (5U) #define PCIE_D2H_SYNC_DELAY (100UL) /* in terms of usecs */ +#define HWA_DB_TYPE_RXPOST (0x0050) +#define HWA_DB_TYPE_TXCPLT (0x0060) +#define HWA_DB_TYPE_RXCPLT (0x0170) +#define HWA_DB_INDEX_VALUE(val) ((uint32)(val) << 16) + +#define HWA_ENAB_BITMAP_RXPOST (1U << 0) /* 1A */ +#define HWA_ENAB_BITMAP_RXCPLT (1U << 1) /* 2B */ +#define HWA_ENAB_BITMAP_TXCPLT (1U << 2) /* 4B */ + /** * Custom callback attached based upon D2H DMA Sync mode advertized by dongle. 
* @@ -179,6 +193,18 @@ struct msgbuf_ring; /* ring context for common and flow rings */ typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring, volatile cmn_msg_hdr_t *msg, int msglen); +/** + * Custom callback attached based upon D2H DMA Sync mode advertized by dongle. + * For EDL messages. + * + * On success: return cmn_msg_hdr_t::msg_type + * On failure: return 0 (invalid msg_type) + */ +#ifdef EWP_EDL +typedef int (* d2h_edl_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring, + volatile cmn_msg_hdr_t *msg); +#endif /* EWP_EDL */ + /* * +---------------------------------------------------------------------------- * @@ -286,6 +312,64 @@ typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring, #define DHD_DMA_PAD (128) #endif // endif +/* + * +---------------------------------------------------------------------------- + * Flowring Pool + * + * Unlike common rings, which are attached very early on (dhd_prot_attach), + * flowrings are dynamically instantiated. Moreover, flowrings may require a + * larger DMA-able buffer. To avoid issues with fragmented cache coherent + * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once. + * The DMA-able buffers are attached to these pre-allocated msgbuf_ring. + * + * Each DMA-able buffer may be allocated independently, or may be carved out + * of a single large contiguous region that is registered with the protocol + * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region + * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic). + * + * No flowring pool action is performed in dhd_prot_attach(), as the number + * of h2d rings is not yet known. + * + * In dhd_prot_init(), the dongle advertized number of h2d rings is used to + * determine the number of flowrings required, and a pool of msgbuf_rings are + * allocated and a DMA-able buffer (carved or allocated) is attached. 
+ * See: dhd_prot_flowrings_pool_attach() + * + * A flowring msgbuf_ring object may be fetched from this pool during flowring + * creation, using the flowid. Likewise, flowrings may be freed back into the + * pool on flowring deletion. + * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release() + * + * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers + * are detached (returned back to the carved region or freed), and the pool of + * msgbuf_ring and any objects allocated against it are freed. + * See: dhd_prot_flowrings_pool_detach() + * + * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a + * state as-if upon an attach. All DMA-able buffers are retained. + * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring + * pool attach will notice that the pool persists and continue to use it. This + * will avoid the case of a fragmented DMA-able region. + * + * +---------------------------------------------------------------------------- + */ + +/* Conversion of a flowid to a flowring pool index */ +#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \ + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS) + +/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */ +#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \ + (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \ + DHD_FLOWRINGS_POOL_OFFSET(flowid) + +/* Traverse each flowring in the flowring pool, assigning ring and flowid */ +#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \ + for ((flowid) = DHD_FLOWRING_START_FLOWID, \ + (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \ + (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \ + (ring)++, (flowid)++) + /* Used in loopback tests */ typedef struct dhd_dmaxfer { dhd_dma_buf_t srcmem; @@ -295,6 +379,7 @@ typedef struct dhd_dmaxfer { uint32 len; bool in_progress; uint64 start_usec; + uint64 time_taken; uint32 d11_lpbk; int status; } dhd_dmaxfer_t; @@ -328,6 
+413,7 @@ typedef struct msgbuf_ring { #endif /* TXP_FLUSH_NITEMS */ uint8 ring_type; + uint16 hwa_db_type; /* hwa type non-zero for Data path rings */ uint8 n_completion_ids; bool create_pending; uint16 create_req_id; @@ -351,6 +437,7 @@ int h2d_max_txpost = H2DRING_TXPOST_MAX_ITEM; /** DHD protocol handle. Is an opaque type to other DHD software layers. */ typedef struct dhd_prot { osl_t *osh; /* OSL handle */ + uint16 rxbufpost_sz; uint16 rxbufpost; uint16 max_rxbufpost; uint16 max_eventbufpost; @@ -411,6 +498,9 @@ typedef struct dhd_prot { uint32 flowring_num; d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */ +#ifdef EWP_EDL + d2h_edl_sync_cb_t d2h_edl_sync_cb; /* Sync on EDL D2H DMA done: SEQNUM or XORCSUM */ +#endif /* EWP_EDL */ ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */ ulong d2h_sync_wait_tot; /* total wait loops */ @@ -424,6 +514,18 @@ typedef struct dhd_prot { void *pktid_tx_map; /* pktid map for tx path */ bool metadata_dbg; void *pktid_map_handle_ioctl; +#ifdef DHD_MAP_PKTID_LOGGING + void *pktid_dma_map; /* pktid map for DMA MAP */ + void *pktid_dma_unmap; /* pktid map for DMA UNMAP */ +#endif /* DHD_MAP_PKTID_LOGGING */ + uint32 pktid_depleted_cnt; /* pktid depleted count */ + /* netif tx queue stop count */ + uint8 pktid_txq_stop_cnt; + /* netif tx queue start count */ + uint8 pktid_txq_start_cnt; + uint64 ioctl_fillup_time; /* timestamp for ioctl fillup */ + uint64 ioctl_ack_time; /* timestamp for ioctl ack */ + uint64 ioctl_cmplt_time; /* timestamp for ioctl completion */ /* Applications/utilities can read tx and rx metadata using IOVARs */ uint16 rx_metadata_offset; @@ -459,11 +561,26 @@ typedef struct dhd_prot { bool no_aggr; bool fixed_rate; dhd_dma_buf_t host_scb_buf; /* scb host offload buffer */ +#ifdef DHD_HP2P + msgbuf_ring_t *d2hring_hp2p_txcpl; /* D2H HPP Tx completion ring */ + msgbuf_ring_t *d2hring_hp2p_rxcpl; /* D2H HPP Rx completion ring */ +#endif /* DHD_HP2P */ + bool 
no_tx_resource; } dhd_prot_t; +#ifdef DHD_EWPR_VER2 +#define HANG_INFO_BASE64_BUFFER_SIZE 640 +#endif // endif + #ifdef DHD_DUMP_PCIE_RINGS static -int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, unsigned long *file_posn); +int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, + const void *user_buf, unsigned long *file_posn); +#ifdef EWP_EDL +static +int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf, + unsigned long *file_posn); +#endif /* EWP_EDL */ #endif /* DHD_DUMP_PCIE_RINGS */ extern bool dhd_timesync_delay_post_bufs(dhd_pub_t *dhdp); @@ -580,12 +697,21 @@ static void dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf) static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf); static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf); static void dhd_prot_detach_info_rings(dhd_pub_t *dhd); +#ifdef DHD_HP2P +static void dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd); +#endif /* DHD_HP2P */ #ifdef EWP_EDL static void dhd_prot_detach_edl_rings(dhd_pub_t *dhd); #endif // endif static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf); static void dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf); +#ifdef DHD_HP2P +static void dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus); +static void dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus); +static void dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid); +static void dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc); +#endif // endif typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg); /** callback functions for messages generated by the dongle */ @@ -722,7 +848,6 @@ static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create uint16 ring_type, uint32 id); static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create, uint8 type, uint32 id); 
-static uint16 dhd_get_max_flow_rings(dhd_pub_t *dhd); /** * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has @@ -739,16 +864,28 @@ dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *rin { uint32 ring_seqnum = ring->seqnum; + if (dhd_query_bus_erros(dhd)) { + return; + } + DHD_ERROR(( "LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>" - " tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d>\n", + " tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d> rd<%d> wr<%d>\n", dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum% D2H_EPOCH_MODULO, tries, dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot, - ring->dma_buf.va, msg, ring->curr_rd)); + ring->dma_buf.va, msg, ring->curr_rd, ring->rd, ring->wr)); dhd_prhex("D2H MsgBuf Failure", msg, msglen, DHD_ERROR_VAL); - dhd->livelock_occured = TRUE; + /* Try to resume if already suspended or suspend in progress */ + + /* Skip if still in suspended or suspend in progress */ + if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) { + DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n", + __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state)); + goto exit; + } + dhd_bus_dump_console_buffer(dhd->bus); dhd_prot_debug_info_print(dhd); @@ -760,8 +897,10 @@ dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *rin } #endif /* DHD_FW_COREDUMP */ +exit: dhd_schedule_reset(dhd); + dhd->livelock_occured = TRUE; } /** @@ -809,10 +948,16 @@ dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring, msg_seqnum = *marker; if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */ ring->seqnum++; /* next expected sequence number */ - goto dma_completed; + /* Check for LIVELOCK induce flag, which is set by firing + * dhd iovar to induce LIVELOCK error. If flag is set, + * MSG_TYPE_INVALID is returned, which results in to LIVELOCK error. 
+ */ + if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) { + goto dma_completed; + } } - total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries; + total_tries = (uint32)(((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries); if (total_tries > prot->d2h_sync_wait_max) prot->d2h_sync_wait_max = total_tries; @@ -888,7 +1033,14 @@ dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring, num_words); if (prot_checksum == 0U) { /* checksum is OK */ ring->seqnum++; /* next expected sequence number */ - goto dma_completed; + /* Check for LIVELOCK induce flag, which is set by firing + * dhd iovar to induce LIVELOCK error. If flag is set, + * MSG_TYPE_INVALID is returned, which results in to + * LIVELOCK error. + */ + if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) { + goto dma_completed; + } } } @@ -926,7 +1078,16 @@ static uint8 BCMFASTPATH dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring, volatile cmn_msg_hdr_t *msg, int msglen) { - return msg->msg_type; + /* Check for LIVELOCK induce flag, which is set by firing + * dhd iovar to induce LIVELOCK error. If flag is set, + * MSG_TYPE_INVALID is returned, which results in to LIVELOCK error. + */ + if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) { + DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__)); + return MSG_TYPE_INVALID; + } else { + return msg->msg_type; + } } #ifdef EWP_EDL @@ -1013,7 +1174,9 @@ BCMFASTPATH(dhd_prot_d2h_sync_edl)(dhd_pub_t *dhd, msgbuf_ring_t *ring, if (valid_msg) { /* data is OK */ ring->seqnum++; /* next expected sequence number */ - goto dma_completed; + if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) { + goto dma_completed; + } } } else { DHD_TRACE(("%s: wrong hdr, seqnum expected %u, got %u." @@ -1071,6 +1234,30 @@ dma_completed: prot->d2h_sync_wait_tot += tries; return BCME_OK; } + +/** + * dhd_prot_d2h_sync_edl_none - Dongle ensure that the DMA will complete and host + * need to try to sync. 
This noop sync handler will be bound when the dongle + * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required. + */ +static int BCMFASTPATH +dhd_prot_d2h_sync_edl_none(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg) +{ + /* Check for LIVELOCK induce flag, which is set by firing + * dhd iovar to induce LIVELOCK error. If flag is set, + * MSG_TYPE_INVALID is returned, which results in to LIVELOCK error. + */ + if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) { + DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__)); + return BCME_ERROR; + } else { + if (msg->msg_type == MSG_TYPE_INFO_PYLD) + return BCME_OK; + else + return msg->msg_type; + } +} #endif /* EWP_EDL */ INLINE void @@ -1104,14 +1291,33 @@ dhd_prot_d2h_sync_init(dhd_pub_t *dhd) prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL; prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT; + if (HWA_ACTIVE(dhd)) { + prot->d2hring_tx_cpln.hwa_db_type = + (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_TXCPLT) ? HWA_DB_TYPE_TXCPLT : 0; + prot->d2hring_rx_cpln.hwa_db_type = + (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_RXCPLT) ? 
HWA_DB_TYPE_RXCPLT : 0; + DHD_ERROR(("%s: TXCPLT hwa_db_type:0x%x RXCPLT hwa_db_type:0x%x\n", + __FUNCTION__, prot->d2hring_tx_cpln.hwa_db_type, + prot->d2hring_rx_cpln.hwa_db_type)); + } + if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) { prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum; +#ifdef EWP_EDL + prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl; +#endif /* EWP_EDL */ DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__)); } else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) { prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum; +#ifdef EWP_EDL + prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl; +#endif /* EWP_EDL */ DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__)); } else { prot->d2h_sync_cb = dhd_prot_d2h_sync_none; +#ifdef EWP_EDL + prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl_none; +#endif /* EWP_EDL */ DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__)); } } @@ -1124,6 +1330,14 @@ dhd_prot_h2d_sync_init(dhd_pub_t *dhd) { dhd_prot_t *prot = dhd->prot; prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL; + + if (HWA_ACTIVE(dhd)) { + prot->h2dring_rxp_subn.hwa_db_type = + (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_RXPOST) ? HWA_DB_TYPE_RXPOST : 0; + DHD_ERROR(("%s: RXPOST hwa_db_type:0x%x\n", + __FUNCTION__, prot->d2hring_tx_cpln.hwa_db_type)); + } + prot->h2dring_rxp_subn.current_phase = 0; prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL; @@ -1277,6 +1491,133 @@ dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf, /* +------------------ End of PCIE DHD DMA BUF ADT ------------------------+ */ +/* + * +---------------------------------------------------------------------------+ + * DHD_MAP_PKTID_LOGGING + * Logging the PKTID and DMA map/unmap information for the SMMU fault issue + * debugging in customer platform. 
+ * +---------------------------------------------------------------------------+ + */ + +#ifdef DHD_MAP_PKTID_LOGGING +typedef struct dhd_pktid_log_item { + dmaaddr_t pa; /* DMA bus address */ + uint64 ts_nsec; /* Timestamp: nsec */ + uint32 size; /* DMA map/unmap size */ + uint32 pktid; /* Packet ID */ + uint8 pkttype; /* Packet Type */ + uint8 rsvd[7]; /* Reserved for future use */ +} dhd_pktid_log_item_t; + +typedef struct dhd_pktid_log { + uint32 items; /* number of total items */ + uint32 index; /* index of pktid_log_item */ + dhd_pktid_log_item_t map[0]; /* metadata storage */ +} dhd_pktid_log_t; + +typedef void * dhd_pktid_log_handle_t; /* opaque handle to pktid log */ + +#define MAX_PKTID_LOG (2048) +#define DHD_PKTID_LOG_ITEM_SZ (sizeof(dhd_pktid_log_item_t)) +#define DHD_PKTID_LOG_SZ(items) (uint32)((sizeof(dhd_pktid_log_t)) + \ + ((DHD_PKTID_LOG_ITEM_SZ) * (items))) + +#define DHD_PKTID_LOG_INIT(dhd, hdl) dhd_pktid_logging_init((dhd), (hdl)) +#define DHD_PKTID_LOG_FINI(dhd, hdl) dhd_pktid_logging_fini((dhd), (hdl)) +#define DHD_PKTID_LOG(dhd, hdl, pa, pktid, len, pkttype) \ + dhd_pktid_logging((dhd), (hdl), (pa), (pktid), (len), (pkttype)) +#define DHD_PKTID_LOG_DUMP(dhd) dhd_pktid_logging_dump((dhd)) + +static dhd_pktid_log_handle_t * +dhd_pktid_logging_init(dhd_pub_t *dhd, uint32 num_items) +{ + dhd_pktid_log_t *log; + uint32 log_size; + + log_size = DHD_PKTID_LOG_SZ(num_items); + log = (dhd_pktid_log_t *)MALLOCZ(dhd->osh, log_size); + if (log == NULL) { + DHD_ERROR(("%s: MALLOC failed for size %d\n", + __FUNCTION__, log_size)); + return (dhd_pktid_log_handle_t *)NULL; + } + + log->items = num_items; + log->index = 0; + + return (dhd_pktid_log_handle_t *)log; /* opaque handle */ +} + +static void +dhd_pktid_logging_fini(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle) +{ + dhd_pktid_log_t *log; + uint32 log_size; + + if (handle == NULL) { + DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__)); + return; + } + + log = (dhd_pktid_log_t *)handle; + log_size 
= DHD_PKTID_LOG_SZ(log->items); + MFREE(dhd->osh, handle, log_size); +} + +static void +dhd_pktid_logging(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle, dmaaddr_t pa, + uint32 pktid, uint32 len, uint8 pkttype) +{ + dhd_pktid_log_t *log; + uint32 idx; + + if (handle == NULL) { + DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__)); + return; + } + + log = (dhd_pktid_log_t *)handle; + idx = log->index; + log->map[idx].ts_nsec = OSL_LOCALTIME_NS(); + log->map[idx].pa = pa; + log->map[idx].pktid = pktid; + log->map[idx].size = len; + log->map[idx].pkttype = pkttype; + log->index = (idx + 1) % (log->items); /* update index */ +} + +void +dhd_pktid_logging_dump(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + dhd_pktid_log_t *map_log, *unmap_log; + uint64 ts_sec, ts_usec; + + if (prot == NULL) { + DHD_ERROR(("%s: prot is NULL\n", __FUNCTION__)); + return; + } + + map_log = (dhd_pktid_log_t *)(prot->pktid_dma_map); + unmap_log = (dhd_pktid_log_t *)(prot->pktid_dma_unmap); + OSL_GET_LOCALTIME(&ts_sec, &ts_usec); + if (map_log && unmap_log) { + DHD_ERROR(("%s: map_idx=%d unmap_idx=%d " + "current time=[%5lu.%06lu]\n", __FUNCTION__, + map_log->index, unmap_log->index, + (unsigned long)ts_sec, (unsigned long)ts_usec)); + DHD_ERROR(("%s: pktid_map_log(pa)=0x%llx size=%d, " + "pktid_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__, + (uint64)__virt_to_phys((ulong)(map_log->map)), + (uint32)(DHD_PKTID_LOG_ITEM_SZ * map_log->items), + (uint64)__virt_to_phys((ulong)(unmap_log->map)), + (uint32)(DHD_PKTID_LOG_ITEM_SZ * unmap_log->items))); + } +} +#endif /* DHD_MAP_PKTID_LOGGING */ + +/* +----------------- End of DHD_MAP_PKTID_LOGGING -----------------------+ */ + /* * +---------------------------------------------------------------------------+ * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping. 
@@ -1290,7 +1631,7 @@ dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf, #define DHD_PCIE_PKTID #define MAX_CTRL_PKTID (1024) /* Maximum number of pktids supported */ #define MAX_RX_PKTID (1024) -#define MAX_TX_PKTID (3072 * 2) +#define MAX_TX_PKTID (3072 * 12) /* On Router, the pktptr serves as a pktid. */ @@ -1310,6 +1651,8 @@ typedef enum dhd_pkttype { PKTTYPE_TSBUF_RX } dhd_pkttype_t; +#define DHD_PKTID_MIN_AVAIL_COUNT 512U +#define DHD_PKTID_DEPLETED_MAX_COUNT (DHD_PKTID_MIN_AVAIL_COUNT * 2U) #define DHD_PKTID_INVALID (0U) #define DHD_IOCTL_REQ_PKTID (0xFFFE) #define DHD_FAKE_PKTID (0xFACE) @@ -1319,6 +1662,10 @@ typedef enum dhd_pkttype { #define DHD_H2D_BTLOGRING_REQ_PKTID 0xFFFA #define DHD_D2H_BTLOGRING_REQ_PKTID 0xFFF9 #define DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID 0xFFF8 +#ifdef DHD_HP2P +#define DHD_D2H_HPPRING_TXREQ_PKTID 0xFFF7 +#define DHD_D2H_HPPRING_RXREQ_PKTID 0xFFF6 +#endif /* DHD_HP2P */ #define IS_FLOWRING(ring) \ ((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0)) @@ -1360,7 +1707,6 @@ static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *ha static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map, void *pkt, dmaaddr_t pa, uint32 len, uint8 dma, void *dmah, void *secdma, dhd_pkttype_t pkttype); - /* Return an allocated pktid, retrieving previously saved pkt and metadata */ static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map, uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah, @@ -1624,6 +1970,11 @@ __dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid, out: DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags); + + if (error != BCME_OK) { + dhd->pktid_audit_failed = TRUE; + } + return error; } @@ -1650,6 +2001,11 @@ dhd_pktid_audit_ring_debug(dhd_pub_t *dhdp, dhd_pktid_map_t *map, uint32 pktid, const int test_for, void *msg, uint32 msg_len, const char *func) { int ret = BCME_OK; + + if (dhd_query_bus_erros(dhdp)) { + return BCME_ERROR; + } + ret = 
__dhd_pktid_audit(dhdp, map, pktid, test_for, func); if (ret == BCME_ERROR) { DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n", @@ -1819,6 +2175,11 @@ dhd_pktid_map_reset(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle) #ifdef DHD_PKTID_AUDIT_RING DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */ #endif /* DHD_PKTID_AUDIT_RING */ +#ifdef DHD_MAP_PKTID_LOGGING + DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, + locker->pa, nkey, locker->len, + locker->pkttype); +#endif /* DHD_MAP_PKTID_LOGGING */ { if (SECURE_DMA_ENAB(dhd->osh)) @@ -2082,6 +2443,9 @@ dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt, locker->pkttype = pkttype; locker->pkt = pkt; locker->state = LOCKER_IS_BUSY; /* make this locker busy */ +#ifdef DHD_MAP_PKTID_LOGGING + DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_map, pa, nkey, len, pkttype); +#endif /* DHD_MAP_PKTID_LOGGING */ DHD_PKTID_UNLOCK(map->pktid_lock, flags); } @@ -2209,6 +2573,10 @@ dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey, #if defined(DHD_PKTID_AUDIT_MAP) DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE); #endif /* DHD_PKTID_AUDIT_MAP */ +#ifdef DHD_MAP_PKTID_LOGGING + DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, locker->pa, nkey, + (uint32)locker->len, pkttype); +#endif /* DHD_MAP_PKTID_LOGGING */ *pa = locker->pa; /* return contents of locker */ *len = (uint32)locker->len; @@ -2473,6 +2841,7 @@ dhd_prot_attach(dhd_pub_t *dhd) dhd->dma_h2d_ring_upd_support = FALSE; dhd->dma_ring_upd_overwrite = FALSE; + dhd->hwa_inited = 0; dhd->idma_inited = 0; dhd->ifrm_inited = 0; dhd->dar_inited = 0; @@ -2589,6 +2958,20 @@ dhd_prot_attach(dhd_pub_t *dhd) } #endif /* IOCTLRESP_USE_CONSTMEM */ +#ifdef DHD_MAP_PKTID_LOGGING + prot->pktid_dma_map = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG); + if (prot->pktid_dma_map == NULL) { + DHD_ERROR(("%s: failed to allocate pktid_dma_map\n", + __FUNCTION__)); + } + + prot->pktid_dma_unmap = 
DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG); + if (prot->pktid_dma_unmap == NULL) { + DHD_ERROR(("%s: failed to allocate pktid_dma_unmap\n", + __FUNCTION__)); + } +#endif /* DHD_MAP_PKTID_LOGGING */ + /* Initialize the work queues to be used by the Load Balancing logic */ #if defined(DHD_LB_TXC) { @@ -2713,6 +3096,12 @@ dhd_set_host_cap(dhd_pub_t *dhd) data |= HOSTCAP_FAST_DELETE_RING; } + if (dhdpcie_bus_get_pcie_hwa_supported(dhd->bus)) { + DHD_ERROR(("HWA inited\n")); + /* TODO: Is hostcap needed? */ + dhd->hwa_inited = TRUE; + } + if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) { DHD_ERROR(("IDMA inited\n")); data |= HOSTCAP_H2D_IDMA; @@ -2748,6 +3137,32 @@ dhd_set_host_cap(dhd_pub_t *dhd) } #endif /* EWP_EDL */ +#ifdef DHD_HP2P + if (dhd->hp2p_capable) { + data |= HOSTCAP_PKT_TIMESTAMP; + data |= HOSTCAP_PKT_HP2P; + DHD_ERROR(("Enable HP2P in host cap\n")); + } else { + DHD_ERROR(("HP2P not enabled in host cap\n")); + } +#endif // endif + +#ifdef DHD_DB0TS + if (dhd->db0ts_capable) { + data |= HOSTCAP_DB0_TIMESTAMP; + DHD_ERROR(("Enable DB0 TS in host cap\n")); + } else { + DHD_ERROR(("DB0 TS not enabled in host cap\n")); + } +#endif /* DHD_DB0TS */ + if (dhd->extdtxs_in_txcpl) { + DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n")); + data |= HOSTCAP_PKT_TXSTATUS; + } + else { + DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n")); + } + DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n", __FUNCTION__, prot->active_ipc_version, prot->host_ipc_version, @@ -2756,7 +3171,6 @@ dhd_set_host_cap(dhd_pub_t *dhd) dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32), HOST_API_VERSION, 0); dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa, sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0); - } } @@ -2838,6 +3252,7 @@ dhd_prot_init(dhd_pub_t *dhd) prot->device_ipc_version = dhd->bus->api.fw_rev; prot->host_ipc_version = PCIE_SHARED_VERSION; + prot->no_tx_resource = FALSE; /* Init the host API version */ dhd_set_host_cap(dhd); @@ -2900,9 +3315,9 @@ 
dhd_prot_init(dhd_pub_t *dhd) dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), D2H_DMA_INDX_RD_BUF, 0); } - /* Signal to the dongle that common ring init is complete */ - dhd_bus_hostready(dhd->bus); + if (dhd->hostrdy_after_init) + dhd_bus_hostready(dhd->bus); /* * If the DMA-able buffers for flowring needs to come from a specific @@ -2957,8 +3372,6 @@ dhd_prot_init(dhd_pub_t *dhd) /* Post to dongle host configured soft doorbells */ dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd); - /* Post buffers for packet reception and ioctl/event responses */ - dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */ dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd); dhd_msgbuf_rxbuf_post_event_bufs(dhd); @@ -3006,6 +3419,19 @@ dhd_prot_init(dhd_pub_t *dhd) } #endif /* EWP_EDL */ +#ifdef DHD_HP2P + /* create HPP txcmpl/rxcmpl rings */ + if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_7 && dhd->hp2p_capable) { + if ((ret = dhd_prot_init_hp2p_rings(dhd)) != BCME_OK) { + /* For now log and proceed, further clean up action maybe necessary + * when we have more clarity. 
+ */ + DHD_ERROR(("%s HP2P rings couldn't be created: Err Code%d", + __FUNCTION__, ret)); + } + } +#endif /* DHD_HP2P */ + return BCME_OK; } /* dhd_prot_init */ @@ -3054,6 +3480,10 @@ void dhd_prot_detach(dhd_pub_t *dhd) #ifdef EWP_EDL dhd_prot_detach_edl_rings(dhd); #endif // endif +#ifdef DHD_HP2P + /* detach HPP rings */ + dhd_prot_detach_hp2p_rings(dhd); +#endif /* DHD_HP2P */ /* if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs use pktid_map_handle_ioctl * handler and PKT memory is allocated using alloc_ioctl_return_buffer(), Otherwise @@ -3070,6 +3500,10 @@ void dhd_prot_detach(dhd_pub_t *dhd) #ifdef IOCTLRESP_USE_CONSTMEM DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl); #endif // endif +#ifdef DHD_MAP_PKTID_LOGGING + DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_map); + DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_unmap); +#endif /* DHD_MAP_PKTID_LOGGING */ #if defined(DHD_LB_TXC) if (prot->tx_compl_prod.buffer) @@ -3170,6 +3604,15 @@ dhd_prot_reset(dhd_pub_t *dhd) dhd_flow_rings_deinit(dhd); } +#ifdef DHD_HP2P + if (prot->d2hring_hp2p_txcpl) { + dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_txcpl); + } + if (prot->d2hring_hp2p_rxcpl) { + dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_rxcpl); + } +#endif /* DHD_HP2P */ + /* Reset PKTID map */ DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map); DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map); @@ -3283,6 +3726,9 @@ dhd_lb_tx_compl_handler(unsigned long data) DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0); #if defined(BCMPCIE) dhd_txcomplete(dhd, pkt, true); +#ifdef DHD_4WAYM4_FAIL_DISCONNECT + dhd_eap_txcomplete(dhd, pkt, TRUE, txstatus->cmn_hdr.if_id); +#endif /* DHD_4WAYM4_FAIL_DISCONNECT */ #endif // endif PKTFREE(dhd->osh, pkt, TRUE); @@ -3350,6 +3796,11 @@ dhd_check_create_info_rings(dhd_pub_t *dhd) BCMPCIE_H2D_COMMON_MSGRINGS; } + if (prot->d2hring_info_cpln) { + /* for d2hring re-entry case, clear inited flag */ + prot->d2hring_info_cpln->inited = FALSE; + } + if (prot->h2dring_info_subn && 
prot->d2hring_info_cpln) { return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */ } @@ -3435,7 +3886,10 @@ dhd_prot_init_info_rings(dhd_pub_t *dhd) if (ret != BCME_OK) return ret; + prot->h2dring_info_subn->seqnum = H2D_EPOCH_INIT_VAL; + prot->h2dring_info_subn->current_phase = 0; prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL; + prot->d2hring_info_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT; DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx)); prot->h2dring_info_subn->n_completion_ids = 1; @@ -3465,63 +3919,202 @@ dhd_prot_detach_info_rings(dhd_pub_t *dhd) } } -#ifdef EWP_EDL +#ifdef DHD_HP2P static int -dhd_check_create_edl_rings(dhd_pub_t *dhd) +dhd_check_create_hp2p_rings(dhd_pub_t *dhd) { dhd_prot_t *prot = dhd->prot; int ret = BCME_ERROR; uint16 ringid; - { - /* dongle may increase max_submission_rings so keep - * ringid at end of dynamic rings (re-use info ring cpl ring id) - */ - ringid = dhd->bus->max_tx_flowrings + - (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) + - BCMPCIE_H2D_COMMON_MSGRINGS + 1; - } + /* Last 2 dynamic ring indices are used by hp2p rings */ + ringid = dhd->bus->max_submission_rings + dhd->bus->max_completion_rings - 2; - if (prot->d2hring_edl) { - return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */ - } + if (prot->d2hring_hp2p_txcpl == NULL) { + prot->d2hring_hp2p_txcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t)); - if (prot->d2hring_edl == NULL) { - prot->d2hring_edl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t)); + if (prot->d2hring_hp2p_txcpl == NULL) { + DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_txcpl\n", + __FUNCTION__)); + return BCME_NOMEM; + } - if (prot->d2hring_edl == NULL) { - DHD_ERROR(("%s: couldn't alloc memory for d2hring_edl\n", + DHD_INFO(("%s: about to create hp2p txcpl ring\n", __FUNCTION__)); + ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_txcpl, "d2hhp2p_txcpl", + dhd_bus_get_hp2p_ring_max_size(dhd->bus, 
TRUE), D2HRING_TXCMPLT_ITEMSIZE, + ringid); + if (ret != BCME_OK) { + DHD_ERROR(("%s: couldn't alloc resources for hp2p txcpl ring\n", + __FUNCTION__)); + goto err2; + } + } else { + /* for re-entry case, clear inited flag */ + prot->d2hring_hp2p_txcpl->inited = FALSE; + } + if (prot->d2hring_hp2p_rxcpl == NULL) { + prot->d2hring_hp2p_rxcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t)); + + if (prot->d2hring_hp2p_rxcpl == NULL) { + DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_rxcpl\n", __FUNCTION__)); return BCME_NOMEM; } - DHD_ERROR(("%s: about to create EDL ring, ringid: %u \n", __FUNCTION__, - ringid)); - ret = dhd_prot_ring_attach(dhd, prot->d2hring_edl, "d2hring_edl", - D2HRING_EDL_MAX_ITEM, D2HRING_EDL_ITEMSIZE, + /* create the hp2p rx completion ring next to hp2p tx compl ring + * ringid = id next to hp2p tx compl ring + */ + ringid = ringid + 1; + + DHD_INFO(("%s: about to create hp2p rxcpl ring\n", __FUNCTION__)); + ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_rxcpl, "d2hhp2p_rxcpl", + dhd_bus_get_hp2p_ring_max_size(dhd->bus, FALSE), D2HRING_RXCMPLT_ITEMSIZE, ringid); if (ret != BCME_OK) { - DHD_ERROR(("%s: couldn't alloc resources for EDL ring\n", + DHD_ERROR(("%s: couldn't alloc resources for hp2p rxcpl ring\n", __FUNCTION__)); - goto err; + goto err1; } + } else { + /* for re-entry case, clear inited flag */ + prot->d2hring_hp2p_rxcpl->inited = FALSE; } return ret; -err: - MFREE(prot->osh, prot->d2hring_edl, sizeof(msgbuf_ring_t)); - prot->d2hring_edl = NULL; +err1: + MFREE(prot->osh, prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t)); + prot->d2hring_hp2p_rxcpl = NULL; +err2: + MFREE(prot->osh, prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t)); + prot->d2hring_hp2p_txcpl = NULL; return ret; -} /* dhd_check_create_btlog_rings */ +} /* dhd_check_create_hp2p_rings */ int -dhd_prot_init_edl_rings(dhd_pub_t *dhd) +dhd_prot_init_hp2p_rings(dhd_pub_t *dhd) { dhd_prot_t *prot = dhd->prot; - int ret = BCME_ERROR; + int ret = BCME_OK; - if ((ret = 
dhd_check_create_edl_rings(dhd)) != BCME_OK) { + dhd->hp2p_ring_active = FALSE; + + if ((ret = dhd_check_create_hp2p_rings(dhd)) != BCME_OK) { + DHD_ERROR(("%s: hp2p rings aren't created! \n", + __FUNCTION__)); + return ret; + } + + if ((prot->d2hring_hp2p_txcpl->inited) || (prot->d2hring_hp2p_txcpl->create_pending)) { + DHD_INFO(("hp2p tx completion ring was created!\n")); + return ret; + } + + DHD_TRACE(("trying to send create d2h hp2p txcpl ring: id %d\n", + prot->d2hring_hp2p_txcpl->idx)); + ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_txcpl, + BCMPCIE_D2H_RING_TYPE_HPP_TX_CPL, DHD_D2H_HPPRING_TXREQ_PKTID); + if (ret != BCME_OK) + return ret; + + prot->d2hring_hp2p_txcpl->seqnum = D2H_EPOCH_INIT_VAL; + prot->d2hring_hp2p_txcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT; + + if ((prot->d2hring_hp2p_rxcpl->inited) || (prot->d2hring_hp2p_rxcpl->create_pending)) { + DHD_INFO(("hp2p rx completion ring was created!\n")); + return ret; + } + + DHD_TRACE(("trying to send create d2h hp2p rxcpl ring: id %d\n", + prot->d2hring_hp2p_rxcpl->idx)); + ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_rxcpl, + BCMPCIE_D2H_RING_TYPE_HPP_RX_CPL, DHD_D2H_HPPRING_RXREQ_PKTID); + if (ret != BCME_OK) + return ret; + + prot->d2hring_hp2p_rxcpl->seqnum = D2H_EPOCH_INIT_VAL; + prot->d2hring_hp2p_rxcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT; + + /* Note that there is no way to delete d2h or h2d ring deletion incase either fails, + * so can not cleanup if one ring was created while the other failed + */ + return BCME_OK; +} /* dhd_prot_init_hp2p_rings */ + +static void +dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd) +{ + if (dhd->prot->d2hring_hp2p_txcpl) { + dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_txcpl); + MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t)); + dhd->prot->d2hring_hp2p_txcpl = NULL; + } + if (dhd->prot->d2hring_hp2p_rxcpl) { + dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_rxcpl); + MFREE(dhd->prot->osh, 
dhd->prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t)); + dhd->prot->d2hring_hp2p_rxcpl = NULL; + } +} +#endif /* DHD_HP2P */ + +#ifdef EWP_EDL +static int +dhd_check_create_edl_rings(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + int ret = BCME_ERROR; + uint16 ringid; + + { + /* dongle may increase max_submission_rings so keep + * ringid at end of dynamic rings (re-use info ring cpl ring id) + */ + ringid = dhd->bus->max_tx_flowrings + + (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) + + BCMPCIE_H2D_COMMON_MSGRINGS + 1; + } + + if (prot->d2hring_edl) { + prot->d2hring_edl->inited = FALSE; + return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */ + } + + if (prot->d2hring_edl == NULL) { + prot->d2hring_edl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t)); + + if (prot->d2hring_edl == NULL) { + DHD_ERROR(("%s: couldn't alloc memory for d2hring_edl\n", + __FUNCTION__)); + return BCME_NOMEM; + } + + DHD_ERROR(("%s: about to create EDL ring, ringid: %u \n", __FUNCTION__, + ringid)); + ret = dhd_prot_ring_attach(dhd, prot->d2hring_edl, "d2hring_edl", + D2HRING_EDL_MAX_ITEM, D2HRING_EDL_ITEMSIZE, + ringid); + if (ret != BCME_OK) { + DHD_ERROR(("%s: couldn't alloc resources for EDL ring\n", + __FUNCTION__)); + goto err; + } + } + + return ret; +err: + MFREE(prot->osh, prot->d2hring_edl, sizeof(msgbuf_ring_t)); + prot->d2hring_edl = NULL; + + return ret; +} /* dhd_check_create_btlog_rings */ + +int +dhd_prot_init_edl_rings(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + int ret = BCME_ERROR; + + if ((ret = dhd_check_create_edl_rings(dhd)) != BCME_OK) { DHD_ERROR(("%s: EDL rings aren't created! 
\n", __FUNCTION__)); return ret; @@ -3539,6 +4132,7 @@ dhd_prot_init_edl_rings(dhd_pub_t *dhd) return ret; prot->d2hring_edl->seqnum = D2H_EPOCH_INIT_VAL; + prot->d2hring_edl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT; return BCME_OK; } /* dhd_prot_init_btlog_rings */ @@ -3562,6 +4156,8 @@ int dhd_sync_with_dongle(dhd_pub_t *dhd) { int ret = 0; wlc_rev_info_t revinfo; + char buf[128]; + dhd_prot_t *prot = dhd->prot; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); @@ -3588,6 +4184,28 @@ int dhd_sync_with_dongle(dhd_pub_t *dhd) DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__, revinfo.deviceid, revinfo.vendorid, revinfo.chipnum)); + /* Get the RxBuf post size */ + memset(buf, 0, sizeof(buf)); + bcm_mkiovar("rxbufpost_sz", NULL, 0, buf, sizeof(buf)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0); + if (ret < 0) { + DHD_ERROR(("%s: GET RxBuf post FAILED, default to %d\n", + __FUNCTION__, DHD_FLOWRING_RX_BUFPOST_PKTSZ)); + prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; + } else { + memcpy_s(&(prot->rxbufpost_sz), sizeof(prot->rxbufpost_sz), buf, sizeof(uint16)); + if (prot->rxbufpost_sz > DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX) { + DHD_ERROR(("%s: Invalid RxBuf post size : %d, default to %d\n", + __FUNCTION__, prot->rxbufpost_sz, DHD_FLOWRING_RX_BUFPOST_PKTSZ)); + prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; + } else { + DHD_ERROR(("%s: RxBuf Post : %d\n", __FUNCTION__, prot->rxbufpost_sz)); + } + } + + /* Post buffers for packet reception */ + dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */ + DHD_SSSR_DUMP_INIT(dhd); dhd_process_cid_mac(dhd, TRUE); @@ -3595,15 +4213,24 @@ int dhd_sync_with_dongle(dhd_pub_t *dhd) dhd_process_cid_mac(dhd, FALSE); #if defined(DHD_H2D_LOG_TIME_SYNC) +#ifdef DHD_HP2P + if (FW_SUPPORTED(dhd, h2dlogts) || dhd->hp2p_capable) { + if (dhd->hp2p_enable) { + dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH / 40; + } else { + dhd->dhd_rte_time_sync_ms = 
DHD_H2D_LOG_TIME_STAMP_MATCH; + } +#else if (FW_SUPPORTED(dhd, h2dlogts)) { dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH; +#endif // endif dhd->bus->dhd_rte_time_sync_count = OSL_SYSUPTIME_US(); /* This is during initialization. */ dhd_h2d_log_time_sync(dhd); } else { dhd->dhd_rte_time_sync_ms = 0; } -#endif /* DHD_H2D_LOG_TIME_SYNC */ +#endif /* DHD_H2D_LOG_TIME_SYNC || DHD_HP2P */ /* Always assumes wl for now */ dhd->iswl = TRUE; done: @@ -3839,7 +4466,6 @@ static int BCMFASTPATH dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid) { void *p, **pktbuf; - uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; uint8 *rxbuf_post_tmp; host_rxbuf_post_t *rxbuf_post; void *msg_start; @@ -3852,12 +4478,8 @@ dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid) msgbuf_ring_t *ring = &prot->h2dring_rxp_subn; void *lcl_buf; uint16 lcl_buf_size; + uint16 pktsz = prot->rxbufpost_sz; -#ifdef WL_MONITOR - if (dhd->monitor_enable) { - pktsz = DHD_MAX_MON_FLOWRING_RX_BUFPOST_PKTSZ; - } -#endif /* WL_MONITOR */ /* allocate a local buffer to store pkt buffer va, pa and length */ lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) * RX_BUF_BURST; @@ -4572,17 +5194,20 @@ dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound) uint8 *msg_addr; uint32 msg_len; + if (dhd_query_bus_erros(dhd)) { + more = FALSE; + break; + } + if (dhd->hang_was_sent) { more = FALSE; break; } -#ifdef DHD_MAP_LOGGING if (dhd->smmu_fault_occurred) { more = FALSE; break; } -#endif /* DHD_MAP_LOGGING */ DHD_RING_LOCK(ring->ring_lock, flags); /* Get the message from ring */ @@ -4635,6 +5260,10 @@ dhd_prot_process_msgbuf_edl(dhd_pub_t *dhd) return FALSE; } + if (dhd_query_bus_erros(dhd)) { + return FALSE; + } + if (dhd->hang_was_sent) { return FALSE; } @@ -4684,8 +5313,9 @@ dhd_prot_process_msgbuf_edl(dhd_pub_t *dhd) } if (items > D2HRING_EDL_WATERMARK) { - DHD_ERROR_RLMT(("%s: WARNING! 
EDL watermark hit, num items=%u \n", - __FUNCTION__, items)); + DHD_ERROR_RLMT(("%s: WARNING! EDL watermark hit, num items=%u;" + " rd=%u; wr=%u; depth=%u;\n", __FUNCTION__, items, + ring->rd, ring->wr, depth)); } dhd_schedule_logtrace(dhd->info); @@ -4737,10 +5367,10 @@ dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data) * doorbell is acceptable since EDL only contains debug data */ num_items = READ_AVAIL_SPACE(wr, ring->rd, depth); - DHD_RING_UNLOCK(ring->ring_lock, flags); if (num_items == 0) { /* no work items in edl ring */ + DHD_RING_UNLOCK(ring->ring_lock, flags); return 0; } @@ -4749,18 +5379,23 @@ dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data) /* if space is available, calculate address to be read */ msg_addr = (char*)ring->dma_buf.va + (ring->rd * ring->item_len); - n = max_items_to_process = MIN(num_items, DHD_EVENT_LOGTRACE_BOUND); + + max_items_to_process = MIN(num_items, DHD_EVENT_LOGTRACE_BOUND); + + DHD_RING_UNLOCK(ring->ring_lock, flags); /* Prefetch data to populate the cache */ OSL_PREFETCH(msg_addr); + n = max_items_to_process; while (n > 0) { msg = (cmn_msg_hdr_t *)msg_addr; /* wait for DMA of work item to complete */ - if ((err = dhd_prot_d2h_sync_edl(dhd, ring, msg)) != BCME_OK) { - DHD_ERROR(("%s: Error waiting for DMA to cmpl in EDL ring; err = %d\n", - __FUNCTION__, err)); + if ((err = prot->d2h_edl_sync_cb(dhd, ring, msg)) != BCME_OK) { + DHD_ERROR(("%s: Error waiting for DMA to cmpl in EDL " + "ring; err = %d\n", __FUNCTION__, err)); } + /* * Update the curr_rd to the current index in the ring, from where * the work item is fetched. 
This way if the fetched work item @@ -4780,29 +5415,8 @@ dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data) /* process the edl work item, i.e, the event log */ err = dhd_event_logtrace_process_edl(dhd, msg_addr, evt_decode_data); - /* memset the read work item space to zero to avoid - * any accidental matching of seqnum - */ - memset(msg_addr, 0, ring->item_len); - - /* update rd index by 1 - * The TCM rd index is updated only if bus is not - * in D3. Else, the rd index is updated from resume - * context in - 'dhdpcie_bus_suspend' - */ - DHD_RING_LOCK(ring->ring_lock, flags); - ring->rd = ring->curr_rd; - ASSERT(ring->rd < ring->max_items); - DHD_RING_UNLOCK(ring->ring_lock, flags); - - DHD_GENERAL_LOCK(dhd, flags); - if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) { - DHD_INFO(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n", - __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state)); - } else { - DHD_EDL_RING_TCM_RD_UPDATE(dhd); - } - DHD_GENERAL_UNLOCK(dhd, flags); + /* Dummy sleep so that scheduler kicks in after processing any logprints */ + OSL_SLEEP(0); /* Prefetch data to populate the cache */ OSL_PREFETCH(msg_addr + ring->item_len); @@ -4811,6 +5425,29 @@ dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data) --n; } + DHD_RING_LOCK(ring->ring_lock, flags); + /* update host ring read pointer */ + if ((ring->rd + max_items_to_process) >= ring->max_items) + ring->rd = 0; + else + ring->rd += max_items_to_process; + DHD_RING_UNLOCK(ring->ring_lock, flags); + + /* Now after processing max_items_to_process update dongle rd index. + * The TCM rd index is updated only if bus is not + * in D3. 
Else, the rd index is updated from resume + * context in - 'dhdpcie_bus_suspend' + */ + DHD_GENERAL_LOCK(dhd, flags); + if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) { + DHD_INFO(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n", + __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state)); + DHD_GENERAL_UNLOCK(dhd, flags); + } else { + DHD_GENERAL_UNLOCK(dhd, flags); + DHD_EDL_RING_TCM_RD_UPDATE(dhd); + } + /* if num_items > bound, then anyway we will reschedule and * this function runs again, so that if in between the DPC has * updated the wr index, then the updated wr is read. But if @@ -4853,6 +5490,8 @@ void dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd) { dhd_prot_t *prot = NULL; + unsigned long flags = 0; + msgbuf_ring_t *ring = NULL; if (!dhd) return; @@ -4861,19 +5500,22 @@ dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd) if (!prot || !prot->d2hring_edl) return; - dhd_prot_upd_read_idx(dhd, prot->d2hring_edl); + ring = prot->d2hring_edl; + DHD_RING_LOCK(ring->ring_lock, flags); + dhd_prot_upd_read_idx(dhd, ring); + DHD_RING_UNLOCK(ring->ring_lock, flags); } #endif /* EWP_EDL */ /* called when DHD needs to check for 'receive complete' messages from the dongle */ bool BCMFASTPATH -dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound) +dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound, int ringtype) { bool more = FALSE; uint n = 0; dhd_prot_t *prot = dhd->prot; - msgbuf_ring_t *ring = &prot->d2hring_rx_cpln; - uint16 item_len = ring->item_len; + msgbuf_ring_t *ring; + uint16 item_len; host_rxbuf_cmpl_t *msg = NULL; uint8 *msg_addr; uint32 msg_len; @@ -4888,19 +5530,29 @@ dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound) uint32 pktid; int i; uint8 sync; + ts_timestamp_t *ts; + BCM_REFERENCE(ts); +#ifdef DHD_HP2P + if (ringtype == DHD_HP2P_RING && prot->d2hring_hp2p_rxcpl) + ring = prot->d2hring_hp2p_rxcpl; + else +#endif /* DHD_HP2P */ + ring = &prot->d2hring_rx_cpln; + item_len = ring->item_len; while (1) { if 
(dhd_is_device_removed(dhd)) break; + if (dhd_query_bus_erros(dhd)) + break; + if (dhd->hang_was_sent) break; -#ifdef DHD_MAP_LOGGING if (dhd->smmu_fault_occurred) { break; } -#endif /* DHD_MAP_LOGGING */ pkt_cnt = 0; pktqhead = pkt_newidx = NULL; @@ -4997,13 +5649,18 @@ dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound) } /* Actual length of the packet */ PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len)); + #if defined(WL_MONITOR) - if (dhd_monitor_enabled(dhd, ifidx) && - (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11)) { - dhd_rx_mon_pkt(dhd, msg, pkt, ifidx); - continue; + if (dhd_monitor_enabled(dhd, ifidx)) { + if (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) { + dhd_rx_mon_pkt(dhd, msg, pkt, ifidx); + continue; + } else { + DHD_ERROR(("Received non 802.11 packet, " + "when monitor mode is enabled\n")); + } } -#endif // endif +#endif /* WL_MONITOR */ if (!pktqhead) { pktqhead = prevpkt = pkt; @@ -5021,6 +5678,15 @@ dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound) } } +#ifdef DHD_HP2P + if (dhd->hp2p_capable && ring == prot->d2hring_hp2p_rxcpl) { +#ifdef DHD_HP2P_DEBUG + bcm_print_bytes("Rxcpl", (uchar *)msg, sizeof(host_rxbuf_cmpl_t)); +#endif /* DHD_HP2P_DEBUG */ + dhd_update_hp2p_rxstats(dhd, msg); + } +#endif /* DHD_HP2P */ + #ifdef DHD_LBUF_AUDIT PKTAUDIT(dhd->osh, pkt); #endif // endif @@ -5080,12 +5746,17 @@ dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound) } /* Call lb_dispatch only if packets are queued */ - if (n) { + if (n && +#ifdef WL_MONITOR + !(dhd_monitor_enabled(dhd, ifidx)) && +#endif /* WL_MONITOR */ + TRUE) { DHD_LB_DISPATCH_RX_COMPL(dhd); DHD_LB_DISPATCH_RX_PROCESS(dhd); } return more; + } /** @@ -5114,29 +5785,39 @@ dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring) /** called when DHD needs to check for 'transmit complete' messages from the dongle */ bool BCMFASTPATH -dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound) +dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound, int ringtype) 
{ bool more = TRUE; uint n = 0; - msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln; + msgbuf_ring_t *ring; unsigned long flags; +#ifdef DHD_HP2P + if (ringtype == DHD_HP2P_RING && dhd->prot->d2hring_hp2p_txcpl) + ring = dhd->prot->d2hring_hp2p_txcpl; + else +#endif /* DHD_HP2P */ + ring = &dhd->prot->d2hring_tx_cpln; + /* Process all the messages - DTOH direction */ while (!dhd_is_device_removed(dhd)) { uint8 *msg_addr; uint32 msg_len; + if (dhd_query_bus_erros(dhd)) { + more = FALSE; + break; + } + if (dhd->hang_was_sent) { more = FALSE; break; } -#ifdef DHD_MAP_LOGGING if (dhd->smmu_fault_occurred) { more = FALSE; break; } -#endif /* DHD_MAP_LOGGING */ DHD_RING_LOCK(ring->ring_lock, flags); /* Get the address of the next message to be read from ring */ @@ -5219,15 +5900,17 @@ dhd_prot_process_ctrlbuf(dhd_pub_t *dhd) uint8 *msg_addr; uint32 msg_len; + if (dhd_query_bus_erros(dhd)) { + break; + } + if (dhd->hang_was_sent) { break; } -#ifdef DHD_MAP_LOGGING if (dhd->smmu_fault_occurred) { break; } -#endif /* DHD_MAP_LOGGING */ DHD_RING_LOCK(ring->ring_lock, flags); /* Get the address of the next message to be read from ring */ @@ -5280,12 +5963,10 @@ dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 goto done; } -#ifdef DHD_MAP_LOGGING if (dhd->smmu_fault_occurred) { ret = BCME_ERROR; goto done; } -#endif /* DHD_MAP_LOGGING */ msg = (cmn_msg_hdr_t *)buf; @@ -5407,6 +6088,32 @@ dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg) DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__)); } } +#ifdef DHD_HP2P + else if (request_id == DHD_D2H_HPPRING_TXREQ_PKTID) { + if (dhd->prot->d2hring_hp2p_txcpl != NULL) { + if (dhd->prot->d2hring_hp2p_txcpl->create_pending == TRUE) { + DHD_ERROR(("H2D ring create failed for hp2p ring\n")); + dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE; + } + else + DHD_ERROR(("ring create ID for a ring, create not pending\n")); + } else { + DHD_ERROR(("%s hp2p txcmpl ring doesn't exist\n", 
__FUNCTION__)); + } + } + else if (request_id == DHD_D2H_HPPRING_RXREQ_PKTID) { + if (dhd->prot->d2hring_hp2p_rxcpl != NULL) { + if (dhd->prot->d2hring_hp2p_rxcpl->create_pending == TRUE) { + DHD_ERROR(("D2H ring create failed for hp2p rxcmpl ring\n")); + dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE; + } + else + DHD_ERROR(("ring create ID for hp2p rxcmpl ring, not pending\n")); + } else { + DHD_ERROR(("%s hp2p rxcpl ring doesn't exist\n", __FUNCTION__)); + } + } +#endif /* DHD_HP2P */ else { DHD_ERROR(("don;t know how to pair with original request\n")); } @@ -5453,6 +6160,8 @@ dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg) } #endif // endif + dhd->prot->ioctl_ack_time = OSL_LOCALTIME_NS(); + DHD_GENERAL_LOCK(dhd, flags); if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) && (dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) { @@ -5484,6 +6193,15 @@ dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg) unsigned long flags; dhd_dma_buf_t retbuf; + /* Check for ioctl timeout induce flag, which is set by firing + * dhd iovar to induce IOCTL timeout. If flag is set, + * return from here, which results in to IOCTL timeout. 
+ */ + if (dhd->dhd_induce_error == DHD_INDUCE_IOCTL_TIMEOUT) { + DHD_ERROR(("%s: Inducing resumed on timeout\n", __FUNCTION__)); + return; + } + memset(&retbuf, 0, sizeof(dhd_dma_buf_t)); pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id); @@ -5509,6 +6227,8 @@ dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg) return; } + dhd->prot->ioctl_cmplt_time = OSL_LOCALTIME_NS(); + /* Clear Response pending bit */ prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING; DHD_GENERAL_UNLOCK(dhd, flags); @@ -5578,6 +6298,24 @@ exit: dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd); } +int +dhd_prot_check_tx_resource(dhd_pub_t *dhd) +{ + return dhd->prot->no_tx_resource; +} + +void +dhd_prot_update_pktid_txq_stop_cnt(dhd_pub_t *dhd) +{ + dhd->prot->pktid_txq_stop_cnt++; +} + +void +dhd_prot_update_pktid_txq_start_cnt(dhd_pub_t *dhd) +{ + dhd->prot->pktid_txq_start_cnt++; +} + /** called on MSG_TYPE_TX_STATUS message received from dongle */ static void BCMFASTPATH dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg) @@ -5593,15 +6331,17 @@ dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg) void *secdma; bool pkt_fate; msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln; -#ifdef TX_STATUS_LATENCY_STATS +#if defined(TX_STATUS_LATENCY_STATS) || defined(DHD_HP2P) flow_info_t *flow_info; uint64 tx_status_latency; -#endif /* TX_STATUS_LATENCY_STATS */ +#endif /* TX_STATUS_LATENCY_STATS || DHD_HP2P */ #if defined(TX_STATUS_LATENCY_STATS) flow_ring_node_t *flow_ring_node; uint16 flowid; #endif // endif + ts_timestamp_t *ts; + BCM_REFERENCE(ts); txstatus = (host_txbuf_cmpl_t *)msg; #if defined(TX_STATUS_LATENCY_STATS) flowid = txstatus->compl_hdr.flow_ring_id; @@ -5642,6 +6382,11 @@ dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg) return; } + if (DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map) == DHD_PKTID_MIN_AVAIL_COUNT) { + dhd->prot->no_tx_resource = FALSE; + dhd_bus_start_queue(dhd->bus); + } + if (SECURE_DMA_ENAB(dhd->osh)) { int offset = 0; BCM_REFERENCE(offset); @@ -5714,8 +6459,12 @@ 
workq_ring_full: #endif /* DMAMAP_STATS */ pkt_fate = dhd_dbg_process_tx_status(dhd, pkt, pktid, ltoh16(txstatus->compl_hdr.status) & WLFC_CTL_PKTFLAG_MASK); + #if defined(BCMPCIE) dhd_txcomplete(dhd, pkt, pkt_fate); +#ifdef DHD_4WAYM4_FAIL_DISCONNECT + dhd_eap_txcomplete(dhd, pkt, pkt_fate, txstatus->cmn_hdr.if_id); +#endif /* DHD_4WAYM4_FAIL_DISCONNECT */ #endif // endif #if DHD_DBG_SHOW_METADATA @@ -5732,17 +6481,23 @@ workq_ring_full: } #endif /* DHD_DBG_SHOW_METADATA */ +#ifdef DHD_HP2P + if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) { +#ifdef DHD_HP2P_DEBUG + bcm_print_bytes("txcpl", (uint8 *)txstatus, sizeof(host_txbuf_cmpl_t)); +#endif /* DHD_HP2P_DEBUG */ + dhd_update_hp2p_txstats(dhd, txstatus); + } +#endif /* DHD_HP2P */ + #ifdef DHD_LBUF_AUDIT PKTAUDIT(dhd->osh, pkt); #endif // endif - DHD_RING_UNLOCK(ring->ring_lock, flags); - PKTFREE(dhd->osh, pkt, TRUE); - DHD_RING_LOCK(ring->ring_lock, flags); DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id, txstatus->tx_status); DHD_RING_UNLOCK(ring->ring_lock, flags); - + PKTFREE(dhd->osh, pkt, TRUE); return; } /* dhd_prot_txstatus_process */ @@ -5901,6 +6656,18 @@ dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx) DHD_ERROR(("dhd flow_ring_table is NULL\n")); return BCME_NORESOURCE; } +#ifdef DHD_PCIE_PKTID + if (!DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map)) { + if (dhd->prot->pktid_depleted_cnt == DHD_PKTID_DEPLETED_MAX_COUNT) { + dhd_bus_stop_queue(dhd->bus); + dhd->prot->no_tx_resource = TRUE; + } + dhd->prot->pktid_depleted_cnt++; + goto err_no_res; + } else { + dhd->prot->pktid_depleted_cnt = 0; + } +#endif /* DHD_PCIE_PKTID */ flowid = DHD_PKT_GET_FLOWID(PKTBUF); flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table; @@ -5935,12 +6702,12 @@ dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx) goto err_free_pktid; } - DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid); - /* Extract the data pointer and length information */ pktdata = PKTDATA(dhd->osh, 
PKTBUF); pktlen = PKTLEN(dhd->osh, PKTBUF); + DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid); + /* Ethernet header: Copy before we cache flush packet using DMA_MAP */ bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN); @@ -6064,9 +6831,17 @@ dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx) txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa)); txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa)); } else { - txdesc->metadata_buf_len = htol16(0); - txdesc->metadata_buf_addr.high_addr = 0; - txdesc->metadata_buf_addr.low_addr = 0; +#ifdef DHD_HP2P + if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) { + dhd_update_hp2p_txdesc(dhd, txdesc); + } else +#endif /* DHD_HP2P */ + if (1) + { + txdesc->metadata_buf_len = htol16(0); + txdesc->metadata_buf_addr.high_addr = 0; + txdesc->metadata_buf_addr.low_addr = 0; + } } #ifdef DHD_PKTID_AUDIT_RING @@ -6090,7 +6865,16 @@ dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx) } /* Update the write pointer in TCM & ring bell */ -#ifdef TXP_FLUSH_NITEMS +#if defined(DHD_HP2P) && defined(TXP_FLUSH_NITEMS) + if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) { + dhd_calc_hp2p_burst(dhd, ring, flowid); + } else { + if ((ring->pend_items_count == prot->txp_threshold) || + ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) { + dhd_prot_txdata_write_flush(dhd, flowid); + } + } +#elif defined(TXP_FLUSH_NITEMS) /* Flush if we have either hit the txp_threshold or if this msg is */ /* occupying the last slot in the flow_ring - before wrap around. 
*/ if ((ring->pend_items_count == prot->txp_threshold) || @@ -6100,11 +6884,12 @@ dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx) #else /* update ring's WR index and ring doorbell to dongle */ dhd_prot_ring_write_complete(dhd, ring, txdesc, 1); -#endif // endif -#ifdef TX_STATUS_LATENCY_STATS +#endif /* DHD_HP2P && TXP_FLUSH_NITEMS */ + +#if defined(TX_STATUS_LATENCY_STATS) /* set the time when pkt is queued to flowring */ DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US()); -#endif /* TX_STATUS_LATENCY_STATS */ +#endif // endif OSL_ATOMIC_INC(dhd->osh, &prot->active_tx_count); /* @@ -6147,7 +6932,7 @@ err_no_res_pktfree: #endif /* DHD_PCIE_PKTID */ DHD_RING_UNLOCK(ring->ring_lock, flags); - +err_no_res: return BCME_NORESOURCE; } /* dhd_prot_txdata */ @@ -6288,12 +7073,17 @@ int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int uint8 action; if (dhd->bus->is_linkdown) { - DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n", __FUNCTION__)); + DHD_ERROR_RLMT(("%s : PCIe link is down. we have nothing to do\n", __FUNCTION__)); + goto done; + } + + if (dhd_query_bus_erros(dhd)) { + DHD_ERROR_RLMT(("%s : some BUS error. we have nothing to do\n", __FUNCTION__)); goto done; } if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) { - DHD_ERROR(("%s : bus is down. we have nothing to do -" + DHD_ERROR_RLMT(("%s : bus is down. 
we have nothing to do -" " bus state: %d, sent hang: %d\n", __FUNCTION__, dhd->busstate, dhd->hang_was_sent)); goto done; @@ -6451,6 +7241,10 @@ dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap) dhd_dma_buf_free(dhdp, dmmap->dstmem); MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t)); + + dhdp->bus->dmaxfer_complete = TRUE; + dhd_os_dmaxfer_wake(dhdp); + dmmap = NULL; } /* dmaxfer_free_prev_dmaaddr */ @@ -6509,6 +7303,7 @@ dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg) dhd_prot_t *prot = dhd->prot; uint64 end_usec; pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg; + int buf_free_scheduled; BCM_REFERENCE(cmplt); end_usec = OSL_SYSUPTIME_US(); @@ -6521,11 +7316,16 @@ dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg) prot->dmaxfer.dstmem.va, prot->dmaxfer.len) || cmplt->compl_hdr.status != BCME_OK) { DHD_ERROR(("DMA loopback failed\n")); + /* it is observed that some times the completion + * header status is set as OK, but the memcmp fails + * hence always explicitly set the dmaxfer status + * as error if this happens. 
+ */ + prot->dmaxfer.status = BCME_ERROR; prhex("XFER SRC: ", prot->dmaxfer.srcmem.va, prot->dmaxfer.len); prhex("XFER DST: ", prot->dmaxfer.dstmem.va, prot->dmaxfer.len); - prot->dmaxfer.status = BCME_ERROR; } else { switch (prot->dmaxfer.d11_lpbk) { @@ -6562,16 +7362,20 @@ dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg) } } - dhd_prepare_schedule_dmaxfer_free(dhd); + buf_free_scheduled = dhd_prepare_schedule_dmaxfer_free(dhd); end_usec -= prot->dmaxfer.start_usec; - if (end_usec) + if (end_usec) { + prot->dmaxfer.time_taken = end_usec; DHD_ERROR(("DMA loopback %d bytes in %lu usec, %u kBps\n", prot->dmaxfer.len, (unsigned long)end_usec, (prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)end_usec))); + } dhd->prot->dmaxfer.in_progress = FALSE; - dhd->bus->dmaxfer_complete = TRUE; - dhd_os_dmaxfer_wake(dhd); + if (buf_free_scheduled != BCME_OK) { + dhd->bus->dmaxfer_complete = TRUE; + dhd_os_dmaxfer_wake(dhd); + } } /** Test functionality. @@ -6653,17 +7457,30 @@ dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay, return BCME_OK; } /* dhdmsgbuf_dmaxfer_req */ -dma_xfer_status_t -dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd) +int +dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd, dma_xfer_info_t *result) { dhd_prot_t *prot = dhd->prot; if (prot->dmaxfer.in_progress) - return DMA_XFER_IN_PROGRESS; - else if (prot->dmaxfer.status == BCME_OK) - return DMA_XFER_SUCCESS; + result->status = DMA_XFER_IN_PROGRESS; + else if (prot->dmaxfer.status == 0) + result->status = DMA_XFER_SUCCESS; else - return DMA_XFER_FAILED; + result->status = DMA_XFER_FAILED; + + result->type = prot->dmaxfer.d11_lpbk; + result->error_code = prot->dmaxfer.status; + result->num_bytes = prot->dmaxfer.len; + result->time_taken = prot->dmaxfer.time_taken; + if (prot->dmaxfer.time_taken) { + /* throughput in kBps */ + result->tput = + (prot->dmaxfer.len * (1000 * 1000 / 1024)) / + (uint32)prot->dmaxfer.time_taken; + } + + return BCME_OK; } /** Called in the process of submitting an 
ioctl to the dongle */ @@ -6736,18 +7553,25 @@ done: void dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd) { - uint32 intstatus; dhd_prot_t *prot = dhd->prot; - dhd->rxcnt_timeout++; dhd->rx_ctlerrs++; dhd->iovar_timeout_occured = TRUE; - DHD_ERROR(("%s: resumed on timeout rxcnt_timeout %d ioctl_cmd %d " - "trans_id %d state %d busstate=%d ioctl_received=%d\n", - __FUNCTION__, dhd->rxcnt_timeout, prot->curr_ioctl_cmd, - prot->ioctl_trans_id, prot->ioctl_state, - dhd->busstate, prot->ioctl_received)); + DHD_ERROR(("%s: resumed on timeout rxcnt_timeout%s %d ioctl_cmd %d " + "trans_id %d state %d busstate=%d ioctl_received=%d\n", __FUNCTION__, + dhd->is_sched_error ? " due to scheduling problem" : "", + dhd->rxcnt_timeout, prot->curr_ioctl_cmd, prot->ioctl_trans_id, + prot->ioctl_state, dhd->busstate, prot->ioctl_received)); +#if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP) + if (dhd->is_sched_error && dhd->memdump_enabled == DUMP_MEMFILE_BUGON) { + /* change g_assert_type to trigger Kernel panic */ + g_assert_type = 2; + /* use ASSERT() to trigger panic */ + ASSERT(0); + } +#endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */ + if (prot->curr_ioctl_cmd == WLC_SET_VAR || prot->curr_ioctl_cmd == WLC_GET_VAR) { char iovbuf[32]; @@ -6772,7 +7596,7 @@ dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd) /* Check the PCIe link status by reading intstatus register */ intstatus = si_corereg(dhd->bus->sih, - dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0); + dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0); if (intstatus == (uint32)-1) { DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__)); dhd->bus->is_linkdown = TRUE; @@ -6802,7 +7626,7 @@ dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf) goto out; } - timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received, false); + timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received); #ifdef DHD_RECOVER_TIMEOUT if (prot->ioctl_received == 0) { @@ 
-6838,7 +7662,7 @@ dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf) dhd->bus->ipend = TRUE; dhd->bus->dpc_sched = TRUE; dhd_sched_dpc(dhd); - timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received, true); + timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received); } } } else { @@ -6846,6 +7670,11 @@ dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf) } if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) { + /* check if resumed on time out related to scheduling issue */ + dhd->is_sched_error = FALSE; + if (dhd->bus->isr_entry_time > prot->ioctl_fillup_time) { + dhd->is_sched_error = dhd_bus_query_dpc_sched_errors(dhd); + } dhd_msgbuf_iovar_timeout_dump(dhd); @@ -6857,6 +7686,7 @@ dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf) dhd_bus_mem_dump(dhd); } #endif /* DHD_FW_COREDUMP */ + ret = -ETIMEDOUT; goto out; } else { @@ -6946,11 +7776,14 @@ int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name, } #ifdef DHD_DUMP_PCIE_RINGS -int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, unsigned long *file_posn) +int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, const void *user_buf, + unsigned long *file_posn, bool file_write) { dhd_prot_t *prot; msgbuf_ring_t *ring; int ret = 0; + uint16 h2d_flowrings_total; + uint16 flowid; if (!(dhd) || !(dhd->prot)) { goto exit; @@ -6959,41 +7792,49 @@ int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, unsigned long *file_posn) /* Below is the same ring dump sequence followed in parser as well. 
*/ ring = &prot->h2dring_ctrl_subn; - if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0) + if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) goto exit; ring = &prot->h2dring_rxp_subn; - if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0) + if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) goto exit; ring = &prot->d2hring_ctrl_cpln; - if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0) + if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) goto exit; ring = &prot->d2hring_tx_cpln; - if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0) + if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) goto exit; ring = &prot->d2hring_rx_cpln; - if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0) + if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) goto exit; - ring = prot->h2d_flowrings_pool; - if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0) - goto exit; + h2d_flowrings_total = dhd_get_max_flow_rings(dhd); + FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) { + if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) { + goto exit; + } + } #ifdef EWP_EDL - if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support) + if (dhd->dongle_edl_support) { + ring = prot->d2hring_edl; + if ((ret = dhd_edl_ring_hdr_write(dhd, ring, file, user_buf, file_posn)) < 0) + goto exit; + } + else if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support) #else if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) #endif /* EWP_EDL */ { ring = prot->h2dring_info_subn; - if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0) + if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) goto exit; ring = prot->d2hring_info_cpln; - if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0) + if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) goto exit; } 
@@ -7003,7 +7844,8 @@ exit : /* Write to file */ static -int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, unsigned long *file_posn) +int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, + const void *user_buf, unsigned long *file_posn) { int ret = 0; @@ -7012,16 +7854,73 @@ int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, unsigned lon __FUNCTION__)); return BCME_ERROR; } - ret = dhd_os_write_file_posn(file, file_posn, (char *)(ring->dma_buf.va), - ((unsigned long)(ring->max_items) * (ring->item_len))); - if (ret < 0) { - DHD_ERROR(("%s: write file error !\n", __FUNCTION__)); - return BCME_ERROR; + if (file) { + ret = dhd_os_write_file_posn(file, file_posn, (char *)(ring->dma_buf.va), + ((unsigned long)(ring->max_items) * (ring->item_len))); + if (ret < 0) { + DHD_ERROR(("%s: write file error !\n", __FUNCTION__)); + ret = BCME_ERROR; + } + } else if (user_buf) { + ret = dhd_export_debug_data((char *)(ring->dma_buf.va), NULL, user_buf, + ((unsigned long)(ring->max_items) * (ring->item_len)), (int *)file_posn); } - return BCME_OK; + return ret; } #endif /* DHD_DUMP_PCIE_RINGS */ +#ifdef EWP_EDL +/* Write to file */ +static +int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf, + unsigned long *file_posn) +{ + int ret = 0, nitems = 0; + char *buf = NULL, *ptr = NULL; + uint8 *msg_addr = NULL; + uint16 rd = 0; + + if (ring == NULL) { + DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n", + __FUNCTION__)); + ret = BCME_ERROR; + goto done; + } + + buf = MALLOCZ(dhd->osh, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE)); + if (buf == NULL) { + DHD_ERROR(("%s: buffer allocation failed\n", __FUNCTION__)); + ret = BCME_ERROR; + goto done; + } + ptr = buf; + + for (; nitems < D2HRING_EDL_MAX_ITEM; nitems++, rd++) { + msg_addr = (uint8 *)ring->dma_buf.va + (rd * ring->item_len); + memcpy(ptr, (char *)msg_addr, D2HRING_EDL_HDR_SIZE); + ptr += D2HRING_EDL_HDR_SIZE; + } 
+ if (file) { + ret = dhd_os_write_file_posn(file, file_posn, buf, + (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM)); + if (ret < 0) { + DHD_ERROR(("%s: write file error !\n", __FUNCTION__)); + goto done; + } + } + else { + ret = dhd_export_debug_data(buf, NULL, user_buf, + (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM), file_posn); + } + +done: + if (buf) { + MFREE(dhd->osh, buf, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE)); + } + return ret; +} +#endif /* EWP_EDL */ + /** Add prot dump output to a buffer */ void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b) { @@ -7041,6 +7940,9 @@ void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b) dhd->prot->rw_index_sz); bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n", h2d_max_txpost, dhd->prot->h2d_max_txpost); + bcm_bprintf(b, "pktid_txq_start_cnt: %d\n", dhd->prot->pktid_txq_start_cnt); + bcm_bprintf(b, "pktid_txq_stop_cnt: %d\n", dhd->prot->pktid_txq_stop_cnt); + bcm_bprintf(b, "pktid_depleted_cnt: %d\n", dhd->prot->pktid_depleted_cnt); } /* Update local copy of dongle statistics */ @@ -7111,6 +8013,9 @@ dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring, ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems); if (ret_buf == NULL) { + /* HWA TODO, need to get RD pointer from different array + * which HWA will directly write into host memory + */ /* if alloc failed , invalidate cached read ptr */ if (dhd->dma_d2h_ring_upd_support) { ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx); @@ -7209,6 +8114,8 @@ dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx /* copy ioct payload */ ioct_buf = (void *) prot->ioctbuf.va; + prot->ioctl_fillup_time = OSL_LOCALTIME_NS(); + if (buf) memcpy(ioct_buf, buf, len); @@ -7359,6 +8266,11 @@ dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring) ring->wr = 0; ring->rd = 0; ring->curr_rd = 0; + /* Reset hwa_db_type for all rings, + * for data path rings, it will be assigned separately post 
init + * from dhd_prot_d2h_sync_init and dhd_prot_h2d_sync_init + */ + ring->hwa_db_type = 0; /* CAUTION: ring::base_addr already in Little Endian */ dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr, @@ -7433,66 +8345,8 @@ dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring) } /* dhd_prot_ring_detach */ -/* - * +---------------------------------------------------------------------------- - * Flowring Pool - * - * Unlike common rings, which are attached very early on (dhd_prot_attach), - * flowrings are dynamically instantiated. Moreover, flowrings may require a - * larger DMA-able buffer. To avoid issues with fragmented cache coherent - * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once. - * The DMA-able buffers are attached to these pre-allocated msgbuf_ring. - * - * Each DMA-able buffer may be allocated independently, or may be carved out - * of a single large contiguous region that is registered with the protocol - * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region - * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic). - * - * No flowring pool action is performed in dhd_prot_attach(), as the number - * of h2d rings is not yet known. - * - * In dhd_prot_init(), the dongle advertized number of h2d rings is used to - * determine the number of flowrings required, and a pool of msgbuf_rings are - * allocated and a DMA-able buffer (carved or allocated) is attached. - * See: dhd_prot_flowrings_pool_attach() - * - * A flowring msgbuf_ring object may be fetched from this pool during flowring - * creation, using the flowid. Likewise, flowrings may be freed back into the - * pool on flowring deletion. - * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release() - * - * In dhd_prot_detach(), the flowring pool is detached. 
The DMA-able buffers - * are detached (returned back to the carved region or freed), and the pool of - * msgbuf_ring and any objects allocated against it are freed. - * See: dhd_prot_flowrings_pool_detach() - * - * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a - * state as-if upon an attach. All DMA-able buffers are retained. - * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring - * pool attach will notice that the pool persists and continue to use it. This - * will avoid the case of a fragmented DMA-able region. - * - * +---------------------------------------------------------------------------- - */ - -/* Conversion of a flowid to a flowring pool index */ -#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \ - ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS) - -/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */ -#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \ - (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \ - DHD_FLOWRINGS_POOL_OFFSET(flowid) - -/* Traverse each flowring in the flowring pool, assigning ring and flowid */ -#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \ - for ((flowid) = DHD_FLOWRING_START_FLOWID, \ - (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \ - (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \ - (ring)++, (flowid)++) - /* Fetch number of H2D flowrings given the total number of h2d rings */ -static uint16 +uint16 dhd_get_max_flow_rings(dhd_pub_t *dhd) { if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) @@ -7564,6 +8418,11 @@ dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd) DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) { goto attach_fail; } + /* + * TOD0 - Currently flowrings hwa is disabled and can be enabled like below + * (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_TXPOSTS) ? 
HWA_DB_TYPE_TXPOSTS : 0; + */ + ring->hwa_db_type = 0; } return BCME_OK; @@ -7772,6 +8631,15 @@ __dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, /* cache flush */ OSL_CACHE_FLUSH(p, ring->item_len * nitems); + /* For HWA, update db_index and ring mb2 DB and return */ + if (HWA_ACTIVE(dhd) && ring->hwa_db_type) { + db_index = HWA_DB_INDEX_VALUE(ring->wr) | ring->hwa_db_type; + DHD_TRACE(("%s: ring(%s) wr(%d) hwa_db_type(0x%x) db_index(0x%x)\n", + __FUNCTION__, ring->name, ring->wr, ring->hwa_db_type, db_index)); + prot->mb_2_ring_fn(dhd->bus, db_index, TRUE); + return; + } + if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) { dhd_prot_dma_indx_set(dhd, ring->wr, H2D_DMA_INDX_WR_UPD, ring->idx); @@ -7846,6 +8714,15 @@ dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring) uint32 db_index; uint corerev; + /* For HWA, update db_index and ring mb2 DB and return */ + if (HWA_ACTIVE(dhd) && ring->hwa_db_type) { + db_index = HWA_DB_INDEX_VALUE(ring->rd) | ring->hwa_db_type; + DHD_TRACE(("%s: ring(%s) rd(0x%x) hwa_db_type(0x%x) db_index(0x%x)\n", + __FUNCTION__, ring->name, ring->rd, ring->hwa_db_type, db_index)); + prot->mb_2_ring_fn(dhd->bus, db_index, FALSE); + return; + } + /* update read index */ /* If dma'ing h2d indices supported * update the r -indices in the @@ -7853,7 +8730,7 @@ dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring) */ if (IDMA_ACTIVE(dhd)) { dhd_prot_dma_indx_set(dhd, ring->rd, - D2H_DMA_INDX_RD_UPD, ring->idx); + D2H_DMA_INDX_RD_UPD, ring->idx); db_index = IDMA_IDX1; if (dhd->bus->sih) { corerev = dhd->bus->sih->buscorerev; @@ -8403,7 +9280,7 @@ dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring); DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n", __FUNCTION__, flow_ring_node->flowid)); - DHD_RING_LOCK(ctrl_ring->ring_lock, flags); + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); return 
BCME_NOMEM; } @@ -8428,6 +9305,26 @@ dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr; flow_create_rqst->max_items = htol16(prot->h2d_max_txpost); flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE); + flow_create_rqst->if_flags = 0; + +#ifdef DHD_HP2P + /* Create HPP flow ring if HP2P is enabled and TID=7 and AWDL interface */ + /* and traffic is not multicast */ + /* Allow infra interface only if user enabled hp2p_infra_enable thru iovar */ + /* Allow only one HP2P Flow active at a time */ + if (dhd->hp2p_capable && !dhd->hp2p_ring_active && + flow_ring_node->flow_info.tid == HP2P_PRIO && + (dhd->hp2p_infra_enable || flow_create_rqst->msg.if_id) && + !ETHER_ISMULTI(flow_create_rqst->da)) { + flow_create_rqst->if_flags |= BCMPCIE_FLOW_RING_INTF_HP2P; + flow_ring_node->hp2p_ring = TRUE; + dhd->hp2p_ring_active = TRUE; + + DHD_ERROR(("%s: flow ring for HP2P tid = %d flowid = %d\n", + __FUNCTION__, flow_ring_node->flow_info.tid, + flow_ring_node->flowid)); + } +#endif /* DHD_HP2P */ /* definition for ifrm mask : bit0:d11ac core, bit1:d11ad core * currently it is not used for priority. 
so uses solely for ifrm mask @@ -8515,7 +9412,12 @@ dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf) ltoh16(resp->cmplt.ring_id), ltoh32(resp->cmn_hdr.request_id))); if ((ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) && - (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_BTLOGRING_REQ_PKTID)) { + (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_BTLOGRING_REQ_PKTID) && +#ifdef DHD_HP2P + (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_TXREQ_PKTID) && + (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_RXREQ_PKTID) && +#endif /* DHD_HP2P */ + TRUE) { DHD_ERROR(("invalid request ID with d2h ring create complete\n")); return; } @@ -8555,6 +9457,38 @@ dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf) #endif /* EWP_EDL */ } +#ifdef DHD_HP2P + if (dhd->prot->d2hring_hp2p_txcpl && + ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_TXREQ_PKTID) { + if (!dhd->prot->d2hring_hp2p_txcpl->create_pending) { + DHD_ERROR(("HPP tx ring create status for not pending cpl ring\n")); + return; + } + + if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) { + DHD_ERROR(("HPP tx cpl ring create failed with status %d\n", + ltoh16(resp->cmplt.status))); + return; + } + dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE; + dhd->prot->d2hring_hp2p_txcpl->inited = TRUE; + } + if (dhd->prot->d2hring_hp2p_rxcpl && + ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_RXREQ_PKTID) { + if (!dhd->prot->d2hring_hp2p_rxcpl->create_pending) { + DHD_ERROR(("HPP rx ring create status for not pending cpl ring\n")); + return; + } + + if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) { + DHD_ERROR(("HPP rx cpl ring create failed with status %d\n", + ltoh16(resp->cmplt.status))); + return; + } + dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE; + dhd->prot->d2hring_hp2p_rxcpl->inited = TRUE; + } +#endif /* DHD_HP2P */ } static void @@ -8596,6 +9530,12 @@ void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info, if (fmt == NULL) { 
fmt = default_fmt; } + + if (dhd->bus->is_linkdown) { + DHD_ERROR(("%s: Skip dumping flowring due to Link down\n", __FUNCTION__)); + return; + } + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx); dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx); bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va, @@ -8941,11 +9881,19 @@ dhd_prot_debug_info_print(dhd_pub_t *dhd) msgbuf_ring_t *ring; uint16 rd, wr; uint32 dma_buf_len; + uint64 current_time; + ulong ring_tcm_rd_addr; /* dongle address */ + ulong ring_tcm_wr_addr; /* dongle address */ DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n")); DHD_ERROR(("DHD: %s\n", dhd_version)); DHD_ERROR(("Firmware: %s\n", fw_version)); +#ifdef DHD_FW_COREDUMP + DHD_ERROR(("\n ------- DUMPING CONFIGURATION INFORMATION ------ \r\n")); + DHD_ERROR(("memdump mode: %d\n", dhd->memdump_enabled)); +#endif /* DHD_FW_COREDUMP */ + DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n")); DHD_ERROR(("ICPrevs: Dev %d, Host %d, active %d\n", prot->device_ipc_version, @@ -8966,13 +9914,31 @@ dhd_prot_debug_info_print(dhd_pub_t *dhd) DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n", h2d_max_txpost, prot->h2d_max_txpost)); + current_time = OSL_LOCALTIME_NS(); + DHD_ERROR(("current_time="SEC_USEC_FMT"\n", GET_SEC_USEC(current_time))); + DHD_ERROR(("ioctl_fillup_time="SEC_USEC_FMT + " ioctl_ack_time="SEC_USEC_FMT + " ioctl_cmplt_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(prot->ioctl_fillup_time), + GET_SEC_USEC(prot->ioctl_ack_time), + GET_SEC_USEC(prot->ioctl_cmplt_time))); + + /* Check PCIe INT registers */ + if (!dhd_pcie_dump_int_regs(dhd)) { + DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__)); + dhd->bus->is_linkdown = TRUE; + } + DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n")); ring = &prot->h2dring_ctrl_subn; dma_buf_len = ring->max_items * ring->item_len; - DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", + 
ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r; + ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w; + DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx " + "SIZE %d \r\n", ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), - ltoh32(ring->base_addr.low_addr), dma_buf_len)); + ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len)); DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); if (dhd->bus->is_linkdown) { DHD_ERROR(("CtrlPost: From Shared Mem: RD and WR are invalid" @@ -8986,9 +9952,12 @@ dhd_prot_debug_info_print(dhd_pub_t *dhd) ring = &prot->d2hring_ctrl_cpln; dma_buf_len = ring->max_items * ring->item_len; - DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", + ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r; + ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w; + DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx " + "SIZE %d \r\n", ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), - ltoh32(ring->base_addr.low_addr), dma_buf_len)); + ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len)); DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); if (dhd->bus->is_linkdown) { DHD_ERROR(("CtrlCpl: From Shared Mem: RD and WR are invalid" @@ -9003,9 +9972,13 @@ dhd_prot_debug_info_print(dhd_pub_t *dhd) ring = prot->h2dring_info_subn; if (ring) { dma_buf_len = ring->max_items * ring->item_len; - DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", + ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r; + ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w; + DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx " + "SIZE %d \r\n", ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), - ltoh32(ring->base_addr.low_addr), dma_buf_len)); + 
ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, + dma_buf_len)); DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); if (dhd->bus->is_linkdown) { DHD_ERROR(("InfoSub: From Shared Mem: RD and WR are invalid" @@ -9020,9 +9993,13 @@ dhd_prot_debug_info_print(dhd_pub_t *dhd) ring = prot->d2hring_info_cpln; if (ring) { dma_buf_len = ring->max_items * ring->item_len; - DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", + ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r; + ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w; + DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx " + "SIZE %d \r\n", ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), - ltoh32(ring->base_addr.low_addr), dma_buf_len)); + ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, + dma_buf_len)); DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); if (dhd->bus->is_linkdown) { DHD_ERROR(("InfoCpl: From Shared Mem: RD and WR are invalid" @@ -9037,10 +10014,14 @@ dhd_prot_debug_info_print(dhd_pub_t *dhd) ring = &prot->d2hring_tx_cpln; if (ring) { + ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r; + ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w; dma_buf_len = ring->max_items * ring->item_len; - DHD_ERROR(("TxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", + DHD_ERROR(("TxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx " + "SIZE %d \r\n", ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), - ltoh32(ring->base_addr.low_addr), dma_buf_len)); + ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, + dma_buf_len)); DHD_ERROR(("TxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); if (dhd->bus->is_linkdown) { DHD_ERROR(("TxCpl: From Shared Mem: RD and WR are invalid" @@ -9055,10 +10036,14 @@ dhd_prot_debug_info_print(dhd_pub_t *dhd) ring = &prot->d2hring_rx_cpln; if 
(ring) { + ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r; + ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w; dma_buf_len = ring->max_items * ring->item_len; - DHD_ERROR(("RxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", + DHD_ERROR(("RxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx " + "SIZE %d \r\n", ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), - ltoh32(ring->base_addr.low_addr), dma_buf_len)); + ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, + dma_buf_len)); DHD_ERROR(("RxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); if (dhd->bus->is_linkdown) { DHD_ERROR(("RxCpl: From Shared Mem: RD and WR are invalid" @@ -9073,10 +10058,14 @@ dhd_prot_debug_info_print(dhd_pub_t *dhd) #ifdef EWP_EDL ring = prot->d2hring_edl; if (ring) { + ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r; + ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w; dma_buf_len = ring->max_items * ring->item_len; - DHD_ERROR(("EdlRing: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", + DHD_ERROR(("EdlRing: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx " + "SIZE %d \r\n", ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), - ltoh32(ring->base_addr.low_addr), dma_buf_len)); + ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, + dma_buf_len)); DHD_ERROR(("EdlRing: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); if (dhd->bus->is_linkdown) { DHD_ERROR(("EdlRing: From Shared Mem: RD and WR are invalid" @@ -9093,6 +10082,14 @@ dhd_prot_debug_info_print(dhd_pub_t *dhd) DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n", __FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted)); +#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS + DHD_ERROR(("%s: multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n", + __FUNCTION__, dhd->multi_client_flow_rings, dhd->max_multi_client_flow_rings)); +#endif /* 
DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */ + + DHD_ERROR(("pktid_txq_start_cnt: %d\n", prot->pktid_txq_start_cnt)); + DHD_ERROR(("pktid_txq_stop_cnt: %d\n", prot->pktid_txq_stop_cnt)); + DHD_ERROR(("pktid_depleted_cnt: %d\n", prot->pktid_depleted_cnt)); dhd_pcie_debug_info_dump(dhd); @@ -9919,17 +10916,245 @@ dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp) return dhdp->prot->ioctl_trans_id; } -void dhd_get_hscb_info(struct dhd_prot *prot, void ** va, uint32 *len) +int dhd_get_hscb_info(dhd_pub_t *dhd, void ** va, uint32 *len) { + if (!dhd->hscb_enable) { + if (len) { + /* prevent "Operation not supported" dhd message */ + *len = 0; + return BCME_OK; + } + return BCME_UNSUPPORTED; + } + if (va) { - *va = prot->host_scb_buf.va; + *va = dhd->prot->host_scb_buf.va; } if (len) { - *len = prot->host_scb_buf.len; + *len = dhd->prot->host_scb_buf.len; + } + + return BCME_OK; +} + +#ifdef DHD_HP2P +uint32 +dhd_prot_pkt_threshold(dhd_pub_t *dhd, bool set, uint32 val) +{ + if (set) + dhd->pkt_thresh = (uint16)val; + + val = dhd->pkt_thresh; + + return val; +} + +uint32 +dhd_prot_time_threshold(dhd_pub_t *dhd, bool set, uint32 val) +{ + if (set) + dhd->time_thresh = (uint16)val; + + val = dhd->time_thresh; + + return val; +} + +uint32 +dhd_prot_pkt_expiry(dhd_pub_t *dhd, bool set, uint32 val) +{ + if (set) + dhd->pkt_expiry = (uint16)val; + + val = dhd->pkt_expiry; + + return val; +} + +uint8 +dhd_prot_hp2p_enable(dhd_pub_t *dhd, bool set, int enable) +{ + uint8 ret = 0; + if (set) { + dhd->hp2p_enable = (enable & 0xf) ? TRUE : FALSE; + dhd->hp2p_infra_enable = ((enable >> 4) & 0xf) ? TRUE : FALSE; + + if (enable) { + dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_TID_MAP); + } else { + dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_AC_MAP); + } + } + ret = dhd->hp2p_infra_enable ? 0x1:0x0; + ret <<= 4; + ret |= dhd->hp2p_enable ? 
0x1:0x0; + + return ret; +} + +static void +dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus) +{ + ts_timestamp_t *ts = (ts_timestamp_t *)&rxstatus->ts; + hp2p_info_t *hp2p_info; + uint32 dur1; + + hp2p_info = &dhd->hp2p_info[0]; + dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 100; + + if (dur1 > (MAX_RX_HIST_BIN - 1)) { + dur1 = MAX_RX_HIST_BIN - 1; + DHD_ERROR(("%s: 0x%x 0x%x\n", + __FUNCTION__, ts->low, ts->high)); + } + + hp2p_info->rx_t0[dur1 % MAX_RX_HIST_BIN]++; + return; +} + +static void +dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus) +{ + ts_timestamp_t *ts = (ts_timestamp_t *)&txstatus->ts; + uint16 flowid = txstatus->compl_hdr.flow_ring_id; + uint32 hp2p_flowid, dur1, dur2; + hp2p_info_t *hp2p_info; + + hp2p_flowid = dhd->bus->max_submission_rings - + dhd->bus->max_cmn_rings - flowid + 1; + hp2p_info = &dhd->hp2p_info[hp2p_flowid]; + ts = (ts_timestamp_t *)&(txstatus->ts); + + dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 1000; + if (dur1 > (MAX_TX_HIST_BIN - 1)) { + dur1 = MAX_TX_HIST_BIN - 1; + DHD_ERROR(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high)); + } + hp2p_info->tx_t0[dur1 % MAX_TX_HIST_BIN]++; + + dur2 = (((ts->high >> 10) & 0x3FF) * HP2P_TIME_SCALE) / 1000; + if (dur2 > (MAX_TX_HIST_BIN - 1)) { + dur2 = MAX_TX_HIST_BIN - 1; + DHD_ERROR(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high)); + } + + hp2p_info->tx_t1[dur2 % MAX_TX_HIST_BIN]++; + return; +} + +enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer) +{ + hp2p_info_t *hp2p_info; + unsigned long flags; + dhd_pub_t *dhdp; + +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + hp2p_info = container_of(timer, hp2p_info_t, timer.timer); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + dhdp = hp2p_info->dhd_pub; + if (!dhdp) { + goto done; + } + + DHD_INFO(("%s: pend_item = %d flowid = 
%d\n", + __FUNCTION__, ((msgbuf_ring_t *)hp2p_info->ring)->pend_items_count, + hp2p_info->flowid)); + + flags = dhd_os_hp2plock(dhdp); + + dhd_prot_txdata_write_flush(dhdp, hp2p_info->flowid); + hp2p_info->hrtimer_init = FALSE; + hp2p_info->num_timer_limit++; + + dhd_os_hp2punlock(dhdp, flags); +done: + return HRTIMER_NORESTART; +} + +static void +dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid) +{ + hp2p_info_t *hp2p_info; + uint16 hp2p_flowid; + + hp2p_flowid = dhd->bus->max_submission_rings - + dhd->bus->max_cmn_rings - flowid + 1; + hp2p_info = &dhd->hp2p_info[hp2p_flowid]; + + if (ring->pend_items_count == dhd->pkt_thresh) { + dhd_prot_txdata_write_flush(dhd, flowid); + + hp2p_info->hrtimer_init = FALSE; + hp2p_info->ring = NULL; + hp2p_info->num_pkt_limit++; + hrtimer_cancel(&hp2p_info->timer.timer); + + DHD_INFO(("%s: cancel hrtimer for flowid = %d \n" + "hp2p_flowid = %d pkt_thresh = %d\n", + __FUNCTION__, flowid, hp2p_flowid, dhd->pkt_thresh)); + } else { + if (hp2p_info->hrtimer_init == FALSE) { + hp2p_info->hrtimer_init = TRUE; + hp2p_info->flowid = flowid; + hp2p_info->dhd_pub = dhd; + hp2p_info->ring = ring; + hp2p_info->num_timer_start++; + + tasklet_hrtimer_start(&hp2p_info->timer, + ktime_set(0, dhd->time_thresh * 1000), HRTIMER_MODE_REL); + + DHD_INFO(("%s: start hrtimer for flowid = %d hp2_flowid = %d\n", + __FUNCTION__, flowid, hp2p_flowid)); + } } + return; +} + +static void +dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc) +{ + uint64 ts; + + ts = local_clock(); + do_div(ts, 1000); + + txdesc->metadata_buf_len = 0; + txdesc->metadata_buf_addr.high_addr = htol32((ts >> 32) & 0xFFFFFFFF); + txdesc->metadata_buf_addr.low_addr = htol32(ts & 0xFFFFFFFF); + txdesc->exp_time = dhd->pkt_expiry; + + DHD_INFO(("%s: metadata_high = 0x%x metadata_low = 0x%x exp_time = %x\n", + __FUNCTION__, txdesc->metadata_buf_addr.high_addr, + txdesc->metadata_buf_addr.low_addr, + txdesc->exp_time)); + + return; } +#endif /* 
DHD_HP2P */ -void dhd_get_hscb_buff(struct dhd_prot *prot, uint32 offset, uint32 length, void * buff) +#ifdef DHD_MAP_LOGGING +void +dhd_prot_smmu_fault_dump(dhd_pub_t *dhdp) { - memcpy(buff, (char*)prot->host_scb_buf.va + offset, length); + dhd_prot_debug_info_print(dhdp); + OSL_DMA_MAP_DUMP(dhdp->osh); +#ifdef DHD_MAP_PKTID_LOGGING + dhd_pktid_logging_dump(dhdp); +#endif /* DHD_MAP_PKTID_LOGGING */ +#ifdef DHD_FW_COREDUMP + dhdp->memdump_type = DUMP_TYPE_SMMU_FAULT; +#ifdef DNGL_AXI_ERROR_LOGGING + dhdp->memdump_enabled = DUMP_MEMFILE; + dhd_bus_get_mem_dump(dhdp); +#else + dhdp->memdump_enabled = DUMP_MEMONLY; + dhd_bus_mem_dump(dhdp); +#endif /* DNGL_AXI_ERROR_LOGGING */ +#endif /* DHD_FW_COREDUMP */ } +#endif /* DHD_MAP_LOGGING */ diff --git a/bcmdhd.100.10.315.x/dhd_pcie.c b/bcmdhd.100.10.315.x/dhd_pcie.c index 7fb69ce..de29e2d 100644 --- a/bcmdhd.100.10.315.x/dhd_pcie.c +++ b/bcmdhd.100.10.315.x/dhd_pcie.c @@ -1,7 +1,7 @@ /* * DHD Bus Module for PCIE * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: dhd_pcie.c 771178 2018-07-09 08:34:50Z $ + * $Id: dhd_pcie.c 825481 2019-06-14 10:06:03Z $ */ /* include files */ @@ -32,11 +32,13 @@ #include #include #include +#include #include #include #include #include #include +#include #include #if defined(DHD_DEBUG) #include @@ -57,6 +59,7 @@ #include #include #include +#include #ifdef DHDTCPACK_SUPPRESS #include #endif /* DHDTCPACK_SUPPRESS */ @@ -71,11 +74,31 @@ #include #endif /* DEBUGGER || DHD_DSCOPE */ +#ifdef DNGL_AXI_ERROR_LOGGING +#include +#include +#endif /* DNGL_AXI_ERROR_LOGGING */ + +#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON) +#include +#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */ + +#include #define EXTENDED_PCIE_DEBUG_DUMP 1 /* Enable Extended pcie registers dump */ #define MEMBLOCK 2048 /* Block size used for downloading of dongle image */ #define MAX_WKLK_IDLE_CHECK 3 /* times wake_lock checked before deciding not to suspend */ +#define DHD_MAX_ITEMS_HPP_TXCPL_RING 512 +#define DHD_MAX_ITEMS_HPP_RXCPL_RING 512 + +#define ARMCR4REG_CORECAP (0x4/sizeof(uint32)) +#define ARMCR4REG_MPUCTRL (0x90/sizeof(uint32)) +#define ACC_MPU_SHIFT 25 +#define ACC_MPU_MASK (0x1u << ACC_MPU_SHIFT) + +#define REG_WORK_AROUND (0x1e4/sizeof(uint32)) + #define ARMCR4REG_BANKIDX (0x40/sizeof(uint32)) #define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32)) /* Temporary war to fix precommit till sync issue between trunk & precommit branch is resolved */ @@ -120,11 +143,16 @@ bool force_trap_bad_h2d_phase = 0; int dhd_dongle_memsize; int dhd_dongle_ramsize; +struct dhd_bus *g_dhd_bus = NULL; +#ifdef DNGL_AXI_ERROR_LOGGING +static void dhd_log_dump_axi_error(uint8 *axi_err); +#endif /* DNGL_AXI_ERROR_LOGGING */ + static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size); static int dhdpcie_bus_readconsole(dhd_bus_t *bus); #if 
defined(DHD_FW_COREDUMP) -struct dhd_bus *g_dhd_bus = NULL; static int dhdpcie_mem_dump(dhd_bus_t *bus); +static int dhdpcie_get_mem_dump(dhd_bus_t *bus); #endif /* DHD_FW_COREDUMP */ static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size); @@ -149,6 +177,7 @@ static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag); static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh); static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len); +static void dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr); static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset); static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data); static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data); @@ -164,13 +193,10 @@ static void dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size); static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b); static void dhdpcie_fw_trap(dhd_bus_t *bus); static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info); +static void dhdpcie_handle_mb_data(dhd_bus_t *bus); extern void dhd_dpc_enable(dhd_pub_t *dhdp); extern void dhd_dpc_kill(dhd_pub_t *dhdp); -#ifdef DHD_PCIE_NATIVE_RUNTIMEPM -static void dhdpcie_handle_mb_data(dhd_bus_t *bus); -#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ - #ifdef IDLE_TX_FLOW_MGMT static void dhd_bus_check_idle_scan(dhd_bus_t *bus); static void dhd_bus_idle_scan(dhd_bus_t *bus); @@ -192,7 +218,7 @@ static void dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus); #define DHD_DEFAULT_DOORBELL_TIMEOUT 200 /* ms */ static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version); -static void dhdpcie_cto_error_recovery(struct dhd_bus *bus); +static int dhdpcie_cto_error_recovery(struct dhd_bus *bus); static int dhdpcie_init_d11status(struct dhd_bus *bus); @@ -201,9 +227,12 @@ static int dhdpcie_wrt_rnd(struct dhd_bus *bus); extern 
uint16 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd); extern void dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost); -#ifdef DHD_SSSR_DUMP -static int dhdpcie_sssr_dump(dhd_pub_t *dhd); -#endif /* DHD_SSSR_DUMP */ +#ifdef DHD_HP2P +extern enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer); +static uint16 dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val); +#endif // endif +#define NUM_PATTERNS 2 +static bool dhd_bus_tcm_test(struct dhd_bus *bus); /* IOVar table */ enum { @@ -245,6 +274,7 @@ enum { IOV_CTO_PREVENTION, IOV_PCIE_WD_RESET, IOV_DUMP_DONGLE, + IOV_HWA_ENAB_BMAP, IOV_IDMA_ENABLE, IOV_IFRM_ENABLE, IOV_CLEAR_RING, @@ -256,7 +286,14 @@ enum { IOV_INB_DW_ENABLE, IOV_CTO_THRESHOLD, IOV_HSCBSIZE, /* get HSCB buffer size */ - IOV_HSCBBYTES, /* copy HSCB buffer */ + IOV_HP2P_ENABLE, + IOV_HP2P_PKT_THRESHOLD, + IOV_HP2P_TIME_THRESHOLD, + IOV_HP2P_PKT_EXPIRY, + IOV_HP2P_TXCPL_MAXITEMS, + IOV_HP2P_RXCPL_MAXITEMS, + IOV_EXTDTXS_IN_TXCPL, + IOV_HOSTRDY_AFTER_INIT, IOV_PCIE_LAST /**< unused IOVAR */ }; @@ -271,8 +308,8 @@ const bcm_iovar_t dhdpcie_iovars[] = { {"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, 0, IOVT_BUFFER, 0 }, {"ramsize", IOV_RAMSIZE, 0, 0, IOVT_UINT32, 0 }, {"ramstart", IOV_RAMSTART, 0, 0, IOVT_UINT32, 0 }, - {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER, 3 * sizeof(int32) }, - {"pcie_suspend", IOV_PCIE_SUSPEND, 0, 0, IOVT_UINT32, 0 }, + {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER, sizeof(dma_xfer_info_t)}, + {"pcie_suspend", IOV_PCIE_SUSPEND, DHD_IOVF_PWRREQ_BYPASS, 0, IOVT_UINT32, 0 }, {"sleep_allowed", IOV_SLEEP_ALLOWED, 0, 0, IOVT_BOOL, 0 }, {"dngl_isolation", IOV_DONGLEISOLATION, 0, 0, IOVT_UINT32, 0 }, {"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 0, 0, IOVT_UINT32, 0 }, @@ -301,6 +338,7 @@ const bcm_iovar_t dhdpcie_iovars[] = { {"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER, MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t))}, {"clear_ring", IOV_CLEAR_RING, 0, 0, IOVT_UINT32, 0 }, + 
{"hwa_enab_bmap", IOV_HWA_ENAB_BMAP, 0, 0, IOVT_UINT32, 0 }, {"idma_enable", IOV_IDMA_ENABLE, 0, 0, IOVT_UINT32, 0 }, {"ifrm_enable", IOV_IFRM_ENABLE, 0, 0, IOVT_UINT32, 0 }, {"dar_enable", IOV_DAR_ENABLE, 0, 0, IOVT_UINT32, 0 }, @@ -311,11 +349,20 @@ const bcm_iovar_t dhdpcie_iovars[] = { {"inb_dw_enable", IOV_INB_DW_ENABLE, 0, 0, IOVT_UINT32, 0 }, {"cto_threshold", IOV_CTO_THRESHOLD, 0, 0, IOVT_UINT32, 0 }, {"hscbsize", IOV_HSCBSIZE, 0, 0, IOVT_UINT32, 0 }, - {"hscbbytes", IOV_HSCBBYTES, 0, 0, IOVT_BUFFER, 2 * sizeof(int32) }, +#ifdef DHD_HP2P + {"hp2p_enable", IOV_HP2P_ENABLE, 0, 0, IOVT_UINT32, 0 }, + {"hp2p_pkt_thresh", IOV_HP2P_PKT_THRESHOLD, 0, 0, IOVT_UINT32, 0 }, + {"hp2p_time_thresh", IOV_HP2P_TIME_THRESHOLD, 0, 0, IOVT_UINT32, 0 }, + {"hp2p_pkt_expiry", IOV_HP2P_PKT_EXPIRY, 0, 0, IOVT_UINT32, 0 }, + {"hp2p_txcpl_maxitems", IOV_HP2P_TXCPL_MAXITEMS, 0, 0, IOVT_UINT32, 0 }, + {"hp2p_rxcpl_maxitems", IOV_HP2P_RXCPL_MAXITEMS, 0, 0, IOVT_UINT32, 0 }, +#endif // endif + {"extdtxs_in_txcpl", IOV_EXTDTXS_IN_TXCPL, 0, 0, IOVT_UINT32, 0 }, + {"hostrdy_after_init", IOV_HOSTRDY_AFTER_INIT, 0, 0, IOVT_UINT32, 0 }, {NULL, 0, 0, 0, 0, 0 } }; -#define MAX_READ_TIMEOUT 5 * 1000 * 1000 +#define MAX_READ_TIMEOUT 2 * 1000 * 1000 #ifndef DHD_RXBOUND #define DHD_RXBOUND 64 @@ -411,6 +458,22 @@ dhd_bus_db1_addr_1_get(struct dhd_bus *bus) return ((DAR_ACTIVE(bus->dhd)) ? 
DAR_PCIH2D_DB1_1(bus->sih->buscorerev) : PCIH2D_DB1_1); } +/* + * WAR for SWWLAN-215055 - [4378B0] ARM fails to boot without DAR WL domain request + */ +static INLINE void +dhd_bus_pcie_pwr_req_wl_domain(struct dhd_bus *bus, uint offset, bool enable) +{ + if (enable) { + si_corereg(bus->sih, bus->sih->buscoreidx, offset, + SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT, + SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT); + } else { + si_corereg(bus->sih, bus->sih->buscoreidx, offset, + SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT, 0); + } +} + static INLINE void _dhd_bus_pcie_pwr_req_clear_cmn(struct dhd_bus *bus) { @@ -497,8 +560,8 @@ _dhd_bus_pcie_pwr_req_pd0123_cmn(struct dhd_bus *bus) { uint mask, val; - mask = SRPWR_DMN_ALL_MASK; - val = SRPWR_DMN_ALL_MASK; + mask = SRPWR_DMN_ALL_MASK(bus->sih); + val = SRPWR_DMN_ALL_MASK(bus->sih); si_srpwr_request(bus->sih, mask, val); } @@ -514,11 +577,11 @@ dhd_bus_pcie_pwr_req_reload_war(struct dhd_bus *bus) } static INLINE void -_dhd_bus_pcie_pwr_req_clear_pd23_cmn(struct dhd_bus *bus) +_dhd_bus_pcie_pwr_req_clear_pd0123_cmn(struct dhd_bus *bus) { uint mask; - mask = SRPWR_DMN3_MACMAIN_MASK | SRPWR_DMN2_MACAUX_MASK; + mask = SRPWR_DMN_ALL_MASK(bus->sih); si_srpwr_request(bus->sih, mask, 0); } @@ -529,7 +592,7 @@ dhd_bus_pcie_pwr_req_clear_reload_war(struct dhd_bus *bus) unsigned long flags = 0; DHD_GENERAL_LOCK(bus->dhd, flags); - _dhd_bus_pcie_pwr_req_clear_pd23_cmn(bus); + _dhd_bus_pcie_pwr_req_clear_pd0123_cmn(bus); DHD_GENERAL_UNLOCK(bus->dhd, flags); } @@ -548,6 +611,7 @@ dhdpcie_chip_support_msi(dhd_bus_t *bus) si_chipid(bus->sih) == BCM4375_CHIP_ID || si_chipid(bus->sih) == BCM4362_CHIP_ID || si_chipid(bus->sih) == BCM43751_CHIP_ID || + si_chipid(bus->sih) == BCM43752_CHIP_ID || si_chipid(bus->sih) == BCM4361_CHIP_ID || si_chipid(bus->sih) == BCM4359_CHIP_ID) { return FALSE; @@ -612,13 +676,17 @@ int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr, ret = BCME_NORESOURCE; break; } + DHD_ERROR(("%s: making 
DHD_BUS_DOWN\n", __FUNCTION__)); bus->dhd->busstate = DHD_BUS_DOWN; + bus->dhd->hostrdy_after_init = TRUE; bus->db1_for_mb = TRUE; bus->dhd->hang_report = TRUE; bus->use_mailbox = FALSE; bus->use_d0_inform = FALSE; bus->intr_enabled = FALSE; bus->flr_force_fail = FALSE; + /* By default disable HWA and enable it via iovar */ + bus->hwa_enab_bmap = 0; /* update the dma indices if set through module parameter. */ if (dma_ring_indices != 0) { dhdpcie_set_dma_ring_indices(bus->dhd, dma_ring_indices); @@ -640,11 +708,14 @@ int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr, bus->d2h_intr_method = PCIE_INTX; #endif /* DHD_MSI_SUPPORT */ +#ifdef DHD_HP2P + bus->hp2p_txcpl_max_items = DHD_MAX_ITEMS_HPP_TXCPL_RING; + bus->hp2p_rxcpl_max_items = DHD_MAX_ITEMS_HPP_RXCPL_RING; +#endif /* DHD_HP2P */ + DHD_TRACE(("%s: EXIT SUCCESS\n", __FUNCTION__)); -#ifdef DHD_FW_COREDUMP g_dhd_bus = bus; -#endif // endif *bus_ptr = bus; return ret; } while (0); @@ -727,14 +798,94 @@ uint dhd_bus_chippkg_id(dhd_pub_t *dhdp) return bus->sih->chippkg; } +/** Conduct Loopback test */ +int +dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type) +{ + dma_xfer_info_t dmaxfer_lpbk; + int ret = BCME_OK; + +#define PCIE_DMAXFER_LPBK_LENGTH 4096 + memset(&dmaxfer_lpbk, 0, sizeof(dma_xfer_info_t)); + dmaxfer_lpbk.version = DHD_DMAXFER_VERSION; + dmaxfer_lpbk.length = (uint16)sizeof(dma_xfer_info_t); + dmaxfer_lpbk.num_bytes = PCIE_DMAXFER_LPBK_LENGTH; + dmaxfer_lpbk.type = type; + dmaxfer_lpbk.should_wait = TRUE; + + ret = dhd_bus_iovar_op(dhdp, "pcie_dmaxfer", NULL, 0, + (char *)&dmaxfer_lpbk, sizeof(dma_xfer_info_t), IOV_SET); + if (ret < 0) { + DHD_ERROR(("failed to start PCIe Loopback Test!!! " + "Type:%d Reason:%d\n", type, ret)); + return ret; + } + + if (dmaxfer_lpbk.status != DMA_XFER_SUCCESS) { + DHD_ERROR(("failed to check PCIe Loopback Test!!! 
" + "Type:%d Status:%d Error code:%d\n", type, + dmaxfer_lpbk.status, dmaxfer_lpbk.error_code)); + ret = BCME_ERROR; + } else { + DHD_ERROR(("successful to check PCIe Loopback Test" + " Type:%d\n", type)); + } +#undef PCIE_DMAXFER_LPBK_LENGTH + + return ret; +} + +/* Log the lastest DPC schedule time */ +void +dhd_bus_set_dpc_sched_time(dhd_pub_t *dhdp) +{ + dhdp->bus->dpc_sched_time = OSL_LOCALTIME_NS(); +} + +/* Check if there is DPC scheduling errors */ +bool +dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + bool sched_err; + + if (bus->dpc_entry_time < bus->isr_exit_time) { + /* Kernel doesn't schedule the DPC after processing PCIe IRQ */ + sched_err = TRUE; + } else if (bus->dpc_entry_time < bus->resched_dpc_time) { + /* Kernel doesn't schedule the DPC after DHD tries to reschedule + * the DPC due to pending work items to be processed. + */ + sched_err = TRUE; + } else { + sched_err = FALSE; + } + + if (sched_err) { + /* print out minimum timestamp info */ + DHD_ERROR(("isr_entry_time="SEC_USEC_FMT + " isr_exit_time="SEC_USEC_FMT + " dpc_entry_time="SEC_USEC_FMT + "\ndpc_exit_time="SEC_USEC_FMT + " dpc_sched_time="SEC_USEC_FMT + " resched_dpc_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(bus->isr_entry_time), + GET_SEC_USEC(bus->isr_exit_time), + GET_SEC_USEC(bus->dpc_entry_time), + GET_SEC_USEC(bus->dpc_exit_time), + GET_SEC_USEC(bus->dpc_sched_time), + GET_SEC_USEC(bus->resched_dpc_time))); + } + + return sched_err; +} + /** Read and clear intstatus. This should be called with interrupts disabled or inside isr */ uint32 dhdpcie_bus_intstatus(dhd_bus_t *bus) { uint32 intstatus = 0; -#ifndef DHD_READ_INTSTATUS_IN_DPC uint32 intmask = 0; -#endif /* DHD_READ_INTSTATUS_IN_DPC */ if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) { DHD_ERROR(("%s: trying to clear intstatus after D3 Ack\n", __FUNCTION__)); @@ -749,19 +900,22 @@ dhdpcie_bus_intstatus(dhd_bus_t *bus) /* this is a PCIE core register..not a config register... 
*/ intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0); -#ifndef DHD_READ_INTSTATUS_IN_DPC /* this is a PCIE core register..not a config register... */ intmask = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask, 0, 0); - intstatus &= intmask; -#endif /* DHD_READ_INTSTATUS_IN_DPC */ /* Is device removed. intstatus & intmask read 0xffffffff */ - if (intstatus == (uint32)-1) { + if (intstatus == (uint32)-1 || intmask == (uint32)-1) { DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__)); + DHD_ERROR(("%s: INTSTAT : 0x%x INTMASK : 0x%x.\n", + __FUNCTION__, intstatus, intmask)); bus->is_linkdown = TRUE; dhd_pcie_debug_info_dump(bus->dhd); return intstatus; } +#ifndef DHD_READ_INTSTATUS_IN_DPC + intstatus &= intmask; +#endif /* DHD_READ_INTSTATUS_IN_DPC */ + /* * The fourth argument to si_corereg is the "mask" fields of the register to update * and the fifth field is the "value" to update. Now if we are interested in only @@ -777,6 +931,49 @@ dhdpcie_bus_intstatus(dhd_bus_t *bus) return intstatus; } +void +dhdpcie_cto_recovery_handler(dhd_pub_t *dhd) +{ + dhd_bus_t *bus = dhd->bus; + int ret; + + /* Disable PCIe Runtime PM to avoid D3_ACK timeout. + */ + DHD_DISABLE_RUNTIME_PM(dhd); + + /* Sleep for 1 seconds so that any AXI timeout + * if running on ALP clock also will be captured + */ + OSL_SLEEP(1000); + + /* reset backplane and cto, + * then access through pcie is recovered. 
+ */ + ret = dhdpcie_cto_error_recovery(bus); + if (!ret) { + /* Waiting for backplane reset */ + OSL_SLEEP(10); + /* Dump debug Info */ + dhd_prot_debug_info_print(bus->dhd); + /* Dump console buffer */ + dhd_bus_dump_console_buffer(bus); +#if defined(DHD_FW_COREDUMP) + /* save core dump or write to a file */ + if (!bus->is_linkdown && bus->dhd->memdump_enabled) { +#ifdef DHD_SSSR_DUMP + bus->dhd->collect_sssr = TRUE; +#endif /* DHD_SSSR_DUMP */ + bus->dhd->memdump_type = DUMP_TYPE_CTO_RECOVERY; + dhdpcie_mem_dump(bus); + } +#endif /* DHD_FW_COREDUMP */ + } + bus->is_linkdown = TRUE; + bus->dhd->hang_reason = HANG_REASON_PCIE_CTO_DETECT; + /* Send HANG event */ + dhd_os_send_hang_message(bus->dhd); +} + /** * Name: dhdpcie_bus_isr * Parameters: @@ -822,11 +1019,31 @@ dhdpcie_bus_isr(dhd_bus_t *bus) /* read pci_intstatus */ intstatus = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4); + if (intstatus == (uint32)-1) { + DHD_ERROR(("%s : Invalid intstatus for cto recovery\n", + __FUNCTION__)); + dhdpcie_disable_irq_nosync(bus); + break; + } + if (intstatus & PCI_CTO_INT_MASK) { - /* reset backplane and cto, - * then access through pcie is recovered. + DHD_ERROR(("%s: ##### CTO RECOVERY REPORTED BY DONGLE " + "intstat=0x%x enab=%d\n", __FUNCTION__, + intstatus, bus->cto_enable)); + bus->cto_triggered = 1; + /* + * DAR still accessible */ - dhdpcie_cto_error_recovery(bus); + dhd_bus_dump_dar_registers(bus); + + /* Disable further PCIe interrupts */ + dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! 
*/ + /* Stop Tx flow */ + dhd_bus_stop_queue(bus); + + /* Schedule CTO recovery */ + dhd_schedule_cto_recovery(bus->dhd); + return TRUE; } } @@ -845,6 +1062,8 @@ dhdpcie_bus_isr(dhd_bus_t *bus) * so disable this for EFI */ DHD_LOG_MEM(("%s : this interrupt is not ours\n", __FUNCTION__)); + bus->non_ours_irq_count++; + bus->last_non_ours_irq_time = OSL_LOCALTIME_NS(); break; } @@ -1022,8 +1241,7 @@ dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr) OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, sizeof(uint32), bus->saved_config.bar0_win); - OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR1_WIN, sizeof(uint32), - bus->saved_config.bar1_win); + dhdpcie_setbar1win(bus, bus->saved_config.bar1_win); return BCME_OK; } @@ -1103,6 +1321,13 @@ dhdpcie_cc_watchdog_reset(dhd_bus_t *bus) void dhdpcie_dongle_reset(dhd_bus_t *bus) { + /* if the pcie link is down, watchdog reset + * should not be done, as it may hang + */ + if (bus->is_linkdown) { + return; + } + /* dhd_bus_perform_flr will return BCME_UNSUPPORTED if chip is not FLR capable */ if (dhd_bus_perform_flr(bus, FALSE) == BCME_UNSUPPORTED) { #ifdef DHD_USE_BP_RESET @@ -1163,21 +1388,34 @@ dhdpcie_dongle_attach(dhd_bus_t *bus) goto fail; } - if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) { + /* Configure CTO Prevention functionality */ #if defined(BCMFPGA_HW) - DHD_ERROR(("Disable CTO\n")); - bus->cto_enable = FALSE; + DHD_ERROR(("Disable CTO\n")); + bus->cto_enable = FALSE; #else +#if defined(BCMPCIE_CTO_PREVENTION) + if (bus->sih->buscorerev >= 24) { DHD_ERROR(("Enable CTO\n")); bus->cto_enable = TRUE; -#endif // endif - dhdpcie_cto_init(bus, bus->cto_enable); + } else +#endif /* BCMPCIE_CTO_PREVENTION */ + { + DHD_ERROR(("Disable CTO\n")); + bus->cto_enable = FALSE; + } +#endif /* BCMFPGA_HW */ + + if (PCIECTO_ENAB(bus)) { + dhdpcie_cto_init(bus, TRUE); + } + + if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) { /* * HW JIRA - CRWLPCIEGEN2-672 * Producer Index Feature which is used by F1 gets reset 
on F0 FLR * fixed in REV68 */ - if (bus->sih->buscorerev == 66) { + if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) { dhdpcie_ssreset_dis_enum_rst(bus); } @@ -1192,6 +1430,26 @@ dhdpcie_dongle_attach(dhd_bus_t *bus) dhd_bus_pcie_pwr_req_nolock(bus); } + /* Get info on the ARM and SOCRAM cores... */ + /* Should really be qualified by device id */ + if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) || + (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) || + (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) || + (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) { + bus->armrev = si_corerev(bus->sih); + bus->coreid = si_coreid(bus->sih); + } else { + DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__)); + goto fail; + } + + /* CA7 requires coherent bits on */ + if (bus->coreid == ARMCA7_CORE_ID) { + val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4); + dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4, + (val | PCIE_BARCOHERENTACCEN_MASK)); + } + /* Olympic EFI requirement - stop driver load if FW is already running * need to do this here before pcie_watchdog_reset, because * pcie_watchdog_reset will put the ARM back into halt state @@ -1237,6 +1495,20 @@ dhdpcie_dongle_attach(dhd_bus_t *bus) } #endif /* !DHD_SKIP_DONGLE_RESET_IN_ATTACH */ + /* need to set the force_bt_quiesce flag here + * before calling dhdpcie_dongle_flr_or_pwr_toggle + */ + bus->force_bt_quiesce = TRUE; + /* + * For buscorerev = 66 and after, F0 FLR should be done independent from F1. + * So don't need BT quiesce. + */ + if (bus->sih->buscorerev >= 66) { + bus->force_bt_quiesce = FALSE; + } + + dhdpcie_dongle_flr_or_pwr_toggle(bus); + si_setcore(bus->sih, PCIE2_CORE_ID, 0); sbpcieregs = (sbpcieregs_t*)(bus->regs); @@ -1245,18 +1517,6 @@ dhdpcie_dongle_attach(dhd_bus_t *bus) val = R_REG(osh, &sbpcieregs->configdata); W_REG(osh, &sbpcieregs->configdata, val); - /* Get info on the ARM and SOCRAM cores... 
*/ - /* Should really be qualified by device id */ - if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) || - (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) || - (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) || - (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) { - bus->armrev = si_corerev(bus->sih); - } else { - DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__)); - goto fail; - } - if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) { /* Only set dongle RAMSIZE to default value when BMC vs ARM usage of SYSMEM is not * adjusted. @@ -1330,6 +1590,9 @@ dhdpcie_dongle_attach(dhd_bus_t *bus) case BCM43751_CHIP_ID: bus->dongle_ram_base = CR4_43751_RAM_BASE; break; + case BCM43752_CHIP_ID: + bus->dongle_ram_base = CR4_43752_RAM_BASE; + break; case BCM4375_CHIP_ID: case BCM4369_CHIP_ID: bus->dongle_ram_base = CR4_4369_RAM_BASE; @@ -1371,9 +1634,24 @@ dhdpcie_dongle_attach(dhd_bus_t *bus) if (MULTIBP_ENAB(bus->sih)) { dhd_bus_pcie_pwr_req_clear_nolock(bus); - } - bus->force_bt_quiesce = TRUE; + /* + * One time clearing of Common Power Domain since HW default is set + * Needs to be after FLR because FLR resets PCIe enum back to HW defaults + * for 4378B0 (rev 68). 
+ * On 4378A0 (rev 66), PCIe enum reset is disabled due to CRWLPCIEGEN2-672 + */ + si_srpwr_request(bus->sih, SRPWR_DMN0_PCIE_MASK, 0); + + /* + * WAR to fix ARM cold boot; + * Assert WL domain in DAR helps but not enum + */ + if (bus->sih->buscorerev >= 68) { + dhd_bus_pcie_pwr_req_wl_domain(bus, + DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev), TRUE); + } + } return 0; @@ -1410,19 +1688,27 @@ void dhdpcie_bus_intr_enable(dhd_bus_t *bus) { DHD_TRACE(("%s Enter\n", __FUNCTION__)); - if (bus && bus->sih && !bus->is_linkdown) { - /* Skip after recieving D3 ACK */ - if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) { - return; - } - if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || - (bus->sih->buscorerev == 4)) { - dhpcie_bus_unmask_interrupt(bus); - } else { - si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask, - bus->def_intmask, bus->def_intmask); + if (bus) { + if (bus->sih && !bus->is_linkdown) { + /* Skip after recieving D3 ACK */ + if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) { + return; + } + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + dhpcie_bus_unmask_interrupt(bus); + } else { + #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) + dhd_bus_mmio_trace(bus, bus->pcie_mailbox_mask, + bus->def_intmask, TRUE); + #endif + si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask, + bus->def_intmask, bus->def_intmask); + } } + } + DHD_TRACE(("%s Exit\n", __FUNCTION__)); } @@ -1444,6 +1730,7 @@ dhdpcie_bus_intr_disable(dhd_bus_t *bus) bus->def_intmask, 0); } } + DHD_TRACE(("%s Exit\n", __FUNCTION__)); } @@ -1509,13 +1796,14 @@ dhdpcie_bus_remove_prep(dhd_bus_t *bus) DHD_TRACE(("%s Enter\n", __FUNCTION__)); DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); bus->dhd->busstate = DHD_BUS_DOWN; DHD_GENERAL_UNLOCK(bus->dhd, flags); dhd_os_sdlock(bus->dhd); if (bus->sih && !bus->dhd->dongle_isolation) { - if 
(bus->sih->buscorerev == 66) { + if (PCIE_RELOAD_WAR_ENAB(bus->sih->buscorerev)) { dhd_bus_pcie_pwr_req_reload_war(bus); } @@ -1526,7 +1814,14 @@ dhdpcie_bus_remove_prep(dhd_bus_t *bus) */ if (!bus->is_linkdown) { +#ifndef DHD_SKIP_DONGLE_RESET_IN_ATTACH + /* for efi, depending on bt over pcie mode + * we either power toggle or do F0 FLR + * from dhdpcie_bus_release dongle. So no need to + * do dongle reset from here + */ dhdpcie_dongle_reset(bus); +#endif /* !DHD_SKIP_DONGLE_RESET_IN_ATTACH */ } bus->dhd->is_pcie_watchdog_reset = TRUE; @@ -1554,6 +1849,23 @@ dhd_deinit_bus_lock(dhd_bus_t *bus) } } +void +dhd_init_backplane_access_lock(dhd_bus_t *bus) +{ + if (!bus->backplane_access_lock) { + bus->backplane_access_lock = dhd_os_spin_lock_init(bus->dhd->osh); + } +} + +void +dhd_deinit_backplane_access_lock(dhd_bus_t *bus) +{ + if (bus->backplane_access_lock) { + dhd_os_spin_lock_deinit(bus->dhd->osh, bus->backplane_access_lock); + bus->backplane_access_lock = NULL; + } +} + /** Detach and free everything */ void dhdpcie_bus_release(dhd_bus_t *bus) @@ -1585,6 +1897,7 @@ dhdpcie_bus_release(dhd_bus_t *bus) dhdpcie_free_irq(bus); } dhd_deinit_bus_lock(bus); + dhd_deinit_backplane_access_lock(bus); /** * dhdpcie_bus_release_dongle free bus->sih handle, which is needed to * access Dongle registers. 
@@ -1620,6 +1933,7 @@ dhdpcie_bus_release(dhd_bus_t *bus) /* Finally free bus info */ MFREE(osh, bus, sizeof(dhd_bus_t)); + g_dhd_bus = NULL; } DHD_TRACE(("%s: Exit\n", __FUNCTION__)); @@ -1636,6 +1950,11 @@ dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bo return; } + if (bus->is_linkdown) { + DHD_ERROR(("%s : Skip release dongle due to linkdown \n", __FUNCTION__)); + return; + } + if (bus->sih) { if (!dongle_isolation && @@ -1643,6 +1962,8 @@ dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bo dhdpcie_dongle_reset(bus); } + dhdpcie_dongle_flr_or_pwr_toggle(bus); + if (bus->ltrsleep_on_unload) { si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0); @@ -1726,7 +2047,6 @@ dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh) /** Stop bus module: clear pending frames, disable data flow */ void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex) { - uint32 status; unsigned long flags, flags_bus; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); @@ -1742,6 +2062,7 @@ void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex) DHD_DISABLE_RUNTIME_PM(bus->dhd); DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); bus->dhd->busstate = DHD_BUS_DOWN; DHD_GENERAL_UNLOCK(bus->dhd, flags); @@ -1753,8 +2074,11 @@ void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex) dhdpcie_bus_intr_disable(bus); DHD_BUS_UNLOCK(bus->bus_lock, flags_bus); - status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4); - dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status); + if (!bus->is_linkdown) { + uint32 status; + status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4); + dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status); + } if (!dhd_download_fw_on_driverload) { dhd_dpc_kill(bus->dhd); @@ -1956,8 +2280,7 @@ static int concate_revision_bcm4359(dhd_bus_t *bus, char *fw_path, char *nv_path #define DEFAULT_CIDINFO_FOR_A1 "r01a_e30a_a1" #define 
DEFAULT_CIDINFO_FOR_B0 "r01i_e32_b0" #define MAX_VID_LEN 8 -#define MAX_VNAME_LEN 30 -#define CIS_TUPLE_HDR_LEN 2 +#define CIS_TUPLE_HDR_LEN 2 #if defined(BCM4361_CHIP) #define CIS_TUPLE_START_ADDRESS 0x18011110 #define CIS_TUPLE_END_ADDRESS 0x18011167 @@ -1965,8 +2288,8 @@ static int concate_revision_bcm4359(dhd_bus_t *bus, char *fw_path, char *nv_path #define CIS_TUPLE_START_ADDRESS 0x18011120 #define CIS_TUPLE_END_ADDRESS 0x18011177 #endif /* defined(BCM4361_CHIP) */ -#define CIS_TUPLE_MAX_COUNT (CIS_TUPLE_END_ADDRESS - CIS_TUPLE_START_ADDRESS\ - + 1) / sizeof(uint32) +#define CIS_TUPLE_MAX_COUNT (uint32)((CIS_TUPLE_END_ADDRESS - CIS_TUPLE_START_ADDRESS\ + + 1) / sizeof(uint32)) #define CIS_TUPLE_TAG_START 0x80 #define CIS_TUPLE_TAG_VENDOR 0x81 #define CIS_TUPLE_TAG_BOARDTYPE 0x1b @@ -2033,14 +2356,21 @@ naming_info_t bcm4375_naming_table[] = { { {"e43_es33"}, {"_ES01_semco_b0"}, {"_b0"} }, { {"e43_es34"}, {"_ES02_semco_b0"}, {"_b0"} }, { {"e43_es35"}, {"_ES02_semco_b0"}, {"_b0"} }, + { {"e43_es36"}, {"_ES03_semco_b0"}, {"_b0"} }, { {"e43_cs41"}, {"_CS00_semco_b1"}, {"_b1"} }, - { {"e43_cs51"}, {"_CS00_semco_b1"}, {"_b1"} }, + { {"e43_cs51"}, {"_CS01_semco_b1"}, {"_b1"} }, + { {"e43_cs53"}, {"_CS01_semco_b1"}, {"_b1"} }, { {"e43_cs61"}, {"_CS00_skyworks_b1"}, {"_b1"} }, { {"1rh_es10"}, {"_1rh_es10_b0"}, {"_b0"} }, { {"1rh_es11"}, {"_1rh_es11_b0"}, {"_b0"} }, { {"1rh_es12"}, {"_1rh_es12_b0"}, {"_b0"} }, { {"1rh_es13"}, {"_1rh_es13_b0"}, {"_b0"} }, - { {"1rh_es20"}, {"_1rh_es20_b0"}, {"_b0"} } + { {"1rh_es20"}, {"_1rh_es20_b0"}, {"_b0"} }, + { {"1rh_es32"}, {"_1rh_es32_b0"}, {"_b0"} }, + { {"1rh_es41"}, {"_1rh_es41_b1"}, {"_b1"} }, + { {"1rh_es42"}, {"_1rh_es42_b1"}, {"_b1"} }, + { {"1rh_es43"}, {"_1rh_es43_b1"}, {"_b1"} }, + { {"1rh_es44"}, {"_1rh_es44_b1"}, {"_b1"} } }; static naming_info_t * @@ -2413,36 +2743,6 @@ dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, return ret; } -void -dhd_set_path_params(struct dhd_bus *bus) -{ - /* External conf takes 
precedence if specified */ - dhd_conf_preinit(bus->dhd); - - if (bus->dhd->clm_path[0] == '\0') { - dhd_conf_set_path(bus->dhd, "clm.blob", bus->dhd->clm_path, bus->fw_path); - } - dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path); - if (bus->dhd->conf_path[0] == '\0') { - dhd_conf_set_path(bus->dhd, "config.txt", bus->dhd->conf_path, bus->nv_path); - } -#ifdef CONFIG_PATH_AUTO_SELECT - dhd_conf_set_conf_name_by_chip(bus->dhd, bus->dhd->conf_path); -#endif - - dhd_conf_read_config(bus->dhd, bus->dhd->conf_path); - - dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path); - dhd_conf_set_nv_name_by_chip(bus->dhd, bus->nv_path); - dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path); - - printf("Final fw_path=%s\n", bus->fw_path); - printf("Final nv_path=%s\n", bus->nv_path); - printf("Final clm_path=%s\n", bus->dhd->clm_path); - printf("Final conf_path=%s\n", bus->dhd->conf_path); - -} - void dhd_set_bus_params(struct dhd_bus *bus) { @@ -2507,7 +2807,7 @@ dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh) DHD_OS_WAKE_LOCK(bus->dhd); - dhd_set_path_params(bus); + dhd_conf_set_path_params(bus->dhd, NULL, NULL, bus->fw_path, bus->nv_path); dhd_set_bus_params(bus); ret = _dhdpcie_download_firmware(bus); @@ -2529,11 +2829,6 @@ dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path) { int bcmerror = BCME_ERROR; int offset = 0; -#if defined(DHD_FW_MEM_CORRUPTION) - uint8 *p_org_fw = NULL; - uint32 org_fw_size = 0; - uint32 fw_write_offset = 0; -#endif /* DHD_FW_MEM_CORRUPTION */ int len = 0; bool store_reset; char *imgbuf = NULL; @@ -2542,6 +2837,20 @@ dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path) int offset_end = bus->ramsize; uint32 file_size = 0, read_len = 0; +#if defined(DHD_FW_MEM_CORRUPTION) + if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) { + dhd_tcm_test_enable = TRUE; + } else { + dhd_tcm_test_enable = FALSE; + } +#endif /* DHD_FW_MEM_CORRUPTION */ + DHD_ERROR(("%s: dhd_tcm_test_enable %u\n", __FUNCTION__, 
dhd_tcm_test_enable)); + /* TCM check */ + if (dhd_tcm_test_enable && !dhd_bus_tcm_test(bus)) { + DHD_ERROR(("dhd_bus_tcm_test failed\n")); + bcmerror = BCME_ERROR; + goto err; + } DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path)); /* Should succeed in opening image if it is actually given through registry @@ -2576,26 +2885,6 @@ dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path) memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN)); } -#if defined(DHD_FW_MEM_CORRUPTION) - if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) { - org_fw_size = file_size; -#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) - p_org_fw = (uint8*)DHD_OS_PREALLOC(bus->dhd, - DHD_PREALLOC_MEMDUMP_RAM, org_fw_size); -#else - p_org_fw = (uint8*)VMALLOC(bus->dhd->osh, org_fw_size); -#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ - if (p_org_fw == NULL) { - DHD_ERROR(("%s: Failed to allocate memory %d bytes for download check\n", - __FUNCTION__, org_fw_size)); - bcmerror = BCME_NOMEM; - goto err; - } else { - memset(p_org_fw, 0, org_fw_size); - } - } -#endif /* DHD_FW_MEM_CORRUPTION */ - /* check if CR4/CA7 */ store_reset = (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || si_setcore(bus->sih, ARMCA7_CORE_ID, 0)); @@ -2606,7 +2895,6 @@ dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path) bcmerror = BCME_ERROR; goto err; } - read_len += len; if (read_len > file_size) { DHD_ERROR(("%s: WARNING! 
reading beyond EOF, len=%d; read_len=%u;" @@ -2646,12 +2934,6 @@ dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path) DHD_INFO(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__)); } offset += MEMBLOCK; -#if defined(DHD_FW_MEM_CORRUPTION) - if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) { - memcpy((p_org_fw + fw_write_offset), memptr, len); - fw_write_offset += len; - } -#endif /* DHD_FW_MEM_CORRUPTION */ if (offset >= offset_end) { DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n", @@ -2664,70 +2946,7 @@ dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path) break; } } -#ifdef DHD_FW_MEM_CORRUPTION - /* Read and compare the downloaded code */ - if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) { - unsigned char *p_readback_buf = NULL; - uint32 compared_len; - uint32 remaining_len = 0; - - compared_len = 0; - p_readback_buf = MALLOC(bus->dhd->osh, MEMBLOCK); - if (p_readback_buf == NULL) { - DHD_ERROR(("%s: Failed to allocate memory %d bytes for readback buffer\n", - __FUNCTION__, MEMBLOCK)); - bcmerror = BCME_NOMEM; - goto compare_err; - } - /* Read image to verify downloaded contents. */ - offset = bus->dongle_ram_base; - - while (compared_len < org_fw_size) { - memset(p_readback_buf, DHD_MEMORY_SET_PATTERN, MEMBLOCK); - remaining_len = org_fw_size - compared_len; - - if (remaining_len >= MEMBLOCK) { - len = MEMBLOCK; - } else { - len = remaining_len; - } - bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset, - (uint8 *)p_readback_buf, len); - if (bcmerror) { - DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n", - __FUNCTION__, bcmerror, MEMBLOCK, offset)); - goto compare_err; - } - - if (memcmp((p_org_fw + compared_len), p_readback_buf, len) != 0) { - DHD_ERROR(("%s: Downloaded image is corrupted. 
offset %d\n", - __FUNCTION__, compared_len)); - bcmerror = BCME_ERROR; - goto compare_err; - } - - compared_len += len; - offset += len; - } - DHD_ERROR(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__)); - -compare_err: - if (p_readback_buf) { - MFREE(bus->dhd->osh, p_readback_buf, MEMBLOCK); - } - } -#endif /* DHD_FW_MEM_CORRUPTION */ - err: -#if defined(DHD_FW_MEM_CORRUPTION) - if (p_org_fw) { -#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) - DHD_OS_PREFREE(bus->dhd, p_org_fw, org_fw_size); -#else - VMFREE(bus->dhd->osh, p_org_fw, org_fw_size); -#endif // endif - } -#endif /* DHD_FW_MEM_CORRUPTION */ if (memblock) { MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN); if (dhd_msg_level & DHD_TRACE_VAL) { @@ -3228,14 +3447,14 @@ dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) if (mbuffer == NULL) { DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize)); bcmerror = BCME_NOMEM; - goto done; + goto done2; } } if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) { DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen)); bcmerror = BCME_NOMEM; - goto done; + goto done2; } DHD_GENERAL_LOCK(bus->dhd, flags); DHD_BUS_BUSY_SET_IN_CHECKDIED(bus->dhd); @@ -3245,7 +3464,7 @@ dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) dhd_bus_pcie_pwr_req(bus); } if ((bcmerror = dhdpcie_readshared(bus)) < 0) { - goto done; + goto done1; } bcm_binit(&strbuf, data, size); @@ -3276,7 +3495,7 @@ dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE, bus->pcie_sh->assert_exp_addr, (uint8 *)str, maxstrlen)) < 0) { - goto done; + goto done1; } str[maxstrlen - 1] = '\0'; @@ -3288,7 +3507,7 @@ dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE, bus->pcie_sh->assert_file_addr, (uint8 *)str, maxstrlen)) < 0) { - goto done; + goto done1; } str[maxstrlen - 1] = '\0'; @@ -3304,7 +3523,7 @@ dhdpcie_checkdied(dhd_bus_t 
*bus, char *data, uint size) if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE, bus->pcie_sh->trap_addr, (uint8*)tr, sizeof(trap_t))) < 0) { bus->dhd->dongle_trap_occured = TRUE; - goto done; + goto done1; } dhd_bus_dump_trap_info(bus, &strbuf); } @@ -3313,9 +3532,6 @@ dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) { DHD_FWLOG(("%s: %s\n", __FUNCTION__, strbuf.origbuf)); - /* wake up IOCTL wait event */ - dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP); - dhd_bus_dump_console_buffer(bus); dhd_prot_debug_info_print(bus->dhd); @@ -3323,9 +3539,7 @@ dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) /* save core dump or write to a file */ if (bus->dhd->memdump_enabled) { #ifdef DHD_SSSR_DUMP - if (bus->dhd->sssr_inited) { - dhdpcie_sssr_dump(bus->dhd); - } + bus->dhd->collect_sssr = TRUE; #endif /* DHD_SSSR_DUMP */ bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP; dhdpcie_mem_dump(bus); @@ -3339,19 +3553,23 @@ dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) bus->dhd->dongle_trap_occured = TRUE; } + /* wake up IOCTL wait event */ + dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP); + dhd_schedule_reset(bus->dhd); } +done1: + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear(bus); + } + DHD_GENERAL_LOCK(bus->dhd, flags); DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus->dhd); dhd_os_busbusy_wake(bus->dhd); DHD_GENERAL_UNLOCK(bus->dhd, flags); - -done: - if (MULTIBP_ENAB(bus->sih)) { - dhd_bus_pcie_pwr_req_clear(bus); - } +done2: if (mbuffer) MFREE(bus->dhd->osh, mbuffer, msize); if (str) @@ -3407,44 +3625,47 @@ void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf) #if defined(DHD_FW_COREDUMP) static int -dhdpcie_mem_dump(dhd_bus_t *bus) +dhdpcie_get_mem_dump(dhd_bus_t *bus) { - int ret = 0; - int size; /* Full mem size */ - int start = bus->dongle_ram_base; /* Start address */ + int ret = BCME_OK; + int size = 0; + int start = 0; int read_size = 0; /* Read size of each 
iteration */ - uint8 *buf = NULL, *databuf = NULL; + uint8 *p_buf = NULL, *databuf = NULL; -#ifdef EXYNOS_PCIE_DEBUG - exynos_pcie_register_dump(1); -#endif /* EXYNOS_PCIE_DEBUG */ + if (!bus) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } -#ifdef DHD_PCIE_NATIVE_RUNTIMEPM - if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) < 0) + if (!bus->dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); return BCME_ERROR; -#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + } + + size = bus->ramsize; /* Full mem size */ + start = bus->dongle_ram_base; /* Start address */ /* Get full mem size */ - size = bus->ramsize; - buf = dhd_get_fwdump_buf(bus->dhd, size); - if (!buf) { - DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size)); + p_buf = dhd_get_fwdump_buf(bus->dhd, size); + if (!p_buf) { + DHD_ERROR(("%s: Out of memory (%d bytes)\n", + __FUNCTION__, size)); return BCME_ERROR; } /* Read mem content */ DHD_TRACE_HW4(("Dump dongle memory\n")); - databuf = buf; - while (size) - { + databuf = p_buf; + while (size > 0) { read_size = MIN(MEMBLOCK, size); - if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) - { + ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size); + if (ret) { DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret)); #ifdef DHD_DEBUG_UART bus->dhd->memdump_success = FALSE; #endif /* DHD_DEBUG_UART */ - return BCME_ERROR; + break; } DHD_TRACE((".")); @@ -3453,11 +3674,44 @@ dhdpcie_mem_dump(dhd_bus_t *bus) start += read_size; databuf += read_size; } -#ifdef DHD_DEBUG_UART - bus->dhd->memdump_success = TRUE; -#endif /* DHD_DEBUG_UART */ - dhd_schedule_memdump(bus->dhd, buf, bus->ramsize); + return ret; +} + +static int +dhdpcie_mem_dump(dhd_bus_t *bus) +{ + dhd_pub_t *dhdp; + int ret; + +#ifdef EXYNOS_PCIE_DEBUG + exynos_pcie_register_dump(1); +#endif /* EXYNOS_PCIE_DEBUG */ + + dhdp = bus->dhd; + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + if 
(DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) { + DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__)); + return BCME_ERROR; + } + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) < 0) + return BCME_ERROR; +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + + ret = dhdpcie_get_mem_dump(bus); + if (ret) { + DHD_ERROR(("%s: failed to get mem dump, err=%d\n", + __FUNCTION__, ret)); + return ret; + } + + dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length); /* buf, actually soc_ram free handled in dhd_{free,clear} */ #ifdef DHD_PCIE_NATIVE_RUNTIMEPM @@ -3468,6 +3722,17 @@ dhdpcie_mem_dump(dhd_bus_t *bus) return ret; } +int +dhd_bus_get_mem_dump(dhd_pub_t *dhdp) +{ + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + return dhdpcie_get_mem_dump(dhdp->bus); +} + int dhd_bus_mem_dump(dhd_pub_t *dhdp) { @@ -3479,7 +3744,10 @@ dhd_bus_mem_dump(dhd_pub_t *dhdp) return BCME_ERROR; } - if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) { + /* Try to resume if already suspended or suspend in progress */ + + /* Skip if still in suspended or suspend in progress */ + if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhdp)) { DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n", __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state)); return BCME_ERROR; @@ -3490,25 +3758,6 @@ dhd_bus_mem_dump(dhd_pub_t *dhdp) DHD_OS_WAKE_UNLOCK(dhdp); return ret; } - -int -dhd_dongle_mem_dump(void) -{ - if (!g_dhd_bus) { - DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__)); - return -ENODEV; - } - - dhd_bus_dump_console_buffer(g_dhd_bus); - dhd_prot_debug_info_print(g_dhd_bus->dhd); - - g_dhd_bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON; - g_dhd_bus->dhd->memdump_type = DUMP_TYPE_AP_ABNORMAL_ACCESS; - - dhd_bus_mem_dump(g_dhd_bus->dhd); - return 0; -} -EXPORT_SYMBOL(dhd_dongle_mem_dump); #endif /* DHD_FW_COREDUMP */ int @@ -3669,7 +3918,8 @@ dhd_bus_schedule_queue(struct 
dhd_bus *bus, uint16 flow_id, bool txs) } while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { - PKTORPHAN(txp, bus->dhd->conf->tsq); + if (bus->dhd->conf->orphan_move <= 1) + PKTORPHAN(txp, bus->dhd->conf->tsq); /* * Modifying the packet length caused P2P cert failures. @@ -3713,8 +3963,13 @@ dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs) } } +#ifdef DHD_HP2P + if (!flow_ring_node->hp2p_ring) { + dhd_prot_txdata_write_flush(bus->dhd, flow_id); + } +#else dhd_prot_txdata_write_flush(bus->dhd, flow_id); - +#endif // endif DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); } @@ -3854,6 +4109,19 @@ dhd_bus_stop_queue(struct dhd_bus *bus) void dhd_bus_start_queue(struct dhd_bus *bus) { + /* + * Tx queue has been stopped due to resource shortage (or) + * bus is not in a state to turn on. + * + * Note that we try to re-start network interface only + * when we have enough resources, one has to first change the + * flag indicating we have all the resources. + */ + if (dhd_prot_check_tx_resource(bus->dhd)) { + DHD_ERROR(("%s: Interface NOT started, previously stopped " + "due to resource shortage\n", __FUNCTION__)); + return; + } dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF); } @@ -3905,36 +4173,67 @@ dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count) dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0); } +void +dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr) +{ + dhdpcie_os_setbar1win(bus, addr); +} + /** 'offset' is a backplane address */ void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data) { - W_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset), data); + if (bus->is_linkdown) { + DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } else { + dhdpcie_os_wtcm8(bus, offset, data); + } } uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset) { volatile uint8 data; - data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset)); + if (bus->is_linkdown) { + DHD_LOG_MEM(("%s: 
PCIe link was down\n", __FUNCTION__)); + data = (uint8)-1; + } else { + data = dhdpcie_os_rtcm8(bus, offset); + } return data; } void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data) { - W_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset), data); + if (bus->is_linkdown) { + DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } else { + dhdpcie_os_wtcm32(bus, offset, data); + } } void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data) { - W_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset), data); + if (bus->is_linkdown) { + DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } else { + dhdpcie_os_wtcm16(bus, offset, data); + } } #ifdef DHD_SUPPORT_64BIT void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data) { - W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data); + if (bus->is_linkdown) { + DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } else { + dhdpcie_os_wtcm64(bus, offset, data); + } } #endif /* DHD_SUPPORT_64BIT */ @@ -3942,7 +4241,12 @@ uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset) { volatile uint16 data; - data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset)); + if (bus->is_linkdown) { + DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__)); + data = (uint16)-1; + } else { + data = dhdpcie_os_rtcm16(bus, offset); + } return data; } @@ -3950,7 +4254,12 @@ uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset) { volatile uint32 data; - data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset)); + if (bus->is_linkdown) { + DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__)); + data = (uint32)-1; + } else { + data = dhdpcie_os_rtcm32(bus, offset); + } return data; } @@ -3959,7 +4268,12 @@ uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset) { volatile uint64 data; - data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset)); + if (bus->is_linkdown) { + DHD_LOG_MEM(("%s: PCIe link was 
down\n", __FUNCTION__)); + data = (uint64)-1; + } else { + data = dhdpcie_os_rtcm64(bus, offset); + } return data; } #endif /* DHD_SUPPORT_64BIT */ @@ -4212,15 +4526,19 @@ dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__, name, (set ? "set" : "get"), len, plen)); - if (MULTIBP_ENAB(bus->sih)) { - dhd_bus_pcie_pwr_req(bus); - } - /* Look up var locally; if not found pass to host driver */ if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) { goto exit; } + if (MULTIBP_ENAB(bus->sih)) { + if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) { + DHD_ERROR(("%s: Bypass pwr request\n", __FUNCTION__)); + } else { + dhd_bus_pcie_pwr_req(bus); + } + } + /* set up 'params' pointer in case this is a set command so that * the convenience int and bool code can be common to set and get */ @@ -4250,7 +4568,11 @@ exit: bcmerror = BCME_OK; } else { if (MULTIBP_ENAB(bus->sih)) { - dhd_bus_pcie_pwr_req_clear(bus); + if (vi && (vi->flags & DHD_IOVF_PWRREQ_BYPASS)) { + DHD_ERROR(("%s: Bypass pwr request clear\n", __FUNCTION__)); + } else { + dhd_bus_pcie_pwr_req_clear(bus); + } } } return bcmerror; @@ -4562,24 +4884,84 @@ done: #define DHD_FUNCTION_LEVEL_RESET_DELAY 300000u #define DHD_SSRESET_STATUS_RETRY_DELAY 10000u #else -#define DHD_FUNCTION_LEVEL_RESET_DELAY 55u /* 55 msec delay */ +#define DHD_FUNCTION_LEVEL_RESET_DELAY 70u /* 70 msec delay */ #define DHD_SSRESET_STATUS_RETRY_DELAY 40u #endif // endif -#define DHD_SSRESET_STATUS_RETRIES 50u +/* + * Increase SSReset de-assert time to 8ms. + * since it takes longer time if re-scan time on 4378B0. 
+ */ +#define DHD_SSRESET_STATUS_RETRIES 200u + +static void +dhdpcie_enum_reg_init(dhd_bus_t *bus) +{ + /* initialize Function control register (clear bit 4) to HW init value */ + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.control), ~0, + PCIE_CPLCA_ENABLE | PCIE_DLY_PERST_TO_COE); + + /* clear IntMask */ + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.intmask), ~0, 0); + /* clear IntStatus */ + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), ~0, + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), 0, 0)); + + /* clear MSIVector */ + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_vector), ~0, 0); + /* clear MSIIntMask */ + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intmask), ~0, 0); + /* clear MSIIntStatus */ + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), ~0, + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), 0, 0)); + + /* clear PowerIntMask */ + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intmask), ~0, 0); + /* clear PowerIntStatus */ + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), ~0, + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), 0, 0)); + + /* clear MailboxIntMask */ + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intmask), ~0, 0); + /* clear MailboxInt */ + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), ~0, + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), 0, 0)); +} int dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail) { - bool flr_capab; + uint flr_capab; uint val; int retry = 0; 
DHD_ERROR(("******** Perform FLR ********\n")); + if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) { + if (bus->pcie_mailbox_mask != 0) { + dhdpcie_bus_intr_disable(bus); + } + /* initialize F0 enum registers before FLR for rev66/67 */ + dhdpcie_enum_reg_init(bus); + } + /* Read PCIE_CFG_DEVICE_CAPABILITY bit 28 to check FLR capability */ val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CAPABILITY, sizeof(val)); flr_capab = val & (1 << PCIE_FLR_CAPAB_BIT); - DHD_ERROR(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n", + DHD_INFO(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n", PCIE_CFG_DEVICE_CAPABILITY, val, flr_capab)); if (!flr_capab) { DHD_ERROR(("Chip does not support FLR\n")); @@ -4587,20 +4969,20 @@ dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail) } /* Save pcie config space */ - DHD_ERROR(("Save Pcie Config Space\n")); + DHD_INFO(("Save Pcie Config Space\n")); DHD_PCIE_CONFIG_SAVE(bus); /* Set bit 15 of PCIE_CFG_DEVICE_CONTROL */ - DHD_ERROR(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n", + DHD_INFO(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n", PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL)); val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val)); - DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val)); + DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val)); val = val | (1 << PCIE_FUNCTION_LEVEL_RESET_BIT); - DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val)); + DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val)); OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val); /* wait for DHD_FUNCTION_LEVEL_RESET_DELAY msec */ - DHD_ERROR(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY)); + DHD_INFO(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY)); 
OSL_DELAY(DHD_FUNCTION_LEVEL_RESET_DELAY * 1000u); if (force_fail) { @@ -4620,16 +5002,16 @@ dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail) } /* Clear bit 15 of PCIE_CFG_DEVICE_CONTROL */ - DHD_ERROR(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n", + DHD_INFO(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n", PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL)); val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val)); - DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val)); + DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val)); val = val & ~(1 << PCIE_FUNCTION_LEVEL_RESET_BIT); - DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val)); + DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val)); OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val); /* Wait till bit 13 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */ - DHD_ERROR(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)" + DHD_INFO(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)" "is cleared\n", PCIE_SSRESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL)); do { val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val)); @@ -4647,11 +5029,11 @@ dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail) bus->flr_force_fail = FALSE; DHD_ERROR(("%s cleared flr_force_fail flag\n", __FUNCTION__)); } - return BCME_ERROR; + return BCME_DONGLE_DOWN; } /* Restore pcie config space */ - DHD_ERROR(("Restore Pcie Config Space\n")); + DHD_INFO(("Restore Pcie Config Space\n")); DHD_PCIE_CONFIG_RESTORE(bus); DHD_ERROR(("******** FLR Succedeed ********\n")); @@ -4771,7 +5153,7 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) if (flag == TRUE) { /* Turn off WLAN */ /* Removing Power */ DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__)); - + DHD_ERROR(("%s: 
making dhdpub up FALSE\n", __FUNCTION__)); bus->dhd->up = FALSE; /* wait for other contexts to finish -- if required a call @@ -4800,6 +5182,7 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) dhdpcie_free_irq(bus); } dhd_deinit_bus_lock(bus); + dhd_deinit_backplane_access_lock(bus); dhd_bus_release_dongle(bus); dhdpcie_bus_free_resource(bus); bcmerror = dhdpcie_bus_disable_device(bus); @@ -4835,6 +5218,7 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) } #endif /* CONFIG_ARCH_MSM */ DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); bus->dhd->busstate = DHD_BUS_DOWN; DHD_GENERAL_UNLOCK(bus->dhd, flags); #ifdef DHD_PCIE_NATIVE_RUNTIMEPM @@ -4910,8 +5294,12 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) __FUNCTION__, bcmerror)); goto done; } +#if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON) + dhd_bus_aspm_enable_rc_ep(bus, FALSE); +#endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */ #endif /* CONFIG_ARCH_MSM */ bus->is_linkdown = 0; + bus->cto_triggered = 0; bcmerror = dhdpcie_bus_enable_device(bus); if (bcmerror) { DHD_ERROR(("%s: host configuration restore failed: %d\n", @@ -4942,6 +5330,10 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) bus->dhd->dongle_reset = FALSE; +#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON) + dhd_irq_set_affinity(bus->dhd, cpumask_of(1)); +#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */ + bcmerror = dhd_bus_start(dhdp); if (bcmerror) { DHD_ERROR(("%s: dhd_bus_start: %d\n", @@ -4966,6 +5358,7 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) done: if (bcmerror) { DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); bus->dhd->busstate = DHD_BUS_DOWN; DHD_GENERAL_UNLOCK(bus->dhd, flags); } @@ -4992,9 +5385,10 @@ static uint serialized_backplane_access(dhd_bus_t *bus, uint addr, uint size, uint *val, bool read) { uint ret; - dhd_pcie_backplane_access_lock(bus->dhd); + unsigned long flags; + DHD_BACKPLANE_ACCESS_LOCK(bus->backplane_access_lock, flags); ret = 
si_backplane_access(bus->sih, addr, size, val, read); - dhd_pcie_backplane_access_unlock(bus->dhd); + DHD_BACKPLANE_ACCESS_UNLOCK(bus->backplane_access_lock, flags); return ret; } @@ -5085,36 +5479,37 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons break; case IOV_SVAL(IOV_PCIE_DMAXFER): { - int int_val4 = 0; - int wait = 0; - int core_num = 0; - if (plen >= (int)sizeof(int_val) * 4) { - bcopy((void*)((uintptr)params + 3 * sizeof(int_val)), - &int_val4, sizeof(int_val4)); - } - if (plen >= (int)sizeof(int_val) * 5) { - bcopy((void*)((uintptr)params + 4 * sizeof(int_val)), - &wait, sizeof(wait)); - } - if (plen >= (int)sizeof(core_num) * 6) { - bcopy((void*)((uintptr)params + 5 * sizeof(core_num)), - &core_num, sizeof(core_num)); - } - bcmerror = dhdpcie_bus_dmaxfer_req(bus, int_val, int_val2, int_val3, - int_val4, core_num, wait); - if (wait && bcmerror >= 0) { - /* get the status of the dma transfer */ - int_val4 = dhdmsgbuf_dmaxfer_status(bus->dhd); - bcopy(&int_val4, params, sizeof(int_val)); + dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)arg; + + if (!dmaxfer) + return BCME_BADARG; + if (dmaxfer->version != DHD_DMAXFER_VERSION) + return BCME_VERSION; + if (dmaxfer->length != sizeof(dma_xfer_info_t)) { + return BCME_BADLEN; + } + + bcmerror = dhdpcie_bus_dmaxfer_req(bus, dmaxfer->num_bytes, + dmaxfer->src_delay, dmaxfer->dest_delay, + dmaxfer->type, dmaxfer->core_num, + dmaxfer->should_wait); + + if (dmaxfer->should_wait && bcmerror >= 0) { + bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer); } break; } case IOV_GVAL(IOV_PCIE_DMAXFER): { - int dma_status = 0; - dma_status = dhdmsgbuf_dmaxfer_status(bus->dhd); - bcopy(&dma_status, arg, val_size); - bcmerror = BCME_OK; + dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)params; + if (!dmaxfer) + return BCME_BADARG; + if (dmaxfer->version != DHD_DMAXFER_VERSION) + return BCME_VERSION; + if (dmaxfer->length != sizeof(dma_xfer_info_t)) { + return BCME_BADLEN; + } + bcmerror = 
dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer); break; } @@ -5515,30 +5910,7 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons break; case IOV_SVAL(IOV_CTO_PREVENTION): - { - uint32 pcie_lnkst; - - if (bus->sih->buscorerev < 19) { - bcmerror = BCME_UNSUPPORTED; - break; - } - si_corereg(bus->sih, bus->sih->buscoreidx, - OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_LINK_STATUS); - - pcie_lnkst = si_corereg(bus->sih, bus->sih->buscoreidx, - OFFSETOF(sbpcieregs_t, configdata), 0, 0); - - if ((bus->sih->buscorerev == 19) && - (((pcie_lnkst >> PCI_LINK_SPEED_SHIFT) & - PCI_LINK_SPEED_MASK) == PCIE_LNK_SPEED_GEN1)) { - bcmerror = BCME_UNSUPPORTED; - break; - } - bus->cto_enable = bool_val; - dhdpcie_cto_init(bus, bus->cto_enable); - DHD_ERROR(("%s: set CTO prevention and recovery enable/disable %d\n", - __FUNCTION__, bus->cto_enable)); - } + bcmerror = dhdpcie_cto_init(bus, bool_val); break; case IOV_GVAL(IOV_CTO_PREVENTION): @@ -5580,6 +5952,13 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons } break; + case IOV_GVAL(IOV_HWA_ENAB_BMAP): + int_val = bus->hwa_enab_bmap; + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_HWA_ENAB_BMAP): + bus->hwa_enab_bmap = (uint8)int_val; + break; case IOV_GVAL(IOV_IDMA_ENABLE): int_val = bus->idma_enabled; bcopy(&int_val, arg, val_size); @@ -5606,10 +5985,96 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons bus->dar_enabled = (bool)int_val; break; case IOV_GVAL(IOV_HSCBSIZE): - dhd_get_hscb_info(bus->dhd->prot, NULL, (uint32 *)arg); + bcmerror = dhd_get_hscb_info(bus->dhd, NULL, (uint32 *)arg); + break; + +#ifdef DHD_HP2P + case IOV_SVAL(IOV_HP2P_ENABLE): + dhd_prot_hp2p_enable(bus->dhd, TRUE, int_val); break; - case IOV_GVAL(IOV_HSCBBYTES): - dhd_get_hscb_buff(bus->dhd->prot, int_val, int_val2, (void*)arg); + + case IOV_GVAL(IOV_HP2P_ENABLE): + int_val = dhd_prot_hp2p_enable(bus->dhd, FALSE, int_val); + bcopy(&int_val, arg, 
val_size); + break; + + case IOV_SVAL(IOV_HP2P_PKT_THRESHOLD): + dhd_prot_pkt_threshold(bus->dhd, TRUE, int_val); + break; + + case IOV_GVAL(IOV_HP2P_PKT_THRESHOLD): + int_val = dhd_prot_pkt_threshold(bus->dhd, FALSE, int_val); + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_HP2P_TIME_THRESHOLD): + dhd_prot_time_threshold(bus->dhd, TRUE, int_val); + break; + + case IOV_GVAL(IOV_HP2P_TIME_THRESHOLD): + int_val = dhd_prot_time_threshold(bus->dhd, FALSE, int_val); + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_HP2P_PKT_EXPIRY): + dhd_prot_pkt_expiry(bus->dhd, TRUE, int_val); + break; + + case IOV_GVAL(IOV_HP2P_PKT_EXPIRY): + int_val = dhd_prot_pkt_expiry(bus->dhd, FALSE, int_val); + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_HP2P_TXCPL_MAXITEMS): + if (bus->dhd->busstate != DHD_BUS_DOWN) { + return BCME_NOTDOWN; + } + dhd_bus_set_hp2p_ring_max_size(bus, TRUE, int_val); + break; + + case IOV_GVAL(IOV_HP2P_TXCPL_MAXITEMS): + int_val = dhd_bus_get_hp2p_ring_max_size(bus, TRUE); + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_HP2P_RXCPL_MAXITEMS): + if (bus->dhd->busstate != DHD_BUS_DOWN) { + return BCME_NOTDOWN; + } + dhd_bus_set_hp2p_ring_max_size(bus, FALSE, int_val); + break; + + case IOV_GVAL(IOV_HP2P_RXCPL_MAXITEMS): + int_val = dhd_bus_get_hp2p_ring_max_size(bus, FALSE); + bcopy(&int_val, arg, val_size); + break; +#endif /* DHD_HP2P */ + case IOV_SVAL(IOV_EXTDTXS_IN_TXCPL): + if (bus->dhd->busstate != DHD_BUS_DOWN) { + return BCME_NOTDOWN; + } + if (int_val) + bus->dhd->extdtxs_in_txcpl = TRUE; + else + bus->dhd->extdtxs_in_txcpl = FALSE; + break; + + case IOV_GVAL(IOV_EXTDTXS_IN_TXCPL): + int_val = bus->dhd->extdtxs_in_txcpl; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_HOSTRDY_AFTER_INIT): + if (bus->dhd->busstate != DHD_BUS_DOWN) { + return BCME_NOTDOWN; + } + if (int_val) + bus->dhd->hostrdy_after_init = TRUE; + else + bus->dhd->hostrdy_after_init = FALSE; + break; + + case 
IOV_GVAL(IOV_HOSTRDY_AFTER_INIT): + int_val = bus->dhd->hostrdy_after_init; + bcopy(&int_val, arg, val_size); break; default: @@ -5641,6 +6106,48 @@ dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 len) return 0; } +void +dhd_bus_dump_dar_registers(struct dhd_bus *bus) +{ + uint32 dar_clk_ctrl_val, dar_pwr_ctrl_val, dar_intstat_val, + dar_errlog_val, dar_erraddr_val, dar_pcie_mbint_val; + uint32 dar_clk_ctrl_reg, dar_pwr_ctrl_reg, dar_intstat_reg, + dar_errlog_reg, dar_erraddr_reg, dar_pcie_mbint_reg; + + if (bus->is_linkdown && !bus->cto_triggered) { + DHD_ERROR(("%s: link is down\n", __FUNCTION__)); + return; + } + + dar_clk_ctrl_reg = (uint32)DAR_CLK_CTRL(bus->sih->buscorerev); + dar_pwr_ctrl_reg = (uint32)DAR_PCIE_PWR_CTRL(bus->sih->buscorerev); + dar_intstat_reg = (uint32)DAR_INTSTAT(bus->sih->buscorerev); + dar_errlog_reg = (uint32)DAR_ERRLOG(bus->sih->buscorerev); + dar_erraddr_reg = (uint32)DAR_ERRADDR(bus->sih->buscorerev); + dar_pcie_mbint_reg = (uint32)DAR_PCIMailBoxInt(bus->sih->buscorerev); + + if (bus->sih->buscorerev < 24) { + DHD_ERROR(("%s: DAR not supported for corerev(%d) < 24\n", + __FUNCTION__, bus->sih->buscorerev)); + return; + } + + dar_clk_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_clk_ctrl_reg, 0, 0); + dar_pwr_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pwr_ctrl_reg, 0, 0); + dar_intstat_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_intstat_reg, 0, 0); + dar_errlog_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_errlog_reg, 0, 0); + dar_erraddr_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_erraddr_reg, 0, 0); + dar_pcie_mbint_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pcie_mbint_reg, 0, 0); + + DHD_ERROR(("%s: dar_clk_ctrl(0x%x:0x%x) dar_pwr_ctrl(0x%x:0x%x) dar_intstat(0x%x:0x%x)\n", + __FUNCTION__, dar_clk_ctrl_reg, dar_clk_ctrl_val, + dar_pwr_ctrl_reg, dar_pwr_ctrl_val, dar_intstat_reg, dar_intstat_val)); + + DHD_ERROR(("%s: dar_errlog(0x%x:0x%x) dar_erraddr(0x%x:0x%x) 
dar_pcie_mbint(0x%x:0x%x)\n", + __FUNCTION__, dar_errlog_reg, dar_errlog_val, + dar_erraddr_reg, dar_erraddr_val, dar_pcie_mbint_reg, dar_pcie_mbint_val)); +} + /* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */ void dhd_bus_hostready(struct dhd_bus *bus) @@ -5654,14 +6161,18 @@ dhd_bus_hostready(struct dhd_bus *bus) return; } - DHD_INFO_HW4(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__, + DHD_ERROR(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__, dhd_pcie_config_read(bus->osh, PCI_CFG_CMD, sizeof(uint32)))); + if (DAR_PWRREQ(bus)) { dhd_bus_pcie_pwr_req(bus); } + + dhd_bus_dump_dar_registers(bus); + si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus), ~0, 0x12345678); bus->hostready_count ++; - DHD_INFO_HW4(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count)); + DHD_ERROR(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count)); } /* Clear INTSTATUS */ @@ -5763,7 +6274,7 @@ dhdpcie_bus_suspend(struct dhd_bus *bus, bool state) return -EBUSY; } - bus->last_suspend_start_time = OSL_SYSUPTIME_US(); + bus->last_suspend_start_time = OSL_LOCALTIME_NS(); /* stop all interface network queue. 
*/ dhd_bus_stop_queue(bus); @@ -5790,7 +6301,6 @@ dhdpcie_bus_suspend(struct dhd_bus *bus, bool state) } #else DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); - /* Clear wait_for_d3_ack before sending D3_INFORM */ bus->wait_for_d3_ack = 0; /* @@ -5818,7 +6328,12 @@ dhdpcie_bus_suspend(struct dhd_bus *bus, bool state) __FUNCTION__, intstatus, host_irq_disabled)); dhd_pcie_intr_count_dump(bus->dhd); dhd_print_tasklet_status(bus->dhd); - dhd_prot_process_ctrlbuf(bus->dhd); + if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && + !bus->use_mailbox) { + dhd_prot_process_ctrlbuf(bus->dhd); + } else { + dhdpcie_handle_mb_data(bus); + } timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack); /* Clear Interrupts */ dhdpcie_bus_clear_intstatus(bus); @@ -5839,7 +6354,6 @@ dhdpcie_bus_suspend(struct dhd_bus *bus, bool state) if (bus->wait_for_d3_ack) { DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__)); - /* Got D3 Ack. Suspend the bus */ if (active) { DHD_ERROR(("%s():Suspend failed because of wakelock" @@ -5914,7 +6428,11 @@ dhdpcie_bus_suspend(struct dhd_bus *bus, bool state) } #if defined(BCMPCIE_OOB_HOST_WAKE) - dhdpcie_oob_intr_set(bus, TRUE); + if (bus->dhd->dhd_induce_error == DHD_INDUCE_DROP_OOB_IRQ) { + DHD_ERROR(("%s: Inducing DROP OOB IRQ\n", __FUNCTION__)); + } else { + dhdpcie_oob_intr_set(bus, TRUE); + } #endif /* BCMPCIE_OOB_HOST_WAKE */ DHD_GENERAL_LOCK(bus->dhd, flags); @@ -5930,27 +6448,36 @@ dhdpcie_bus_suspend(struct dhd_bus *bus, bool state) bus->dhd->d3ackcnt_timeout = 0; bus->dhd->busstate = DHD_BUS_SUSPEND; DHD_GENERAL_UNLOCK(bus->dhd, flags); - DHD_ERROR(("%s: BaseAddress0(0x%x)=0x%x, " - "BaseAddress1(0x%x)=0x%x\n", __FUNCTION__, - PCIECFGREG_BASEADDR0, - dhd_pcie_config_read(bus->osh, - PCIECFGREG_BASEADDR0, sizeof(uint32)), - PCIECFGREG_BASEADDR1, - dhd_pcie_config_read(bus->osh, - PCIECFGREG_BASEADDR1, sizeof(uint32)))); dhdpcie_dump_resource(bus); /* Handle Host Suspend */ rc = dhdpcie_pci_suspend_resume(bus, state); if (!rc) { - bus->last_suspend_end_time = 
OSL_SYSUPTIME_US(); + bus->last_suspend_end_time = OSL_LOCALTIME_NS(); } } } else if (timeleft == 0) { /* D3 ACK Timeout */ +#ifdef DHD_FW_COREDUMP + uint32 cur_memdump_mode = bus->dhd->memdump_enabled; +#endif /* DHD_FW_COREDUMP */ + + /* check if the D3 ACK timeout due to scheduling issue */ + bus->dhd->is_sched_error = !dhd_query_bus_erros(bus->dhd) && + bus->isr_entry_time > bus->last_d3_inform_time && + dhd_bus_query_dpc_sched_errors(bus->dhd); bus->dhd->d3ack_timeout_occured = TRUE; /* If the D3 Ack has timeout */ bus->dhd->d3ackcnt_timeout++; - DHD_ERROR(("%s: resumed on timeout for D3 ACK d3_inform_cnt %d \n", - __FUNCTION__, bus->dhd->d3ackcnt_timeout)); + DHD_ERROR(("%s: resumed on timeout for D3 ACK%s d3_inform_cnt %d\n", + __FUNCTION__, bus->dhd->is_sched_error ? + " due to scheduling problem" : "", bus->dhd->d3ackcnt_timeout)); +#if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP) + if (bus->dhd->is_sched_error && cur_memdump_mode == DUMP_MEMFILE_BUGON) { + /* change g_assert_type to trigger Kernel panic */ + g_assert_type = 2; + /* use ASSERT() to trigger panic */ + ASSERT(0); + } +#endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */ DHD_BUS_LOCK(bus->bus_lock, flags_bus); bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE; DHD_BUS_UNLOCK(bus->bus_lock, flags_bus); @@ -5959,7 +6486,9 @@ dhdpcie_bus_suspend(struct dhd_bus *bus, bool state) /* resume all interface network queue. 
*/ dhd_bus_start_queue(bus); DHD_GENERAL_UNLOCK(bus->dhd, flags); - if (!bus->dhd->dongle_trap_occured) { + if (!bus->dhd->dongle_trap_occured && + !bus->is_linkdown && + !bus->cto_triggered) { uint32 intstatus = 0; /* Check if PCIe bus status is valid */ @@ -5973,7 +6502,7 @@ dhdpcie_bus_suspend(struct dhd_bus *bus, bool state) dhd_bus_dump_console_buffer(bus); dhd_prot_debug_info_print(bus->dhd); #ifdef DHD_FW_COREDUMP - if (bus->dhd->memdump_enabled) { + if (cur_memdump_mode) { /* write core dump to file */ bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT; dhdpcie_mem_dump(bus); @@ -5992,7 +6521,7 @@ dhdpcie_bus_suspend(struct dhd_bus *bus, bool state) } else { /* Resume */ DHD_ERROR(("%s: Entering resume state\n", __FUNCTION__)); - bus->last_resume_start_time = OSL_SYSUPTIME_US(); + bus->last_resume_start_time = OSL_LOCALTIME_NS(); /** * PCIE2_BAR0_CORE2_WIN gets reset after D3 cold. @@ -6009,11 +6538,6 @@ dhdpcie_bus_suspend(struct dhd_bus *bus, bool state) DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd); #endif /* BCMPCIE_OOB_HOST_WAKE */ rc = dhdpcie_pci_suspend_resume(bus, state); - DHD_ERROR(("%s: BaseAddress0(0x%x)=0x%x, BaseAddress1(0x%x)=0x%x\n", - __FUNCTION__, PCIECFGREG_BASEADDR0, - dhd_pcie_config_read(bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)), - PCIECFGREG_BASEADDR1, - dhd_pcie_config_read(bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32)))); dhdpcie_dump_resource(bus); DHD_BUS_LOCK(bus->bus_lock, flags_bus); @@ -6053,8 +6577,8 @@ dhdpcie_bus_suspend(struct dhd_bus *bus, bool state) dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup); } - bus->last_resume_end_time = OSL_SYSUPTIME_US(); - /* pro-actively update TCM rd index for EDL ring */ + bus->last_resume_end_time = OSL_LOCALTIME_NS(); + /* Update TCM rd index for EDL ring */ DHD_EDL_RING_TCM_RD_UPDATE(bus->dhd); } return rc; @@ -6100,6 +6624,7 @@ dhd_apply_d11_war_length(struct dhd_bus *bus, uint32 len, uint32 d11_lpbk) if ((chipid == BCM4375_CHIP_ID || chipid == BCM4362_CHIP_ID || chipid == 
BCM43751_CHIP_ID || + chipid == BCM43752_CHIP_ID || chipid == BCM4377_CHIP_ID) && (d11_lpbk != M2M_DMA_LPBK && d11_lpbk != M2M_NON_DMA_LPBK)) { len += 8; @@ -6152,22 +6677,41 @@ dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus, } +bool +dhd_bus_is_multibp_capable(struct dhd_bus *bus) +{ + return MULTIBP_CAP(bus->sih); +} + +#define PCIE_REV_FOR_4378A0 66 /* dhd_bus_perform_flr_with_quiesce() causes problems */ +#define PCIE_REV_FOR_4378B0 68 + static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter) { int bcmerror = 0; volatile uint32 *cr4_regs; + bool do_flr; if (!bus->sih) { DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__)); return BCME_ERROR; } + + do_flr = ((bus->sih->buscorerev != PCIE_REV_FOR_4378A0) && + (bus->sih->buscorerev != PCIE_REV_FOR_4378B0)); + + if (MULTIBP_ENAB(bus->sih) && !do_flr) { + dhd_bus_pcie_pwr_req(bus); + } + /* To enter download state, disable ARM and reset SOCRAM. * To exit download state, simply reset ARM (default is RAM boot). */ if (enter) { + /* Make sure BAR1 maps to backplane address 0 */ - dhdpcie_bus_cfg_write_dword(bus, PCI_BAR1_WIN, 4, 0x00000000); + dhdpcie_setbar1win(bus, 0x00000000); bus->alp_only = TRUE; /* some chips (e.g. 43602) have two ARM cores, the CR4 is receives the firmware. 
*/ @@ -6356,6 +6900,10 @@ fail: /* Always return to PCIE core */ si_setcore(bus->sih, PCIE2_CORE_ID, 0); + if (MULTIBP_ENAB(bus->sih) && !do_flr) { + dhd_bus_pcie_pwr_req_clear(bus); + } + return bcmerror; } /* dhdpcie_bus_download_state */ @@ -6683,45 +7231,76 @@ dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val) void dhd_dump_intr_counters(dhd_pub_t *dhd, struct bcmstrbuf *strbuf) { + dhd_bus_t *bus; + uint64 current_time = OSL_LOCALTIME_NS(); + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + bus = dhd->bus; + if (!bus) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return; + } + bcm_bprintf(strbuf, "\n ------- DUMPING INTR enable/disable counters-------\n"); bcm_bprintf(strbuf, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n" "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n" - "dpc_return_busdown_count=%lu\n", - dhd->bus->resume_intr_enable_count, dhd->bus->dpc_intr_enable_count, - dhd->bus->isr_intr_disable_count, dhd->bus->suspend_intr_disable_count, - dhd->bus->dpc_return_busdown_count); + "dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n", + bus->resume_intr_enable_count, bus->dpc_intr_enable_count, + bus->isr_intr_disable_count, bus->suspend_intr_disable_count, + bus->dpc_return_busdown_count, bus->non_ours_irq_count); #ifdef BCMPCIE_OOB_HOST_WAKE bcm_bprintf(strbuf, "oob_intr_count=%lu oob_intr_enable_count=%lu" - " oob_intr_disable_count=%lu\n oob_irq_num=%d last_oob_irq_time=%llu\n", - dhd->bus->oob_intr_count, dhd->bus->oob_intr_enable_count, - dhd->bus->oob_intr_disable_count, dhdpcie_get_oob_irq_num(dhd->bus), - dhd->bus->last_oob_irq_time); + " oob_intr_disable_count=%lu\noob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT + " last_oob_irq_enable_time="SEC_USEC_FMT"\nlast_oob_irq_disable_time="SEC_USEC_FMT + " oob_irq_enabled=%d oob_gpio_level=%d\n", + bus->oob_intr_count, bus->oob_intr_enable_count, + bus->oob_intr_disable_count, dhdpcie_get_oob_irq_num(bus), + 
GET_SEC_USEC(bus->last_oob_irq_time), GET_SEC_USEC(bus->last_oob_irq_enable_time), + GET_SEC_USEC(bus->last_oob_irq_disable_time), dhdpcie_get_oob_irq_status(bus), + dhdpcie_get_oob_irq_level()); #endif /* BCMPCIE_OOB_HOST_WAKE */ - bcm_bprintf(strbuf, "\ncurrent_time=%llu isr_entry_time=%llu isr_exit_time=%llu\n" - "dpc_entry_time=%llu last_process_ctrlbuf_time=%llu " - "last_process_flowring_time=%llu last_process_txcpl_time=%llu\n" - "last_process_rxcpl_time=%llu last_process_infocpl_time=%llu " - "dpc_exit_time=%llu resched_dpc_time=%llu\n", - OSL_SYSUPTIME_US(), dhd->bus->isr_entry_time, dhd->bus->isr_exit_time, - dhd->bus->dpc_entry_time, dhd->bus->last_process_ctrlbuf_time, - dhd->bus->last_process_flowring_time, dhd->bus->last_process_txcpl_time, - dhd->bus->last_process_rxcpl_time, dhd->bus->last_process_infocpl_time, - dhd->bus->dpc_exit_time, dhd->bus->resched_dpc_time); - - bcm_bprintf(strbuf, "\nlast_suspend_start_time=%llu last_suspend_end_time=%llu" - " last_resume_start_time=%llu last_resume_end_time=%llu\n", - dhd->bus->last_suspend_start_time, dhd->bus->last_suspend_end_time, - dhd->bus->last_resume_start_time, dhd->bus->last_resume_end_time); + bcm_bprintf(strbuf, "\ncurrent_time="SEC_USEC_FMT" isr_entry_time="SEC_USEC_FMT + " isr_exit_time="SEC_USEC_FMT"\ndpc_sched_time="SEC_USEC_FMT + " last_non_ours_irq_time="SEC_USEC_FMT" dpc_entry_time="SEC_USEC_FMT"\n" + "last_process_ctrlbuf_time="SEC_USEC_FMT " last_process_flowring_time="SEC_USEC_FMT + " last_process_txcpl_time="SEC_USEC_FMT"\nlast_process_rxcpl_time="SEC_USEC_FMT + " last_process_infocpl_time="SEC_USEC_FMT" last_process_edl_time="SEC_USEC_FMT + "\ndpc_exit_time="SEC_USEC_FMT" resched_dpc_time="SEC_USEC_FMT"\n" + "last_d3_inform_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(current_time), GET_SEC_USEC(bus->isr_entry_time), + GET_SEC_USEC(bus->isr_exit_time), GET_SEC_USEC(bus->dpc_sched_time), + GET_SEC_USEC(bus->last_non_ours_irq_time), GET_SEC_USEC(bus->dpc_entry_time), + 
GET_SEC_USEC(bus->last_process_ctrlbuf_time), + GET_SEC_USEC(bus->last_process_flowring_time), + GET_SEC_USEC(bus->last_process_txcpl_time), + GET_SEC_USEC(bus->last_process_rxcpl_time), + GET_SEC_USEC(bus->last_process_infocpl_time), + GET_SEC_USEC(bus->last_process_edl_time), + GET_SEC_USEC(bus->dpc_exit_time), GET_SEC_USEC(bus->resched_dpc_time), + GET_SEC_USEC(bus->last_d3_inform_time)); + + bcm_bprintf(strbuf, "\nlast_suspend_start_time="SEC_USEC_FMT" last_suspend_end_time=" + SEC_USEC_FMT" last_resume_start_time="SEC_USEC_FMT" last_resume_end_time=" + SEC_USEC_FMT"\n", GET_SEC_USEC(bus->last_suspend_start_time), + GET_SEC_USEC(bus->last_suspend_end_time), + GET_SEC_USEC(bus->last_resume_start_time), + GET_SEC_USEC(bus->last_resume_end_time)); #if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE) - bcm_bprintf(strbuf, "logtrace_thread_entry_time=%llu " - "logtrace_thread_sem_down_time=%llu " - "logtrace_thread_flush_time=%llu " - "logtrace_thread_unexpected_break_time=%llu " - " logtrace_thread_complete_time=%llu\n", - dhd->logtrace_thr_ts.entry_time, dhd->logtrace_thr_ts.sem_down_time, - dhd->logtrace_thr_ts.flush_time, dhd->logtrace_thr_ts.unexpected_break_time, - dhd->logtrace_thr_ts.complete_time); + bcm_bprintf(strbuf, "logtrace_thread_entry_time="SEC_USEC_FMT + " logtrace_thread_sem_down_time="SEC_USEC_FMT + "\nlogtrace_thread_flush_time="SEC_USEC_FMT + " logtrace_thread_unexpected_break_time="SEC_USEC_FMT + "\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time), + GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time), + GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time), + GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time), + GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time)); #endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */ } @@ -6793,6 +7372,10 @@ void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n", 
dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr); bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr)); +#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS + bcm_bprintf(strbuf, "multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n", + dhdp->multi_client_flow_rings, dhdp->max_multi_client_flow_rings); +#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */ bcm_bprintf(strbuf, "%4s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ", "Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen", @@ -6843,26 +7426,8 @@ void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) bcm_bprintf(strbuf, "\n"); } - bcm_bprintf(strbuf, "\n"); - ix = 0; - bcm_bprintf(strbuf, "%4s %4s %2s %10s %7s %6s %5s %5s %10s %7s %7s %7s \n", - "Num:", "Flow", "If", " ACKED", "D11SPRS", "WLSPRS", "TSDWL", - "NOACK", "SPRS_ACKED", "EXPIRED", "DROPPED", "FWFREED"); - for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) { - flow_ring_node = DHD_FLOW_RING(dhdp, flowid); - if (!flow_ring_node->active) - continue; - - flow_info = &flow_ring_node->flow_info; - bcm_bprintf(strbuf, "%4d %4d %2d ", - ix++, flow_ring_node->flowid, flow_info->ifindex); - bcm_bprintf(strbuf, - "%5d %7d %6d %5d %5d %10d %7d %7d %7d\n", - "NA", "NA", "NA", "NA", "NA", "NA", "NA", "NA", "NA"); - } - #ifdef TX_STATUS_LATENCY_STATS - bcm_bprintf(strbuf, "%s %16s %16s\n", "If", "AvgTxCmpL_Us", "NumTxStatus"); + bcm_bprintf(strbuf, "\n%s %16s %16s\n", "If", "AvgTxCmpL_Us", "NumTxStatus"); if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; for (ix = 0; ix < DHD_MAX_IFS; ix++) { if (!if_flow_lkup[ix].status) { @@ -6876,6 +7441,46 @@ void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) if_tx_status_latency[ix].num_tx_status); } #endif /* TX_STATUS_LATENCY_STATS */ + +#ifdef DHD_HP2P + if (dhdp->hp2p_capable) { + bcm_bprintf(strbuf, "\n%s %16s %16s", "Flowid", "Tx_t0", "Tx_t1"); + + for (flowid = 0; flowid < MAX_HP2P_FLOWS; flowid++) { + hp2p_info_t *hp2p_info; + int bin; + + 
hp2p_info = &dhdp->hp2p_info[flowid]; + if (hp2p_info->num_timer_start == 0) + continue; + + bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid); + bcm_bprintf(strbuf, "\n%s", "Bin"); + + for (bin = 0; bin < MAX_TX_HIST_BIN; bin++) { + bcm_bprintf(strbuf, "\n%2d %20d %16d", bin, + hp2p_info->tx_t0[bin], hp2p_info->tx_t1[bin]); + } + + bcm_bprintf(strbuf, "\n%s %16s", "Flowid", "Rx_t0"); + bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid); + bcm_bprintf(strbuf, "\n%s", "Bin"); + + for (bin = 0; bin < MAX_RX_HIST_BIN; bin++) { + bcm_bprintf(strbuf, "\n%d %20d", bin, + hp2p_info->rx_t0[bin]); + } + + bcm_bprintf(strbuf, "\n%s %16s %16s", + "Packet limit", "Timer limit", "Timer start"); + bcm_bprintf(strbuf, "\n%d %24d %16d", hp2p_info->num_pkt_limit, + hp2p_info->num_timer_limit, hp2p_info->num_timer_start); + } + + bcm_bprintf(strbuf, "\n"); + } +#endif /* DHD_HP2P */ + bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt); bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt); bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt); @@ -6886,6 +7491,158 @@ void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) dhdp->bus->d2h_intr_method ? 
"PCIE_MSI" : "PCIE_INTX"); } +#ifdef DNGL_AXI_ERROR_LOGGING +bool +dhd_axi_sig_match(dhd_pub_t *dhdp) +{ + uint32 axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, dhdp->axierror_logbuf_addr); + + if (dhdp->dhd_induce_error == DHD_INDUCE_DROP_AXI_SIG) { + DHD_ERROR(("%s: Induce AXI signature drop\n", __FUNCTION__)); + return FALSE; + } + + DHD_ERROR(("%s: axi_tcm_addr: 0x%x, tcm range: 0x%x ~ 0x%x\n", + __FUNCTION__, axi_tcm_addr, dhdp->bus->dongle_ram_base, + dhdp->bus->dongle_ram_base + dhdp->bus->ramsize)); + if (axi_tcm_addr >= dhdp->bus->dongle_ram_base && + axi_tcm_addr < dhdp->bus->dongle_ram_base + dhdp->bus->ramsize) { + uint32 axi_signature = dhdpcie_bus_rtcm32(dhdp->bus, (axi_tcm_addr + + OFFSETOF(hnd_ext_trap_axi_error_v1_t, signature))); + if (axi_signature == HND_EXT_TRAP_AXIERROR_SIGNATURE) { + return TRUE; + } else { + DHD_ERROR(("%s: No AXI signature: 0x%x\n", + __FUNCTION__, axi_signature)); + return FALSE; + } + } else { + DHD_ERROR(("%s: No AXI shared tcm address debug info.\n", __FUNCTION__)); + return FALSE; + } +} + +void +dhd_axi_error(dhd_pub_t *dhdp) +{ + dhd_axi_error_dump_t *axi_err_dump; + uint8 *axi_err_buf = NULL; + uint8 *p_axi_err = NULL; + uint32 axi_logbuf_addr; + uint32 axi_tcm_addr; + int err, size; + + OSL_DELAY(75000); + + axi_logbuf_addr = dhdp->axierror_logbuf_addr; + if (!axi_logbuf_addr) { + DHD_ERROR(("%s: No AXI TCM address debug info.\n", __FUNCTION__)); + goto sched_axi; + } + + axi_err_dump = dhdp->axi_err_dump; + if (!axi_err_dump) { + goto sched_axi; + } + + if (!dhd_axi_sig_match(dhdp)) { + goto sched_axi; + } + + /* Reading AXI error data for SMMU fault */ + DHD_ERROR(("%s: Read AXI data from TCM address\n", __FUNCTION__)); + axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, axi_logbuf_addr); + size = sizeof(hnd_ext_trap_axi_error_v1_t); + axi_err_buf = MALLOCZ(dhdp->osh, size); + if (axi_err_buf == NULL) { + DHD_ERROR(("%s: out of memory !\n", __FUNCTION__)); + goto sched_axi; + } + + p_axi_err = axi_err_buf; + err = 
dhdpcie_bus_membytes(dhdp->bus, FALSE, axi_tcm_addr, p_axi_err, size); + if (err) { + DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n", + __FUNCTION__, err, size, axi_tcm_addr)); + goto sched_axi; + } + + /* Dump data to Dmesg */ + dhd_log_dump_axi_error(axi_err_buf); + err = memcpy_s(&axi_err_dump->etd_axi_error_v1, size, axi_err_buf, size); + if (err) { + DHD_ERROR(("%s: failed to copy etd axi error info, err=%d\n", + __FUNCTION__, err)); + } + +sched_axi: + if (axi_err_buf) { + MFREE(dhdp->osh, axi_err_buf, size); + } + dhd_schedule_axi_error_dump(dhdp, NULL); +} + +static void +dhd_log_dump_axi_error(uint8 *axi_err) +{ + dma_dentry_v1_t dma_dentry; + dma_fifo_v1_t dma_fifo; + int i = 0, j = 0; + + if (*(uint8 *)axi_err == HND_EXT_TRAP_AXIERROR_VERSION_1) { + hnd_ext_trap_axi_error_v1_t *axi_err_v1 = (hnd_ext_trap_axi_error_v1_t *)axi_err; + DHD_ERROR(("%s: signature : 0x%x\n", __FUNCTION__, axi_err_v1->signature)); + DHD_ERROR(("%s: version : 0x%x\n", __FUNCTION__, axi_err_v1->version)); + DHD_ERROR(("%s: length : 0x%x\n", __FUNCTION__, axi_err_v1->length)); + DHD_ERROR(("%s: dma_fifo_valid_count : 0x%x\n", + __FUNCTION__, axi_err_v1->dma_fifo_valid_count)); + DHD_ERROR(("%s: axi_errorlog_status : 0x%x\n", + __FUNCTION__, axi_err_v1->axi_errorlog_status)); + DHD_ERROR(("%s: axi_errorlog_core : 0x%x\n", + __FUNCTION__, axi_err_v1->axi_errorlog_core)); + DHD_ERROR(("%s: axi_errorlog_hi : 0x%x\n", + __FUNCTION__, axi_err_v1->axi_errorlog_hi)); + DHD_ERROR(("%s: axi_errorlog_lo : 0x%x\n", + __FUNCTION__, axi_err_v1->axi_errorlog_lo)); + DHD_ERROR(("%s: axi_errorlog_id : 0x%x\n", + __FUNCTION__, axi_err_v1->axi_errorlog_id)); + + for (i = 0; i < MAX_DMAFIFO_ENTRIES_V1; i++) { + dma_fifo = axi_err_v1->dma_fifo[i]; + DHD_ERROR(("%s: valid:%d : 0x%x\n", __FUNCTION__, i, dma_fifo.valid)); + DHD_ERROR(("%s: direction:%d : 0x%x\n", + __FUNCTION__, i, dma_fifo.direction)); + DHD_ERROR(("%s: index:%d : 0x%x\n", + __FUNCTION__, i, dma_fifo.index)); + 
DHD_ERROR(("%s: dpa:%d : 0x%x\n", + __FUNCTION__, i, dma_fifo.dpa)); + DHD_ERROR(("%s: desc_lo:%d : 0x%x\n", + __FUNCTION__, i, dma_fifo.desc_lo)); + DHD_ERROR(("%s: desc_hi:%d : 0x%x\n", + __FUNCTION__, i, dma_fifo.desc_hi)); + DHD_ERROR(("%s: din:%d : 0x%x\n", + __FUNCTION__, i, dma_fifo.din)); + DHD_ERROR(("%s: dout:%d : 0x%x\n", + __FUNCTION__, i, dma_fifo.dout)); + for (j = 0; j < MAX_DMAFIFO_DESC_ENTRIES_V1; j++) { + dma_dentry = axi_err_v1->dma_fifo[i].dentry[j]; + DHD_ERROR(("%s: ctrl1:%d : 0x%x\n", + __FUNCTION__, i, dma_dentry.ctrl1)); + DHD_ERROR(("%s: ctrl2:%d : 0x%x\n", + __FUNCTION__, i, dma_dentry.ctrl2)); + DHD_ERROR(("%s: addrlo:%d : 0x%x\n", + __FUNCTION__, i, dma_dentry.addrlo)); + DHD_ERROR(("%s: addrhi:%d : 0x%x\n", + __FUNCTION__, i, dma_dentry.addrhi)); + } + } + } + else { + DHD_ERROR(("%s: Invalid AXI version: 0x%x\n", __FUNCTION__, (*(uint8 *)axi_err))); + } +} +#endif /* DNGL_AXI_ERROR_LOGGING */ + /** * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their * flow queue to their flow ring. 
@@ -6898,6 +7655,10 @@ dhd_update_txflowrings(dhd_pub_t *dhd) flow_ring_node_t *flow_ring_node; struct dhd_bus *bus = dhd->bus; + if (dhd_query_bus_erros(dhd)) { + return; + } + /* Hold flowring_list_lock to ensure no race condition while accessing the List */ DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); for (item = dll_head_p(&bus->flowring_active_list); @@ -6968,6 +7729,13 @@ dhd_bus_ringbell(struct dhd_bus *bus, uint32 value) __FUNCTION__, bus->bus_low_power_state)); return; } + + /* Skip in the case of link down */ + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4)) { si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, @@ -7002,6 +7770,13 @@ dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake) __FUNCTION__, bus->bus_low_power_state)); return; } + + /* Skip in the case of link down */ + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } + DHD_INFO(("writing a door bell 2 to the device\n")); if (DAR_PWRREQ(bus)) { dhd_bus_pcie_pwr_req(bus); @@ -7020,9 +7795,27 @@ dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value) return; } + /* Skip in the case of link down */ + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } + if (DAR_PWRREQ(bus)) { dhd_bus_pcie_pwr_req(bus); } + +#ifdef DHD_DB0TS + if (bus->dhd->db0ts_capable) { + uint64 ts; + + ts = local_clock(); + do_div(ts, 1000); + + value = htol32(ts & 0xFFFFFFFF); + DHD_INFO(("%s: usec timer = 0x%x\n", __FUNCTION__, value)); + } +#endif /* DHD_DB0TS */ W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value); } @@ -7036,6 +7829,12 @@ dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake) return; } + /* Skip in the case of link down */ + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", 
__FUNCTION__)); + return; + } + if (DAR_PWRREQ(bus)) { dhd_bus_pcie_pwr_req(bus); } @@ -7052,6 +7851,13 @@ dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value) __FUNCTION__, bus->bus_low_power_state)); return; } + + /* Skip in the case of link down */ + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } + w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB; W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w); } @@ -7098,7 +7904,7 @@ dhd_bus_dpc(struct dhd_bus *bus) DHD_TRACE(("%s: Enter\n", __FUNCTION__)); - bus->dpc_entry_time = OSL_SYSUPTIME_US(); + bus->dpc_entry_time = OSL_LOCALTIME_NS(); DHD_GENERAL_LOCK(bus->dhd, flags); /* Check for only DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS @@ -7139,9 +7945,9 @@ INTR_ON: * which has been disabled in the dhdpcie_bus_isr() */ dhdpcie_enable_irq(bus); /* Enable back interrupt!! */ - bus->dpc_exit_time = OSL_SYSUPTIME_US(); + bus->dpc_exit_time = OSL_LOCALTIME_NS(); } else { - bus->resched_dpc_time = OSL_SYSUPTIME_US(); + bus->resched_dpc_time = OSL_LOCALTIME_NS(); } bus->dpc_sched = resched; @@ -7204,6 +8010,7 @@ dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data) done: if (h2d_mb_data == H2D_HOST_D3_INFORM) { DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__)); + bus->last_d3_inform_time = OSL_LOCALTIME_NS(); bus->d3_inform_cnt++; } if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) { @@ -7236,11 +8043,21 @@ dhd_bus_handle_d3_ack(dhd_bus_t *bus) dhdpcie_bus_clear_intstatus(bus); dhdpcie_disable_irq_nosync(bus); /* Disable host interrupt!! 
*/ - /* Set bus_low_power_state to DHD_BUS_D3_ACK_RECIEVED */ - bus->bus_low_power_state = DHD_BUS_D3_ACK_RECIEVED; + if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) { + /* Set bus_low_power_state to DHD_BUS_D3_ACK_RECIEVED */ + bus->bus_low_power_state = DHD_BUS_D3_ACK_RECIEVED; + DHD_ERROR(("%s: D3_ACK Recieved\n", __FUNCTION__)); + } DHD_BUS_UNLOCK(bus->bus_lock, flags_bus); - bus->wait_for_d3_ack = 1; - dhd_os_d3ack_wake(bus->dhd); + /* Check for D3 ACK induce flag, which is set by firing dhd iovar to induce D3 Ack timeout. + * If flag is set, D3 wake is skipped, which results in to D3 Ack timeout. + */ + if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) { + bus->wait_for_d3_ack = 1; + dhd_os_d3ack_wake(bus->dhd); + } else { + DHD_ERROR(("%s: Inducing D3 ACK timeout\n", __FUNCTION__)); + } } void dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data) @@ -7251,7 +8068,7 @@ dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data) DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data)); - if (d2h_mb_data & D2H_DEV_FWHALT) { + if (d2h_mb_data & D2H_DEV_FWHALT) { DHD_ERROR(("FW trap has happened\n")); dhdpcie_checkdied(bus, NULL, 0); dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO); @@ -7262,6 +8079,7 @@ dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data) BCM_REFERENCE(ds_acked); if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) { DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! 
QUITING\n")); + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); bus->dhd->busstate = DHD_BUS_DOWN; goto exit; } @@ -7378,6 +8196,7 @@ static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus) { bool resched = FALSE; + unsigned long flags_bus; if (MULTIBP_ENAB(bus->sih)) { dhd_bus_pcie_pwr_req(bus); @@ -7394,12 +8213,15 @@ dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus) if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1)) bus->api.handle_mb_data(bus); - if ((bus->dhd->busstate == DHD_BUS_SUSPEND) || (bus->use_mailbox && - (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE))) { - DHD_ERROR(("%s: Bus is in power save state. " + /* Do no process any rings after recieving D3_ACK */ + DHD_BUS_LOCK(bus->bus_lock, flags_bus); + if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) { + DHD_ERROR(("%s: D3 Ack Recieved. " "Skip processing rest of ring buffers.\n", __FUNCTION__)); + DHD_BUS_UNLOCK(bus->bus_lock, flags_bus); goto exit; } + DHD_BUS_UNLOCK(bus->bus_lock, flags_bus); /* Validate intstatus only for INTX case */ if ((bus->d2h_intr_method == PCIE_MSI) || @@ -7453,10 +8275,17 @@ static bool dhdpci_bus_read_frames(dhd_bus_t *bus) { bool more = FALSE; + unsigned long flags_bus; /* First check if there a FW trap */ if ((bus->api.fw_rev >= PCIE_SHARED_VERSION_6) && (bus->dhd->dongle_trap_data = dhd_prot_process_trapbuf(bus->dhd))) { +#ifdef DNGL_AXI_ERROR_LOGGING + if (bus->dhd->axi_error) { + DHD_ERROR(("AXI Error happened\n")); + return FALSE; + } +#endif /* DNGL_AXI_ERROR_LOGGING */ dhd_bus_handle_mb_data(bus, D2H_DEV_FWHALT); return FALSE; } @@ -7465,33 +8294,43 @@ dhdpci_bus_read_frames(dhd_bus_t *bus) DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); dhd_prot_process_ctrlbuf(bus->dhd); - bus->last_process_ctrlbuf_time = OSL_SYSUPTIME_US(); + bus->last_process_ctrlbuf_time = OSL_LOCALTIME_NS(); /* Unlock to give chance for resp to be handled */ DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % 
FWDER_MAX_UNIT)); - /* Do not process rest of ring buf once bus enters low power state */ - if (!bus->use_mailbox && (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE)) { - DHD_ERROR(("%s: Bus is in power save state. " - "Skip processing rest of ring buffers.\n", __FUNCTION__)); + /* Do not process rest of ring buf once bus enters low power state (D3_INFORM/D3_ACK) */ + DHD_BUS_LOCK(bus->bus_lock, flags_bus); + if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) { + DHD_ERROR(("%s: Bus is in power save state (%d). " + "Skip processing rest of ring buffers.\n", + __FUNCTION__, bus->bus_low_power_state)); + DHD_BUS_UNLOCK(bus->bus_lock, flags_bus); return FALSE; } + DHD_BUS_UNLOCK(bus->bus_lock, flags_bus); DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); /* update the flow ring cpls */ dhd_update_txflowrings(bus->dhd); - bus->last_process_flowring_time = OSL_SYSUPTIME_US(); + bus->last_process_flowring_time = OSL_LOCALTIME_NS(); /* With heavy TX traffic, we could get a lot of TxStatus * so add bound */ - more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound); - bus->last_process_txcpl_time = OSL_SYSUPTIME_US(); +#ifdef DHD_HP2P + more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_HP2P_RING); +#endif /* DHD_HP2P */ + more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_REGULAR_RING); + bus->last_process_txcpl_time = OSL_LOCALTIME_NS(); /* With heavy RX traffic, this routine potentially could spend some time * processing RX frames without RX bound */ - more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound); - bus->last_process_rxcpl_time = OSL_SYSUPTIME_US(); +#ifdef DHD_HP2P + more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_HP2P_RING); +#endif /* DHD_HP2P */ + more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_REGULAR_RING); + bus->last_process_rxcpl_time = OSL_LOCALTIME_NS(); /* Process info ring completion messages */ #ifdef EWP_EDL @@ -7499,12 +8338,12 @@ 
dhdpci_bus_read_frames(dhd_bus_t *bus) #endif // endif { more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND); - bus->last_process_infocpl_time = OSL_SYSUPTIME_US(); + bus->last_process_infocpl_time = OSL_LOCALTIME_NS(); } #ifdef EWP_EDL else { more |= dhd_prot_process_msgbuf_edl(bus->dhd); - bus->last_process_edl_time = OSL_SYSUPTIME_US(); + bus->last_process_edl_time = OSL_LOCALTIME_NS(); } #endif /* EWP_EDL */ @@ -7615,14 +8454,24 @@ dhdpcie_readshared(dhd_bus_t *bus) addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr)); } + if (addr == (uint32)-1) { + DHD_ERROR(("%s: PCIe link might be down\n", __FUNCTION__)); + bus->is_linkdown = 1; + return BCME_ERROR; + } + if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) || (addr > shaddr)) { DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n", __FUNCTION__, addr)); DHD_ERROR(("%s: Waited %u usec, dongle is not ready\n", __FUNCTION__, tmo.elapsed)); #ifdef DEBUG_DNGL_INIT_FAIL - bus->dhd->memdump_type = DUMP_TYPE_DONGLE_INIT_FAILURE; - dhdpcie_mem_dump(bus); + if (addr != (uint32)-1) { /* skip further PCIE reads if read this addr */ + if (bus->dhd->memdump_enabled) { + bus->dhd->memdump_type = DUMP_TYPE_DONGLE_INIT_FAILURE; + dhdpcie_mem_dump(bus); + } + } #endif /* DEBUG_DNGL_INIT_FAIL */ return BCME_ERROR; } else { @@ -7690,6 +8539,9 @@ dhdpcie_readshared(dhd_bus_t *bus) } } + /* TODO: This need to be selected based on IPC instead of compile time */ + bus->dhd->hwa_enable = TRUE; + if (idma_en) { bus->dhd->idma_enable = (sh->flags & PCIE_SHARED_IDMA) ? TRUE : FALSE; bus->dhd->ifrm_enable = (sh->flags & PCIE_SHARED_IFRM) ? 
TRUE : FALSE; @@ -7741,6 +8593,13 @@ dhdpcie_readshared(dhd_bus_t *bus) { ring_info_t ring_info; + /* boundary check */ + if (sh->rings_info_ptr > shaddr) { + DHD_ERROR(("%s: rings_info_ptr is invalid 0x%x, skip reading ring info\n", + __FUNCTION__, sh->rings_info_ptr)); + return BCME_ERROR; + } + if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr, (uint8 *)&ring_info, sizeof(ring_info_t))) < 0) return rv; @@ -7866,8 +8725,48 @@ dhdpcie_readshared(dhd_bus_t *bus) DHD_ERROR(("FW supports debug buf dest ? %s \n", bus->dhd->debug_buf_dest_support ? "Y" : "N")); +#ifdef DHD_HP2P + if (bus->dhd->hp2p_enable) { + bus->dhd->hp2p_ts_capable = + (sh->flags2 & PCIE_SHARED2_PKT_TIMESTAMP) == PCIE_SHARED2_PKT_TIMESTAMP; + bus->dhd->hp2p_capable = + (sh->flags2 & PCIE_SHARED2_HP2P) == PCIE_SHARED2_HP2P; + bus->dhd->hp2p_capable &= bus->dhd->hp2p_ts_capable; + + DHD_ERROR(("FW supports HP2P ? %s \n", + bus->dhd->hp2p_capable ? "Y" : "N")); + + if (bus->dhd->hp2p_capable) { + bus->dhd->pkt_thresh = HP2P_PKT_THRESH; + bus->dhd->pkt_expiry = HP2P_PKT_EXPIRY; + bus->dhd->time_thresh = HP2P_TIME_THRESH; + for (addr = 0; addr < MAX_HP2P_FLOWS; addr++) { + hp2p_info_t *hp2p_info = &bus->dhd->hp2p_info[addr]; + + hp2p_info->hrtimer_init = FALSE; + tasklet_hrtimer_init(&hp2p_info->timer, + dhd_hp2p_write, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + } + } + } +#endif /* DHD_HP2P */ + +#ifdef DHD_DB0TS + bus->dhd->db0ts_capable = + (sh->flags & PCIE_SHARED_TIMESTAMP_DB0) == PCIE_SHARED_TIMESTAMP_DB0; +#endif /* DHD_DB0TS */ + if (MULTIBP_ENAB(bus->sih)) { dhd_bus_pcie_pwr_req_clear(bus); + + /* + * WAR to fix ARM cold boot; + * De-assert WL domain in DAR + */ + if (bus->sih->buscorerev >= 68) { + dhd_bus_pcie_pwr_req_wl_domain(bus, + DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev), FALSE); + } } return BCME_OK; } /* dhdpcie_readshared */ @@ -7984,7 +8883,7 @@ int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) if (!bus->dhd) return 0; - if (bus->sih->buscorerev == 66) { + if 
(PCIE_RELOAD_WAR_ENAB(bus->sih->buscorerev)) { dhd_bus_pcie_pwr_req_clear_reload_war(bus); } @@ -7992,6 +8891,9 @@ int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) dhd_bus_pcie_pwr_req(bus); } + /* Configure AER registers to log the TLP header */ + dhd_bus_aer_config(bus); + /* Make sure we're talking to the core. */ bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0); ASSERT(bus->reg != NULL); @@ -8009,6 +8911,8 @@ int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) dhd_init_bus_lock(bus); + dhd_init_backplane_access_lock(bus); + /* Set bus state according to enable result */ dhdp->busstate = DHD_BUS_DATA; bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE; @@ -8048,6 +8952,7 @@ dhdpcie_init_shared_addr(dhd_bus_t *bus) { uint32 addr = 0; uint32 val = 0; + addr = bus->dongle_ram_base + bus->ramsize - 4; dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val)); } @@ -8154,6 +9059,10 @@ dhdpcie_chipmatch(uint16 vendor, uint16 device) (device == BCM43751_D11AX5G_ID) || (device == BCM43751_CHIP_ID)) { return 0; } + if ((device == BCM43752_D11AX_ID) || (device == BCM43752_D11AX2G_ID) || + (device == BCM43752_D11AX5G_ID) || (device == BCM43752_CHIP_ID)) { + return 0; + } if ((device == BCM4347_D11AC_ID) || (device == BCM4347_D11AC2G_ID) || (device == BCM4347_D11AC5G_ID) || (device == BCM4347_CHIP_ID)) { return 0; @@ -8382,6 +9291,13 @@ void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node) dhd_tcpack_info_tbl_clean(bus->dhd); #endif /* DHDTCPACK_SUPPRESS */ +#ifdef DHD_HP2P + if (flow_ring_node->hp2p_ring) { + bus->dhd->hp2p_ring_active = FALSE; + flow_ring_node->hp2p_ring = FALSE; + } +#endif /* DHD_HP2P */ + /* clean up BUS level info */ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); @@ -8439,8 +9355,26 @@ dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status) DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid)); + /* Boundary check of the flowid */ + if (flowid >= bus->dhd->num_flow_rings) { + DHD_ERROR(("%s: flowid 
is invalid %d, max %d\n", __FUNCTION__, + flowid, bus->dhd->num_flow_rings)); + return; + } + flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); + if (!flow_ring_node) { + DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__)); + return; + } + ASSERT(flow_ring_node->flowid == flowid); + if (flow_ring_node->flowid != flowid) { + DHD_ERROR(("%s: flowid %d is different from the flowid " + "of the flow_ring_node %d\n", __FUNCTION__, flowid, + flow_ring_node->flowid)); + return; + } if (status != BCME_OK) { DHD_ERROR(("%s Flow create Response failure error status = %d \n", @@ -8528,8 +9462,26 @@ dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status) DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid)); + /* Boundary check of the flowid */ + if (flowid >= bus->dhd->num_flow_rings) { + DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__, + flowid, bus->dhd->num_flow_rings)); + return; + } + flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); + if (!flow_ring_node) { + DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__)); + return; + } + ASSERT(flow_ring_node->flowid == flowid); + if (flow_ring_node->flowid != flowid) { + DHD_ERROR(("%s: flowid %d is different from the flowid " + "of the flow_ring_node %d\n", __FUNCTION__, flowid, + flow_ring_node->flowid)); + return; + } if (status != BCME_OK) { DHD_ERROR(("%s Flow Delete Response failure error status = %d \n", @@ -8593,8 +9545,26 @@ dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status) return; } + /* Boundary check of the flowid */ + if (flowid >= bus->dhd->num_flow_rings) { + DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__, + flowid, bus->dhd->num_flow_rings)); + return; + } + flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); + if (!flow_ring_node) { + DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__)); + return; + } + ASSERT(flow_ring_node->flowid == flowid); + if (flow_ring_node->flowid != flowid) { + DHD_ERROR(("%s: 
flowid %d is different from the flowid " + "of the flow_ring_node %d\n", __FUNCTION__, flowid, + flow_ring_node->flowid)); + return; + } flow_ring_node->status = FLOW_RING_STATUS_OPEN; return; @@ -8619,6 +9589,18 @@ dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val) dhdp->bus->is_linkdown = val; } +int +dhd_bus_get_linkdown(dhd_pub_t *dhdp) +{ + return dhdp->bus->is_linkdown; +} + +int +dhd_bus_get_cto(dhd_pub_t *dhdp) +{ + return dhdp->bus->cto_triggered; +} + #ifdef IDLE_TX_FLOW_MGMT /* resume request */ int @@ -8871,41 +9853,76 @@ dhd_bus_release_dongle(struct dhd_bus *bus) return 0; } -void -dhdpcie_cto_init(struct dhd_bus *bus, bool enable) +int +dhdpcie_cto_cfg_init(struct dhd_bus *bus, bool enable) { uint32 val; - if (enable) { dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, PCI_CTO_INT_MASK | PCI_SBIM_MASK_SERR); val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4); dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_BACKPLANE_EN); - if (bus->cto_threshold == 0) { - bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT; - } - - si_corereg(bus->sih, bus->sih->buscoreidx, - OFFSETOF(sbpcieregs_t, ctoctrl), ~0, - ((bus->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) & - PCIE_CTO_TO_THRESHHOLD_MASK) | - ((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) & - PCIE_CTO_CLKCHKCNT_MASK) | - PCIE_CTO_ENAB_MASK); } else { dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, 0); val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4); dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_BACKPLANE_EN); + } + return 0; +} +int +dhdpcie_cto_init(struct dhd_bus *bus, bool enable) +{ + if (bus->sih->buscorerev < 19) { + DHD_INFO(("%s: Unsupported CTO, buscorerev=%d\n", + __FUNCTION__, bus->sih->buscorerev)); + return BCME_UNSUPPORTED; + } + + if (bus->sih->buscorerev == 19) { + uint32 pcie_lnkst; + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_LINK_STATUS); + + pcie_lnkst = si_corereg(bus->sih, 
bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, configdata), 0, 0); + + if (((pcie_lnkst >> PCI_LINK_SPEED_SHIFT) & + PCI_LINK_SPEED_MASK) == PCIE_LNK_SPEED_GEN1) { + return BCME_UNSUPPORTED; + } + } + + bus->cto_enable = enable; + + dhdpcie_cto_cfg_init(bus, enable); + + if (enable) { + if (bus->cto_threshold == 0) { + bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT; + } + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ctoctrl), ~0, + ((bus->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) & + PCIE_CTO_TO_THRESHHOLD_MASK) | + ((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) & + PCIE_CTO_CLKCHKCNT_MASK) | + PCIE_CTO_ENAB_MASK); + } else { si_corereg(bus->sih, bus->sih->buscoreidx, - OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0); + OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0); } + + DHD_ERROR(("%s: set CTO prevention and recovery enable/disable %d\n", + __FUNCTION__, bus->cto_enable)); + + return 0; } -static void +static int dhdpcie_cto_error_recovery(struct dhd_bus *bus) { - uint32 pci_intmask, err_status, dar_val; + uint32 pci_intmask, err_status; uint8 i = 0; uint32 val; @@ -8919,29 +9936,7 @@ dhdpcie_cto_error_recovery(struct dhd_bus *bus) /* * DAR still accessible */ - dar_val = si_corereg(bus->sih, bus->sih->buscoreidx, - DAR_CLK_CTRL(bus->sih->buscorerev), 0, 0); - DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_CLK_CTRL(bus->sih->buscorerev), dar_val)); - - dar_val = si_corereg(bus->sih, bus->sih->buscoreidx, - DAR_PCIE_PWR_CTRL(bus->sih->buscorerev), 0, 0); - DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_PCIE_PWR_CTRL(bus->sih->buscorerev), dar_val)); - - dar_val = si_corereg(bus->sih, bus->sih->buscoreidx, - DAR_INTSTAT(bus->sih->buscorerev), 0, 0); - DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_INTSTAT(bus->sih->buscorerev), dar_val)); - - dar_val = si_corereg(bus->sih, bus->sih->buscoreidx, - DAR_ERRLOG(bus->sih->buscorerev), 0, 0); - DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_ERRLOG(bus->sih->buscorerev), dar_val)); - - dar_val = si_corereg(bus->sih, 
bus->sih->buscoreidx, - DAR_ERRADDR(bus->sih->buscorerev), 0, 0); - DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_ERRADDR(bus->sih->buscorerev), dar_val)); - - dar_val = si_corereg(bus->sih, bus->sih->buscoreidx, - DAR_PCIMailBoxInt(bus->sih->buscorerev), 0, 0); - DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_PCIMailBoxInt(bus->sih->buscorerev), dar_val)); + dhd_bus_dump_dar_registers(bus); /* reset backplane */ val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4); @@ -8965,7 +9960,7 @@ dhdpcie_cto_error_recovery(struct dhd_bus *bus) DHD_ERROR(("cto recovery fail\n")); DHD_OS_WAKE_UNLOCK(bus->dhd); - return; + return BCME_ERROR; } } @@ -8987,6 +9982,8 @@ dhdpcie_cto_error_recovery(struct dhd_bus *bus) PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val)); DHD_OS_WAKE_UNLOCK(bus->dhd); + + return BCME_OK; } void @@ -9076,6 +10073,18 @@ dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 } } +bool +dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t *bus) +{ + if (!bus->dhd) + return FALSE; + else if (bus->hwa_enab_bmap) { + return bus->dhd->hwa_enable; + } else { + return FALSE; + } +} + bool dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus) { @@ -9158,17 +10167,19 @@ dhd_get_idletime(dhd_pub_t *dhd) return dhd->bus->idletime; } -#ifdef DHD_SSSR_DUMP - static INLINE void dhd_sbreg_op(dhd_pub_t *dhd, uint addr, uint *val, bool read) { OSL_DELAY(1); - serialized_backplane_access(dhd->bus, addr, sizeof(uint), val, read); - DHD_ERROR(("%s: addr:0x%x val:0x%x read:%d\n", __FUNCTION__, addr, *val, read)); + if (serialized_backplane_access(dhd->bus, addr, sizeof(uint), val, read) != BCME_OK) { + DHD_ERROR(("sbreg: Invalid uint addr: 0x%x \n", addr)); + } else { + DHD_ERROR(("sbreg: addr:0x%x val:0x%x read:%d\n", addr, *val, read)); + } return; } +#ifdef DHD_SSSR_DUMP static int dhdpcie_get_sssr_fifo_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size, uint addr_reg, uint data_reg) @@ -9197,7 +10208,11 @@ dhdpcie_get_sssr_fifo_dump(dhd_pub_t *dhd, uint *buf, uint 
fifo_size, addr = data_reg; /* Read 4 bytes at once and loop for fifo_size / 4 */ for (i = 0; i < fifo_size / 4; i++) { - serialized_backplane_access(dhd->bus, addr, sizeof(uint), &val, TRUE); + if (serialized_backplane_access(dhd->bus, addr, + sizeof(uint), &val, TRUE) != BCME_OK) { + DHD_ERROR(("%s: error in serialized_backplane_access\n", __FUNCTION__)); + return BCME_ERROR; + } buf[i] = val; OSL_DELAY(1); } @@ -9229,7 +10244,12 @@ dhdpcie_get_sssr_dig_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size, if ((!dhd->sssr_reg_info.vasip_regs.vasip_sr_size) && dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) { - dhdpcie_bus_membytes(dhd->bus, FALSE, addr_reg, (uint8 *)buf, fifo_size); + int err = dhdpcie_bus_membytes(dhd->bus, FALSE, addr_reg, (uint8 *)buf, + fifo_size); + if (err != BCME_OK) { + DHD_ERROR(("%s: Error reading dig dump from dongle !\n", + __FUNCTION__)); + } } else { /* Check if vasip clk is disabled, if yes enable it */ addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl; @@ -9242,8 +10262,12 @@ dhdpcie_get_sssr_dig_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size, addr = addr_reg; /* Read 4 bytes at once and loop for fifo_size / 4 */ for (i = 0; i < fifo_size / 4; i++, addr += 4) { - serialized_backplane_access(dhd->bus, addr, sizeof(uint), &val, - TRUE); + if (serialized_backplane_access(dhd->bus, addr, sizeof(uint), + &val, TRUE) != BCME_OK) { + DHD_ERROR(("%s: Invalid uint addr: 0x%x \n", __FUNCTION__, + addr)); + return BCME_ERROR; + } buf[i] = val; OSL_DELAY(1); } @@ -9304,6 +10328,9 @@ dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd, hdr = (hnd_ext_trap_hdr_t *)ext_trap_data; tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_LOG_DATA); if (tlv) { + uint32 baseaddr = 0; + uint32 endaddr = dhd->bus->dongle_ram_base + dhd->bus->ramsize - 4; + etd_evtlog = (eventlog_trapdata_info_t *)tlv->data; DHD_ERROR(("%s: etd_evtlog tlv found, num_elements=%x; " "seq_num=%x; log_arr_addr=%x\n", __FUNCTION__, @@ -9319,6 +10346,16 @@ 
dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd, DHD_ERROR(("%s: out of memory !\n", __FUNCTION__)); return; } + + /* boundary check */ + baseaddr = etd_evtlog->log_arr_addr; + if ((baseaddr < dhd->bus->dongle_ram_base) || + ((baseaddr + arr_size) > endaddr)) { + DHD_ERROR(("%s: Error reading invalid address\n", + __FUNCTION__)); + goto err; + } + /* read the eventlog_trap_buf_info_t array from dongle memory */ err = dhdpcie_bus_membytes(dhd->bus, FALSE, (ulong)(etd_evtlog->log_arr_addr), @@ -9335,6 +10372,14 @@ dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd, seqnum = ntoh32(etd_evtlog->seq_num); memset(dhd->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN); for (i = 0; i < (etd_evtlog->num_elements); ++i) { + /* boundary check */ + baseaddr = evtlog_buf_arr[i].buf_addr; + if ((baseaddr < dhd->bus->dongle_ram_base) || + ((baseaddr + evtlog_buf_arr[i].len) > endaddr)) { + DHD_ERROR(("%s: Error reading invalid address\n", + __FUNCTION__)); + goto err; + } /* read each individual event log buf from dongle memory */ err = dhdpcie_bus_membytes(dhd->bus, FALSE, ((ulong)evtlog_buf_arr[i].buf_addr), @@ -9357,11 +10402,11 @@ err: } #endif /* BCMPCIE && DHD_LOG_DUMP */ -static int -dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd) +static uint32 +dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd, uint32 reg_val) { uint addr; - uint val; + uint val = 0; DHD_ERROR(("%s\n", __FUNCTION__)); @@ -9370,29 +10415,28 @@ dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd) dhd_sbreg_op(dhd, addr, &val, TRUE); if (!(val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask)) { addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl; - val = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask; - dhd_sbreg_op(dhd, addr, &val, FALSE); + dhd_sbreg_op(dhd, addr, ®_val, FALSE); } return BCME_OK; } -static int +static uint32 dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t *dhd) { uint addr; - uint val; + uint val = 0, reg_val = 0; DHD_ERROR(("%s\n", __FUNCTION__)); /* conditionally 
clear bits [11:8] of PowerCtrl */ addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl; - dhd_sbreg_op(dhd, addr, &val, TRUE); - if (val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask) { + dhd_sbreg_op(dhd, addr, ®_val, TRUE); + if (reg_val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask) { addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl; val = 0; dhd_sbreg_op(dhd, addr, &val, FALSE); } - return BCME_OK; + return reg_val; } static int @@ -9546,10 +10590,51 @@ dhdpcie_arm_clear_clk_req(dhd_pub_t *dhd) val = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus_val; dhd_sbreg_op(dhd, addr, &val, FALSE); } + + if (MULTIBP_ENAB(dhd->bus->sih)) { + uint32 resetctrl = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl; + + /* Just halt ARM but do not reset the core */ + resetctrl &= ~(SI_CORE_SIZE - 1); + resetctrl += OFFSETOF(aidmp_t, ioctrl); + + dhd_sbreg_op(dhd, resetctrl, &val, TRUE); + val |= SICF_CPUHALT; + dhd_sbreg_op(dhd, resetctrl, &val, FALSE); + } } return BCME_OK; } +static int +dhdpcie_arm_resume_clk_req(dhd_pub_t *dhd) +{ + uint addr; + uint val = 0; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + /* Check if bit 0 of resetctrl is cleared */ + addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl; + dhd_sbreg_op(dhd, addr, &val, TRUE); + if (!(val & 1)) { + if (MULTIBP_ENAB(dhd->bus->sih)) { + uint32 resetctrl = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl; + + /* Take ARM out of halt but do not reset core */ + resetctrl &= ~(SI_CORE_SIZE - 1); + resetctrl += OFFSETOF(aidmp_t, ioctrl); + + dhd_sbreg_op(dhd, resetctrl, &val, TRUE); + val &= ~SICF_CPUHALT; + dhd_sbreg_op(dhd, resetctrl, &val, FALSE); + dhd_sbreg_op(dhd, resetctrl, &val, TRUE); + } + } + + return BCME_OK; +} + static int dhdpcie_pcie_clear_clk_req(dhd_pub_t *dhd) { @@ -9705,9 +10790,11 @@ dhdpcie_sssr_dump_get_after_sr(dhd_pub_t *dhd) return BCME_OK; } -static int +int dhdpcie_sssr_dump(dhd_pub_t *dhd) { + uint32 powerctrl_val; + if 
(!dhd->sssr_inited) { DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__)); return BCME_ERROR; @@ -9718,6 +10805,14 @@ dhdpcie_sssr_dump(dhd_pub_t *dhd) return BCME_ERROR; } + DHD_ERROR(("%s: Before WL down (powerctl: pcie:0x%x chipc:0x%x) " + "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__, + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + OFFSETOF(chipcregs_t, powerctl), 0, 0), + si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0), + PMU_REG(dhd->bus->sih, retention_ctl, 0, 0), + PMU_REG(dhd->bus->sih, res_state, 0, 0))); + dhdpcie_d11_check_outofreset(dhd); DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__)); @@ -9727,14 +10822,41 @@ dhdpcie_sssr_dump(dhd_pub_t *dhd) } dhdpcie_clear_intmask_and_timer(dhd); - dhdpcie_suspend_chipcommon_powerctrl(dhd); + powerctrl_val = dhdpcie_suspend_chipcommon_powerctrl(dhd); dhdpcie_clear_clk_req(dhd); dhdpcie_pcie_send_ltrsleep(dhd); + if (MULTIBP_ENAB(dhd->bus->sih)) { + dhd_bus_pcie_pwr_req_wl_domain(dhd->bus, OFFSETOF(chipcregs_t, powerctl), FALSE); + } + /* Wait for some time before Restore */ OSL_DELAY(6000); - dhdpcie_resume_chipcommon_powerctrl(dhd); + DHD_ERROR(("%s: After WL down (powerctl: pcie:0x%x chipc:0x%x) " + "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__, + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + OFFSETOF(chipcregs_t, powerctl), 0, 0), + si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0), + PMU_REG(dhd->bus->sih, retention_ctl, 0, 0), + PMU_REG(dhd->bus->sih, res_state, 0, 0))); + + if (MULTIBP_ENAB(dhd->bus->sih)) { + dhd_bus_pcie_pwr_req_wl_domain(dhd->bus, OFFSETOF(chipcregs_t, powerctl), TRUE); + /* Add delay for WL domain to power up */ + OSL_DELAY(15000); + + DHD_ERROR(("%s: After WL up again (powerctl: pcie:0x%x chipc:0x%x) " + "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__, + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + OFFSETOF(chipcregs_t, powerctl), 0, 0), + si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0), + 
PMU_REG(dhd->bus->sih, retention_ctl, 0, 0), + PMU_REG(dhd->bus->sih, res_state, 0, 0))); + } + + dhdpcie_arm_resume_clk_req(dhd); + dhdpcie_resume_chipcommon_powerctrl(dhd, powerctrl_val); dhdpcie_bring_d11_outofreset(dhd); DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__)); @@ -9742,18 +10864,12 @@ dhdpcie_sssr_dump(dhd_pub_t *dhd) DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__)); return BCME_ERROR; } - - dhd_schedule_sssr_dump(dhd, SSSR_DUMP_MODE_SSSR); + dhd->sssr_dump_collected = TRUE; + dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_SSSR); return BCME_OK; } -int -dhd_bus_sssr_dump(dhd_pub_t *dhd) -{ - return dhdpcie_sssr_dump(dhd); -} - static int dhdpcie_fis_trigger(dhd_pub_t *dhd) { @@ -9819,7 +10935,7 @@ dhdpcie_fis_dump(dhd_pub_t *dhd) return BCME_ERROR; } - dhd_schedule_sssr_dump(dhd, SSSR_DUMP_MODE_FIS); + dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_FIS); return BCME_OK; } @@ -9844,207 +10960,6 @@ dhd_bus_get_bus_wake(dhd_pub_t *dhd) } #endif /* DHD_WAKE_STATUS */ -#define OTP_ADDRESS (SI_ENUM_BASE_DEFAULT + CC_SROM_OTP) -#define OTP_USER_AREA_OFFSET 0x80 -#define OTP_USER_AREA_ADDR (OTP_ADDRESS + OTP_USER_AREA_OFFSET) -#define OTP_VERSION_TUPLE_ID 0x15 -#define OTP_VENDOR_TUPLE_ID 0x80 -#define OTP_CIS_REGION_END_TUPLE_ID 0XFF -#define PMU_RES_STATE_REG_ADDR (SI_ENUM_BASE_DEFAULT + PMU_RES_STATE) -#define PMU_MINRESMASK_REG_ADDR (SI_ENUM_BASE_DEFAULT + MINRESMASKREG) -#define OTP_CTRL1_REG_ADDR (SI_ENUM_BASE_DEFAULT + 0xF4) -#define SPROM_CTRL_REG_ADDR (SI_ENUM_BASE_DEFAULT + CC_SROM_CTRL) -#define CHIP_COMMON_STATUS_REG_ADDR (SI_ENUM_BASE_DEFAULT + 0x2C) -#define PMU_OTP_PWR_ON_MASK 0xC47 - -int -dhdpcie_get_nvpath_otp(dhd_bus_t *bus, char* program, char *nv_path) -{ - uint32 val = 0; - uint16 chip_id = 0; - uint8 otp_data[2]; - char stepping[3]; - char module_name[5]; - char module_vendor = 0; - char module_rev[4]; - uint8 tuple_id = 0; - uint8 tuple_len = 0; - uint32 cur_offset = 0; - uint32 version_tuple_offset = 0; - char 
module_info[64]; - char progname[32]; - bool srom_present = 0, otp_present = 0; - uint32 sprom_ctrl = 0; - uint32 otp_ctrl = 0, minres_mask = 0; - int i = 0, j = 0, status = BCME_ERROR; - - if (!nv_path || !bus) { - return BCME_ERROR; - } - - /* read chip id first */ - if (serialized_backplane_access(bus, SI_ENUM_BASE_DEFAULT, 4, &val, TRUE) != BCME_OK) { - DHD_ERROR(("%s: bkplane access error ! \n", __FUNCTION__)); - } - else { - chip_id = val & 0xffff; - } - - /* read SpromCtrl register */ - serialized_backplane_access(bus, SPROM_CTRL_REG_ADDR, 4, &sprom_ctrl, TRUE); - val = sprom_ctrl; - - /* proceed only if OTP is present - i.e, the 5th bit OtpPresent is set - * and chip is 4355 or 4364 - */ - if ((val & 0x20) && (chip_id == 0x4355 || chip_id == 0x4364)) { - otp_present = 1; - - /* Check if the 4th bit (sprom_present) in CC Status REG is set */ - serialized_backplane_access(bus, CHIP_COMMON_STATUS_REG_ADDR, 4, &val, TRUE); - if (val & 0x10) { - srom_present = 1; - } - - /* OTP power up sequence */ - /* 1. cache otp ctrl and enable OTP clock through OTPCtrl1 register */ - serialized_backplane_access(bus, OTP_CTRL1_REG_ADDR, 4, &otp_ctrl, TRUE); - val = 0x1A0000; - serialized_backplane_access(bus, OTP_CTRL1_REG_ADDR, 4, &val, FALSE); - - /* 2. enable OTP power through min res mask register in PMU */ - serialized_backplane_access(bus, PMU_MINRESMASK_REG_ADDR, 4, &minres_mask, TRUE); - val = minres_mask | PMU_OTP_PWR_ON_MASK; - serialized_backplane_access(bus, PMU_MINRESMASK_REG_ADDR, 4, &val, FALSE); - - /* 3. if srom is present, need to set OtpSelect 4th bit - * in SpromCtrl register to read otp - */ - if (srom_present) { - - val = sprom_ctrl | 0x10; - serialized_backplane_access(bus, SPROM_CTRL_REG_ADDR, 4, &val, FALSE); - - } - /* Wait for PMU to power up. 
*/ - OSL_DELAY(500); - serialized_backplane_access(bus, PMU_RES_STATE_REG_ADDR, 4, &val, TRUE); - DHD_INFO(("%s: PMU_RES_STATE_REG_ADDR %x \n", __FUNCTION__, val)); - - serialized_backplane_access(bus, SI_ENUM_BASE_DEFAULT, 4, &val, TRUE); - DHD_INFO(("%s: _SI_ENUM_BASE %x \n", __FUNCTION__, val)); - - serialized_backplane_access(bus, OTP_ADDRESS, 2, &val, TRUE); - DHD_INFO(("%s: OTP_ADDRESS %x \n", __FUNCTION__, val)); - - cur_offset = OTP_USER_AREA_ADDR + 0x40; - /* read required data from otp to construct FW string name - * data like - chip info, module info. This is present in the - * form of a Vendor CIS Tuple whose format is provided by Olympic. - * The data is in the form of ASCII character strings. - * The Vendor tuple along with other CIS tuples are present - * in the OTP user area. A CIS tuple is a TLV format. - * (T = 1-byte, L = 1-byte, V = n-bytes) - */ - - /* Find the version tuple */ - while (tuple_id != OTP_CIS_REGION_END_TUPLE_ID) { - serialized_backplane_access(bus, cur_offset, - 2, (uint *)otp_data, TRUE); - - tuple_id = otp_data[0]; - tuple_len = otp_data[1]; - if (tuple_id == OTP_VERSION_TUPLE_ID) { - version_tuple_offset = cur_offset; - break; - } - /* if its NULL tuple, skip */ - if (tuple_id == 0) - cur_offset += 1; - else - cur_offset += tuple_len + 2; - } - - /* skip the major, minor ver. 
numbers, manufacturer and product names */ - cur_offset = version_tuple_offset + 6; - - /* read the chip info */ - serialized_backplane_access(bus, cur_offset, - 2, (uint *)otp_data, TRUE); - if (otp_data[0] == 's' && otp_data[1] == '=') { - /* read the stepping */ - cur_offset += 2; - stepping[2] = 0; - serialized_backplane_access(bus, cur_offset, - 2, (uint *)stepping, TRUE); - /* read module info */ - memset(module_info, 0, 64); - cur_offset += 2; - serialized_backplane_access(bus, cur_offset, - 2, (uint *)otp_data, TRUE); - while (otp_data[0] != OTP_CIS_REGION_END_TUPLE_ID && - otp_data[1] != OTP_CIS_REGION_END_TUPLE_ID) { - memcpy(&module_info[i], otp_data, 2); - i += 2; - cur_offset += 2; - serialized_backplane_access(bus, cur_offset, - 2, (uint *)otp_data, TRUE); - } - /* replace any null characters found at the beginning - * and middle of the string - */ - for (j = 0; j < i; ++j) { - if (module_info[j] == 0) - module_info[j] = ' '; - } - DHD_ERROR(("OTP chip_info: s=%c%c; module info: %s \n", - stepping[0], stepping[1], module_info)); - /* extract the module name, revision and vendor - * information from the module info string - */ - for (i = 0; module_info[i]; i++) { - if (module_info[i] == 'M' && module_info[i + 1] == '=') { - memcpy(module_name, &module_info[i + 2], 4); - module_name[4] = 0; - i += 5; - } - else if (module_info[i] == 'm' && module_info[i + 1] == '=') { - memcpy(module_rev, &module_info[i + 2], 3); - module_rev[3] = 0; - i += 4; - } - else if (module_info[i] == 'V' && module_info[i + 1] == '=') { - module_vendor = module_info[i + 2]; - i += 2; - } - } - - /* construct the complete file path to nvram as per - * olympic conventions - */ - strncpy(progname, program, sizeof(progname)); - sprintf(nv_path, "P-%s_M-%s_V-%c__m-%s.txt", progname, module_name, - module_vendor, module_rev); - DHD_ERROR(("%s NVRAM path = %s\n", __FUNCTION__, nv_path)); - status = BCME_OK; - } - - /* restore back the registers to their previous values */ - if 
(srom_present) { - serialized_backplane_access(bus, SPROM_CTRL_REG_ADDR, 4, &sprom_ctrl, - FALSE); - } - - if (otp_present) { - serialized_backplane_access(bus, PMU_MINRESMASK_REG_ADDR, 4, - &minres_mask, FALSE); - serialized_backplane_access(bus, OTP_CTRL1_REG_ADDR, 4, &otp_ctrl, FALSE); - } - - } - return status; -} - /* Writes random number(s) to the TCM. FW upon initialization reads this register * to fetch the random number, and uses it to randomize heap address space layout. */ @@ -10094,17 +11009,28 @@ dhd_pcie_intr_count_dump(dhd_pub_t *dhd) DHD_ERROR(("oob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT"\n", dhdpcie_get_oob_irq_num(bus), GET_SEC_USEC(bus->last_oob_irq_time))); + DHD_ERROR(("last_oob_irq_enable_time="SEC_USEC_FMT + " last_oob_irq_disable_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(bus->last_oob_irq_enable_time), + GET_SEC_USEC(bus->last_oob_irq_disable_time))); + DHD_ERROR(("oob_irq_enabled=%d oob_gpio_level=%d\n", + dhdpcie_get_oob_irq_status(bus), + dhdpcie_get_oob_irq_level())); #endif /* BCMPCIE_OOB_HOST_WAKE */ - DHD_ERROR(("dpc_return_busdown_count=%lu\n", - dhd->bus->dpc_return_busdown_count)); + DHD_ERROR(("dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n", + bus->dpc_return_busdown_count, bus->non_ours_irq_count)); - current_time = OSL_SYSUPTIME_US(); + current_time = OSL_LOCALTIME_NS(); DHD_ERROR(("\ncurrent_time="SEC_USEC_FMT"\n", GET_SEC_USEC(current_time))); DHD_ERROR(("isr_entry_time="SEC_USEC_FMT " isr_exit_time="SEC_USEC_FMT"\n", GET_SEC_USEC(bus->isr_entry_time), GET_SEC_USEC(bus->isr_exit_time))); + DHD_ERROR(("dpc_sched_time="SEC_USEC_FMT + " last_non_ours_irq_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(bus->dpc_sched_time), + GET_SEC_USEC(bus->last_non_ours_irq_time))); DHD_ERROR(("dpc_entry_time="SEC_USEC_FMT " last_process_ctrlbuf_time="SEC_USEC_FMT"\n", GET_SEC_USEC(bus->dpc_entry_time), @@ -10114,13 +11040,17 @@ dhd_pcie_intr_count_dump(dhd_pub_t *dhd) GET_SEC_USEC(bus->last_process_flowring_time), 
GET_SEC_USEC(bus->last_process_txcpl_time))); DHD_ERROR(("last_process_rxcpl_time="SEC_USEC_FMT - " last_process_infocpl_time="SEC_USEC_FMT"\n", + " last_process_infocpl_time="SEC_USEC_FMT + " last_process_edl_time="SEC_USEC_FMT"\n", GET_SEC_USEC(bus->last_process_rxcpl_time), - GET_SEC_USEC(bus->last_process_infocpl_time))); + GET_SEC_USEC(bus->last_process_infocpl_time), + GET_SEC_USEC(bus->last_process_edl_time))); DHD_ERROR(("dpc_exit_time="SEC_USEC_FMT " resched_dpc_time="SEC_USEC_FMT"\n", GET_SEC_USEC(bus->dpc_exit_time), GET_SEC_USEC(bus->resched_dpc_time))); + DHD_ERROR(("last_d3_inform_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(bus->last_d3_inform_time))); DHD_ERROR(("\nlast_suspend_start_time="SEC_USEC_FMT " last_suspend_end_time="SEC_USEC_FMT"\n", @@ -10133,10 +11063,10 @@ dhd_pcie_intr_count_dump(dhd_pub_t *dhd) #if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE) DHD_ERROR(("logtrace_thread_entry_time="SEC_USEC_FMT - "logtrace_thread_sem_down_time="SEC_USEC_FMT - "logtrace_thread_flush_time="SEC_USEC_FMT - "logtrace_thread_unexpected_break_time="SEC_USEC_FMT - " logtrace_thread_complete_time="SEC_USEC_FMT"\n", + " logtrace_thread_sem_down_time="SEC_USEC_FMT + "\nlogtrace_thread_flush_time="SEC_USEC_FMT + " logtrace_thread_unexpected_break_time="SEC_USEC_FMT + "\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n", GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time), GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time), GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time), @@ -10151,6 +11081,147 @@ dhd_bus_intr_count_dump(dhd_pub_t *dhd) dhd_pcie_intr_count_dump(dhd); } +int +dhd_pcie_dump_wrapper_regs(dhd_pub_t *dhd) +{ + uint32 save_idx, val; + si_t *sih = dhd->bus->sih; + uint32 oob_base, oob_base1; + uint32 wrapper_dump_list[] = { + AI_OOBSELOUTA30, AI_OOBSELOUTA74, AI_OOBSELOUTB30, AI_OOBSELOUTB74, + AI_OOBSELOUTC30, AI_OOBSELOUTC74, AI_OOBSELOUTD30, AI_OOBSELOUTD74, + AI_RESETSTATUS, AI_RESETCTRL, + AI_ITIPOOBA, AI_ITIPOOBB, AI_ITIPOOBC, AI_ITIPOOBD, + 
AI_ITIPOOBAOUT, AI_ITIPOOBBOUT, AI_ITIPOOBCOUT, AI_ITIPOOBDOUT + }; + uint32 i; + hndoobr_reg_t *reg; + cr4regs_t *cr4regs; + ca7regs_t *ca7regs; + + save_idx = si_coreidx(sih); + + DHD_ERROR(("%s: Master wrapper Reg\n", __FUNCTION__)); + + if (si_setcore(sih, PCIE2_CORE_ID, 0) != NULL) { + for (i = 0; i < sizeof(wrapper_dump_list) / 4; i++) { + val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0); + DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val)); + } + } + + if ((cr4regs = si_setcore(sih, ARMCR4_CORE_ID, 0)) != NULL) { + DHD_ERROR(("%s: ARM CR4 wrapper Reg\n", __FUNCTION__)); + for (i = 0; i < sizeof(wrapper_dump_list) / 4; i++) { + val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0); + DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val)); + } + DHD_ERROR(("%s: ARM CR4 core Reg\n", __FUNCTION__)); + val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecontrol)); + DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corecontrol), val)); + val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecapabilities)); + DHD_ERROR(("reg:0x%x val:0x%x\n", + (uint)OFFSETOF(cr4regs_t, corecapabilities), val)); + val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corestatus)); + DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corestatus), val)); + val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmiisrst)); + DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmiisrst), val)); + val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmimask)); + DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmimask), val)); + val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, isrmask)); + DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, isrmask), val)); + val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, swintreg)); + DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, swintreg), val)); + val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, intstatus)); + DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, intstatus), val)); + val = R_REG(dhd->osh, 
ARM_CR4_REG(cr4regs, cyclecnt)); + DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, cyclecnt), val)); + val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, inttimer)); + DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, inttimer), val)); + val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, clk_ctl_st)); + DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, clk_ctl_st), val)); + val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, powerctl)); + DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, powerctl), val)); + } + + if ((ca7regs = si_setcore(sih, ARMCA7_CORE_ID, 0)) != NULL) { + DHD_ERROR(("%s: ARM CA7 core Reg\n", __FUNCTION__)); + val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecontrol)); + DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corecontrol), val)); + val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecapabilities)); + DHD_ERROR(("reg:0x%x val:0x%x\n", + (uint)OFFSETOF(ca7regs_t, corecapabilities), val)); + val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corestatus)); + DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corestatus), val)); + val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, tracecontrol)); + DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, tracecontrol), val)); + val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, clk_ctl_st)); + DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, clk_ctl_st), val)); + val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, powerctl)); + DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, powerctl), val)); + } + + DHD_ERROR(("%s: OOBR Reg\n", __FUNCTION__)); + + oob_base = si_oobr_baseaddr(sih, FALSE); + oob_base1 = si_oobr_baseaddr(sih, TRUE); + if (oob_base) { + dhd_sbreg_op(dhd, oob_base + OOB_STATUSA, &val, TRUE); + dhd_sbreg_op(dhd, oob_base + OOB_STATUSB, &val, TRUE); + dhd_sbreg_op(dhd, oob_base + OOB_STATUSC, &val, TRUE); + dhd_sbreg_op(dhd, oob_base + OOB_STATUSD, &val, TRUE); + } else if ((reg = si_setcore(sih, HND_OOBR_CORE_ID, 0)) != NULL) { + val = R_REG(dhd->osh, 
®->intstatus[0]); + DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val)); + val = R_REG(dhd->osh, ®->intstatus[1]); + DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val)); + val = R_REG(dhd->osh, ®->intstatus[2]); + DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val)); + val = R_REG(dhd->osh, ®->intstatus[3]); + DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val)); + } + + if (oob_base1) { + DHD_ERROR(("%s: Second OOBR Reg\n", __FUNCTION__)); + + dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSA, &val, TRUE); + dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSB, &val, TRUE); + dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSC, &val, TRUE); + dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSD, &val, TRUE); + } + + si_setcoreidx(dhd->bus->sih, save_idx); + + return 0; +} + +static void +dhdpcie_hw_war_regdump(dhd_bus_t *bus) +{ + uint32 save_idx, val; + volatile uint32 *reg; + + save_idx = si_coreidx(bus->sih); + if ((reg = si_setcore(bus->sih, CC_CORE_ID, 0)) != NULL) { + val = R_REG(bus->osh, reg + REG_WORK_AROUND); + DHD_ERROR(("CC HW_WAR :0x%x\n", val)); + } + + if ((reg = si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) != NULL) { + val = R_REG(bus->osh, reg + REG_WORK_AROUND); + DHD_ERROR(("ARM HW_WAR:0x%x\n", val)); + } + + if ((reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0)) != NULL) { + val = R_REG(bus->osh, reg + REG_WORK_AROUND); + DHD_ERROR(("PCIE HW_WAR :0x%x\n", val)); + } + si_setcoreidx(bus->sih, save_idx); + + val = PMU_REG_NEW(bus->sih, min_res_mask, 0, 0); + DHD_ERROR(("MINRESMASK :0x%x\n", val)); +} + int dhd_pcie_dma_info_dump(dhd_pub_t *dhd) { @@ -10207,30 +11278,55 @@ dhd_pcie_dma_info_dump(dhd_pub_t *dhd) return 0; } -int -dhd_pcie_debug_info_dump(dhd_pub_t *dhd) +bool +dhd_pcie_dump_int_regs(dhd_pub_t *dhd) { uint32 intstatus = 0; uint32 intmask = 0; uint32 d2h_db0 = 0; uint32 d2h_mb_data = 0; - int host_irq_disabled; - DHD_ERROR(("bus->bus_low_power_state = %d\n", dhd->bus->bus_low_power_state)); - host_irq_disabled = dhdpcie_irq_disabled(dhd->bus); - DHD_ERROR(("host pcie_irq disabled = %d\n", 
host_irq_disabled)); - dhd_print_tasklet_status(dhd); - dhd_pcie_intr_count_dump(dhd); + DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n")); + intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + dhd->bus->pcie_mailbox_int, 0, 0); + if (intstatus == (uint32)-1) { + DHD_ERROR(("intstatus=0x%x \n", intstatus)); + return FALSE; + } - DHD_ERROR(("\n ------- DUMPING PCIE EP Resouce Info ------- \r\n")); - dhdpcie_dump_resource(dhd->bus); + intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + dhd->bus->pcie_mailbox_mask, 0, 0); + if (intmask == (uint32) -1) { + DHD_ERROR(("intstatus=0x%x intmask=0x%x \n", intstatus, intmask)); + return FALSE; + } + + d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCID2H_MailBox, 0, 0); + if (d2h_db0 == (uint32)-1) { + DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n", + intstatus, intmask, d2h_db0)); + return FALSE; + } + DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n", + intstatus, intmask, d2h_db0)); + dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0); + DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data, + dhd->bus->def_intmask)); + + return TRUE; +} + +void +dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd) +{ DHD_ERROR(("\n ------- DUMPING PCIE RC config space Registers ------- \r\n")); - DHD_ERROR(("Pcie RC Error Status Val=0x%x\n", + DHD_ERROR(("Pcie RC Uncorrectable Error Status Val=0x%x\n", dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR, PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0))); #ifdef EXTENDED_PCIE_DEBUG_DUMP - DHD_ERROR(("hdrlog0 =0x%x hdrlog1 =0x%x hdrlog2 =0x%x hdrlog3 =0x%x\n", + DHD_ERROR(("hdrlog0 =0x%08x hdrlog1 =0x%08x hdrlog2 =0x%08x hdrlog3 =0x%08x\n", dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR, PCIE_EXTCAP_ERR_HEADER_LOG_0, TRUE, FALSE, 0), dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR, @@ -10240,18 +11336,37 @@ dhd_pcie_debug_info_dump(dhd_pub_t *dhd) dhdpcie_rc_access_cap(dhd->bus, 
PCIE_EXTCAP_ID_ERR, PCIE_EXTCAP_ERR_HEADER_LOG_3, TRUE, FALSE, 0))); #endif /* EXTENDED_PCIE_DEBUG_DUMP */ +} + +int +dhd_pcie_debug_info_dump(dhd_pub_t *dhd) +{ + int host_irq_disabled; + + DHD_ERROR(("bus->bus_low_power_state = %d\n", dhd->bus->bus_low_power_state)); + host_irq_disabled = dhdpcie_irq_disabled(dhd->bus); + DHD_ERROR(("host pcie_irq disabled = %d\n", host_irq_disabled)); + dhd_print_tasklet_status(dhd); + dhd_pcie_intr_count_dump(dhd); + + DHD_ERROR(("\n ------- DUMPING PCIE EP Resouce Info ------- \r\n")); + dhdpcie_dump_resource(dhd->bus); + + dhd_pcie_dump_rc_conf_space_cap(dhd); DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n", dhd_debug_get_rc_linkcap(dhd->bus))); - DHD_ERROR(("\n ------- DUMPING PCIE EP config space Registers ------- \r\n")); - DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x BaseAddress1(0x%x)=0x%x\n", + DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x BaseAddress1(0x%x)=0x%x " + "PCIE_CFG_PMCSR(0x%x)=0x%x\n", PCIECFGREG_STATUS_CMD, dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_STATUS_CMD, sizeof(uint32)), PCIECFGREG_BASEADDR0, dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)), PCIECFGREG_BASEADDR1, - dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32)))); + dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32)), + PCIE_CFG_PMCSR, + dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_PMCSR, sizeof(uint32)))); DHD_ERROR(("LinkCtl(0x%x)=0x%x DeviceStatusControl2(0x%x)=0x%x " "L1SSControl(0x%x)=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_LINK_STATUS_CTRL, @@ -10261,66 +11376,38 @@ dhd_pcie_debug_info_dump(dhd_pub_t *dhd) dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL1, sizeof(uint32)))); #ifdef EXTENDED_PCIE_DEBUG_DUMP - DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x " - "L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL, - dhd_pcie_config_read(dhd->bus->osh, 
PCIECFGREG_DEV_STATUS_CTRL, - sizeof(uint32)), PCIE_CFG_SUBSYSTEM_CONTROL, - dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, - sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL2, - dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL2, - sizeof(uint32)))); + DHD_ERROR(("Pcie EP Uncorrectable Error Status Val=0x%x\n", + dhdpcie_ep_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR, + PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0))); + DHD_ERROR(("hdrlog0(0x%x)=0x%08x hdrlog1(0x%x)=0x%08x hdrlog2(0x%x)=0x%08x " + "hdrlog3(0x%x)=0x%08x\n", PCI_TLP_HDR_LOG1, + dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG1, sizeof(uint32)), + PCI_TLP_HDR_LOG2, + dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG2, sizeof(uint32)), + PCI_TLP_HDR_LOG3, + dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG3, sizeof(uint32)), + PCI_TLP_HDR_LOG4, + dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG4, sizeof(uint32)))); + if (dhd->bus->sih->buscorerev >= 24) { + DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x " + "L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL, + dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_DEV_STATUS_CTRL, + sizeof(uint32)), PCIE_CFG_SUBSYSTEM_CONTROL, + dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, + sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL2, + dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL2, + sizeof(uint32)))); + dhd_bus_dump_dar_registers(dhd->bus); + } #endif /* EXTENDED_PCIE_DEBUG_DUMP */ - intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, - dhd->bus->pcie_mailbox_int, 0, 0); - - intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, - dhd->bus->pcie_mailbox_mask, 0, 0); - d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, - PCID2H_MailBox, 0, 0); - DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n")); - DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n", - intstatus, intmask, d2h_db0)); - dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, 
D2H_MB_DATA, 0); - DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data, - dhd->bus->def_intmask)); -#ifdef EXTENDED_PCIE_DEBUG_DUMP - DHD_ERROR(("\n ------- DUMPING PCIE DAR Registers ------- \r\n")); - DHD_ERROR(("clkctl(0x%x)=0x%x pwrctl(0x%x)=0x%x H2D_DB0(0x%x)=0x%x\n", - PCIDARClkCtl(dhd->bus->sih->buscorerev), - si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, - PCIDARClkCtl(dhd->bus->sih->buscorerev), 0, 0), - PCIDARPwrCtl(dhd->bus->sih->buscorerev), - si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, - PCIDARPwrCtl(dhd->bus->sih->buscorerev), 0, 0), - PCIDARH2D_DB0(dhd->bus->sih->buscorerev), - si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, - PCIDARH2D_DB0(dhd->bus->sih->buscorerev), 0, 0))); -#endif /* EXTENDED_PCIE_DEBUG_DUMP */ - if (intstatus == (uint32)-1) { - DHD_ERROR(("Skip dumping the PCIe Core registers due to invalid intstatus\n")); + if (dhd->bus->is_linkdown) { + DHD_ERROR(("Skip dumping the PCIe Core registers. link may be DOWN\n")); return 0; } DHD_ERROR(("\n ------- DUMPING PCIE core Registers ------- \r\n")); -#ifdef EXTENDED_PCIE_DEBUG_DUMP - DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x\n", - PCIDARErrlog(dhd->bus->sih->buscorerev), - si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, - PCIDARErrlog(dhd->bus->sih->buscorerev), 0, 0), - PCIDARErrlog_Addr(dhd->bus->sih->buscorerev), - si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, - PCIDARErrlog_Addr(dhd->bus->sih->buscorerev), 0, 0))); - DHD_ERROR(("FunctionINtstatus(0x%x)=0x%x, Mailboxint(0x%x)=0x%x\n", - PCIDARFunctionIntstatus(dhd->bus->sih->buscorerev), - si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, - PCIDARFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0), - PCIDARMailboxint(dhd->bus->sih->buscorerev), - si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, - PCIDARMailboxint(dhd->bus->sih->buscorerev), 0, 0))); -#endif /* EXTENDED_PCIE_DEBUG_DUMP */ - DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x " 
"ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0, dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0), @@ -10332,54 +11419,68 @@ dhd_pcie_debug_info_dump(dhd_pub_t *dhd) dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3))); #ifdef EXTENDED_PCIE_DEBUG_DUMP - DHD_ERROR(("ltssm_hist_0(0x%x)=0x%x ltssm_hist_1(0x%x)=0x%x ltssm_hist_2(0x%x)=0x%x " - "ltssm_hist_3(0x%x)=0x%x\n", PCIECFGREG_PHY_LTSSM_HIST_0, - dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_0), - PCIECFGREG_PHY_LTSSM_HIST_1, - dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_1), - PCIECFGREG_PHY_LTSSM_HIST_2, - dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_2), - PCIECFGREG_PHY_LTSSM_HIST_3, - dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_3))); - DHD_ERROR(("clkctl(0x%x)=0x%x pwrctl(0x%x)=0x%x H2D_DB0(0x%x)=0x%x\n", - PCIE_CLK_CTRL, - si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIE_CLK_CTRL, 0, 0), - PCIE_PWR_CTRL, - si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIE_PWR_CTRL, 0, 0), - PCIH2D_MailBox, - si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIH2D_MailBox, 0, 0))); - DHD_ERROR(("trefup(0x%x)=0x%x trefup_ext(0x%x)=0x%x\n", - PCIECFGREG_TREFUP, - dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP), - PCIECFGREG_TREFUP_EXT, - dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP_EXT))); - DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x Function_Intstatus(0x%x)=0x%x\n", - PCIE_CORE_REG_ERRLOG, - si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIE_CORE_REG_ERRLOG, 0, 0), - PCIE_CORE_REG_ERR_ADDR, - si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, - PCIE_CORE_REG_ERR_ADDR, 0, 0), - PCIFunctionIntstatus(dhd->bus->sih->buscorerev), - si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, - PCIFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0))); - DHD_ERROR(("err_hdrlog1(0x%x)=0x%x err_hdrlog2(0x%x)=0x%x err_hdrlog3(0x%x)=0x%x " - "err_hdrlog4(0x%x)=0x%x\n", - 
(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), - si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, - OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), 0, 0), - (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), - si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, - OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), 0, 0), - (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), - si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, - OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), 0, 0), - (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), - si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, - OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), 0, 0))); - DHD_ERROR(("err_code(0x%x)=0x%x\n", - (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), - si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, - OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), 0, 0))); + if (dhd->bus->sih->buscorerev >= 24) { + + DHD_ERROR(("ltssm_hist_0(0x%x)=0x%x ltssm_hist_1(0x%x)=0x%x " + "ltssm_hist_2(0x%x)=0x%x " + "ltssm_hist_3(0x%x)=0x%x\n", PCIECFGREG_PHY_LTSSM_HIST_0, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_0), + PCIECFGREG_PHY_LTSSM_HIST_1, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_1), + PCIECFGREG_PHY_LTSSM_HIST_2, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_2), + PCIECFGREG_PHY_LTSSM_HIST_3, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_3))); + + DHD_ERROR(("trefup(0x%x)=0x%x trefup_ext(0x%x)=0x%x\n", + PCIECFGREG_TREFUP, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP), + PCIECFGREG_TREFUP_EXT, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP_EXT))); + DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x " + "Function_Intstatus(0x%x)=0x%x " + "Function_Intmask(0x%x)=0x%x Power_Intstatus(0x%x)=0x%x " + "Power_Intmask(0x%x)=0x%x\n", + PCIE_CORE_REG_ERRLOG, + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCIE_CORE_REG_ERRLOG, 0, 0), + 
PCIE_CORE_REG_ERR_ADDR, + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCIE_CORE_REG_ERR_ADDR, 0, 0), + PCIFunctionIntstatus(dhd->bus->sih->buscorerev), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCIFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0), + PCIFunctionIntmask(dhd->bus->sih->buscorerev), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCIFunctionIntmask(dhd->bus->sih->buscorerev), 0, 0), + PCIPowerIntstatus(dhd->bus->sih->buscorerev), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCIPowerIntstatus(dhd->bus->sih->buscorerev), 0, 0), + PCIPowerIntmask(dhd->bus->sih->buscorerev), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCIPowerIntmask(dhd->bus->sih->buscorerev), 0, 0))); + DHD_ERROR(("err_hdrlog1(0x%x)=0x%x err_hdrlog2(0x%x)=0x%x " + "err_hdrlog3(0x%x)=0x%x err_hdrlog4(0x%x)=0x%x\n", + (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), 0, 0), + (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), 0, 0), + (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), 0, 0), + (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), 0, 0))); + DHD_ERROR(("err_code(0x%x)=0x%x\n", + (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), 0, 0))); + + dhd_pcie_dump_wrapper_regs(dhd); + dhdpcie_hw_war_regdump(dhd->bus); + } #endif /* EXTENDED_PCIE_DEBUG_DUMP */ dhd_pcie_dma_info_dump(dhd); @@ -10392,3 +11493,112 @@ dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus) { return 
bus->force_bt_quiesce; } + +#ifdef DHD_HP2P +uint16 +dhd_bus_get_hp2p_ring_max_size(struct dhd_bus *bus, bool tx) +{ + if (tx) + return bus->hp2p_txcpl_max_items; + else + return bus->hp2p_rxcpl_max_items; +} + +static uint16 +dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val) +{ + if (tx) + bus->hp2p_txcpl_max_items = val; + else + bus->hp2p_rxcpl_max_items = val; + return val; +} +#endif /* DHD_HP2P */ + +static bool +dhd_bus_tcm_test(struct dhd_bus *bus) +{ + int ret = 0; + int size; /* Full mem size */ + int start; /* Start address */ + int read_size = 0; /* Read size of each iteration */ + int num = 0; + uint8 *read_buf, *write_buf; + uint8 init_val[NUM_PATTERNS] = { + 0xFFu, /* 11111111 */ + 0x00u, /* 00000000 */ + }; + + if (!bus) { + DHD_ERROR(("%s: bus is NULL !\n", __FUNCTION__)); + return FALSE; + } + + read_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK); + + if (!read_buf) { + DHD_ERROR(("%s: MALLOC of read_buf failed\n", __FUNCTION__)); + return FALSE; + } + + write_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK); + + if (!write_buf) { + MFREE(bus->dhd->osh, read_buf, MEMBLOCK); + DHD_ERROR(("%s: MALLOC of write_buf failed\n", __FUNCTION__)); + return FALSE; + } + + DHD_ERROR(("%s: start %x, size: %x\n", __FUNCTION__, bus->dongle_ram_base, bus->ramsize)); + DHD_ERROR(("%s: memblock size %d, #pattern %d\n", __FUNCTION__, MEMBLOCK, NUM_PATTERNS)); + + while (num < NUM_PATTERNS) { + start = bus->dongle_ram_base; + /* Get full mem size */ + size = bus->ramsize; + + memset(write_buf, init_val[num], MEMBLOCK); + while (size > 0) { + read_size = MIN(MEMBLOCK, size); + memset(read_buf, 0, read_size); + + /* Write */ + if ((ret = dhdpcie_bus_membytes(bus, TRUE, start, write_buf, read_size))) { + DHD_ERROR(("%s: Write Error membytes %d\n", __FUNCTION__, ret)); + MFREE(bus->dhd->osh, read_buf, MEMBLOCK); + MFREE(bus->dhd->osh, write_buf, MEMBLOCK); + return FALSE; + } + + /* Read */ + if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, read_buf, read_size))) 
{ + DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__, ret)); + MFREE(bus->dhd->osh, read_buf, MEMBLOCK); + MFREE(bus->dhd->osh, write_buf, MEMBLOCK); + return FALSE; + } + + /* Compare */ + if (memcmp(read_buf, write_buf, read_size)) { + DHD_ERROR(("%s: Mismatch at %x, iter : %d\n", + __FUNCTION__, start, num)); + prhex("Readbuf", read_buf, read_size); + prhex("Writebuf", write_buf, read_size); + MFREE(bus->dhd->osh, read_buf, MEMBLOCK); + MFREE(bus->dhd->osh, write_buf, MEMBLOCK); + return FALSE; + } + + /* Decrement size and increment start address */ + size -= read_size; + start += read_size; + } + num++; + } + + MFREE(bus->dhd->osh, read_buf, MEMBLOCK); + MFREE(bus->dhd->osh, write_buf, MEMBLOCK); + + DHD_ERROR(("%s: Success iter : %d\n", __FUNCTION__, num)); + return TRUE; +} diff --git a/bcmdhd.100.10.315.x/dhd_pcie.h b/bcmdhd.100.10.315.x/dhd_pcie.h index 27acd1c..88964e3 100644 --- a/bcmdhd.100.10.315.x/dhd_pcie.h +++ b/bcmdhd.100.10.315.x/dhd_pcie.h @@ -1,7 +1,7 @@ /* * Linux DHD Bus Module for PCIE * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: dhd_pcie.h 771838 2018-07-12 03:46:37Z $ + * $Id: dhd_pcie.h 816392 2019-04-24 14:39:02Z $ */ #ifndef dhd_pcie_h @@ -59,6 +59,9 @@ #define IDLE_FLOW_RING_TIMEOUT 5000 #endif /* IDLE_TX_FLOW_MGMT */ +/* HWA enabled and inited */ +#define HWA_ACTIVE(dhd) (((dhd)->hwa_enable) && ((dhd)->hwa_inited)) + /* implicit DMA for h2d wr and d2h rd indice from Host memory to TCM */ #define IDMA_ENAB(dhd) ((dhd)->idma_enable) #define IDMA_ACTIVE(dhd) (((dhd)->idma_enable) && ((dhd)->idma_inited)) @@ -120,6 +123,17 @@ typedef struct ring_sh_info { #define OOB_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_OOB) #define NO_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_NONE) +#define PCIE_RELOAD_WAR_ENAB(buscorerev) \ + ((buscorerev == 66) || (buscorerev == 67) || (buscorerev == 68) || (buscorerev == 70)) + +/* + * HW JIRA - CRWLPCIEGEN2-672 + * Producer Index Feature which is used by F1 gets reset on F0 FLR + * fixed in REV68 + */ +#define PCIE_ENUM_RESET_WAR_ENAB(buscorerev) \ + ((buscorerev == 66) || (buscorerev == 67)) + struct dhd_bus; struct dhd_pcie_rev { @@ -184,6 +198,7 @@ typedef struct dhd_bus { sbpcieregs_t *reg; /* Registers for PCIE core */ uint armrev; /* CPU core revision */ + uint coreid; /* CPU core id */ uint ramrev; /* SOCRAM core revision */ uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */ uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */ @@ -285,6 +300,7 @@ typedef struct dhd_bus { bool use_mailbox; bool use_d0_inform; void *bus_lock; + void *backplane_access_lock; enum dhd_bus_low_power_state bus_low_power_state; uint32 hostready_count; /* Number of hostready issued */ #if defined(BCMPCIE_OOB_HOST_WAKE) @@ -296,17 +312,22 @@ typedef struct dhd_bus { ulong isr_intr_disable_count; ulong suspend_intr_disable_count; ulong dpc_return_busdown_count; + ulong 
non_ours_irq_count; #ifdef BCMPCIE_OOB_HOST_WAKE ulong oob_intr_count; ulong oob_intr_enable_count; ulong oob_intr_disable_count; uint64 last_oob_irq_time; + uint64 last_oob_irq_enable_time; + uint64 last_oob_irq_disable_time; #endif /* BCMPCIE_OOB_HOST_WAKE */ uint64 isr_entry_time; uint64 isr_exit_time; + uint64 dpc_sched_time; uint64 dpc_entry_time; uint64 dpc_exit_time; uint64 resched_dpc_time; + uint64 last_d3_inform_time; uint64 last_process_ctrlbuf_time; uint64 last_process_flowring_time; uint64 last_process_txcpl_time; @@ -317,6 +338,8 @@ typedef struct dhd_bus { uint64 last_suspend_end_time; uint64 last_resume_start_time; uint64 last_resume_end_time; + uint64 last_non_ours_irq_time; + uint8 hwa_enab_bmap; bool idma_enabled; bool ifrm_enabled; bool dar_enabled; @@ -326,6 +349,7 @@ typedef struct dhd_bus { uint8 dma_chan; bool cto_enable; /* enable PCIE CTO Prevention and recovery */ uint32 cto_threshold; /* PCIE CTO timeout threshold */ + bool cto_triggered; /* CTO is triggered */ int pwr_req_ref; bool flr_force_fail; /* user intends to simulate flr force fail */ bool intr_enabled; /* ready to receive interrupts from dongle */ @@ -335,6 +359,10 @@ typedef struct dhd_bus { #endif /* DHD_H2D_LOG_TIME_SYNC */ bool rc_ep_aspm_cap; /* RC and EP ASPM capable */ bool rc_ep_l1ss_cap; /* EC and EP L1SS capable */ + uint16 hp2p_txcpl_max_items; + uint16 hp2p_rxcpl_max_items; + /* PCIE coherent status */ + uint32 coherent_state; } dhd_bus_t; #ifdef DHD_MSI_SUPPORT @@ -365,6 +393,7 @@ extern int32 dhdpcie_bus_isr(struct dhd_bus *bus); extern void dhdpcie_free_irq(dhd_bus_t *bus); extern void dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value); extern void dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake); +extern void dhdpcie_dongle_reset(dhd_bus_t *bus); #ifdef DHD_PCIE_NATIVE_RUNTIMEPM extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint); #else @@ -382,6 +411,9 @@ extern uint8 dhdpcie_clkreq(osl_t *osh, 
uint32 mask, uint32 val); extern int dhdpcie_disable_irq(dhd_bus_t *bus); extern int dhdpcie_disable_irq_nosync(dhd_bus_t *bus); extern int dhdpcie_enable_irq(dhd_bus_t *bus); + +extern void dhd_bus_dump_dar_registers(struct dhd_bus *bus); + extern uint32 dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset); extern uint32 dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write, uint32 writeval); @@ -395,6 +427,17 @@ extern int dhdpcie_alloc_resource(dhd_bus_t *bus); extern void dhdpcie_free_resource(dhd_bus_t *bus); extern void dhdpcie_dump_resource(dhd_bus_t *bus); extern int dhdpcie_bus_request_irq(struct dhd_bus *bus); +void dhdpcie_os_setbar1win(dhd_bus_t *bus, uint32 addr); +void dhdpcie_os_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data); +uint8 dhdpcie_os_rtcm8(dhd_bus_t *bus, ulong offset); +void dhdpcie_os_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data); +uint16 dhdpcie_os_rtcm16(dhd_bus_t *bus, ulong offset); +void dhdpcie_os_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data); +uint32 dhdpcie_os_rtcm32(dhd_bus_t *bus, ulong offset); +#ifdef DHD_SUPPORT_64BIT +void dhdpcie_os_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data); +uint64 dhdpcie_os_rtcm64(dhd_bus_t *bus, ulong offset); +#endif // endif extern int dhdpcie_enable_device(dhd_bus_t *bus); @@ -402,15 +445,14 @@ extern int dhdpcie_enable_device(dhd_bus_t *bus); extern int dhdpcie_oob_intr_register(dhd_bus_t *bus); extern void dhdpcie_oob_intr_unregister(dhd_bus_t *bus); extern void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable); -extern int dhdpcie_get_oob_irq_num(dhd_bus_t *bus); +extern int dhdpcie_get_oob_irq_num(struct dhd_bus *bus); +extern int dhdpcie_get_oob_irq_status(struct dhd_bus *bus); +extern int dhdpcie_get_oob_irq_level(void); #endif /* BCMPCIE_OOB_HOST_WAKE */ #if defined(CONFIG_ARCH_EXYNOS) #define SAMSUNG_PCIE_VENDOR_ID 0x144d -#if defined(CONFIG_MACH_UNIVERSAL5433) -#define SAMSUNG_PCIE_DEVICE_ID 0xa5e3 -#define SAMSUNG_PCIE_CH_NUM -#elif 
defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS7420) +#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS7420) #define SAMSUNG_PCIE_DEVICE_ID 0xa575 #define SAMSUNG_PCIE_CH_NUM 1 #elif defined(CONFIG_SOC_EXYNOS8890) @@ -475,14 +517,12 @@ extern int dhdpcie_get_oob_irq_num(dhd_bus_t *bus); #define PCIE_RC_DEVICE_ID HIKEY_PCIE_DEVICE_ID #endif /* CONFIG_ARCH_EXYNOS */ +#define DHD_REGULAR_RING 0 +#define DHD_HP2P_RING 1 + #ifdef USE_EXYNOS_PCIE_RC_PMPATCH -#ifdef CONFIG_MACH_UNIVERSAL5433 -extern int exynos_pcie_pm_suspend(void); -extern int exynos_pcie_pm_resume(void); -#else extern int exynos_pcie_pm_suspend(int ch_num); extern int exynos_pcie_pm_resume(int ch_num); -#endif /* CONFIG_MACH_UNIVERSAL5433 */ #endif /* USE_EXYNOS_PCIE_RC_PMPATCH */ #ifdef CONFIG_ARCH_TEGRA @@ -519,12 +559,16 @@ extern int dhdpcie_irq_disabled(struct dhd_bus *bus); static INLINE bool dhdpcie_is_arm_halted(struct dhd_bus *bus) {return TRUE;} static INLINE int dhd_os_wifi_platform_set_power(uint32 value) {return BCME_OK; } +static INLINE void +dhdpcie_dongle_flr_or_pwr_toggle(dhd_bus_t *bus) +{ return; } int dhdpcie_config_check(dhd_bus_t *bus); int dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr); int dhdpcie_config_save(dhd_bus_t *bus); int dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state); +extern bool dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t *bus); extern bool dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus); extern bool dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus); extern bool dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus); @@ -544,9 +588,14 @@ dhd_pcie_corereg_read(si_t *sih, uint val) return si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), 0, 0); } -extern int dhdpcie_get_nvpath_otp(dhd_bus_t *bus, char *program, char *nv_path); +extern int dhdpcie_get_fwpath_otp(dhd_bus_t *bus, char *fw_path, char *nv_path, + char *clm_path, char *txcap_path); extern int dhd_pcie_debug_info_dump(dhd_pub_t *dhd); extern 
void dhd_pcie_intr_count_dump(dhd_pub_t *dhd); extern void dhdpcie_bus_clear_intstatus(dhd_bus_t *bus); +#ifdef DHD_HP2P +extern uint16 dhd_bus_get_hp2p_ring_max_size(dhd_bus_t *bus, bool tx); +#endif // endif + #endif /* dhd_pcie_h */ diff --git a/bcmdhd.100.10.315.x/dhd_pcie_linux.c b/bcmdhd.100.10.315.x/dhd_pcie_linux.c index 8885b09..08d3776 100644 --- a/bcmdhd.100.10.315.x/dhd_pcie_linux.c +++ b/bcmdhd.100.10.315.x/dhd_pcie_linux.c @@ -1,7 +1,7 @@ /* * Linux DHD Bus Module for PCIE * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: dhd_pcie_linux.c 771838 2018-07-12 03:46:37Z $ + * $Id: dhd_pcie_linux.c 821650 2019-05-24 10:41:54Z $ */ /* include files */ @@ -85,23 +85,17 @@ extern uint32 tpoweron_scale; #endif /* FORCE_TPOWERON */ /* user defined data structures */ -typedef struct dhd_pc_res { - uint32 bar0_size; - void* bar0_addr; - uint32 bar1_size; - void* bar1_addr; -} pci_config_res, *pPci_config_res; - typedef bool (*dhdpcie_cb_fn_t)(void *); typedef struct dhdpcie_info { dhd_bus_t *bus; - osl_t *osh; + osl_t *osh; struct pci_dev *dev; /* pci device handle */ - volatile char *regs; /* pci device memory va */ - volatile char *tcm; /* pci device memory va */ - uint32 tcm_size; /* pci device memory size */ + volatile char *regs; /* pci device memory va */ + volatile char *tcm; /* pci device memory va */ + uint32 bar1_size; /* pci device memory size */ + uint32 curr_bar1_win; /* current PCIEBar1Window setting */ struct pcos_info *pcos_info; uint16 last_intrstatus; /* to cache intrstatus */ int irq; @@ -189,6 +183,8 @@ static int dhdpcie_pm_system_suspend_noirq(struct device * dev); static int dhdpcie_pm_system_resume_noirq(struct device * dev); #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ +static void dhdpcie_config_save_restore_coherent(dhd_bus_t *bus, 
bool state); + uint32 dhdpcie_access_cap(struct pci_dev *pdev, int cap, uint offset, bool is_ext, bool is_write, uint32 writeval); @@ -221,9 +217,6 @@ static struct pci_driver dhdpcie_driver = { id_table: dhdpcie_pci_devid, probe: dhdpcie_pci_probe, remove: dhdpcie_pci_remove, -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) - save_state: NULL, -#endif // endif #if defined(DHD_PCIE_NATIVE_RUNTIMEPM) .driver.pm = &dhd_pcie_pm_ops, #else @@ -578,12 +571,43 @@ dhd_bus_l1ss_enable_rc_ep(dhd_bus_t *bus, bool enable) } } +void +dhd_bus_aer_config(dhd_bus_t *bus) +{ + uint32 val; + + DHD_ERROR(("%s: Configure AER registers for EP\n", __FUNCTION__)); + val = dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID, + PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0); + if (val != (uint32)-1) { + val &= ~CORR_ERR_AE; + dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID, + PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE, val); + } else { + DHD_ERROR(("%s: Invalid EP's PCIE_ADV_CORR_ERR_MASK: 0x%x\n", + __FUNCTION__, val)); + } + + DHD_ERROR(("%s: Configure AER registers for RC\n", __FUNCTION__)); + val = dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID, + PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0); + if (val != (uint32)-1) { + val &= ~CORR_ERR_AE; + dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID, + PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE, val); + } else { + DHD_ERROR(("%s: Invalid RC's PCIE_ADV_CORR_ERR_MASK: 0x%x\n", + __FUNCTION__, val)); + } +} + static int dhdpcie_pci_suspend(struct pci_dev * pdev, pm_message_t state) { int ret = 0; dhdpcie_info_t *pch = pci_get_drvdata(pdev); dhd_bus_t *bus = NULL; unsigned long flags; + uint32 i = 0; if (pch) { bus = pch->bus; @@ -594,13 +618,29 @@ static int dhdpcie_pci_suspend(struct pci_dev * pdev, pm_message_t state) BCM_REFERENCE(state); - DHD_GENERAL_LOCK(bus->dhd, flags); if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) { DHD_ERROR(("%s: Bus not IDLE!! 
dhd_bus_busy_state = 0x%x\n", __FUNCTION__, bus->dhd->dhd_bus_busy_state)); - DHD_GENERAL_UNLOCK(bus->dhd, flags); - return -EBUSY; + + OSL_DELAY(1000); + /* retry till the transaction is complete */ + while (i < 100) { + OSL_DELAY(1000); + i++; + if (DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) { + DHD_ERROR(("%s: Bus enter IDLE!! after %d ms\n", + __FUNCTION__, i)); + break; + } + } + if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) { + DHD_ERROR(("%s: Bus not IDLE!! Failed after %d ms, " + "dhd_bus_busy_state = 0x%x\n", + __FUNCTION__, i, bus->dhd->dhd_bus_busy_state)); + return -EBUSY; + } } + DHD_GENERAL_LOCK(bus->dhd, flags); DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd); DHD_GENERAL_UNLOCK(bus->dhd, flags); @@ -777,19 +817,37 @@ static int dhdpcie_pm_system_resume_noirq(struct device * dev) extern void dhd_dpc_tasklet_kill(dhd_pub_t *dhdp); #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ +static void +dhdpcie_suspend_dump_cfgregs(struct dhd_bus *bus, char *suspend_state) +{ + DHD_ERROR(("%s: BaseAddress0(0x%x)=0x%x, " + "BaseAddress1(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n", + suspend_state, + PCIECFGREG_BASEADDR0, + dhd_pcie_config_read(bus->osh, + PCIECFGREG_BASEADDR0, sizeof(uint32)), + PCIECFGREG_BASEADDR1, + dhd_pcie_config_read(bus->osh, + PCIECFGREG_BASEADDR1, sizeof(uint32)), + PCIE_CFG_PMCSR, + dhd_pcie_config_read(bus->osh, + PCIE_CFG_PMCSR, sizeof(uint32)))); +} + static int dhdpcie_suspend_dev(struct pci_dev *dev) { int ret; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) dhdpcie_info_t *pch = pci_get_drvdata(dev); dhd_bus_t *bus = pch->bus; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) if (bus->is_linkdown) { DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__)); return BCME_ERROR; } #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ - DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__)); + DHD_ERROR(("%s: Enter\n", __FUNCTION__)); + dhdpcie_suspend_dump_cfgregs(bus, "BEFORE_EP_SUSPEND"); #if (LINUX_VERSION_CODE 
>= KERNEL_VERSION(3, 0, 0)) dhd_dpc_tasklet_kill(bus->dhd); #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ @@ -798,9 +856,7 @@ static int dhdpcie_suspend_dev(struct pci_dev *dev) pch->state = pci_store_saved_state(dev); #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ pci_enable_wake(dev, PCI_D0, TRUE); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) if (pci_is_enabled(dev)) -#endif // endif pci_disable_device(dev); ret = pci_set_power_state(dev, PCI_D3hot); @@ -808,7 +864,8 @@ static int dhdpcie_suspend_dev(struct pci_dev *dev) DHD_ERROR(("%s: pci_set_power_state error %d\n", __FUNCTION__, ret)); } - dev->state_saved = FALSE; +// dev->state_saved = FALSE; + dhdpcie_suspend_dump_cfgregs(bus, "AFTER_EP_SUSPEND"); return ret; } @@ -844,8 +901,8 @@ static int dhdpcie_resume_dev(struct pci_dev *dev) #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) pci_load_and_free_saved_state(dev, &pch->state); #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ - DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__)); - dev->state_saved = TRUE; + DHD_ERROR(("%s: Enter\n", __FUNCTION__)); +// dev->state_saved = TRUE; pci_restore_state(dev); #ifdef FORCE_TPOWERON if (dhdpcie_chip_req_forced_tpoweron(pch->bus)) { @@ -864,6 +921,7 @@ static int dhdpcie_resume_dev(struct pci_dev *dev) goto out; } BCM_REFERENCE(pch); + dhdpcie_suspend_dump_cfgregs(pch->bus, "AFTER_EP_RESUME"); out: return err; } @@ -910,6 +968,136 @@ static int dhdpcie_suspend_host_dev(dhd_bus_t *bus) return bcmerror; } +/** + * dhdpcie_os_setbar1win + * + * Interface function for setting bar1 window in order to allow + * os layer to be aware of current window positon. 
+ * + * @bus: dhd bus context + * @addr: new backplane windows address for BAR1 + */ +void +dhdpcie_os_setbar1win(dhd_bus_t *bus, uint32 addr) +{ + dhdpcie_info_t *pch = pci_get_drvdata(bus->dev); + + osl_pci_write_config(bus->osh, PCI_BAR1_WIN, 4, addr); + pch->curr_bar1_win = addr; +} + +/** + * dhdpcie_os_chkbpoffset + * + * Check the provided address is within the current BAR1 window, + * if not, shift the window + * + * @bus: dhd bus context + * @offset: back plane address that the caller wants to access + * + * Return: new offset for access + */ +static ulong +dhdpcie_os_chkbpoffset(dhdpcie_info_t *pch, ulong offset) +{ + /* Determine BAR1 backplane window using window size + * Window address mask should be ~(size - 1) + */ + uint32 bpwin = (uint32)(offset & ~(pch->bar1_size - 1)); + + if (bpwin != pch->curr_bar1_win) { + /* Move BAR1 window */ + dhdpcie_os_setbar1win(pch->bus, bpwin); + } + + return offset - bpwin; +} + +/** + * dhdpcie os layer tcm read/write interface + */ +void +dhdpcie_os_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data) +{ + dhdpcie_info_t *pch = pci_get_drvdata(bus->dev); + + offset = dhdpcie_os_chkbpoffset(pch, offset); + W_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset), data); +} + +uint8 +dhdpcie_os_rtcm8(dhd_bus_t *bus, ulong offset) +{ + volatile uint8 data; + dhdpcie_info_t *pch = pci_get_drvdata(bus->dev); + + offset = dhdpcie_os_chkbpoffset(pch, offset); + data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset)); + return data; +} + +void +dhdpcie_os_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data) +{ + dhdpcie_info_t *pch = pci_get_drvdata(bus->dev); + + offset = dhdpcie_os_chkbpoffset(pch, offset); + W_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset), data); +} + +uint16 +dhdpcie_os_rtcm16(dhd_bus_t *bus, ulong offset) +{ + volatile uint16 data; + dhdpcie_info_t *pch = pci_get_drvdata(bus->dev); + + offset = dhdpcie_os_chkbpoffset(pch, offset); + data = R_REG(bus->dhd->osh, (volatile uint16 
*)(bus->tcm + offset)); + return data; +} + +void +dhdpcie_os_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data) +{ + dhdpcie_info_t *pch = pci_get_drvdata(bus->dev); + + offset = dhdpcie_os_chkbpoffset(pch, offset); + W_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset), data); +} + +uint32 +dhdpcie_os_rtcm32(dhd_bus_t *bus, ulong offset) +{ + volatile uint32 data; + dhdpcie_info_t *pch = pci_get_drvdata(bus->dev); + + offset = dhdpcie_os_chkbpoffset(pch, offset); + data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset)); + return data; +} + +#ifdef DHD_SUPPORT_64BIT +void +dhdpcie_os_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data) +{ + dhdpcie_info_t *pch = pci_get_drvdata(bus->dev); + + offset = dhdpcie_os_chkbpoffset(pch, offset); + W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data); +} + +uint64 +dhdpcie_os_rtcm64(dhd_bus_t *bus, ulong offset) +{ + volatile uint64 data; + dhdpcie_info_t *pch = pci_get_drvdata(bus->dev); + + offset = dhdpcie_os_chkbpoffset(pch, offset); + data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset)); + return data; +} +#endif /* DHD_SUPPORT_64BIT */ + uint32 dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset) { @@ -1030,6 +1218,21 @@ uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus) return linkcap; } +static void dhdpcie_config_save_restore_coherent(dhd_bus_t *bus, bool state) +{ + if (bus->coreid == ARMCA7_CORE_ID) { + if (state) { + /* Sleep */ + bus->coherent_state = dhdpcie_bus_cfg_read_dword(bus, + PCIE_CFG_SUBSYSTEM_CONTROL, 4) & PCIE_BARCOHERENTACCEN_MASK; + } else { + uint32 val = (dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, + 4) & ~PCIE_BARCOHERENTACCEN_MASK) | bus->coherent_state; + dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4, val); + } + } +} + int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state) { int rc; @@ -1037,6 +1240,7 @@ int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state) struct pci_dev *dev = bus->dev; if (state) { + 
dhdpcie_config_save_restore_coherent(bus, state); #if !defined(BCMPCIE_OOB_HOST_WAKE) dhdpcie_pme_active(bus->osh, state); #endif // endif @@ -1048,30 +1252,28 @@ int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state) rc = dhdpcie_resume_host_dev(bus); if (!rc) { rc = dhdpcie_resume_dev(dev); - if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) { + if (PCIECTO_ENAB(bus)) { /* reinit CTO configuration * because cfg space got reset at D3 (PERST) */ - dhdpcie_cto_init(bus, bus->cto_enable); + dhdpcie_cto_cfg_init(bus, TRUE); } - if (bus->sih->buscorerev == 66) { + if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) { dhdpcie_ssreset_dis_enum_rst(bus); } #if !defined(BCMPCIE_OOB_HOST_WAKE) dhdpcie_pme_active(bus->osh, state); #endif // endif } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + dhdpcie_config_save_restore_coherent(bus, state); if (bus->is_linkdown) { bus->dhd->hang_reason = HANG_REASON_PCIE_RC_LINK_UP_FAIL; dhd_os_send_hang_message(bus->dhd); } -#endif // endif } return rc; } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) static int dhdpcie_device_scan(struct device *dev, void *data) { struct pci_dev *pcidev; @@ -1096,19 +1298,12 @@ static int dhdpcie_device_scan(struct device *dev, void *data) return 0; } -#endif /* LINUX_VERSION >= 2.6.0 */ int dhdpcie_bus_register(void) { int error = 0; -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) - if (!(error = pci_module_init(&dhdpcie_driver))) - return 0; - - DHD_ERROR(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error)); -#else if (!(error = pci_register_driver(&dhdpcie_driver))) { bus_for_each_dev(dhdpcie_driver.driver.bus, NULL, &error, dhdpcie_device_scan); if (!error) { @@ -1122,7 +1317,6 @@ dhdpcie_bus_register(void) pci_unregister_driver(&dhdpcie_driver); error = BCME_ERROR; } -#endif /* LINUX_VERSION < 2.6.0 */ return error; } @@ -1136,21 +1330,30 @@ dhdpcie_bus_unregister(void) int __devinit dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + 
int err = 0; DHD_MUTEX_LOCK(); if (dhdpcie_chipmatch (pdev->vendor, pdev->device)) { DHD_ERROR(("%s: chipmatch failed!!\n", __FUNCTION__)); - DHD_MUTEX_UNLOCK(); - return -ENODEV; + err = -ENODEV; + goto exit; } + printf("PCI_PROBE: bus %X, slot %X,vendor %X, device %X" "(good PCI location)\n", pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device); + if (dhdpcie_init_succeeded == TRUE) { + DHD_ERROR(("%s(): === Driver Already attached to a BRCM device === \r\n", + __FUNCTION__)); + err = -ENODEV; + goto exit; + } + if (dhdpcie_init (pdev)) { DHD_ERROR(("%s: PCIe Enumeration failed\n", __FUNCTION__)); - DHD_MUTEX_UNLOCK(); - return -ENODEV; + err = -ENODEV; + goto exit; } #ifdef DHD_PCIE_NATIVE_RUNTIMEPM @@ -1171,8 +1374,9 @@ dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) #endif /* BCMPCIE_DISABLE_ASYNC_SUSPEND */ DHD_TRACE(("%s: PCIe Enumeration done!!\n", __FUNCTION__)); +exit: DHD_MUTEX_UNLOCK(); - return 0; + return err; } int @@ -1215,9 +1419,8 @@ dhdpcie_pci_remove(struct pci_dev *pdev) dhdpcie_bus_release(bus); } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) + if (pci_is_enabled(pdev)) -#endif // endif pci_disable_device(pdev); #ifdef BCMPCIE_OOB_HOST_WAKE /* pcie os info detach */ @@ -1403,9 +1606,9 @@ int dhdpcie_get_resource(dhdpcie_info_t *dhdpcie_info) } dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE); - dhdpcie_info->tcm_size = + dhdpcie_info->bar1_size = (bar1_size > DONGLE_TCM_MAP_SIZE) ? 
bar1_size : DONGLE_TCM_MAP_SIZE; - dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size); + dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->bar1_size); if (!dhdpcie_info->regs || !dhdpcie_info->tcm) { DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__)); @@ -1493,14 +1696,14 @@ void dhdpcie_dump_resource(dhd_bus_t *bus) } /* BAR0 */ - DHD_ERROR(("%s: BAR0(VA): 0x%p, BAR0(PA): "PRINTF_RESOURCE", SIZE: %d\n", + DHD_ERROR(("%s: BAR0(VA): 0x%pK, BAR0(PA): "PRINTF_RESOURCE", SIZE: %d\n", __FUNCTION__, pch->regs, pci_resource_start(bus->dev, 0), DONGLE_REG_MAP_SIZE)); /* BAR1 */ - DHD_ERROR(("%s: BAR1(VA): 0x%p, BAR1(PA): "PRINTF_RESOURCE", SIZE: %d\n", + DHD_ERROR(("%s: BAR1(VA): 0x%pK, BAR1(PA): "PRINTF_RESOURCE", SIZE: %d\n", __FUNCTION__, pch->tcm, pci_resource_start(bus->dev, 2), - pch->tcm_size)); + pch->bar1_size)); } int dhdpcie_init(struct pci_dev *pdev) @@ -1613,6 +1816,7 @@ int dhdpcie_init(struct pci_dev *pdev) dhdpcie_info->bus = bus; bus->is_linkdown = 0; bus->no_bus_init = FALSE; + bus->cto_triggered = 0; bus->rc_dev = NULL; @@ -1620,7 +1824,7 @@ int dhdpcie_init(struct pci_dev *pdev) if (bus->dev->bus) { /* self member of structure pci_bus is bridge device as seen by parent */ bus->rc_dev = bus->dev->bus->self; - DHD_ERROR(("%s: rc_dev from dev->bus->self (%x:%x) is %p\n", __FUNCTION__, + DHD_ERROR(("%s: rc_dev from dev->bus->self (%x:%x) is %pK\n", __FUNCTION__, bus->rc_dev->vendor, bus->rc_dev->device, bus->rc_dev)); } else { DHD_ERROR(("%s: unable to get rc_dev as dev->bus is NULL\n", __FUNCTION__)); @@ -1697,7 +1901,7 @@ int dhdpcie_init(struct pci_dev *pdev) /* Attach to the OS network interface */ DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__)); - if (dhd_register_if(bus->dhd, 0, TRUE)) { + if (dhd_attach_net(bus->dhd, TRUE)) { DHD_ERROR(("%s(): ERROR.. 
dhd_register_if() failed\n", __FUNCTION__)); break; } @@ -1759,6 +1963,12 @@ dhdpcie_free_irq(dhd_bus_t *bus) if (bus) { pdev = bus->dev; if (bus->irq_registered) { +#if defined(SET_PCIE_IRQ_CPU_CORE) && defined(CONFIG_ARCH_SM8150) + /* clean up the affinity_hint before + * the unregistration of PCIe irq + */ + (void)irq_set_affinity_hint(pdev->irq, NULL); +#endif /* SET_PCIE_IRQ_CPU_CORE && CONFIG_ARCH_SM8150 */ free_irq(pdev->irq, bus); bus->irq_registered = FALSE; if (bus->d2h_intr_method == PCIE_MSI) { @@ -1794,11 +2004,11 @@ irqreturn_t dhdpcie_isr(int irq, void *arg) { dhd_bus_t *bus = (dhd_bus_t*)arg; - bus->isr_entry_time = OSL_SYSUPTIME_US(); + bus->isr_entry_time = OSL_LOCALTIME_NS(); if (!dhdpcie_bus_isr(bus)) { DHD_LOG_MEM(("%s: dhdpcie_bus_isr returns with FALSE\n", __FUNCTION__)); } - bus->isr_exit_time = OSL_SYSUPTIME_US(); + bus->isr_exit_time = OSL_LOCALTIME_NS(); return IRQ_HANDLED; } @@ -1930,9 +2140,7 @@ dhdpcie_disable_device(dhd_bus_t *bus) return BCME_ERROR; } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) if (pci_is_enabled(bus->dev)) -#endif // endif pci_disable_device(bus->dev); return 0; @@ -2047,9 +2255,9 @@ dhdpcie_alloc_resource(dhd_bus_t *bus) } bus->regs = dhdpcie_info->regs; - dhdpcie_info->tcm_size = + dhdpcie_info->bar1_size = (bar1_size > DONGLE_TCM_MAP_SIZE) ? 
bar1_size : DONGLE_TCM_MAP_SIZE; - dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size); + dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->bar1_size); if (!dhdpcie_info->tcm) { DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__)); REG_UNMAP(dhdpcie_info->regs); @@ -2140,7 +2348,49 @@ dhdpcie_bus_request_irq(struct dhd_bus *bus) } #ifdef BCMPCIE_OOB_HOST_WAKE -int dhdpcie_get_oob_irq_num(dhd_bus_t *bus) +#ifdef CONFIG_BCMDHD_GET_OOB_STATE +extern int dhd_get_wlan_oob_gpio(void); +#endif /* CONFIG_BCMDHD_GET_OOB_STATE */ + +int dhdpcie_get_oob_irq_level(void) +{ + int gpio_level; + +#ifdef CONFIG_BCMDHD_GET_OOB_STATE + gpio_level = dhd_get_wlan_oob_gpio(); +#else + gpio_level = BCME_UNSUPPORTED; +#endif /* CONFIG_BCMDHD_GET_OOB_STATE */ + return gpio_level; +} + +int dhdpcie_get_oob_irq_status(struct dhd_bus *bus) +{ + dhdpcie_info_t *pch; + dhdpcie_os_info_t *dhdpcie_osinfo; + + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return 0; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + return 0; + } + + pch = pci_get_drvdata(bus->dev); + if (pch == NULL) { + DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__)); + return 0; + } + + dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt; + + return dhdpcie_osinfo ? 
dhdpcie_osinfo->oob_irq_enabled : 0; +} + +int dhdpcie_get_oob_irq_num(struct dhd_bus *bus) { dhdpcie_info_t *pch; dhdpcie_os_info_t *dhdpcie_osinfo; @@ -2195,9 +2445,11 @@ void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable) if (enable) { enable_irq(dhdpcie_osinfo->oob_irq_num); bus->oob_intr_enable_count++; + bus->last_oob_irq_enable_time = OSL_LOCALTIME_NS(); } else { disable_irq_nosync(dhdpcie_osinfo->oob_irq_num); bus->oob_intr_disable_count++; + bus->last_oob_irq_disable_time = OSL_LOCALTIME_NS(); } dhdpcie_osinfo->oob_irq_enabled = enable; } @@ -2211,7 +2463,7 @@ static irqreturn_t wlan_oob_irq(int irq, void *data) DHD_TRACE(("%s: IRQ Triggered\n", __FUNCTION__)); bus = (dhd_bus_t *)data; dhdpcie_oob_intr_set(bus, FALSE); - bus->last_oob_irq_time = OSL_SYSUPTIME_US(); + bus->last_oob_irq_time = OSL_LOCALTIME_NS(); bus->oob_intr_count++; #ifdef DHD_WAKE_STATUS { @@ -2347,3 +2599,119 @@ struct device * dhd_bus_to_dev(dhd_bus_t *bus) else return NULL; } + +#define KIRQ_PRINT_BUF_LEN 256 + +void +dhd_print_kirqstats(dhd_pub_t *dhd, unsigned int irq_num) +{ + unsigned long flags = 0; + struct irq_desc *desc; + int i; /* cpu iterator */ + struct bcmstrbuf strbuf; + char tmp_buf[KIRQ_PRINT_BUF_LEN]; + + desc = irq_to_desc(irq_num); + if (!desc) { + DHD_ERROR(("%s : irqdesc is not found \n", __FUNCTION__)); + return; + } + bcm_binit(&strbuf, tmp_buf, KIRQ_PRINT_BUF_LEN); + raw_spin_lock_irqsave(&desc->lock, flags); + bcm_bprintf(&strbuf, "dhd irq %u:", irq_num); + for_each_online_cpu(i) + bcm_bprintf(&strbuf, "%10u ", + desc->kstat_irqs ? 
*per_cpu_ptr(desc->kstat_irqs, i) : 0); + if (desc->irq_data.chip) { + if (desc->irq_data.chip->name) + bcm_bprintf(&strbuf, " %8s", desc->irq_data.chip->name); + else + bcm_bprintf(&strbuf, " %8s", "-"); + } else { + bcm_bprintf(&strbuf, " %8s", "None"); + } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)) + if (desc->irq_data.domain) + bcm_bprintf(&strbuf, " %d", (int)desc->irq_data.hwirq); +#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL + bcm_bprintf(&strbuf, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge"); +#endif // endif +#endif /* LINUX VERSION > 3.1.0 */ + + if (desc->name) + bcm_bprintf(&strbuf, "-%-8s", desc->name); + + DHD_ERROR(("%s\n", strbuf.origbuf)); + raw_spin_unlock_irqrestore(&desc->lock, flags); +} + +void +dhd_show_kirqstats(dhd_pub_t *dhd) +{ + unsigned int irq = -1; +#ifdef BCMPCIE + dhdpcie_get_pcieirq(dhd->bus, &irq); +#endif /* BCMPCIE */ +#ifdef BCMSDIO + irq = ((wifi_adapter_info_t *)dhd->info->adapter)->irq_num; +#endif /* BCMSDIO */ + if (irq != -1) { +#ifdef BCMPCIE + DHD_ERROR(("DUMP data kernel irq stats : \n")); +#endif /* BCMPCIE */ +#ifdef BCMSDIO + DHD_ERROR(("DUMP data/host wakeup kernel irq stats : \n")); +#endif /* BCMSDIO */ + dhd_print_kirqstats(dhd, irq); + } +#ifdef BCMPCIE_OOB_HOST_WAKE + irq = dhdpcie_get_oob_irq_num(dhd->bus); + if (irq) { + DHD_ERROR(("DUMP PCIE host wakeup kernel irq stats : \n")); + dhd_print_kirqstats(dhd, irq); + } +#endif /* BCMPCIE_OOB_HOST_WAKE */ +} + +#ifdef DHD_FW_COREDUMP +int +dhd_dongle_mem_dump(void) +{ + if (!g_dhd_bus) { + DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__)); + return -ENODEV; + } + + dhd_bus_dump_console_buffer(g_dhd_bus); + dhd_prot_debug_info_print(g_dhd_bus->dhd); + + g_dhd_bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON; + g_dhd_bus->dhd->memdump_type = DUMP_TYPE_AP_ABNORMAL_ACCESS; + + dhd_bus_mem_dump(g_dhd_bus->dhd); + return 0; +} +EXPORT_SYMBOL(dhd_dongle_mem_dump); +#endif /* DHD_FW_COREDUMP */ + +bool +dhd_bus_check_driver_up(void) +{ + dhd_bus_t *bus; 
+ dhd_pub_t *dhdp; + bool isup = FALSE; + + bus = (dhd_bus_t *)g_dhd_bus; + if (!bus) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return isup; + } + + dhdp = bus->dhd; + if (dhdp) { + isup = dhdp->up; + } + + return isup; +} +EXPORT_SYMBOL(dhd_bus_check_driver_up); diff --git a/bcmdhd.100.10.315.x/dhd_pno.c b/bcmdhd.100.10.315.x/dhd_pno.c index ab4ca58..390fb81 100644 --- a/bcmdhd.100.10.315.x/dhd_pno.c +++ b/bcmdhd.100.10.315.x/dhd_pno.c @@ -2,7 +2,7 @@ * Broadcom Dongle Host Driver (DHD) * Prefered Network Offload and Wi-Fi Location Service(WLS) code. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: dhd_pno.c 736010 2017-12-13 08:45:59Z $ + * $Id: dhd_pno.c 812762 2019-04-02 09:36:26Z $ */ #if defined(GSCAN_SUPPORT) && !defined(PNO_SUPPORT) @@ -206,7 +206,7 @@ dhd_is_legacy_pno_enabled(dhd_pub_t *dhd) #ifdef GSCAN_SUPPORT static uint64 -convert_fw_rel_time_to_systime(struct timespec *ts, uint32 fw_ts_ms) +convert_fw_rel_time_to_systime(struct osl_timespec *ts, uint32 fw_ts_ms) { return ((uint64)(TIMESPEC_TO_US(*ts)) - (uint64)(fw_ts_ms * 1000)); } @@ -227,12 +227,11 @@ dhd_pno_idx_to_ssid(struct dhd_pno_gscan_params *gscan_params, if (gscan_params->epno_cfg.num_epno_ssid > 0) { i = 0; -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif + + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); list_for_each_entry_safe(iter, next, &gscan_params->epno_cfg.epno_ssid_list, list) { + GCC_DIAGNOSTIC_POP(); if (i++ == idx) { memcpy(res->ssid, iter->SSID, iter->SSID_len); res->ssid_len = iter->SSID_len; @@ -652,11 +651,9 @@ _dhd_pno_add_ssid(dhd_pub_t *dhd, struct list_head* ssid_list, int nssid) return BCME_NOMEM; } -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) 
-#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); list_for_each_entry_safe(iter, next, ssid_list, list) { + GCC_DIAGNOSTIC_POP(); pfn_elem_buf[i].infra = htod32(1); pfn_elem_buf[i].auth = htod32(DOT11_OPEN_SYSTEM); pfn_elem_buf[i].wpa_auth = htod32(iter->wpa_auth); @@ -680,6 +677,7 @@ _dhd_pno_add_ssid(dhd_pub_t *dhd, struct list_head* ssid_list, int nssid) break; } } + err = dhd_iovar(dhd, 0, "pfn_add", (char *)pfn_elem_buf, mem_needed, NULL, 0, TRUE); if (err < 0) { DHD_ERROR(("%s : failed to execute pfn_add\n", __FUNCTION__)); @@ -816,15 +814,10 @@ _dhd_pno_convert_format(dhd_pub_t *dhd, struct dhd_pno_batch_params *params_batc } DHD_PNO(("%s scancount %d\n", __FUNCTION__, params_batch->get_batch.expired_tot_scan_cnt)); /* preestimate scan count until which scan result this report is going to end */ -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); list_for_each_entry_safe(siter, snext, ¶ms_batch->get_batch.expired_scan_results_list, list) { -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif + GCC_DIAGNOSTIC_POP(); phead = siter->bestnetheader; while (phead != NULL) { /* if left_size is less than bestheader total size , stop this */ @@ -840,15 +833,10 @@ _dhd_pno_convert_format(dhd_pub_t *dhd, struct dhd_pno_batch_params *params_batc bp += nreadsize = snprintf(bp, nleftsize, "trunc\n"); nleftsize -= nreadsize; } -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); list_for_each_entry_safe(iter, next, &phead->entry_list, list) { -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif + GCC_DIAGNOSTIC_POP(); t_delta = 
jiffies_to_msecs(jiffies - iter->recorded_time); #ifdef PNO_DEBUG _base_bp = bp; @@ -916,14 +904,9 @@ exit: } params_batch->get_batch.expired_tot_scan_cnt -= cnt; /* set FALSE only if the link list is empty after returning the data */ -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); if (list_empty(¶ms_batch->get_batch.expired_scan_results_list)) { -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif + GCC_DIAGNOSTIC_POP(); params_batch->get_batch.batch_started = FALSE; bp += snprintf(bp, nleftsize, "%s", RESULTS_END_MARKER); DHD_PNO(("%s", RESULTS_END_MARKER)); @@ -946,10 +929,8 @@ _dhd_pno_clear_all_batch_results(dhd_pub_t *dhd, struct list_head *head, bool on NULL_CHECK(head, "head is NULL", err); NULL_CHECK(head->next, "head->next is NULL", err); DHD_PNO(("%s enter\n", __FUNCTION__)); -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif + + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); list_for_each_entry_safe(siter, snext, head, list) { if (only_last) { @@ -978,9 +959,7 @@ _dhd_pno_clear_all_batch_results(dhd_pub_t *dhd, struct list_head *head, bool on MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE); } } -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif + GCC_DIAGNOSTIC_POP(); return removed_scan_cnt; } @@ -1031,19 +1010,15 @@ _dhd_pno_reinitialize_prof(dhd_pub_t *dhd, dhd_pno_params_t *params, dhd_pno_mod case DHD_PNO_LEGACY_MODE: { struct dhd_pno_ssid *iter, *next; if (params->params_legacy.nssid > 0) { -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); list_for_each_entry_safe(iter, next, ¶ms->params_legacy.ssid_list, 
list) { + GCC_DIAGNOSTIC_POP(); list_del(&iter->list); MFREE(dhd->osh, iter, sizeof(struct dhd_pno_ssid)); } } -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif + params->params_legacy.nssid = 0; params->params_legacy.scan_fr = 0; params->params_legacy.pno_freq_expo_max = 0; @@ -1081,18 +1056,13 @@ _dhd_pno_reinitialize_prof(dhd_pub_t *dhd, dhd_pno_params_t *params, dhd_pno_mod case DHD_PNO_HOTLIST_MODE: { struct dhd_pno_bssid *iter, *next; if (params->params_hotlist.nbssid > 0) { -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); list_for_each_entry_safe(iter, next, ¶ms->params_hotlist.bssid_list, list) { + GCC_DIAGNOSTIC_POP(); list_del(&iter->list); MFREE(dhd->osh, iter, sizeof(struct dhd_pno_ssid)); } -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif } params->params_hotlist.scan_fr = 0; params->params_hotlist.nbssid = 0; @@ -1221,14 +1191,12 @@ dhd_pno_stop_for_ssid(dhd_pub_t *dhd) goto exit; } /* convert dhd_pno_bssid to wl_pfn_bssid */ -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); cnt = 0; tmp_bssid = p_pfn_bssid; list_for_each_entry_safe(iter, next, &_params->params_hotlist.bssid_list, list) { + GCC_DIAGNOSTIC_POP(); memcpy(&tmp_bssid->macaddr, &iter->macaddr, ETHER_ADDR_LEN); tmp_bssid->flags = iter->flags; @@ -1241,9 +1209,6 @@ dhd_pno_stop_for_ssid(dhd_pub_t *dhd) break; } } -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist); if (err < 0) { _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; @@ -1762,18 +1727,13 @@ dhd_pno_reset_cfg_gscan(dhd_pub_t *dhd, dhd_pno_params_t 
*_params, if (flags & GSCAN_FLUSH_HOTLIST_CFG) { struct dhd_pno_bssid *iter, *next; if (_params->params_gscan.nbssid_hotlist > 0) { -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); list_for_each_entry_safe(iter, next, &_params->params_gscan.hotlist_bssid_list, list) { + GCC_DIAGNOSTIC_POP(); list_del(&iter->list); MFREE(dhd->osh, iter, sizeof(struct dhd_pno_bssid)); } -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif } _params->params_gscan.nbssid_hotlist = 0; DHD_PNO(("Flush Hotlist Config\n")); @@ -1783,18 +1743,13 @@ dhd_pno_reset_cfg_gscan(dhd_pub_t *dhd, dhd_pno_params_t *_params, dhd_epno_ssid_cfg_t *epno_cfg = &_params->params_gscan.epno_cfg; if (epno_cfg->num_epno_ssid > 0) { -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); list_for_each_entry_safe(iter, next, &epno_cfg->epno_ssid_list, list) { + GCC_DIAGNOSTIC_POP(); list_del(&iter->list); MFREE(dhd->osh, iter, sizeof(struct dhd_pno_bssid)); } -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif epno_cfg->num_epno_ssid = 0; } memset(&epno_cfg->params, 0, sizeof(wl_ssid_ext_params_t)); @@ -1873,28 +1828,6 @@ dhd_wait_batch_results_complete(dhd_pub_t *dhd) return err; } -static void * -dhd_get_gscan_batch_results(dhd_pub_t *dhd, uint32 *len) -{ - gscan_results_cache_t *iter, *results; - dhd_pno_status_info_t *_pno_state; - dhd_pno_params_t *_params; - uint16 num_scan_ids = 0, num_results = 0; - - _pno_state = PNO_GET_PNOSTATE(dhd); - _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; - - iter = results = _params->params_gscan.gscan_batch_cache; - while (iter) { - num_results += iter->tot_count - iter->tot_consumed; - num_scan_ids++; 
- iter = iter->next; - } - - *len = ((num_results << 16) | (num_scan_ids)); - return results; -} - int dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *buf, bool flush) @@ -1941,6 +1874,15 @@ dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, INIT_LIST_HEAD(&_params->params_gscan.hotlist_bssid_list); } + if ((_params->params_gscan.nbssid_hotlist + + ptr->nbssid) > PFN_SWC_MAX_NUM_APS) { + DHD_ERROR(("Excessive number of hotlist APs programmed %d\n", + (_params->params_gscan.nbssid_hotlist + + ptr->nbssid))); + err = BCME_RANGE; + goto exit; + } + for (i = 0, bssid_ptr = ptr->bssid; i < ptr->nbssid; i++, bssid_ptr++) { _pno_bssid = (struct dhd_pno_bssid *)MALLOCZ(dhd->osh, sizeof(struct dhd_pno_bssid)); @@ -2226,13 +2168,11 @@ dhd_pno_set_for_gscan(dhd_pub_t *dhd, struct dhd_pno_gscan_params *gscan_params) ptr = p_pfn_bssid; /* convert dhd_pno_bssid to wl_pfn_bssid */ DHD_PNO(("nhotlist %d\n", gscan_params->nbssid_hotlist)); -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); list_for_each_entry_safe(iter, next, &gscan_params->hotlist_bssid_list, list) { char buffer_hotlist[64]; + GCC_DIAGNOSTIC_POP(); memcpy(&ptr->macaddr, &iter->macaddr, ETHER_ADDR_LEN); BCM_REFERENCE(buffer_hotlist); @@ -2240,9 +2180,6 @@ dhd_pno_set_for_gscan(dhd_pub_t *dhd, struct dhd_pno_gscan_params *gscan_params) ptr->flags = iter->flags; ptr++; } -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, gscan_params->nbssid_hotlist); if (err < 0) { @@ -2612,7 +2549,7 @@ _dhd_pno_get_gscan_batch_from_fw(dhd_pub_t *dhd) uint16 count; uint16 fwcount; uint16 fwstatus = PFN_INCOMPLETE; - struct timespec tm_spec; + struct osl_timespec tm_spec; /* Static asserts in _dhd_pno_get_for_batch() below guarantee the v1 and v2 * net_info and 
subnet_info structures are compatible in size and SSID offset, @@ -2677,7 +2614,7 @@ _dhd_pno_get_gscan_batch_from_fw(dhd_pub_t *dhd) __FUNCTION__, err)); goto exit_mutex_unlock; } - get_monotonic_boottime(&tm_spec); + osl_get_monotonic_boottime(&tm_spec); if (plbestnet_v1->version == PFN_LBEST_SCAN_RESULT_VERSION_V1) { fwstatus = plbestnet_v1->status; @@ -2964,6 +2901,28 @@ exit: #endif /* GSCAN_SUPPORT */ #if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS) +static void * +dhd_get_gscan_batch_results(dhd_pub_t *dhd, uint32 *len) +{ + gscan_results_cache_t *iter, *results; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + uint16 num_scan_ids = 0, num_results = 0; + + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + + iter = results = _params->params_gscan.gscan_batch_cache; + while (iter) { + num_results += iter->tot_count - iter->tot_consumed; + num_scan_ids++; + iter = iter->next; + } + + *len = ((num_results << 16) | (num_scan_ids)); + return results; +} + void * dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *info, uint32 *len) @@ -3000,19 +2959,22 @@ dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, ptr->max_scan_cache_size = GSCAN_MAX_AP_CACHE; ptr->max_scan_buckets = GSCAN_MAX_CH_BUCKETS; ptr->max_ap_cache_per_scan = GSCAN_MAX_AP_CACHE_PER_SCAN; + ptr->max_rssi_sample_size = PFN_SWC_RSSI_WINDOW_MAX; ptr->max_scan_reporting_threshold = 100; - ptr->max_hotlist_aps = PFN_HOTLIST_MAX_NUM_APS; + ptr->max_hotlist_bssids = PFN_HOTLIST_MAX_NUM_APS; + ptr->max_hotlist_ssids = 0; + ptr->max_significant_wifi_change_aps = 0; + ptr->max_bssid_history_entries = 0; ptr->max_epno_ssid_crc32 = MAX_EPNO_SSID_NUM; ptr->max_epno_hidden_ssid = MAX_EPNO_HIDDEN_SSID; ptr->max_white_list_ssid = MAX_WHITELIST_SSID; ret = (void *)ptr; *len = sizeof(dhd_pno_gscan_capabilities_t); break; -#ifdef GSCAN_SUPPORT + case DHD_PNO_GET_BATCH_RESULTS: ret = 
dhd_get_gscan_batch_results(dhd, len); break; -#endif /* GSCAN_SUPPORT */ case DHD_PNO_GET_CHANNEL_LIST: if (info) { uint16 ch_list[WL_NUMCHANNELS]; @@ -3048,7 +3010,7 @@ dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, *len = 0; } else { mem_needed = sizeof(uint32) * nchan; - p = (uint32 *)MALLOCZ(dhd->osh, mem_needed); + p = (uint32 *)MALLOC(dhd->osh, mem_needed); if (!p) { DHD_ERROR(("%s: Unable to malloc %d bytes\n", __FUNCTION__, mem_needed)); @@ -3087,7 +3049,6 @@ dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, epno_cfg->num_epno_ssid++; list_add_tail(&ssid_elem->list, &epno_cfg->epno_ssid_list); ret = ssid_elem; - *len = sizeof(dhd_pno_ssid_t); break; default: DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type)); @@ -3163,18 +3124,13 @@ _dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason) /* this is a first try to get batching results */ if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) { /* move the scan_results_list to expired_scan_results_lists */ -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); list_for_each_entry_safe(siter, snext, &_params->params_batch.get_batch.scan_results_list, list) { + GCC_DIAGNOSTIC_POP(); list_move_tail(&siter->list, &_params->params_batch.get_batch.expired_scan_results_list); } -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif _params->params_batch.get_batch.top_node_cnt = 0; _params->params_batch.get_batch.expired_tot_scan_cnt = _params->params_batch.get_batch.tot_scan_cnt; @@ -3467,18 +3423,13 @@ _dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason) /* This is a first try to get batching results */ if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) { /* move the scan_results_list to expired_scan_results_lists */ -#if 
defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); list_for_each_entry_safe(siter, snext, &_params->params_batch.get_batch.scan_results_list, list) { + GCC_DIAGNOSTIC_POP(); list_move_tail(&siter->list, &_params->params_batch.get_batch.expired_scan_results_list); } -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif /* reset gloval values after moving to expired list */ _params->params_batch.get_batch.top_node_cnt = 0; _params->params_batch.get_batch.expired_tot_scan_cnt = @@ -3514,14 +3465,10 @@ _dhd_pno_get_batch_handler(struct work_struct *work) dhd_pub_t *dhd; struct dhd_pno_batch_params *params_batch; DHD_PNO(("%s enter\n", __FUNCTION__)); -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); _pno_state = container_of(work, struct dhd_pno_status_info, work); -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif + GCC_DIAGNOSTIC_POP(); + dhd = _pno_state->dhd; if (dhd == NULL) { DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__)); @@ -3674,19 +3621,14 @@ dhd_pno_stop_for_batch(dhd_pub_t *dhd) } i = 0; /* convert dhd_pno_bssid to wl_pfn_bssid */ -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); list_for_each_entry_safe(iter, next, &_params->params_hotlist.bssid_list, list) { + GCC_DIAGNOSTIC_POP(); memcpy(&p_pfn_bssid[i].macaddr, &iter->macaddr, ETHER_ADDR_LEN); p_pfn_bssid[i].flags = iter->flags; i++; } -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist); if (err 
< 0) { _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; @@ -4008,19 +3950,14 @@ dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *data, uint32 len, int u32 bi_length = 0; uint8 channel; uint32 mem_needed; - struct timespec ts; + struct osl_timespec ts; u32 bi_ie_length = 0; u32 bi_ie_offset = 0; *size = 0; -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); gscan_result = (wl_gscan_result_t *)data; -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif + GCC_DIAGNOSTIC_POP(); if (!gscan_result) { DHD_ERROR(("Invalid gscan result (NULL pointer)\n")); goto exit; @@ -4072,7 +4009,7 @@ dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *data, uint32 len, int result->fixed.rssi = (int32) bi->RSSI; result->fixed.rtt = 0; result->fixed.rtt_sd = 0; - get_monotonic_boottime(&ts); + osl_get_monotonic_boottime(&ts); result->fixed.ts = (uint64) TIMESPEC_TO_US(ts); result->fixed.beacon_period = dtoh16(bi->beacon_period); result->fixed.capability = dtoh16(bi->capability); @@ -4100,23 +4037,15 @@ dhd_pno_process_epno_result(dhd_pub_t *dhd, const void *data, uint32 event, int gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan); if (event == WLC_E_PFN_NET_FOUND || event == WLC_E_PFN_NET_LOST) { -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif wl_pfn_scanresults_v1_t *pfn_result = (wl_pfn_scanresults_v1_t *)data; wl_pfn_scanresults_v2_t *pfn_result_v2 = (wl_pfn_scanresults_v2_t *)data; wl_pfn_net_info_v1_t *net; wl_pfn_net_info_v2_t *net_v2; -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif + if (pfn_result->version == PFN_SCANRESULT_VERSION_V1) { - /* Check if count of pfn results is corrupted */ - if (pfn_result->count 
> EVENT_MAX_NETCNT_V1) { - DHD_ERROR(("%s event %d: pfn results count %d" - "exceeds the max limit\n", - __FUNCTION__, event, pfn_result->count)); + if ((pfn_result->count == 0) || (pfn_result->count > EVENT_MAX_NETCNT_V1)) { + DHD_ERROR(("%s event %d: wrong pfn v1 results count %d\n", + __FUNCTION__, event, pfn_result->count)); return NULL; } count = pfn_result->count; @@ -4155,11 +4084,9 @@ dhd_pno_process_epno_result(dhd_pub_t *dhd, const void *data, uint32 event, int results[i].rssi, results[i].flags)); } } else if (pfn_result_v2->version == PFN_SCANRESULT_VERSION_V2) { - /* Check if count of pfn results is corrupted */ - if (pfn_result_v2->count > EVENT_MAX_NETCNT_V2) { - DHD_ERROR(("%s event %d: pfn results count %d" - "exceeds the max limit\n", - __FUNCTION__, event, pfn_result_v2->count)); + if ((pfn_result->count == 0) || (pfn_result->count > EVENT_MAX_NETCNT_V2)) { + DHD_ERROR(("%s event %d: wrong pfn v2 results count %d\n", + __FUNCTION__, event, pfn_result->count)); return NULL; } count = pfn_result_v2->count; @@ -4212,21 +4139,14 @@ dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data, void *ptr = NULL; dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); struct dhd_pno_gscan_params *gscan_params; -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif wl_pfn_scanresults_v1_t *results_v1 = (wl_pfn_scanresults_v1_t *)event_data; wl_pfn_scanresults_v2_t *results_v2 = (wl_pfn_scanresults_v2_t *)event_data; -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif wifi_gscan_result_t *hotlist_found_array; wl_pfn_net_info_v1_t *pnetinfo; wl_pfn_net_info_v2_t *pnetinfo_v2; gscan_results_cache_t *gscan_hotlist_cache; - uint32 malloc_size = 0, i, total = 0; - struct timespec tm_spec; + u32 malloc_size = 0, i, total = 0; + struct osl_timespec tm_spec; uint16 fwstatus; uint16 fwcount; @@ -4250,7 +4170,7 @@ 
dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data, return ptr; } - get_monotonic_boottime(&tm_spec); + osl_get_monotonic_boottime(&tm_spec); malloc_size = sizeof(gscan_results_cache_t) + ((fwcount - 1) * sizeof(wifi_gscan_result_t)); gscan_hotlist_cache = (gscan_results_cache_t *)MALLOC(dhd->osh, malloc_size); @@ -4314,7 +4234,7 @@ dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data, return ptr; } - get_monotonic_boottime(&tm_spec); + osl_get_monotonic_boottime(&tm_spec); malloc_size = sizeof(gscan_results_cache_t) + ((fwcount - 1) * sizeof(wifi_gscan_result_t)); gscan_hotlist_cache = diff --git a/bcmdhd.100.10.315.x/dhd_pno.h b/bcmdhd.100.10.315.x/dhd_pno.h index 66c41b6..fa4e245 100644 --- a/bcmdhd.100.10.315.x/dhd_pno.h +++ b/bcmdhd.100.10.315.x/dhd_pno.h @@ -2,7 +2,7 @@ * Header file of Broadcom Dongle Host Driver (DHD) * Prefered Network Offload code and Wi-Fi Location Service(WLS) code. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: dhd_pno.h 722186 2017-09-19 07:03:42Z $ + * $Id: dhd_pno.h 805174 2019-02-15 17:26:01Z $ */ #ifndef __DHD_PNO_H__ @@ -348,6 +348,11 @@ typedef struct dhd_epno_results { struct ether_addr bssid; } dhd_epno_results_t; +typedef struct dhd_pno_swc_evt_param { + uint16 results_rxed_so_far; + wl_pfn_significant_net_t *change_array; +} dhd_pno_swc_evt_param_t; + typedef struct wifi_gscan_result { uint64 ts; /* Time of discovery */ char ssid[DOT11_MAX_SSID_LEN+1]; /* null terminated */ @@ -384,8 +389,10 @@ typedef struct dhd_pno_gscan_capabilities { int max_ap_cache_per_scan; int max_rssi_sample_size; int max_scan_reporting_threshold; - int max_hotlist_aps; + int max_hotlist_bssids; + int max_hotlist_ssids; int max_significant_wifi_change_aps; + int max_bssid_history_entries; int max_epno_ssid_crc32; int max_epno_hidden_ssid; int max_white_list_ssid; @@ -402,6 +409,8 @@ struct dhd_pno_gscan_params { uint8 bestn; uint8 mscan; uint8 buffer_threshold; + uint8 swc_nbssid_threshold; + uint8 swc_rssi_window_size; uint8 lost_ap_window; uint8 nchannel_buckets; uint8 reason; @@ -411,9 +420,12 @@ struct dhd_pno_gscan_params { gscan_results_cache_t *gscan_batch_cache; gscan_results_cache_t *gscan_hotlist_found; gscan_results_cache_t*gscan_hotlist_lost; + uint16 nbssid_significant_change; uint16 nbssid_hotlist; + struct dhd_pno_swc_evt_param param_significant; struct dhd_pno_gscan_channel_bucket channel_bucket[GSCAN_MAX_CH_BUCKETS]; struct list_head hotlist_bssid_list; + struct list_head significant_bssid_list; dhd_epno_ssid_cfg_t epno_cfg; uint32 scan_id; }; diff --git a/bcmdhd.100.10.315.x/dhd_proto.h b/bcmdhd.100.10.315.x/dhd_proto.h index 3724e7f..b607478 100644 --- a/bcmdhd.100.10.315.x/dhd_proto.h +++ b/bcmdhd.100.10.315.x/dhd_proto.h @@ -4,7 +4,7 @@ * Provides type definitions and function 
prototypes used to link the * DHD OS, bus, and protocol modules. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -27,7 +27,7 @@ * * <> * - * $Id: dhd_proto.h 769450 2018-06-26 10:16:46Z $ + * $Id: dhd_proto.h 814912 2019-04-15 10:38:59Z $ */ #ifndef _dhd_proto_h_ @@ -45,6 +45,9 @@ #define IOCTL_RESP_TIMEOUT DEFAULT_IOCTL_RESP_TIMEOUT #endif /* IOCTL_RESP_TIMEOUT */ +/* In milli second default value for Production FW */ +#define IOCTL_DMAXFER_TIMEOUT 10000 + #ifndef MFG_IOCTL_RESP_TIMEOUT #define MFG_IOCTL_RESP_TIMEOUT 20000 /* In milli second default value for MFG FW */ #endif /* MFG_IOCTL_RESP_TIMEOUT */ @@ -125,8 +128,8 @@ extern int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, uint reorder_info_len, void **pkt, uint32 *free_buf_count); #ifdef BCMPCIE -extern bool dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound); -extern bool dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound); +extern bool dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound, int ringtype); +extern bool dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound, int ringtype); extern bool dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound); extern int dhd_prot_process_ctrlbuf(dhd_pub_t * dhd); extern int dhd_prot_process_trapbuf(dhd_pub_t * dhd); @@ -137,7 +140,7 @@ extern void dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 offset); extern int dhd_prot_txdata(dhd_pub_t *dhd, void *p, uint8 ifidx); extern int dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay, uint d11_lpbk, uint core_num); -extern dma_xfer_status_t dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd); +extern int dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd, dma_xfer_info_t *result); extern void dhd_dma_buf_init(dhd_pub_t *dhd, void *dma_buf, void *va, uint32 len, dmaaddr_t pa, void *dmah, 
void *secdma); @@ -159,13 +162,24 @@ extern void dhd_prot_update_txflowring(dhd_pub_t *dhdp, uint16 flow_id, void *ms extern void dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flow_id); extern uint32 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val); extern void dhd_prot_reset(dhd_pub_t *dhd); +extern uint16 dhd_get_max_flow_rings(dhd_pub_t *dhd); #ifdef IDLE_TX_FLOW_MGMT extern int dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count); extern int dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node); #endif /* IDLE_TX_FLOW_MGMT */ extern int dhd_prot_init_info_rings(dhd_pub_t *dhd); +#ifdef DHD_HP2P +extern int dhd_prot_init_hp2p_rings(dhd_pub_t *dhd); +#endif /* DHD_HP2P */ + +extern int dhd_prot_check_tx_resource(dhd_pub_t *dhd); +extern void dhd_prot_update_pktid_txq_stop_cnt(dhd_pub_t *dhd); +extern void dhd_prot_update_pktid_txq_start_cnt(dhd_pub_t *dhd); +#else +static INLINE void dhd_prot_update_pktid_txq_stop_cnt(dhd_pub_t *dhd) { return; } +static INLINE void dhd_prot_update_pktid_txq_start_cnt(dhd_pub_t *dhd) { return; } #endif /* BCMPCIE */ #ifdef DHD_LB @@ -212,7 +226,17 @@ void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf); #define DHD_PROTOCOL "unknown" #endif /* proto */ -void dhd_get_hscb_info(struct dhd_prot *prot, void ** va, uint32 *len); -void dhd_get_hscb_buff(struct dhd_prot *prot, uint32 offset, uint32 length, void * buff); +int dhd_get_hscb_info(dhd_pub_t *dhd, void ** va, uint32 *len); +int dhd_get_hscb_buff(dhd_pub_t *dhd, uint32 offset, uint32 length, void * buff); + +#ifdef DHD_HP2P +extern uint8 dhd_prot_hp2p_enable(dhd_pub_t *dhd, bool set, int enable); +extern uint32 dhd_prot_pkt_threshold(dhd_pub_t *dhd, bool set, uint32 val); +extern uint32 dhd_prot_time_threshold(dhd_pub_t *dhd, bool set, uint32 val); +extern uint32 dhd_prot_pkt_expiry(dhd_pub_t *dhd, bool set, uint32 val); +#endif // endif +#ifdef DHD_MAP_LOGGING +extern void 
dhd_prot_smmu_fault_dump(dhd_pub_t *dhdp); +#endif /* DHD_MAP_LOGGING */ #endif /* _dhd_proto_h_ */ diff --git a/bcmdhd.100.10.315.x/dhd_rtt.c b/bcmdhd.100.10.315.x/dhd_rtt.c index f51b850..a983a15 100644 --- a/bcmdhd.100.10.315.x/dhd_rtt.c +++ b/bcmdhd.100.10.315.x/dhd_rtt.c @@ -1,7 +1,7 @@ /* * Broadcom Dongle Host Driver (DHD), RTT * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -40,11 +40,13 @@ #include #include #include +#include #include #include #include #include +#include #include #ifdef WL_CFG80211 #include @@ -63,11 +65,14 @@ static DEFINE_SPINLOCK(noti_list_lock); } \ } while (0) -#define RTT_IS_ENABLED(rtt_status) (rtt_status->status == RTT_ENABLED) -#define RTT_IS_STOPPED(rtt_status) (rtt_status->status == RTT_STOPPED) #define TIMESPEC_TO_US(ts) (((uint64)(ts).tv_sec * USEC_PER_SEC) + \ (ts).tv_nsec / NSEC_PER_USEC) +#undef DHD_RTT_MEM +#undef DHD_RTT_ERR +#define DHD_RTT_MEM DHD_LOG_MEM +#define DHD_RTT_ERR DHD_ERROR + #define FTM_IOC_BUFSZ 2048 /* ioc buffsize for our module (> BCM_XTLV_HDR_SIZE) */ #define FTM_AVAIL_MAX_SLOTS 32 #define FTM_MAX_CONFIGS 10 @@ -75,9 +80,11 @@ static DEFINE_SPINLOCK(noti_list_lock); #define FTM_DEFAULT_SESSION 1 #define FTM_BURST_TIMEOUT_UNIT 250 /* 250 ns */ #define FTM_INVALID -1 -#define FTM_DEFAULT_CNT_20M 12 -#define FTM_DEFAULT_CNT_40M 10 -#define FTM_DEFAULT_CNT_80M 5 +#define FTM_DEFAULT_CNT_20M 24u +#define FTM_DEFAULT_CNT_40M 16u +#define FTM_DEFAULT_CNT_80M 11u +/* To handle congestion env, set max dur/timeout */ +#define FTM_MAX_BURST_DUR_TMO_MS 128u /* convenience macros */ #define FTM_TU2MICRO(_tu) ((uint64)(_tu) << 10) @@ -94,7 +101,13 @@ static DEFINE_SPINLOCK(noti_list_lock); /* broadcom specific set to have more accurate data */ #define ENABLE_VHT_ACK #define CH_MIN_5G_CHANNEL 34 -#define CH_MIN_2G_CHANNEL 1 + +/* CUR 
ETH became obsolete with this major version onwards */ +#define RTT_IOV_CUR_ETH_OBSOLETE 12 + +/* PROXD TIMEOUT */ +#define DHD_RTT_TIMER_INTERVAL_MS 5000u +#define DHD_NAN_RTT_TIMER_INTERVAL_MS 20000u struct rtt_noti_callback { struct list_head list; @@ -138,6 +151,7 @@ typedef struct ftm_config_param_info { uint32 data32; uint16 data16; uint8 data8; + uint32 event_mask; }; } ftm_config_param_info_t; @@ -160,19 +174,28 @@ static uint16 rtt_result_ver(uint16 tlvid, const uint8 *p_data); static int -dhd_rtt_convert_results_to_host_v1(rtt_report_t *rtt_report, const uint8 *p_data, +dhd_rtt_convert_results_to_host_v1(rtt_result_t *rtt_result, const uint8 *p_data, uint16 tlvid, uint16 len); static int -dhd_rtt_convert_results_to_host_v2(rtt_report_t *rtt_report, const uint8 *p_data, +dhd_rtt_convert_results_to_host_v2(rtt_result_t *rtt_result, const uint8 *p_data, uint16 tlvid, uint16 len); static wifi_rate_t dhd_rtt_convert_rate_to_host(uint32 ratespec); +#if defined(WL_CFG80211) && defined(RTT_DEBUG) +const char * +ftm_cmdid_to_str(uint16 cmdid); +#endif /* WL_CFG80211 && RTT_DEBUG */ + #ifdef WL_CFG80211 static int dhd_rtt_start(dhd_pub_t *dhd); +static int dhd_rtt_create_failure_result(rtt_status_info_t *rtt_status, + struct ether_addr *addr); +static void dhd_rtt_handle_rtt_session_end(dhd_pub_t *dhd); +static void dhd_rtt_timeout_work(struct work_struct *work); #endif /* WL_CFG80211 */ static const int burst_duration_idx[] = {0, 0, 1, 2, 4, 8, 16, 32, 64, 128, 0, 0}; @@ -203,59 +226,6 @@ static const ftm_status_map_host_entry_t ftm_status_map_info[] = { {WL_PROXD_E_OK, RTT_STATUS_SUCCESS} }; -/* ftm tlv-id mapping */ -static const ftm_strmap_entry_t ftm_tlvid_loginfo[] = { - /* { WL_PROXD_TLV_ID_xxx, "text for WL_PROXD_TLV_ID_xxx" }, */ - { WL_PROXD_TLV_ID_NONE, "none" }, - { WL_PROXD_TLV_ID_METHOD, "method" }, - { WL_PROXD_TLV_ID_FLAGS, "flags" }, - { WL_PROXD_TLV_ID_CHANSPEC, "chanspec" }, - { WL_PROXD_TLV_ID_TX_POWER, "tx power" }, - { 
WL_PROXD_TLV_ID_RATESPEC, "ratespec" }, - { WL_PROXD_TLV_ID_BURST_DURATION, "burst duration" }, - { WL_PROXD_TLV_ID_BURST_PERIOD, "burst period" }, - { WL_PROXD_TLV_ID_BURST_FTM_SEP, "burst ftm sep" }, - { WL_PROXD_TLV_ID_BURST_NUM_FTM, "burst num ftm" }, - { WL_PROXD_TLV_ID_NUM_BURST, "num burst" }, - { WL_PROXD_TLV_ID_FTM_RETRIES, "ftm retries" }, - { WL_PROXD_TLV_ID_BSS_INDEX, "BSS index" }, - { WL_PROXD_TLV_ID_BSSID, "bssid" }, - { WL_PROXD_TLV_ID_INIT_DELAY, "burst init delay" }, - { WL_PROXD_TLV_ID_BURST_TIMEOUT, "burst timeout" }, - { WL_PROXD_TLV_ID_EVENT_MASK, "event mask" }, - { WL_PROXD_TLV_ID_FLAGS_MASK, "flags mask" }, - { WL_PROXD_TLV_ID_PEER_MAC, "peer addr" }, - { WL_PROXD_TLV_ID_FTM_REQ, "ftm req" }, - { WL_PROXD_TLV_ID_LCI_REQ, "lci req" }, - { WL_PROXD_TLV_ID_LCI, "lci" }, - { WL_PROXD_TLV_ID_CIVIC_REQ, "civic req" }, - { WL_PROXD_TLV_ID_CIVIC, "civic" }, - { WL_PROXD_TLV_ID_AVAIL, "availability" }, - { WL_PROXD_TLV_ID_SESSION_FLAGS, "session flags" }, - { WL_PROXD_TLV_ID_SESSION_FLAGS_MASK, "session flags mask" }, - { WL_PROXD_TLV_ID_RX_MAX_BURST, "rx max bursts" }, - { WL_PROXD_TLV_ID_RANGING_INFO, "ranging info" }, - { WL_PROXD_TLV_ID_RANGING_FLAGS, "ranging flags" }, - { WL_PROXD_TLV_ID_RANGING_FLAGS_MASK, "ranging flags mask" }, - /* output - 512 + x */ - { WL_PROXD_TLV_ID_STATUS, "status" }, - { WL_PROXD_TLV_ID_COUNTERS, "counters" }, - { WL_PROXD_TLV_ID_INFO, "info" }, - { WL_PROXD_TLV_ID_RTT_RESULT, "rtt result" }, - { WL_PROXD_TLV_ID_AOA_RESULT, "aoa result" }, - { WL_PROXD_TLV_ID_SESSION_INFO, "session info" }, - { WL_PROXD_TLV_ID_SESSION_STATUS, "session status" }, - { WL_PROXD_TLV_ID_SESSION_ID_LIST, "session ids" }, - /* debug tlvs can be added starting 1024 */ - { WL_PROXD_TLV_ID_DEBUG_MASK, "debug mask" }, - { WL_PROXD_TLV_ID_COLLECT, "collect" }, - { WL_PROXD_TLV_ID_STRBUF, "result" }, - { WL_PROXD_TLV_ID_COLLECT_DATA, "collect-data" }, - { WL_PROXD_TLV_ID_RI_RR, "ri_rr" }, - { WL_PROXD_TLV_ID_COLLECT_CHAN_DATA, "chan est"}, - { 
WL_PROXD_TLV_ID_MF_STATS_DATA, "mf_stats_data"} -}; - static const ftm_strmap_entry_t ftm_event_type_loginfo[] = { /* wl_proxd_event_type_t, text-string */ { WL_PROXD_EVENT_NONE, "none" }, @@ -295,17 +265,6 @@ static const ftm_strmap_entry_t ftm_session_state_value_loginfo[] = { { WL_PROXD_SESSION_STATE_NONE, "none" } }; -/* -* ranging-state --> text string mapping -*/ -static const ftm_strmap_entry_t ftm_ranging_state_value_loginfo [] = { - /* wl_proxd_ranging_state_t, text string */ - { WL_PROXD_RANGING_STATE_NONE, "none" }, - { WL_PROXD_RANGING_STATE_NOTSTARTED, "nonstarted" }, - { WL_PROXD_RANGING_STATE_INPROGRESS, "inprogress" }, - { WL_PROXD_RANGING_STATE_DONE, "done" }, -}; - /* * status --> text string mapping */ @@ -348,28 +307,6 @@ static const ftm_strmap_entry_t ftm_tmu_value_loginfo[] = { { WL_PROXD_TMU_PICO_SEC, "ps" } }; -#define RSPEC_BW(rspec) ((rspec) & WL_RSPEC_BW_MASK) -#define RSPEC_IS20MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_20MHZ) -#define RSPEC_IS40MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_40MHZ) -#define RSPEC_IS80MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_80MHZ) -#define RSPEC_IS160MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_160MHZ) - -#define IS_MCS(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) != WL_RSPEC_ENCODE_RATE) -#define IS_STBC(rspec) (((((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HT) || \ - (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT)) && \ - (((rspec) & WL_RSPEC_STBC) == WL_RSPEC_STBC)) -#define RSPEC_ISSGI(rspec) (((rspec) & WL_RSPEC_SGI) != 0) -#define RSPEC_ISLDPC(rspec) (((rspec) & WL_RSPEC_LDPC) != 0) -#define RSPEC_ISSTBC(rspec) (((rspec) & WL_RSPEC_STBC) != 0) -#define RSPEC_ISTXBF(rspec) (((rspec) & WL_RSPEC_TXBF) != 0) -#define RSPEC_ISVHT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT) -#define RSPEC_ISHT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HT) -#define RSPEC_ISLEGACY(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_RATE) -#define 
RSPEC2RATE(rspec) (RSPEC_ISLEGACY(rspec) ? \ - ((rspec) & RSPEC_RATE_MASK) : rate_rspec2rate(rspec)) -/* return rate in unit of 500Kbps -- for internal use in wlc_rate_sel.c */ -#define RSPEC2KBPS(rspec) rate_rspec2rate(rspec) - struct ieee_80211_mcs_rate_info { uint8 constellation_bits; uint8 coding_q; @@ -583,8 +520,7 @@ ftm_map_id_to_str(int32 id, const ftm_strmap_entry_t *p_table, uint32 num_entrie return "invalid"; } -#ifdef RTT_DEBUG - +#if defined(WL_CFG80211) && defined(RTT_DEBUG) /* define entry, e.g. { WL_PROXD_CMD_xxx, "WL_PROXD_CMD_xxx" } */ #define DEF_STRMAP_ENTRY(id) { (id), #id } @@ -622,7 +558,7 @@ ftm_cmdid_to_str(uint16 cmdid) { return ftm_map_id_to_str((int32) cmdid, &ftm_cmdid_map[0], ARRAYSIZE(ftm_cmdid_map)); } -#endif /* RTT_DEBUG */ +#endif /* WL_CFG80211 && RTT_DEBUG */ /* * convert BCME_xxx error codes into related error strings @@ -693,7 +629,7 @@ rtt_do_get_ioctl(dhd_pub_t *dhd, wl_proxd_iov_t *p_proxd_iov, uint16 proxd_iovsi status = dhd_getiovar(dhd, 0, "proxd", (char *)p_proxd_iov, proxd_iovsize, (char **)&p_iovresp, WLC_IOCTL_SMLEN); if (status != BCME_OK) { - DHD_ERROR(("%s: failed to send getbuf proxd iovar (CMD ID : %d), status=%d\n", + DHD_RTT_ERR(("%s: failed to send getbuf proxd iovar (CMD ID : %d), status=%d\n", __FUNCTION__, p_subcmd_info->cmdid, status)); return status; } @@ -705,7 +641,7 @@ rtt_do_get_ioctl(dhd_pub_t *dhd, wl_proxd_iov_t *p_proxd_iov, uint16 proxd_iovsi tlvs_len = ltoh16(p_iovresp->len) - WL_PROXD_IOV_HDR_SIZE; if (tlvs_len < 0) { - DHD_ERROR(("%s: alert, p_iovresp->len(%d) should not be smaller than %d\n", + DHD_RTT_ERR(("%s: alert, p_iovresp->len(%d) should not be smaller than %d\n", __FUNCTION__, ltoh16(p_iovresp->len), (int) WL_PROXD_IOV_HDR_SIZE)); tlvs_len = 0; } @@ -724,7 +660,7 @@ rtt_alloc_getset_buf(wl_proxd_method_t method, wl_proxd_session_id_t session_id, wl_proxd_cmd_t cmdid, uint16 tlvs_bufsize, uint16 *p_out_bufsize) { uint16 proxd_iovsize; - uint16 kflags; + uint32 kflags; wl_proxd_tlv_t 
*p_tlv; wl_proxd_iov_t *p_proxd_iov = (wl_proxd_iov_t *) NULL; @@ -735,7 +671,7 @@ rtt_alloc_getset_buf(wl_proxd_method_t method, wl_proxd_session_id_t session_id, p_proxd_iov = kzalloc(proxd_iovsize, kflags); if (p_proxd_iov == NULL) { - DHD_ERROR(("error: failed to allocate %d bytes of memory\n", proxd_iovsize)); + DHD_RTT_ERR(("error: failed to allocate %d bytes of memory\n", proxd_iovsize)); return NULL; } @@ -836,11 +772,6 @@ dhd_rtt_common_set_handler(dhd_pub_t *dhd, const ftm_subcmd_info_t *p_subcmd_inf } #endif /* WL_CFG80211 */ -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif - /* gets the length and returns the version * of the wl_proxd_collect_event_t version */ @@ -974,7 +905,7 @@ rtt_result_ver(uint16 tlvid, const uint8 *p_data) } break; default: - DHD_ERROR(("%s: > Unsupported TLV ID %d\n", + DHD_RTT_ERR(("%s: > Unsupported TLV ID %d\n", __FUNCTION__, tlvid)); break; } @@ -1036,15 +967,15 @@ rtt_unpack_xtlv_cbfn(void *ctx, const uint8 *p_data, uint16 tlvid, uint16 len) expected_rtt_result_ver = rtt_result_ver(tlvid, p_data); switch (expected_rtt_result_ver) { case WL_PROXD_RTT_RESULT_VERSION_1: - ret = dhd_rtt_convert_results_to_host_v1((rtt_report_t *)ctx, + ret = dhd_rtt_convert_results_to_host_v1((rtt_result_t *)ctx, p_data, tlvid, len); break; case WL_PROXD_RTT_RESULT_VERSION_2: - ret = dhd_rtt_convert_results_to_host_v2((rtt_report_t *)ctx, + ret = dhd_rtt_convert_results_to_host_v2((rtt_result_t *)ctx, p_data, tlvid, len); break; default: - DHD_ERROR((" > Unsupported RTT_RESULT version\n")); + DHD_RTT_ERR((" > Unsupported RTT_RESULT version\n")); ret = BCME_UNSUPPORTED; break; } @@ -1069,13 +1000,15 @@ rtt_unpack_xtlv_cbfn(void *ctx, const uint8 *p_data, uint16 tlvid, uint16 len) ctx, p_data, len); break; case WL_PROXD_TLV_ID_COLLECT_CHAN_DATA: + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); DHD_RTT(("WL_PROXD_TLV_ID_COLLECT_CHAN_DATA\n")); DHD_RTT(("\tchan 
est %u\n", (uint32) (len / sizeof(uint32)))); - for (i = 0; i < (len/sizeof(chan_data_entry)); i++) { + for (i = 0; (uint16)i < (len/sizeof(chan_data_entry)); i++) { uint32 *p = (uint32*)p_data; chan_data_entry = ltoh32_ua(p + i); DHD_RTT(("\t%u\n", chan_data_entry)); } + GCC_DIAGNOSTIC_POP(); break; case WL_PROXD_TLV_ID_MF_STATS_DATA: DHD_RTT(("WL_PROXD_TLV_ID_MF_STATS_DATA\n")); @@ -1083,7 +1016,7 @@ rtt_unpack_xtlv_cbfn(void *ctx, const uint8 *p_data, uint16 tlvid, uint16 len) rtt_prhex("", p_data, len); break; default: - DHD_ERROR(("> Unsupported TLV ID %d\n", tlvid)); + DHD_RTT_ERR(("> Unsupported TLV ID %d\n", tlvid)); ret = BCME_ERROR; break; } @@ -1123,7 +1056,7 @@ rtt_handle_config_options(wl_proxd_session_id_t session_id, wl_proxd_tlv_t **p_t ret = bcm_pack_xtlv_entry((uint8 **)p_tlv, p_buf_space_left, type, sizeof(uint32), (uint8 *)&flags_mask, BCM_XTLV_OPTION_ALIGN32); if (ret != BCME_OK) { - DHD_ERROR(("%s : bcm_pack_xltv_entry() for mask flags failed, status=%d\n", + DHD_RTT_ERR(("%s : bcm_pack_xltv_entry() for mask flags failed, status=%d\n", __FUNCTION__, ret)); goto exit; } @@ -1200,7 +1133,7 @@ rtt_handle_config_general(wl_proxd_session_id_t session_id, wl_proxd_tlv_t **p_t break; } if (ret != BCME_OK) { - DHD_ERROR(("%s bad TLV ID : %d\n", + DHD_RTT_ERR(("%s bad TLV ID : %d\n", __FUNCTION__, p_config_param_info->tlvid)); break; } @@ -1209,7 +1142,7 @@ rtt_handle_config_general(wl_proxd_session_id_t session_id, wl_proxd_tlv_t **p_t p_config_param_info->tlvid, src_data_size, (uint8 *)p_src_data, BCM_XTLV_OPTION_ALIGN32); if (ret != BCME_OK) { - DHD_ERROR(("%s: bcm_pack_xltv_entry() failed," + DHD_RTT_ERR(("%s: bcm_pack_xltv_entry() failed," " status=%d\n", __FUNCTION__, ret)); break; } @@ -1251,6 +1184,50 @@ dhd_rtt_delete_session(dhd_pub_t *dhd, wl_proxd_session_id_t session_id) return dhd_rtt_common_set_handler(dhd, &subcmd_info, WL_PROXD_METHOD_FTM, session_id); } +#ifdef WL_NAN +int +dhd_rtt_delete_nan_session(dhd_pub_t *dhd) +{ + struct 
net_device *dev = dhd_linux_get_primary_netdev(dhd); + struct wireless_dev *wdev = ndev_to_wdev(dev); + struct wiphy *wiphy = wdev->wiphy; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + wl_cfgnan_terminate_directed_rtt_sessions(dev, cfg); + return BCME_OK; +} +#endif /* WL_NAN */ +/* API to find out if the given Peer Mac from FTM events +* is nan-peer. Based on this we will handle the SESSION_END +* event. For nan-peer FTM_SESSION_END event is ignored and handled in +* nan-ranging-cancel or nan-ranging-end event. +*/ +static bool +dhd_rtt_is_nan_peer(dhd_pub_t *dhd, struct ether_addr *peer_mac) +{ +#ifdef WL_NAN + struct net_device *dev = dhd_linux_get_primary_netdev(dhd); + struct wireless_dev *wdev = ndev_to_wdev(dev); + struct wiphy *wiphy = wdev->wiphy; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + nan_ranging_inst_t *ranging_inst = NULL; + bool ret = FALSE; + + if (cfg->nan_enable == FALSE || ETHER_ISNULLADDR(peer_mac)) { + goto exit; + } + + ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer_mac); + if (ranging_inst) { + DHD_RTT((" RTT peer is of type NAN\n")); + ret = TRUE; + goto exit; + } +exit: + return ret; +#else + return FALSE; +#endif /* WL_NAN */ +} #ifdef WL_NAN static int @@ -1261,23 +1238,62 @@ dhd_rtt_nan_start_session(dhd_pub_t *dhd, rtt_target_info_t *rtt_target) struct wireless_dev *wdev = ndev_to_wdev(dev); struct wiphy *wiphy = wdev->wiphy; struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + wl_nan_ev_rng_rpt_ind_t range_res; + nan_ranging_inst_t *ranging_inst = NULL; + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + + NAN_MUTEX_LOCK(); + + bzero(&range_res, sizeof(range_res)); + + if (!rtt_status) { + err = BCME_NOTENABLED; + goto done; + } + + if (!cfg->nan_enable) { /* If nan is not enabled report error */ + err = BCME_NOTENABLED; + goto done; + } + + /* check if new ranging session allowed */ + if (!wl_cfgnan_ranging_allowed(cfg)) { + /* responder should be in progress because initiator requests are + * queued in DHD. 
Since initiator has more proef cancel responder + * sessions + */ + wl_cfgnan_cancel_rng_responders(dev, cfg); + } - nan_ranging_inst_t *ranging_inst; ranging_inst = wl_cfgnan_get_ranging_inst(cfg, - &rtt_target->addr, 0, TRUE); - if (ranging_inst->range_status != NAN_RANGING_IN_PROGRESS) { - WL_DBG(("Trigger nan based range request\n")); - err = wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg), - cfg, ranging_inst, NULL, NAN_RANGE_REQ_CMD); - if (unlikely(err)) { - WL_ERR(("Failed to trigger ranging, ret = (%d)\n", err)); - /* Send an event reporting the failure */ - err = wl_cfgvendor_send_as_rtt_legacy_event(cfg->wdev->wiphy, - bcmcfg_to_prmry_ndev(cfg), NULL, RTT_STATUS_FAILURE); - memset(ranging_inst, 0, sizeof(*ranging_inst)); - } - cfg->nancfg.range_type = LEGACY_NAN_RTT; + &rtt_target->addr, NAN_RANGING_ROLE_INITIATOR); + if (!ranging_inst) { + err = BCME_NORESOURCE; + goto done; + } + + DHD_RTT(("Trigger nan based range request\n")); + err = wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg), + cfg, ranging_inst, NULL, NAN_RANGE_REQ_CMD, TRUE); + if (unlikely(err)) { + goto done; + } + ranging_inst->range_type = RTT_TYPE_NAN_DIRECTED; + ranging_inst->range_role = NAN_RANGING_ROLE_INITIATOR; + /* schedule proxd timeout */ + schedule_delayed_work(&rtt_status->proxd_timeout, + msecs_to_jiffies(DHD_NAN_RTT_TIMER_INTERVAL_MS)); +done: + if (err) { /* notify failure RTT event to host */ + DHD_RTT_ERR(("Failed to issue Nan Ranging Request err %d\n", err)); + dhd_rtt_handle_nan_rtt_session_end(dhd, &rtt_target->addr); + /* try to reset geofence */ + if (ranging_inst) { + wl_cfgnan_reset_geofence_ranging(cfg, ranging_inst, + RTT_SCHED_DIR_TRIGGER_FAIL); + } } + NAN_MUTEX_UNLOCK(); return err; } #endif /* WL_NAN */ @@ -1303,7 +1319,7 @@ dhd_rtt_ftm_config(dhd_pub_t *dhd, wl_proxd_session_id_t session_id, FTM_IOC_BUFSZ, &proxd_iovsize); if (p_proxd_iov == NULL) { - DHD_ERROR(("%s : failed to allocate the iovar (size :%d)\n", + DHD_RTT_ERR(("%s : failed to 
allocate the iovar (size :%d)\n", __FUNCTION__, FTM_IOC_BUFSZ)); return BCME_NOMEM; } @@ -1326,7 +1342,7 @@ dhd_rtt_ftm_config(dhd_pub_t *dhd, wl_proxd_session_id_t session_id, ret = dhd_iovar(dhd, 0, "proxd", (char *)p_proxd_iov, all_tlvsize + WL_PROXD_IOV_HDR_SIZE, NULL, 0, TRUE); if (ret != BCME_OK) { - DHD_ERROR(("%s : failed to set config\n", __FUNCTION__)); + DHD_RTT_ERR(("%s : failed to set config\n", __FUNCTION__)); } } /* clean up */ @@ -1378,7 +1394,7 @@ dhd_rtt_convert_to_chspec(wifi_channel_info_t channel) chanspec = wf_chspec_80(center_chan, primary_chan); break; default: - DHD_ERROR(("doesn't support this bandwith : %d", channel.width)); + DHD_RTT_ERR(("doesn't support this bandwith : %d", channel.width)); bw = -1; break; } @@ -1399,27 +1415,51 @@ dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params) { int err = BCME_OK; int idx; - rtt_status_info_t *rtt_status; - NULL_CHECK(params, "params is NULL", err); + rtt_status_info_t *rtt_status = NULL; + struct net_device *dev = NULL; + NULL_CHECK(params, "params is NULL", err); NULL_CHECK(dhd, "dhd is NULL", err); + + dev = dhd_linux_get_primary_netdev(dhd); rtt_status = GET_RTTSTATE(dhd); NULL_CHECK(rtt_status, "rtt_status is NULL", err); + NULL_CHECK(dev, "dev is NULL", err); + + mutex_lock(&rtt_status->rtt_work_mutex); if (!HAS_11MC_CAP(rtt_status->rtt_capa.proto)) { - DHD_ERROR(("doesn't support RTT \n")); - return BCME_ERROR; - } - if (rtt_status->status != RTT_STOPPED) { - DHD_ERROR(("rtt is already started\n")); - return BCME_BUSY; + DHD_RTT_ERR(("doesn't support RTT \n")); + err = BCME_ERROR; + goto exit; } + DHD_RTT(("%s enter\n", __FUNCTION__)); + if (params->rtt_target_cnt > 0) { +#ifdef WL_NAN + /* cancel ongoing geofence RTT if there */ + if ((err = wl_cfgnan_suspend_geofence_rng_session(dev, + NULL, RTT_GEO_SUSPN_HOST_DIR_RTT_TRIG, 0)) != BCME_OK) { + goto exit; + } +#endif /* WL_NAN */ + } else { + err = BCME_BADARG; + goto exit; + } + + mutex_lock(&rtt_status->rtt_mutex); + if 
(rtt_status->status != RTT_STOPPED) { + DHD_RTT_ERR(("rtt is already started\n")); + err = BCME_BUSY; + goto exit; + } memset(rtt_status->rtt_config.target_info, 0, TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT)); rtt_status->rtt_config.rtt_target_cnt = params->rtt_target_cnt; memcpy(rtt_status->rtt_config.target_info, params->target_info, TARGET_INFO_SIZE(params->rtt_target_cnt)); rtt_status->status = RTT_STARTED; + DHD_RTT_MEM(("dhd_rtt_set_cfg: RTT Started, target_cnt = %d\n", params->rtt_target_cnt)); /* start to measure RTT from first device */ /* find next target to trigger RTT */ for (idx = rtt_status->cur_idx; idx < rtt_status->rtt_config.rtt_target_cnt; idx++) { @@ -1434,196 +1474,893 @@ dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params) } if (idx < rtt_status->rtt_config.rtt_target_cnt) { DHD_RTT(("rtt_status->cur_idx : %d\n", rtt_status->cur_idx)); + rtt_status->rtt_sched_reason = RTT_SCHED_HOST_TRIGGER; + /* Cancel pending retry timer if any */ + if (delayed_work_pending(&rtt_status->rtt_retry_timer)) { + cancel_delayed_work(&rtt_status->rtt_retry_timer); + } schedule_work(&rtt_status->work); } +exit: + mutex_unlock(&rtt_status->rtt_mutex); + mutex_unlock(&rtt_status->rtt_work_mutex); return err; } -int -dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt) +#ifdef WL_NAN +void +dhd_rtt_initialize_geofence_cfg(dhd_pub_t *dhd) { - int err = BCME_OK; -#ifdef WL_CFG8011 - int i = 0, j = 0; - rtt_status_info_t *rtt_status; - rtt_results_header_t *entry, *next; - rtt_result_t *rtt_result, *next2; - struct rtt_noti_callback *iter; - - NULL_CHECK(dhd, "dhd is NULL", err); - rtt_status = GET_RTTSTATE(dhd); - NULL_CHECK(rtt_status, "rtt_status is NULL", err); - if (rtt_status->status == RTT_STOPPED) { - DHD_ERROR(("rtt is not started\n")); - return BCME_OK; + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + if (!rtt_status) { + return; } - DHD_RTT(("%s enter\n", __FUNCTION__)); - mutex_lock(&rtt_status->rtt_mutex); - for (i = 0; i < mac_cnt; 
i++) { - for (j = 0; j < rtt_status->rtt_config.rtt_target_cnt; j++) { - if (!bcmp(&mac_list[i], &rtt_status->rtt_config.target_info[j].addr, - ETHER_ADDR_LEN)) { - rtt_status->rtt_config.target_info[j].disable = TRUE; - } - } + + GEOFENCE_RTT_LOCK(rtt_status); + memset_s(&rtt_status->geofence_cfg, sizeof(rtt_status->geofence_cfg), + 0, sizeof(rtt_status->geofence_cfg)); + + /* initialize non zero params of geofence cfg */ + rtt_status->geofence_cfg.cur_target_idx = DHD_RTT_INVALID_TARGET_INDEX; + rtt_status->geofence_cfg.geofence_rtt_interval = DHD_RTT_RETRY_TIMER_INTERVAL_MS; + GEOFENCE_RTT_UNLOCK(rtt_status); + return; +} + +#ifdef RTT_GEOFENCE_CONT +void +dhd_rtt_get_geofence_cont_ind(dhd_pub_t *dhd, bool* geofence_cont) +{ + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + if (!rtt_status) { + return; } - if (rtt_status->all_cancel) { - /* cancel all of request */ - rtt_status->status = RTT_STOPPED; - DHD_RTT(("current RTT process is cancelled\n")); - /* remove the rtt results in cache */ - if (!list_empty(&rtt_status->rtt_results_cache)) { - /* Iterate rtt_results_header list */ -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif - list_for_each_entry_safe(entry, next, - &rtt_status->rtt_results_cache, list) { - list_del(&entry->list); - /* Iterate rtt_result list */ - list_for_each_entry_safe(rtt_result, next2, - &entry->result_list, list) { - list_del(&rtt_result->list); - kfree(rtt_result); - } - kfree(entry); - } -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif - } - /* send the rtt complete event to wake up the user process */ -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif - list_for_each_entry(iter, &rtt_status->noti_fn_list, list) { - iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache); - } -#if 
defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif - /* reinitialize the HEAD */ - INIT_LIST_HEAD(&rtt_status->rtt_results_cache); - /* clear information for rtt_config */ - rtt_status->rtt_config.rtt_target_cnt = 0; - memset(rtt_status->rtt_config.target_info, 0, - TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT)); - rtt_status->cur_idx = 0; - dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION); - dhd_rtt_ftm_enable(dhd, FALSE); + GEOFENCE_RTT_LOCK(rtt_status); + *geofence_cont = rtt_status->geofence_cfg.geofence_cont; + GEOFENCE_RTT_UNLOCK(rtt_status); +} + +void +dhd_rtt_set_geofence_cont_ind(dhd_pub_t *dhd, bool geofence_cont) +{ + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + if (!rtt_status) { + return; } - mutex_unlock(&rtt_status->rtt_mutex); -#endif /* WL_CFG80211 */ - return err; + GEOFENCE_RTT_LOCK(rtt_status); + rtt_status->geofence_cfg.geofence_cont = geofence_cont; + DHD_RTT(("dhd_rtt_set_geofence_cont_override, geofence_cont = %d\n", + rtt_status->geofence_cfg.geofence_cont)); + GEOFENCE_RTT_UNLOCK(rtt_status); } +#endif /* RTT_GEOFENCE_CONT */ -#ifdef WL_CFG80211 -static int -dhd_rtt_start(dhd_pub_t *dhd) +#ifdef RTT_GEOFENCE_INTERVAL +void +dhd_rtt_set_geofence_rtt_interval(dhd_pub_t *dhd, int interval) { - int err = BCME_OK; - char eabuf[ETHER_ADDR_STR_LEN]; - char chanbuf[CHANSPEC_STR_LEN]; - int pm = PM_OFF; - int ftm_cfg_cnt = 0; - int ftm_param_cnt = 0; - uint32 rspec = 0; - ftm_config_options_info_t ftm_configs[FTM_MAX_CONFIGS]; - ftm_config_param_info_t ftm_params[FTM_MAX_PARAMS]; - rtt_target_info_t *rtt_target; - rtt_status_info_t *rtt_status; - struct net_device *dev = dhd_linux_get_primary_netdev(dhd); - u8 ioctl_buf[WLC_IOCTL_SMLEN]; - NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + if (!rtt_status) { + return; + } + GEOFENCE_RTT_LOCK(rtt_status); + rtt_status->geofence_cfg.geofence_rtt_interval = interval; + DHD_RTT(("dhd_rtt_set_geofence_rtt_interval: 
geofence interval = %d\n", + rtt_status->geofence_cfg.geofence_rtt_interval)); + GEOFENCE_RTT_UNLOCK(rtt_status); +} +#endif /* RTT_GEOFENCE_INTERVAL */ - rtt_status = GET_RTTSTATE(dhd); - NULL_CHECK(rtt_status, "rtt_status is NULL", err); +/* sets geofence role concurrency state TRUE/FALSE */ +void +dhd_rtt_set_role_concurrency_state(dhd_pub_t *dhd, bool state) +{ + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + if (!rtt_status) { + return; + } + GEOFENCE_RTT_LOCK(rtt_status); + rtt_status->geofence_cfg.role_concurr_state = state; + GEOFENCE_RTT_UNLOCK(rtt_status); +} - DHD_RTT(("Enter %s\n", __FUNCTION__)); - if (rtt_status->cur_idx >= rtt_status->rtt_config.rtt_target_cnt) { - err = BCME_RANGE; - DHD_RTT(("%s : idx %d is out of range\n", __FUNCTION__, rtt_status->cur_idx)); - if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) { - DHD_ERROR(("STA is set as Target/Responder \n")); - return BCME_ERROR; - } - goto exit; +/* returns TRUE if geofence role concurrency constraint exists */ +bool +dhd_rtt_get_role_concurrency_state(dhd_pub_t *dhd) +{ + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + if (!rtt_status) { + return FALSE; } - if (RTT_IS_STOPPED(rtt_status)) { - DHD_RTT(("RTT is stopped\n")); - goto exit; + return rtt_status->geofence_cfg.role_concurr_state; +} + +int8 +dhd_rtt_get_geofence_target_cnt(dhd_pub_t *dhd) +{ + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + if (!rtt_status) { + return 0; } - rtt_status->pm = PM_OFF; - err = wldev_ioctl_get(dev, WLC_GET_PM, &rtt_status->pm, sizeof(rtt_status->pm)); - if (err) { - DHD_ERROR(("Failed to get the PM value\n")); - } else { - err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm)); - if (err) { - DHD_ERROR(("Failed to set the PM\n")); - rtt_status->pm_restore = FALSE; - } else { - rtt_status->pm_restore = TRUE; - } + return rtt_status->geofence_cfg.geofence_target_cnt; +} + +/* sets geofence rtt state TRUE/FALSE */ +void +dhd_rtt_set_geofence_rtt_state(dhd_pub_t *dhd, bool state) +{ + 
rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + if (!rtt_status) { + return; } + GEOFENCE_RTT_LOCK(rtt_status); + rtt_status->geofence_cfg.rtt_in_progress = state; + GEOFENCE_RTT_UNLOCK(rtt_status); +} - mutex_lock(&rtt_status->rtt_mutex); - /* Get a target information */ - rtt_target = &rtt_status->rtt_config.target_info[rtt_status->cur_idx]; - mutex_unlock(&rtt_status->rtt_mutex); - DHD_RTT(("%s enter\n", __FUNCTION__)); +/* returns TRUE if geofence rtt is in progress */ +bool +dhd_rtt_get_geofence_rtt_state(dhd_pub_t *dhd) +{ + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); -#ifdef WL_NAN - if (rtt_target->peer == RTT_PEER_NAN && - (!ETHER_ISNULLADDR(rtt_target->addr.octet))) { - err = dhd_rtt_nan_start_session(dhd, rtt_target); - /* Irrespectivily go to exit */ - goto exit; + if (!rtt_status) { + return FALSE; } -#endif /* WL_NAN */ - if (!RTT_IS_ENABLED(rtt_status)) { - /* enable ftm */ - err = dhd_rtt_ftm_enable(dhd, TRUE); - if (err) { - DHD_ERROR(("failed to enable FTM (%d)\n", err)); - goto exit; - } + return rtt_status->geofence_cfg.rtt_in_progress; +} + +/* returns geofence RTT target list Head */ +rtt_geofence_target_info_t* +dhd_rtt_get_geofence_target_head(dhd_pub_t *dhd) +{ + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + rtt_geofence_target_info_t* head = NULL; + + if (!rtt_status) { + return NULL; } - /* delete session of index default sesession */ - err = dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION); - if (err < 0 && err != BCME_NOTFOUND) { - DHD_ERROR(("failed to delete session of FTM (%d)\n", err)); - goto exit; + if (rtt_status->geofence_cfg.geofence_target_cnt) { + head = &rtt_status->geofence_cfg.geofence_target_info[0]; } - rtt_status->status = RTT_ENABLED; - memset(ftm_configs, 0, sizeof(ftm_configs)); - memset(ftm_params, 0, sizeof(ftm_params)); - /* configure the session 1 as initiator */ - ftm_configs[ftm_cfg_cnt].enable = TRUE; - ftm_configs[ftm_cfg_cnt++].flags = WL_PROXD_SESSION_FLAG_INITIATOR; - 
dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_OPTIONS, - ftm_configs, ftm_cfg_cnt); + return head; +} - memset(ioctl_buf, 0, WLC_IOCTL_SMLEN); - err = wldev_iovar_getbuf(dev, "cur_etheraddr", NULL, 0, - ioctl_buf, WLC_IOCTL_SMLEN, NULL); - if (err) { - WL_ERR(("WLC_GET_CUR_ETHERADDR failed, error %d\n", err)); +int8 +dhd_rtt_get_geofence_cur_target_idx(dhd_pub_t *dhd) +{ + int8 target_cnt = 0, cur_idx = DHD_RTT_INVALID_TARGET_INDEX; + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + + if (!rtt_status) { goto exit; } - memcpy(rtt_target->local_addr.octet, ioctl_buf, ETHER_ADDR_LEN); - /* local mac address */ - if (!ETHER_ISNULLADDR(rtt_target->local_addr.octet)) { - ftm_params[ftm_param_cnt].mac_addr = rtt_target->local_addr; - ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_CUR_ETHER_ADDR; - bcm_ether_ntoa(&rtt_target->local_addr, eabuf); - DHD_RTT((">\t local %s\n", eabuf)); + target_cnt = rtt_status->geofence_cfg.geofence_target_cnt; + if (target_cnt == 0) { + goto exit; + } + + cur_idx = rtt_status->geofence_cfg.cur_target_idx; + ASSERT(cur_idx <= target_cnt); + +exit: + return cur_idx; +} + +void +dhd_rtt_move_geofence_cur_target_idx_to_next(dhd_pub_t *dhd) +{ + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + + if (!rtt_status) { + return; + } + + if (rtt_status->geofence_cfg.geofence_target_cnt == 0) { + /* Invalidate current idx if no targets */ + rtt_status->geofence_cfg.cur_target_idx = + DHD_RTT_INVALID_TARGET_INDEX; + /* Cancel pending retry timer if any */ + if (delayed_work_pending(&rtt_status->rtt_retry_timer)) { + cancel_delayed_work(&rtt_status->rtt_retry_timer); + } + return; + } + rtt_status->geofence_cfg.cur_target_idx++; + + if (rtt_status->geofence_cfg.cur_target_idx >= + rtt_status->geofence_cfg.geofence_target_cnt) { + /* Reset once all targets done */ + rtt_status->geofence_cfg.cur_target_idx = 0; + } +} + +/* returns geofence current RTT target */ +rtt_geofence_target_info_t* 
+dhd_rtt_get_geofence_current_target(dhd_pub_t *dhd) +{ + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + rtt_geofence_target_info_t* cur_target = NULL; + int cur_idx = 0; + + if (!rtt_status) { + return NULL; + } + + cur_idx = dhd_rtt_get_geofence_cur_target_idx(dhd); + if (cur_idx >= 0) { + cur_target = &rtt_status->geofence_cfg.geofence_target_info[cur_idx]; + } + + return cur_target; +} + +/* returns geofence target from list for the peer */ +rtt_geofence_target_info_t* +dhd_rtt_get_geofence_target(dhd_pub_t *dhd, struct ether_addr* peer_addr, int8 *index) +{ + int8 i; + rtt_status_info_t *rtt_status; + int target_cnt; + rtt_geofence_target_info_t *geofence_target_info, *tgt = NULL; + + rtt_status = GET_RTTSTATE(dhd); + + if (!rtt_status) { + return NULL; + } + + target_cnt = rtt_status->geofence_cfg.geofence_target_cnt; + geofence_target_info = rtt_status->geofence_cfg.geofence_target_info; + + /* Loop through to find target */ + for (i = 0; i < target_cnt; i++) { + if (geofence_target_info[i].valid == FALSE) { + break; + } + if (!memcmp(peer_addr, &geofence_target_info[i].peer_addr, + ETHER_ADDR_LEN)) { + *index = i; + tgt = &geofence_target_info[i]; + } + } + if (!tgt) { + DHD_RTT(("dhd_rtt_get_geofence_target: Target not found in list," + " MAC ADDR: "MACDBG" \n", MAC2STRDBG(peer_addr))); + } + return tgt; +} + +/* add geofence target to the target list */ +int +dhd_rtt_add_geofence_target(dhd_pub_t *dhd, rtt_geofence_target_info_t *target) +{ + int err = BCME_OK; + rtt_status_info_t *rtt_status; + rtt_geofence_target_info_t *geofence_target_info; + int8 geofence_target_cnt, index; + + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + + GEOFENCE_RTT_LOCK(rtt_status); + + /* Get the geofence_target via peer addr, index param is dumm here */ + geofence_target_info = dhd_rtt_get_geofence_target(dhd, &target->peer_addr, &index); + if (geofence_target_info) { + DHD_RTT(("Duplicate 
geofencing RTT add request dropped\n")); + err = BCME_OK; + goto exit; + } + + geofence_target_cnt = rtt_status->geofence_cfg.geofence_target_cnt; + if (geofence_target_cnt >= RTT_MAX_GEOFENCE_TARGET_CNT) { + DHD_RTT(("Queue full, Geofencing RTT add request dropped\n")); + err = BCME_NORESOURCE; + goto exit; + } + + /* Add Geofence RTT request and increment target count */ + geofence_target_info = rtt_status->geofence_cfg.geofence_target_info; + /* src and dest buffer len same, pointers of same DS statically allocated */ + (void)memcpy_s(&geofence_target_info[geofence_target_cnt], + sizeof(geofence_target_info[geofence_target_cnt]), target, + sizeof(*target)); + geofence_target_info[geofence_target_cnt].valid = TRUE; + rtt_status->geofence_cfg.geofence_target_cnt++; + if (rtt_status->geofence_cfg.geofence_target_cnt == 1) { + /* Adding first target */ + rtt_status->geofence_cfg.cur_target_idx = 0; + } + +exit: + GEOFENCE_RTT_UNLOCK(rtt_status); + return err; +} + +/* removes geofence target from the target list */ +int +dhd_rtt_remove_geofence_target(dhd_pub_t *dhd, struct ether_addr *peer_addr) +{ + int err = BCME_OK; + rtt_status_info_t *rtt_status; + rtt_geofence_target_info_t *geofence_target_info; + int8 geofence_target_cnt, j, index = 0; + + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + + GEOFENCE_RTT_LOCK(rtt_status); + + geofence_target_cnt = dhd_rtt_get_geofence_target_cnt(dhd); + if (geofence_target_cnt == 0) { + DHD_RTT(("Queue Empty, Geofencing RTT remove request dropped\n")); + ASSERT(0); + goto exit; + } + + /* Get the geofence_target via peer addr */ + geofence_target_info = dhd_rtt_get_geofence_target(dhd, peer_addr, &index); + if (geofence_target_info == NULL) { + DHD_RTT(("Geofencing RTT target not found, remove request dropped\n")); + err = BCME_NOTFOUND; + goto exit; + } + + /* left shift all the valid entries, as we dont keep holes in list */ + for (j = index; (j+1) 
< geofence_target_cnt; j++) { + if (geofence_target_info[j].valid == TRUE) { + /* + * src and dest buffer len same, pointers of same DS + * statically allocated + */ + (void)memcpy_s(&geofence_target_info[j], sizeof(geofence_target_info[j]), + &geofence_target_info[j + 1], + sizeof(geofence_target_info[j + 1])); + } else { + break; + } + } + rtt_status->geofence_cfg.geofence_target_cnt--; + if ((rtt_status->geofence_cfg.geofence_target_cnt == 0) || + (index == rtt_status->geofence_cfg.cur_target_idx)) { + /* Move cur_idx to next target */ + dhd_rtt_move_geofence_cur_target_idx_to_next(dhd); + } else if (index < rtt_status->geofence_cfg.cur_target_idx) { + /* Decrement cur index if cur target position changed */ + rtt_status->geofence_cfg.cur_target_idx--; + } + +exit: + GEOFENCE_RTT_UNLOCK(rtt_status); + return err; +} + +/* deletes/empty geofence target list */ +int +dhd_rtt_delete_geofence_target_list(dhd_pub_t *dhd) +{ + rtt_status_info_t *rtt_status; + + int err = BCME_OK; + + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + GEOFENCE_RTT_LOCK(rtt_status); + memset_s(&rtt_status->geofence_cfg, sizeof(rtt_geofence_cfg_t), + 0, sizeof(rtt_geofence_cfg_t)); + GEOFENCE_RTT_UNLOCK(rtt_status); + return err; +} + +int +dhd_rtt_sched_geofencing_target(dhd_pub_t *dhd) +{ + rtt_geofence_target_info_t *geofence_target_info; + struct net_device *dev = dhd_linux_get_primary_netdev(dhd); + int ret = BCME_OK; + bool geofence_state; + bool role_concurrency_state; + u8 rtt_invalid_reason = RTT_STATE_VALID; + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + + NAN_MUTEX_LOCK(); + + if ((cfg->nan_init_state == FALSE) || + (cfg->nan_enable == FALSE)) { + ret = BCME_NOTENABLED; + goto done; + } + geofence_state = dhd_rtt_get_geofence_rtt_state(dhd); + role_concurrency_state = dhd_rtt_get_role_concurrency_state(dhd); + + 
DHD_RTT_ERR(("dhd_rtt_sched_geofencing_target: sched_reason = %d\n", + rtt_status->rtt_sched_reason)); + + if (geofence_state == TRUE || role_concurrency_state == TRUE) { + ret = BCME_ERROR; + DHD_RTT_ERR(("geofencing constraint , sched request dropped," + " geofence_state = %d, role_concurrency_state = %d\n", + geofence_state, role_concurrency_state)); + goto done; + } + + /* Get current geofencing target */ + geofence_target_info = dhd_rtt_get_geofence_current_target(dhd); + + /* call cfg API for trigerring geofencing RTT */ + if (geofence_target_info) { + /* check for dp/others concurrency */ + rtt_invalid_reason = dhd_rtt_invalid_states(dev, + &geofence_target_info->peer_addr); + if (rtt_invalid_reason != RTT_STATE_VALID) { + ret = BCME_BUSY; + DHD_RTT_ERR(("DRV State is not valid for RTT, " + "invalid_state = %d\n", rtt_invalid_reason)); + goto done; + } + + ret = wl_cfgnan_trigger_geofencing_ranging(dev, + &geofence_target_info->peer_addr); + if (ret == BCME_OK) { + dhd_rtt_set_geofence_rtt_state(dhd, TRUE); + } + } else { + DHD_RTT(("No RTT target to schedule\n")); + ret = BCME_NOTFOUND; + } + +done: + NAN_MUTEX_UNLOCK(); + return ret; +} +#endif /* WL_NAN */ + +#ifdef WL_CFG80211 +#ifdef WL_NAN +static void +dhd_rtt_retry(dhd_pub_t *dhd) +{ + struct net_device *dev = dhd_linux_get_primary_netdev(dhd); + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); + rtt_geofence_target_info_t *geofence_target = NULL; + nan_ranging_inst_t *ranging_inst = NULL; + + geofence_target = dhd_rtt_get_geofence_current_target(dhd); + if (!geofence_target) { + DHD_RTT(("dhd_rtt_retry: geofence target null\n")); + goto exit; + } + ranging_inst = wl_cfgnan_get_ranging_inst(cfg, + &geofence_target->peer_addr, NAN_RANGING_ROLE_INITIATOR); + if (!ranging_inst) { + DHD_RTT(("dhd_rtt_retry: ranging instance null\n")); + goto exit; + } + wl_cfgnan_reset_geofence_ranging(cfg, + ranging_inst, RTT_SCHED_RTT_RETRY_GEOFENCE); + +exit: + return; +} + +static void +dhd_rtt_retry_work(struct 
work_struct *work) +{ + rtt_status_info_t *rtt_status = NULL; + dhd_pub_t *dhd = NULL; + struct net_device *dev = NULL; + struct bcm_cfg80211 *cfg = NULL; + + if (!work) { + goto exit; + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + rtt_status = container_of(work, rtt_status_info_t, rtt_retry_timer.work); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + + dhd = rtt_status->dhd; + if (dhd == NULL) { + DHD_RTT_ERR(("%s : dhd is NULL\n", __FUNCTION__)); + goto exit; + } + dev = dhd_linux_get_primary_netdev(dhd); + cfg = wl_get_cfg(dev); + + NAN_MUTEX_LOCK(); + mutex_lock(&rtt_status->rtt_mutex); + (void) dhd_rtt_retry(dhd); + mutex_unlock(&rtt_status->rtt_mutex); + NAN_MUTEX_UNLOCK(); + +exit: + return; +} +#endif /* WL_NAN */ + +/* + * Return zero (0) + * for valid RTT state + * means if RTT is applicable + */ +uint8 +dhd_rtt_invalid_states(struct net_device *ndev, struct ether_addr *peer_addr) +{ + uint8 invalid_reason = RTT_STATE_VALID; + struct bcm_cfg80211 *cfg = wl_get_cfg(ndev); + + UNUSED_PARAMETER(cfg); + UNUSED_PARAMETER(invalid_reason); + + /* Make sure peer addr is not NULL in caller */ + ASSERT(peer_addr); + /* + * Keep adding prohibited drv states here + * Only generic conditions which block + * All RTTs like NDP connection + */ + +#ifdef WL_NAN + if (wl_cfgnan_data_dp_exists_with_peer(cfg, peer_addr)) { + invalid_reason = RTT_STATE_INV_REASON_NDP_EXIST; + DHD_RTT(("NDP in progress/connected, RTT prohibited\n")); + goto exit; + } +#endif /* WL_NAN */ + + /* Remove below #defines once more exit calls come */ +#ifdef WL_NAN +exit: +#endif /* WL_NAN */ + return invalid_reason; +} +#endif /* WL_CFG80211 */ + +void +dhd_rtt_schedule_rtt_work_thread(dhd_pub_t *dhd, int sched_reason) +{ + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + if (rtt_status == NULL) { + ASSERT(0); + } else { + /* Cancel 
pending retry timer if any */ + if (delayed_work_pending(&rtt_status->rtt_retry_timer)) { + cancel_delayed_work(&rtt_status->rtt_retry_timer); + } + rtt_status->rtt_sched_reason = sched_reason; + schedule_work(&rtt_status->work); + } + return; +} + +int +dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt) +{ + int err = BCME_OK; +#ifdef WL_CFG80211 + int i = 0, j = 0; + rtt_status_info_t *rtt_status; + rtt_results_header_t *entry, *next; + rtt_result_t *rtt_result, *next2; + struct rtt_noti_callback *iter; + + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + if (rtt_status->status == RTT_STOPPED) { + DHD_RTT_ERR(("rtt is not started\n")); + return BCME_OK; + } + DHD_RTT(("%s enter\n", __FUNCTION__)); + mutex_lock(&rtt_status->rtt_mutex); + for (i = 0; i < mac_cnt; i++) { + for (j = 0; j < rtt_status->rtt_config.rtt_target_cnt; j++) { + if (!bcmp(&mac_list[i], &rtt_status->rtt_config.target_info[j].addr, + ETHER_ADDR_LEN)) { + rtt_status->rtt_config.target_info[j].disable = TRUE; + } + } + } + if (rtt_status->all_cancel) { + /* cancel all of request */ + rtt_status->status = RTT_STOPPED; + DHD_RTT(("current RTT process is cancelled\n")); + /* remove the rtt results in cache */ + if (!list_empty(&rtt_status->rtt_results_cache)) { + /* Iterate rtt_results_header list */ + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + list_for_each_entry_safe(entry, next, + &rtt_status->rtt_results_cache, list) { + list_del(&entry->list); + /* Iterate rtt_result list */ + list_for_each_entry_safe(rtt_result, next2, + &entry->result_list, list) { + list_del(&rtt_result->list); + kfree(rtt_result); + } + kfree(entry); + } + GCC_DIAGNOSTIC_POP(); + } + /* send the rtt complete event to wake up the user process */ + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + list_for_each_entry(iter, &rtt_status->noti_fn_list, list) { + GCC_DIAGNOSTIC_POP(); + iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache); + } + 
/* reinitialize the HEAD */ + INIT_LIST_HEAD(&rtt_status->rtt_results_cache); + /* clear information for rtt_config */ + rtt_status->rtt_config.rtt_target_cnt = 0; + memset(rtt_status->rtt_config.target_info, 0, + TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT)); + rtt_status->cur_idx = 0; + /* Cancel pending proxd timeout work if any */ + if (delayed_work_pending(&rtt_status->proxd_timeout)) { + cancel_delayed_work(&rtt_status->proxd_timeout); + } + dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION); +#ifdef WL_NAN + dhd_rtt_delete_nan_session(dhd); +#endif /* WL_NAN */ + dhd_rtt_ftm_enable(dhd, FALSE); + } + mutex_unlock(&rtt_status->rtt_mutex); +#endif /* WL_CFG80211 */ + return err; +} + +#ifdef WL_CFG80211 +static void +dhd_rtt_timeout(dhd_pub_t *dhd) +{ + rtt_status_info_t *rtt_status; +#ifndef DHD_DUMP_ON_RTT_TIMEOUT + rtt_target_info_t *rtt_target = NULL; + rtt_target_info_t *rtt_target_info = NULL; +#ifdef WL_NAN + nan_ranging_inst_t *ranging_inst = NULL; + int ret = BCME_OK; + uint32 status; + struct net_device *ndev = dhd_linux_get_primary_netdev(dhd); + struct bcm_cfg80211 *cfg = wiphy_priv(ndev->ieee80211_ptr->wiphy); +#endif /* WL_NAN */ +#endif /* !DHD_DUMP_ON_RTT_TIMEOUT */ + + rtt_status = GET_RTTSTATE(dhd); + if (!rtt_status) { + DHD_RTT_ERR(("Proxd timer expired but no RTT status\n")); + goto exit; + } + + if (RTT_IS_STOPPED(rtt_status)) { + DHD_RTT_ERR(("Proxd timer expired but no RTT Request\n")); + goto exit; + } + +#ifdef DHD_DUMP_ON_RTT_TIMEOUT + /* Dump, and Panic depending on memdump.info */ + if (dhd_query_bus_erros(dhd)) { + goto exit; + } +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* Behave based on user memdump info */ + dhd->memdump_type = DUMP_TYPE_PROXD_TIMEOUT; + dhd_bus_mem_dump(dhd); + } +#endif /* DHD_FW_COREDUMP */ +#else /* DHD_DUMP_ON_RTT_TIMEOUT */ + /* Cancel RTT for target and proceed to next target */ + rtt_target_info = rtt_status->rtt_config.target_info; + if ((!rtt_target_info) || + (rtt_status->cur_idx >= 
rtt_status->rtt_config.rtt_target_cnt)) { + goto exit; + } + rtt_target = &rtt_target_info[rtt_status->cur_idx]; + WL_ERR(("Proxd timer expired for Target: "MACDBG" \n", MAC2STRDBG(&rtt_target->addr))); +#ifdef WL_NAN + if (rtt_target->peer == RTT_PEER_NAN) { + ranging_inst = wl_cfgnan_check_for_ranging(cfg, &rtt_target->addr); + if (!ranging_inst) { + goto exit; + } + ret = wl_cfgnan_cancel_ranging(ndev, cfg, ranging_inst->range_id, + NAN_RNG_TERM_FLAG_IMMEDIATE, &status); + if (unlikely(ret) || unlikely(status)) { + WL_ERR(("%s:nan range cancel failed ret = %d status = %d\n", + __FUNCTION__, ret, status)); + } + } else +#endif /* WL_NAN */ + { + /* For Legacy RTT */ + dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION); + } + dhd_rtt_create_failure_result(rtt_status, &rtt_target->addr); + dhd_rtt_handle_rtt_session_end(dhd); +#endif /* DHD_DUMP_ON_RTT_TIMEOUT */ +exit: + return; +} + +static void +dhd_rtt_timeout_work(struct work_struct *work) +{ + rtt_status_info_t *rtt_status = NULL; + dhd_pub_t *dhd = NULL; + +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + rtt_status = container_of(work, rtt_status_info_t, proxd_timeout.work); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + + dhd = rtt_status->dhd; + if (dhd == NULL) { + DHD_RTT_ERR(("%s : dhd is NULL\n", __FUNCTION__)); + return; + } + mutex_lock(&rtt_status->rtt_mutex); + (void) dhd_rtt_timeout(dhd); + mutex_unlock(&rtt_status->rtt_mutex); +} + +static int +dhd_rtt_start(dhd_pub_t *dhd) +{ + int err = BCME_OK; + int err_at = 0; + char eabuf[ETHER_ADDR_STR_LEN]; + char chanbuf[CHANSPEC_STR_LEN]; + int pm = PM_OFF; + int ftm_cfg_cnt = 0; + int ftm_param_cnt = 0; + uint32 rspec = 0; + ftm_config_options_info_t ftm_configs[FTM_MAX_CONFIGS]; + ftm_config_param_info_t ftm_params[FTM_MAX_PARAMS]; + rtt_target_info_t *rtt_target; + rtt_status_info_t *rtt_status; + 
struct net_device *dev = dhd_linux_get_primary_netdev(dhd); + u8 ioctl_buf[WLC_IOCTL_SMLEN]; + u8 rtt_invalid_reason = RTT_STATE_VALID; + int rtt_sched_type = RTT_TYPE_INVALID; + + NULL_CHECK(dhd, "dhd is NULL", err); + + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + + mutex_lock(&rtt_status->rtt_work_mutex); + + DHD_RTT(("Enter %s\n", __FUNCTION__)); + + if (RTT_IS_STOPPED(rtt_status)) { + DHD_RTT(("No Directed RTT target to process, check for geofence\n")); + goto geofence; + } + + if (rtt_status->cur_idx >= rtt_status->rtt_config.rtt_target_cnt) { + err = BCME_RANGE; + err_at = 1; + DHD_RTT(("%s : idx %d is out of range\n", __FUNCTION__, rtt_status->cur_idx)); + if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) { + DHD_RTT_ERR(("STA is set as Target/Responder \n")); + err = BCME_ERROR; + err_at = 1; + } + goto exit; + } + + rtt_status->pm = PM_OFF; + err = wldev_ioctl_get(dev, WLC_GET_PM, &rtt_status->pm, sizeof(rtt_status->pm)); + if (err) { + DHD_RTT_ERR(("Failed to get the PM value\n")); + } else { + err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm)); + if (err) { + DHD_RTT_ERR(("Failed to set the PM\n")); + rtt_status->pm_restore = FALSE; + } else { + rtt_status->pm_restore = TRUE; + } + } + + mutex_lock(&rtt_status->rtt_mutex); + /* Get a target information */ + rtt_target = &rtt_status->rtt_config.target_info[rtt_status->cur_idx]; + mutex_unlock(&rtt_status->rtt_mutex); + DHD_RTT(("%s enter\n", __FUNCTION__)); + + if (ETHER_ISNULLADDR(rtt_target->addr.octet)) { + err = BCME_BADADDR; + err_at = 2; + DHD_RTT(("RTT Target addr is NULL\n")); + goto exit; + } + + /* check for dp/others concurrency */ + rtt_invalid_reason = dhd_rtt_invalid_states(dev, &rtt_target->addr); + if (rtt_invalid_reason != RTT_STATE_VALID) { + err = BCME_BUSY; + err_at = 3; + DHD_RTT(("DRV State is not valid for RTT\n")); + goto exit; + } + +#ifdef WL_NAN + if (rtt_target->peer == RTT_PEER_NAN) { + rtt_sched_type = 
RTT_TYPE_NAN_DIRECTED; + rtt_status->status = RTT_ENABLED; + /* Ignore return value..failure taken care inside the API */ + dhd_rtt_nan_start_session(dhd, rtt_target); + goto exit; + } +#endif /* WL_NAN */ + if (!RTT_IS_ENABLED(rtt_status)) { + /* enable ftm */ + err = dhd_rtt_ftm_enable(dhd, TRUE); + if (err) { + DHD_RTT_ERR(("failed to enable FTM (%d)\n", err)); + err_at = 5; + goto exit; + } + } + + /* delete session of index default sesession */ + err = dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION); + if (err < 0 && err != BCME_NOTFOUND) { + DHD_RTT_ERR(("failed to delete session of FTM (%d)\n", err)); + err_at = 6; + goto exit; + } + rtt_status->status = RTT_ENABLED; + memset(ftm_configs, 0, sizeof(ftm_configs)); + memset(ftm_params, 0, sizeof(ftm_params)); + + /* configure the session 1 as initiator */ + ftm_configs[ftm_cfg_cnt].enable = TRUE; + ftm_configs[ftm_cfg_cnt++].flags = + WL_PROXD_SESSION_FLAG_INITIATOR | WL_PROXD_SESSION_FLAG_RANDMAC; + dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_OPTIONS, + ftm_configs, ftm_cfg_cnt); + + memset(ioctl_buf, 0, WLC_IOCTL_SMLEN); + + /* Rand Mac for newer version in place of cur_eth */ + if (dhd->wlc_ver_major < RTT_IOV_CUR_ETH_OBSOLETE) { + err = wldev_iovar_getbuf(dev, "cur_etheraddr", NULL, 0, + ioctl_buf, WLC_IOCTL_SMLEN, NULL); + if (err) { + DHD_RTT_ERR(("WLC_GET_CUR_ETHERADDR failed, error %d\n", err)); + err_at = 7; + goto exit; + } + memcpy(rtt_target->local_addr.octet, ioctl_buf, ETHER_ADDR_LEN); + + /* local mac address */ + if (!ETHER_ISNULLADDR(rtt_target->local_addr.octet)) { + ftm_params[ftm_param_cnt].mac_addr = rtt_target->local_addr; + ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_CUR_ETHER_ADDR; + bcm_ether_ntoa(&rtt_target->local_addr, eabuf); + DHD_RTT((">\t local %s\n", eabuf)); + } } /* target's mac address */ if (!ETHER_ISNULLADDR(rtt_target->addr.octet)) { @@ -1646,15 +2383,18 @@ dhd_rtt_start(dhd_pub_t *dhd) DHD_RTT((">\t num of burst : %d\n", rtt_target->num_burst)); 
} /* number of frame per burst */ - if (rtt_target->num_frames_per_burst == 0) { - rtt_target->num_frames_per_burst = - CHSPEC_IS20(rtt_target->chanspec) ? FTM_DEFAULT_CNT_20M : - CHSPEC_IS40(rtt_target->chanspec) ? FTM_DEFAULT_CNT_40M : - FTM_DEFAULT_CNT_80M; + rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_80M; + if (CHSPEC_IS80(rtt_target->chanspec)) { + rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_80M; + } else if (CHSPEC_IS40(rtt_target->chanspec)) { + rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_40M; + } else if (CHSPEC_IS20(rtt_target->chanspec)) { + rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_20M; } ftm_params[ftm_param_cnt].data16 = htol16(rtt_target->num_frames_per_burst); ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_BURST_NUM_FTM; DHD_RTT((">\t number of frame per burst : %d\n", rtt_target->num_frames_per_burst)); + /* FTM retry count */ if (rtt_target->num_retries_per_ftm) { ftm_params[ftm_param_cnt].data8 = rtt_target->num_retries_per_ftm; @@ -1675,7 +2415,12 @@ dhd_rtt_start(dhd_pub_t *dhd) ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_BURST_PERIOD; DHD_RTT((">\t burst period : %d ms\n", rtt_target->burst_period)); } + /* Setting both duration and timeout to MAX duration + * to handle the congestion environments. + * Hence ignoring the user config. 
+ */ /* burst-duration */ + rtt_target->burst_duration = FTM_MAX_BURST_DUR_TMO_MS; if (rtt_target->burst_duration) { ftm_params[ftm_param_cnt].data_intvl.intvl = htol32(rtt_target->burst_duration); /* ms */ @@ -1684,45 +2429,63 @@ dhd_rtt_start(dhd_pub_t *dhd) DHD_RTT((">\t burst duration : %d ms\n", rtt_target->burst_duration)); } + /* burst-timeout */ + rtt_target->burst_timeout = FTM_MAX_BURST_DUR_TMO_MS; + if (rtt_target->burst_timeout) { + ftm_params[ftm_param_cnt].data_intvl.intvl = + htol32(rtt_target->burst_timeout); /* ms */ + ftm_params[ftm_param_cnt].data_intvl.tmu = WL_PROXD_TMU_MILLI_SEC; + ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_BURST_TIMEOUT; + DHD_RTT((">\t burst timeout : %d ms\n", + rtt_target->burst_timeout)); + } + /* event_mask..applicable for only Legacy RTT. + * For nan-rtt config happens from firmware + */ + ftm_params[ftm_param_cnt].event_mask = ((1 << WL_PROXD_EVENT_BURST_END) | + (1 << WL_PROXD_EVENT_SESSION_END)); + ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_EVENT_MASK; + if (rtt_target->bw && rtt_target->preamble) { bool use_default = FALSE; int nss; int mcs; switch (rtt_target->preamble) { - case RTT_PREAMBLE_LEGACY: - rspec |= WL_RSPEC_ENCODE_RATE; /* 11abg */ - rspec |= WL_RATE_6M; - break; - case RTT_PREAMBLE_HT: - rspec |= WL_RSPEC_ENCODE_HT; /* 11n HT */ - mcs = 0; /* default MCS 0 */ - rspec |= mcs; - break; - case RTT_PREAMBLE_VHT: - rspec |= WL_RSPEC_ENCODE_VHT; /* 11ac VHT */ - mcs = 0; /* default MCS 0 */ - nss = 1; /* default Nss = 1 */ - rspec |= (nss << WL_RSPEC_VHT_NSS_SHIFT) | mcs; - break; - default: - DHD_RTT(("doesn't support this preamble : %d\n", rtt_target->preamble)); - use_default = TRUE; - break; + case RTT_PREAMBLE_LEGACY: + rspec |= WL_RSPEC_ENCODE_RATE; /* 11abg */ + rspec |= WL_RATE_6M; + break; + case RTT_PREAMBLE_HT: + rspec |= WL_RSPEC_ENCODE_HT; /* 11n HT */ + mcs = 0; /* default MCS 0 */ + rspec |= mcs; + break; + case RTT_PREAMBLE_VHT: + rspec |= WL_RSPEC_ENCODE_VHT; /* 11ac VHT */ 
+ mcs = 0; /* default MCS 0 */ + nss = 1; /* default Nss = 1 */ + rspec |= (nss << WL_RSPEC_VHT_NSS_SHIFT) | mcs; + break; + default: + DHD_RTT(("doesn't support this preamble : %d\n", + rtt_target->preamble)); + use_default = TRUE; + break; } switch (rtt_target->bw) { - case RTT_BW_20: - rspec |= WL_RSPEC_BW_20MHZ; - break; - case RTT_BW_40: - rspec |= WL_RSPEC_BW_40MHZ; - break; - case RTT_BW_80: - rspec |= WL_RSPEC_BW_80MHZ; - break; - default: - DHD_RTT(("doesn't support this BW : %d\n", rtt_target->bw)); - use_default = TRUE; - break; + case RTT_BW_20: + rspec |= WL_RSPEC_BW_20MHZ; + break; + case RTT_BW_40: + rspec |= WL_RSPEC_BW_40MHZ; + break; + case RTT_BW_80: + rspec |= WL_RSPEC_BW_80MHZ; + break; + default: + DHD_RTT(("doesn't support this BW : %d\n", rtt_target->bw)); + use_default = TRUE; + break; } if (!use_default) { ftm_params[ftm_param_cnt].data32 = htol32(rspec); @@ -1733,30 +2496,55 @@ dhd_rtt_start(dhd_pub_t *dhd) } dhd_set_rand_mac_oui(dhd); dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_GENERAL, - ftm_params, ftm_param_cnt); + ftm_params, ftm_param_cnt); + rtt_sched_type = RTT_TYPE_LEGACY; err = dhd_rtt_start_session(dhd, FTM_DEFAULT_SESSION, TRUE); if (err) { - DHD_ERROR(("failed to start session of FTM : error %d\n", err)); + DHD_RTT_ERR(("failed to start session of FTM : error %d\n", err)); + err_at = 8; + } else { + /* schedule proxd timeout */ + schedule_delayed_work(&rtt_status->proxd_timeout, + msecs_to_jiffies(DHD_NAN_RTT_TIMER_INTERVAL_MS)); + } + + goto exit; +geofence: +#ifdef WL_NAN + /* sched geofencing rtt */ + rtt_sched_type = RTT_TYPE_NAN_GEOFENCE; + if ((err = dhd_rtt_sched_geofencing_target(dhd)) != BCME_OK) { + DHD_RTT_ERR(("geofencing sched failed, err = %d\n", err)); + err_at = 9; + } +#endif /* WL_NAN */ + exit: if (err) { - DHD_ERROR(("rtt is stopped %s \n", __FUNCTION__)); + /* RTT Failed */ + DHD_RTT_ERR(("dhd_rtt_start: Failed & RTT_STOPPED, err = %d," + " err_at = %d, rtt_sched_type = %d, 
rtt_invalid_reason = %d\n" + " sched_reason = %d", + err, err_at, rtt_sched_type, rtt_invalid_reason, + rtt_status->rtt_sched_reason)); rtt_status->status = RTT_STOPPED; /* disable FTM */ dhd_rtt_ftm_enable(dhd, FALSE); if (rtt_status->pm_restore) { pm = PM_FAST; - DHD_ERROR(("pm_restore =%d func =%s \n", + DHD_RTT_ERR(("pm_restore =%d func =%s \n", rtt_status->pm_restore, __FUNCTION__)); err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm)); if (err) { - DHD_ERROR(("Failed to set PM \n")); + DHD_RTT_ERR(("Failed to set PM \n")); } else { rtt_status->pm_restore = FALSE; } } } + mutex_unlock(&rtt_status->rtt_work_mutex); return err; } #endif /* WL_CFG80211 */ @@ -1773,18 +2561,13 @@ dhd_rtt_register_noti_callback(dhd_pub_t *dhd, void *ctx, dhd_rtt_compl_noti_fn rtt_status = GET_RTTSTATE(dhd); NULL_CHECK(rtt_status, "rtt_status is NULL", err); spin_lock_bh(¬i_list_lock); -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); list_for_each_entry(iter, &rtt_status->noti_fn_list, list) { + GCC_DIAGNOSTIC_POP(); if (iter->noti_fn == noti_fn) { goto exit; } } -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif cb = kmalloc(sizeof(struct rtt_noti_callback), GFP_ATOMIC); if (!cb) { err = -ENOMEM; @@ -1809,20 +2592,15 @@ dhd_rtt_unregister_noti_callback(dhd_pub_t *dhd, dhd_rtt_compl_noti_fn noti_fn) rtt_status = GET_RTTSTATE(dhd); NULL_CHECK(rtt_status, "rtt_status is NULL", err); spin_lock_bh(¬i_list_lock); -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); list_for_each_entry(iter, &rtt_status->noti_fn_list, list) { + GCC_DIAGNOSTIC_POP(); if (iter->noti_fn == noti_fn) { cb = iter; list_del(&cb->list); break; } } -#if defined(STRICT_GCC_WARNINGS) && 
defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif spin_unlock_bh(¬i_list_lock); if (cb) { @@ -1874,7 +2652,7 @@ dhd_rtt_convert_rate_to_host(uint32 rspec) #define FTM_FRAME_TYPES {"SETUP", "TRIGGER", "TIMESTAMP"} static int -dhd_rtt_convert_results_to_host_v1(rtt_report_t *rtt_report, const uint8 *p_data, +dhd_rtt_convert_results_to_host_v1(rtt_result_t *rtt_result, const uint8 *p_data, uint16 tlvid, uint16 len) { int i; @@ -1884,7 +2662,7 @@ dhd_rtt_convert_results_to_host_v1(rtt_report_t *rtt_report, const uint8 *p_data wl_proxd_session_state_t session_state; wl_proxd_status_t proxd_status; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) - struct timespec ts; + struct osl_timespec ts; #endif /* LINUX_VER >= 2.6.39 */ uint32 ratespec; uint32 avg_dist; @@ -1902,6 +2680,7 @@ dhd_rtt_convert_results_to_host_v1(rtt_report_t *rtt_report, const uint8 *p_data int32 dist = 0; uint8 num_ftm = 0; char *ftm_frame_types[] = FTM_FRAME_TYPES; + rtt_report_t *rtt_report = &(rtt_result->report); BCM_REFERENCE(ftm_frame_types); BCM_REFERENCE(dist); @@ -1912,6 +2691,8 @@ dhd_rtt_convert_results_to_host_v1(rtt_report_t *rtt_report, const uint8 *p_data BCM_REFERENCE(tof_phy_error); BCM_REFERENCE(bitflips); BCM_REFERENCE(snr); + BCM_REFERENCE(session_state); + BCM_REFERENCE(ftm_session_state_value_to_logstr); NULL_CHECK(rtt_report, "rtt_report is NULL", err); NULL_CHECK(p_data, "p_data is NULL", err); @@ -1924,7 +2705,6 @@ dhd_rtt_convert_results_to_host_v1(rtt_report_t *rtt_report, const uint8 *p_data session_state = ltoh16_ua(&p_data_info->state); proxd_status = ltoh32_ua(&p_data_info->status); bcm_ether_ntoa((&(p_data_info->peer)), eabuf); - ftm_session_state_value_to_logstr(session_state); ftm_status_value_to_logstr(proxd_status); DHD_RTT((">\tTarget(%s) session state=%d(%s), status=%d(%s)\n", eabuf, @@ -2002,7 +2782,7 @@ dhd_rtt_convert_results_to_host_v1(rtt_report_t *rtt_report, const uint8 *p_data /* time stamp */ /* get the time elapsed from boot time */ #if 
(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) - get_monotonic_boottime(&ts); + osl_get_monotonic_boottime(&ts); rtt_report->ts = (uint64)TIMESPEC_TO_US(ts); #endif /* LINUX_VER >= 2.6.39 */ @@ -2069,8 +2849,262 @@ dhd_rtt_convert_results_to_host_v1(rtt_report_t *rtt_report, const uint8 *p_data } DHD_RTT((">\t sample[%d]: id=%d rssi=%d snr=0x%x bitflips=%d" " tof_phy_error %x tof_phy_tgt_error %x target_snr=0x%x" - " target_bitflips=%d dist=%d rtt=%d%s status %s" - " Type %s coreid=%d\n", + " target_bitflips=%d dist=%d rtt=%d%s status %s" + " Type %s coreid=%d\n", + i, p_sample->id, rssi, snr, + bitflips, tof_phy_error, tof_phy_tgt_error, + tof_target_snr, + tof_target_bitflips, dist, + ltoh32_ua(&p_sample->rtt.intvl), + ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample->rtt.tmu)), + ftm_status_value_to_logstr(ltoh32_ua(&p_sample->status)), + ftm_frame_types[i % num_ftm], p_sample->coreid)); + p_sample++; + } + } + return err; +} + +static int +dhd_rtt_convert_results_to_host_v2(rtt_result_t *rtt_result, const uint8 *p_data, + uint16 tlvid, uint16 len) +{ + int i; + int err = BCME_OK; + char eabuf[ETHER_ADDR_STR_LEN]; + wl_proxd_result_flags_t flags; + wl_proxd_session_state_t session_state; + wl_proxd_status_t proxd_status; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) + struct osl_timespec ts; +#endif /* LINUX_VER >= 2.6.39 */ + uint32 ratespec; + uint32 avg_dist; + const wl_proxd_rtt_result_v2_t *p_data_info = NULL; + const wl_proxd_rtt_sample_v2_t *p_sample_avg = NULL; + const wl_proxd_rtt_sample_v2_t *p_sample = NULL; + uint16 num_rtt = 0; + wl_proxd_intvl_t rtt; + wl_proxd_intvl_t p_time; + uint16 snr = 0, bitflips = 0; + wl_proxd_phy_error_t tof_phy_error = 0; + wl_proxd_phy_error_t tof_phy_tgt_error = 0; + wl_proxd_snr_t tof_target_snr = 0; + wl_proxd_bitflips_t tof_target_bitflips = 0; + int16 rssi = 0; + int32 dist = 0; + uint32 chanspec = 0; + uint8 num_ftm = 0; + char *ftm_frame_types[] = FTM_FRAME_TYPES; + rtt_report_t *rtt_report = 
&(rtt_result->report); + + BCM_REFERENCE(ftm_frame_types); + BCM_REFERENCE(dist); + BCM_REFERENCE(rssi); + BCM_REFERENCE(tof_target_bitflips); + BCM_REFERENCE(tof_target_snr); + BCM_REFERENCE(tof_phy_tgt_error); + BCM_REFERENCE(tof_phy_error); + BCM_REFERENCE(bitflips); + BCM_REFERENCE(snr); + BCM_REFERENCE(chanspec); + BCM_REFERENCE(session_state); + BCM_REFERENCE(ftm_session_state_value_to_logstr); + + NULL_CHECK(rtt_report, "rtt_report is NULL", err); + NULL_CHECK(p_data, "p_data is NULL", err); + DHD_RTT(("%s enter\n", __FUNCTION__)); + p_data_info = (const wl_proxd_rtt_result_v2_t *) p_data; + /* unpack and format 'flags' for display */ + flags = ltoh16_ua(&p_data_info->flags); + /* session state and status */ + session_state = ltoh16_ua(&p_data_info->state); + proxd_status = ltoh32_ua(&p_data_info->status); + bcm_ether_ntoa((&(p_data_info->peer)), eabuf); + + if (proxd_status != BCME_OK) { + DHD_RTT_ERR((">\tTarget(%s) session state=%d(%s), status=%d(%s) " + "num_meas_ota %d num_valid_rtt %d result_flags %x\n", + eabuf, session_state, + ftm_session_state_value_to_logstr(session_state), + proxd_status, ftm_status_value_to_logstr(proxd_status), + p_data_info->num_meas, p_data_info->num_valid_rtt, + p_data_info->flags)); + } else { + DHD_RTT((">\tTarget(%s) session state=%d(%s), status=%d(%s)\n", + eabuf, session_state, + ftm_session_state_value_to_logstr(session_state), + proxd_status, ftm_status_value_to_logstr(proxd_status))); + } + /* show avg_dist (1/256m units), burst_num */ + avg_dist = ltoh32_ua(&p_data_info->avg_dist); + if (avg_dist == 0xffffffff) { /* report 'failure' case */ + DHD_RTT((">\tavg_dist=-1m, burst_num=%d, valid_measure_cnt=%d\n", + ltoh16_ua(&p_data_info->burst_num), + p_data_info->num_valid_rtt)); /* in a session */ + avg_dist = FTM_INVALID; + } else { + DHD_RTT((">\tavg_dist=%d.%04dm, burst_num=%d, valid_measure_cnt=%d num_ftm=%d " + "num_meas_ota=%d, result_flags=%x\n", avg_dist >> 8, /* 1/256m units */ + ((avg_dist & 0xff) * 625) >> 
4, + ltoh16_ua(&p_data_info->burst_num), + p_data_info->num_valid_rtt, + p_data_info->num_ftm, p_data_info->num_meas, + p_data_info->flags)); /* in a session */ + } + rtt_result->rtt_detail.num_ota_meas = p_data_info->num_meas; + rtt_result->rtt_detail.result_flags = p_data_info->flags; + /* show 'avg_rtt' sample */ + /* in v2, avg_rtt is the first element of the variable rtt[] */ + p_sample_avg = &p_data_info->rtt[0]; + ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu)); + DHD_RTT((">\tavg_rtt sample: rssi=%d rtt=%d%s std_deviation =%d.%d" + "ratespec=0x%08x chanspec=0x%08x\n", + (int16) ltoh16_ua(&p_sample_avg->rssi), + ltoh32_ua(&p_sample_avg->rtt.intvl), + ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu)), + ltoh16_ua(&p_data_info->sd_rtt)/10, ltoh16_ua(&p_data_info->sd_rtt)%10, + ltoh32_ua(&p_sample_avg->ratespec), + ltoh32_ua(&p_sample_avg->chanspec))); + + /* set peer address */ + rtt_report->addr = p_data_info->peer; + + /* burst num */ + rtt_report->burst_num = ltoh16_ua(&p_data_info->burst_num); + + /* success num */ + rtt_report->success_num = p_data_info->num_valid_rtt; + + /* num-ftm configured */ + rtt_report->ftm_num = p_data_info->num_ftm; + + /* actual number of FTM supported by peer */ + rtt_report->num_per_burst_peer = p_data_info->num_ftm; + rtt_report->negotiated_burst_num = p_data_info->num_ftm; + + /* status */ + rtt_report->status = ftm_get_statusmap_info(proxd_status, + &ftm_status_map_info[0], ARRAYSIZE(ftm_status_map_info)); + + /* Framework expects status as SUCCESS else all results will be + * set to zero even if we have partial valid result. 
+ * So setting status as SUCCESS if we have a valid_rtt + * On burst timeout we stop burst with "timeout" reason and + * on msch end we set status as "cancel" + */ + if ((proxd_status == WL_PROXD_E_TIMEOUT || + proxd_status == WL_PROXD_E_CANCELED) && + rtt_report->success_num) { + rtt_report->status = RTT_STATUS_SUCCESS; + } + + /* rssi (0.5db) */ + rtt_report->rssi = ABS((wl_proxd_rssi_t)ltoh16_ua(&p_sample_avg->rssi)) * 2; + + /* rx rate */ + ratespec = ltoh32_ua(&p_sample_avg->ratespec); + rtt_report->rx_rate = dhd_rtt_convert_rate_to_host(ratespec); + + /* tx rate */ + if (flags & WL_PROXD_RESULT_FLAG_VHTACK) { + rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0x2010010); + } else { + rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0xc); + } + + /* rtt_sd */ + rtt.tmu = ltoh16_ua(&p_sample_avg->rtt.tmu); + rtt.intvl = ltoh32_ua(&p_sample_avg->rtt.intvl); + rtt_report->rtt = (wifi_timespan)FTM_INTVL2NSEC(&rtt) * 1000; /* nano -> pico seconds */ + rtt_report->rtt_sd = ltoh16_ua(&p_data_info->sd_rtt); /* nano -> 0.1 nano */ + DHD_RTT(("rtt_report->rtt : %llu\n", rtt_report->rtt)); + DHD_RTT(("rtt_report->rssi : %d (0.5db)\n", rtt_report->rssi)); + + /* average distance */ + if (avg_dist != FTM_INVALID) { + rtt_report->distance = (avg_dist >> 8) * 1000; /* meter -> mm */ + rtt_report->distance += (avg_dist & 0xff) * 1000 / 256; + /* rtt_sd is in 0.1 ns. 
+ * host needs distance_sd in milli mtrs + * (0.1 * rtt_sd/2 * 10^-9) * C * 1000 + */ + rtt_report->distance_sd = rtt_report->rtt_sd * 15; /* mm */ + } else { + rtt_report->distance = FTM_INVALID; + } + /* time stamp */ + /* get the time elapsed from boot time */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) + osl_get_monotonic_boottime(&ts); + rtt_report->ts = (uint64)TIMESPEC_TO_US(ts); +#endif /* LINUX_VER >= 2.6.39 */ + + if (proxd_status == WL_PROXD_E_REMOTE_FAIL) { + /* retry time after failure */ + p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl); + p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu); + rtt_report->retry_after_duration = FTM_INTVL2SEC(&p_time); /* s -> s */ + DHD_RTT((">\tretry_after: %d%s\n", + ltoh32_ua(&p_data_info->u.retry_after.intvl), + ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.retry_after.tmu)))); + } else { + /* burst duration */ + p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl); + p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu); + rtt_report->burst_duration = FTM_INTVL2MSEC(&p_time); /* s -> ms */ + DHD_RTT((">\tburst_duration: %d%s\n", + ltoh32_ua(&p_data_info->u.burst_duration.intvl), + ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.burst_duration.tmu)))); + DHD_RTT(("rtt_report->burst_duration : %d\n", rtt_report->burst_duration)); + } + /* display detail if available */ + num_rtt = ltoh16_ua(&p_data_info->num_rtt); + if (num_rtt > 0) { + DHD_RTT((">\tnum rtt: %d samples\n", num_rtt)); + p_sample = &p_data_info->rtt[1]; + for (i = 0; i < num_rtt; i++) { + snr = 0; + bitflips = 0; + tof_phy_error = 0; + tof_phy_tgt_error = 0; + tof_target_snr = 0; + tof_target_bitflips = 0; + rssi = 0; + dist = 0; + num_ftm = p_data_info->num_ftm; + /* FTM frames 1,4,7,11 have valid snr, rssi and bitflips */ + if ((i % num_ftm) == 1) { + rssi = (wl_proxd_rssi_t) ltoh16_ua(&p_sample->rssi); + snr = (wl_proxd_snr_t) ltoh16_ua(&p_sample->snr); + bitflips = (wl_proxd_bitflips_t) 
ltoh16_ua(&p_sample->bitflips); + tof_phy_error = + (wl_proxd_phy_error_t) + ltoh32_ua(&p_sample->tof_phy_error); + tof_phy_tgt_error = + (wl_proxd_phy_error_t) + ltoh32_ua(&p_sample->tof_tgt_phy_error); + tof_target_snr = + (wl_proxd_snr_t) + ltoh16_ua(&p_sample->tof_tgt_snr); + tof_target_bitflips = + (wl_proxd_bitflips_t) + ltoh16_ua(&p_sample->tof_tgt_bitflips); + dist = ltoh32_ua(&p_sample->distance); + chanspec = ltoh32_ua(&p_sample->chanspec); + } else { + rssi = -1; + snr = 0; + bitflips = 0; + dist = 0; + tof_target_bitflips = 0; + tof_target_snr = 0; + tof_phy_tgt_error = 0; + } + DHD_RTT((">\t sample[%d]: id=%d rssi=%d snr=0x%x bitflips=%d" + " tof_phy_error %x tof_phy_tgt_error %x target_snr=0x%x" + " target_bitflips=%d dist=%d rtt=%d%s status %s Type %s" + " coreid=%d chanspec=0x%08x\n", i, p_sample->id, rssi, snr, bitflips, tof_phy_error, tof_phy_tgt_error, tof_target_snr, @@ -2078,235 +3112,395 @@ dhd_rtt_convert_results_to_host_v1(rtt_report_t *rtt_report, const uint8 *p_data ltoh32_ua(&p_sample->rtt.intvl), ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample->rtt.tmu)), ftm_status_value_to_logstr(ltoh32_ua(&p_sample->status)), - ftm_frame_types[i % num_ftm], p_sample->coreid)); + ftm_frame_types[i % num_ftm], p_sample->coreid, + chanspec)); p_sample++; } } return err; } +#ifdef WL_CFG80211 +/* Common API for handling Session End. +* This API will flush out the results for a peer MAC. +* +* @For legacy FTM session, this API will be called +* when legacy FTM_SESSION_END event is received. +* @For legacy Nan-RTT , this API will be called when +* we are cancelling the nan-ranging session or on +* nan-ranging-end event. 
+*/ +static void +dhd_rtt_handle_rtt_session_end(dhd_pub_t *dhd) +{ + + int idx; + struct rtt_noti_callback *iter; + rtt_results_header_t *entry, *next; + rtt_result_t *next2; + rtt_result_t *rtt_result; + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + + /* Cancel pending proxd timeout work if any */ + if (delayed_work_pending(&rtt_status->proxd_timeout)) { + cancel_delayed_work(&rtt_status->proxd_timeout); + } + + /* find next target to trigger RTT */ + for (idx = (rtt_status->cur_idx + 1); + idx < rtt_status->rtt_config.rtt_target_cnt; idx++) { + /* skip the disabled device */ + if (rtt_status->rtt_config.target_info[idx].disable) { + continue; + } else { + /* set the idx to cur_idx */ + rtt_status->cur_idx = idx; + break; + } + } + if (idx < rtt_status->rtt_config.rtt_target_cnt) { + /* restart to measure RTT from next device */ + DHD_INFO(("restart to measure rtt\n")); + schedule_work(&rtt_status->work); + } else { + DHD_RTT(("RTT_STOPPED\n")); + rtt_status->status = RTT_STOPPED; + /* notify the completed information to others */ + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + list_for_each_entry(iter, &rtt_status->noti_fn_list, list) { + iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache); + } + /* remove the rtt results in cache */ + if (!list_empty(&rtt_status->rtt_results_cache)) { + /* Iterate rtt_results_header list */ + list_for_each_entry_safe(entry, next, + &rtt_status->rtt_results_cache, list) { + list_del(&entry->list); + /* Iterate rtt_result list */ + list_for_each_entry_safe(rtt_result, next2, + &entry->result_list, list) { + list_del(&rtt_result->list); + kfree(rtt_result); + } + kfree(entry); + } + } + GCC_DIAGNOSTIC_POP(); + /* reinitialize the HEAD */ + INIT_LIST_HEAD(&rtt_status->rtt_results_cache); + /* clear information for rtt_config */ + rtt_status->rtt_config.rtt_target_cnt = 0; + memset_s(rtt_status->rtt_config.target_info, TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT), + 0, TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT)); + rtt_status->cur_idx = 0; + 
} +} +#endif /* WL_CFG80211 */ +#ifdef WL_CFG80211 static int -dhd_rtt_convert_results_to_host_v2(rtt_report_t *rtt_report, const uint8 *p_data, - uint16 tlvid, uint16 len) +dhd_rtt_create_failure_result(rtt_status_info_t *rtt_status, + struct ether_addr *addr) { - int i; - int err = BCME_OK; - char eabuf[ETHER_ADDR_STR_LEN]; - wl_proxd_result_flags_t flags; - wl_proxd_session_state_t session_state; - wl_proxd_status_t proxd_status; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) - struct timespec ts; -#endif /* LINUX_VER >= 2.6.39 */ - uint32 ratespec; - uint32 avg_dist; - const wl_proxd_rtt_result_v2_t *p_data_info = NULL; - const wl_proxd_rtt_sample_v2_t *p_sample_avg = NULL; - const wl_proxd_rtt_sample_v2_t *p_sample = NULL; - uint16 num_rtt = 0; - wl_proxd_intvl_t rtt; - wl_proxd_intvl_t p_time; - uint16 snr = 0, bitflips = 0; - wl_proxd_phy_error_t tof_phy_error = 0; - wl_proxd_phy_error_t tof_phy_tgt_error = 0; - wl_proxd_snr_t tof_target_snr = 0; - wl_proxd_bitflips_t tof_target_bitflips = 0; - int16 rssi = 0; - int32 dist = 0; - uint32 chanspec = 0; - uint8 num_ftm = 0; - char *ftm_frame_types[] = FTM_FRAME_TYPES; + rtt_results_header_t *rtt_results_header = NULL; + rtt_target_info_t *rtt_target_info; + int ret = BCME_OK; + rtt_result_t *rtt_result; - BCM_REFERENCE(ftm_frame_types); - BCM_REFERENCE(dist); - BCM_REFERENCE(rssi); - BCM_REFERENCE(tof_target_bitflips); - BCM_REFERENCE(tof_target_snr); - BCM_REFERENCE(tof_phy_tgt_error); - BCM_REFERENCE(tof_phy_error); - BCM_REFERENCE(bitflips); - BCM_REFERENCE(snr); - BCM_REFERENCE(chanspec); + /* allocate new header for rtt_results */ + rtt_results_header = (rtt_results_header_t *)MALLOCZ(rtt_status->dhd->osh, + sizeof(rtt_results_header_t)); + if (!rtt_results_header) { + ret = -ENOMEM; + goto exit; + } + rtt_target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx]; + /* Initialize the head of list for rtt result */ + INIT_LIST_HEAD(&rtt_results_header->result_list); + /* same src and 
dest len */ + (void)memcpy_s(&rtt_results_header->peer_mac, + ETHER_ADDR_LEN, addr, ETHER_ADDR_LEN); + list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache); + + /* allocate rtt_results for new results */ + rtt_result = (rtt_result_t *)MALLOCZ(rtt_status->dhd->osh, + sizeof(rtt_result_t)); + if (!rtt_result) { + ret = -ENOMEM; + kfree(rtt_results_header); + goto exit; + } + /* fill out the results from the configuration param */ + rtt_result->report.ftm_num = rtt_target_info->num_frames_per_burst; + rtt_result->report.type = RTT_TWO_WAY; + DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num)); + rtt_result->report_len = RTT_REPORT_SIZE; + rtt_result->report.status = RTT_STATUS_FAIL_NO_RSP; + /* same src and dest len */ + (void)memcpy_s(&rtt_result->report.addr, ETHER_ADDR_LEN, + &rtt_target_info->addr, ETHER_ADDR_LEN); + rtt_result->report.distance = FTM_INVALID; + list_add_tail(&rtt_result->list, &rtt_results_header->result_list); + rtt_results_header->result_cnt++; + rtt_results_header->result_tot_len += rtt_result->report_len; +exit: + return ret; +} - NULL_CHECK(rtt_report, "rtt_report is NULL", err); - NULL_CHECK(p_data, "p_data is NULL", err); - DHD_RTT(("%s enter\n", __FUNCTION__)); - p_data_info = (const wl_proxd_rtt_result_v2_t *) p_data; - /* unpack and format 'flags' for display */ - flags = ltoh16_ua(&p_data_info->flags); - /* session state and status */ - session_state = ltoh16_ua(&p_data_info->state); - proxd_status = ltoh32_ua(&p_data_info->status); - bcm_ether_ntoa((&(p_data_info->peer)), eabuf); - ftm_session_state_value_to_logstr(session_state); - ftm_status_value_to_logstr(proxd_status); - DHD_RTT((">\tTarget(%s) session state=%d(%s), status=%d(%s)\n", - eabuf, - session_state, - ftm_session_state_value_to_logstr(session_state), - proxd_status, - ftm_status_value_to_logstr(proxd_status))); - /* show avg_dist (1/256m units), burst_num */ - avg_dist = ltoh32_ua(&p_data_info->avg_dist); - if (avg_dist == 0xffffffff) { /* 
report 'failure' case */ - DHD_RTT((">\tavg_dist=-1m, burst_num=%d, valid_measure_cnt=%d\n", - ltoh16_ua(&p_data_info->burst_num), - p_data_info->num_valid_rtt)); /* in a session */ - avg_dist = FTM_INVALID; - } else { - DHD_RTT((">\tavg_dist=%d.%04dm, burst_num=%d, valid_measure_cnt=%d num_ftm=%d\n", - avg_dist >> 8, /* 1/256m units */ - ((avg_dist & 0xff) * 625) >> 4, - ltoh16_ua(&p_data_info->burst_num), - p_data_info->num_valid_rtt, - p_data_info->num_ftm)); /* in a session */ +static bool +dhd_rtt_get_report_header(rtt_status_info_t *rtt_status, + rtt_results_header_t **rtt_results_header, struct ether_addr *addr) +{ + rtt_results_header_t *entry; + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + /* find a rtt_report_header for this mac address */ + list_for_each_entry(entry, &rtt_status->rtt_results_cache, list) { + GCC_DIAGNOSTIC_POP(); + if (!memcmp(&entry->peer_mac, addr, ETHER_ADDR_LEN)) { + /* found a rtt_report_header for peer_mac in the list */ + if (rtt_results_header) { + *rtt_results_header = entry; + } + return TRUE; + } } + return FALSE; +} - /* show 'avg_rtt' sample */ - /* in v2, avg_rtt is the first element of the variable rtt[] */ - p_sample_avg = &p_data_info->rtt[0]; - ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu)); - DHD_RTT((">\tavg_rtt sample: rssi=%d rtt=%d%s std_deviation =%d.%d" - "ratespec=0x%08x chanspec=0x%08x\n", - (int16) ltoh16_ua(&p_sample_avg->rssi), - ltoh32_ua(&p_sample_avg->rtt.intvl), - ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu)), - ltoh16_ua(&p_data_info->sd_rtt)/10, ltoh16_ua(&p_data_info->sd_rtt)%10, - ltoh32_ua(&p_sample_avg->ratespec), - ltoh32_ua(&p_sample_avg->chanspec))); +int +dhd_rtt_handle_nan_rtt_session_end(dhd_pub_t *dhd, struct ether_addr *peer) +{ + bool is_new = TRUE; + rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd); + mutex_lock(&rtt_status->rtt_mutex); + is_new = !dhd_rtt_get_report_header(rtt_status, NULL, peer); - /* set peer address */ - rtt_report->addr = p_data_info->peer; + if 
(is_new) { /* no FTM result..create failure result */ + dhd_rtt_create_failure_result(rtt_status, peer); + } + dhd_rtt_handle_rtt_session_end(dhd); + mutex_unlock(&rtt_status->rtt_mutex); + return BCME_OK; +} +#endif /* WL_CFG80211 */ - /* burst num */ - rtt_report->burst_num = ltoh16_ua(&p_data_info->burst_num); +static bool +dhd_rtt_is_valid_measurement(rtt_result_t *rtt_result) +{ + bool ret = FALSE; - /* success num */ - rtt_report->success_num = p_data_info->num_valid_rtt; + if (rtt_result && (rtt_result->report.success_num != 0)) { + ret = TRUE; + } + return ret; +} - /* actual number of FTM supported by peer */ - rtt_report->num_per_burst_peer = p_data_info->num_ftm; - rtt_report->negotiated_burst_num = p_data_info->num_ftm; +static int +dhd_rtt_parse_result_event(wl_proxd_event_t *proxd_ev_data, + int tlvs_len, rtt_result_t *rtt_result) +{ + int ret = BCME_OK; - /* status */ - rtt_report->status = ftm_get_statusmap_info(proxd_status, - &ftm_status_map_info[0], ARRAYSIZE(ftm_status_map_info)); + /* unpack TLVs and invokes the cbfn to print the event content TLVs */ + ret = bcm_unpack_xtlv_buf((void *) rtt_result, + (uint8 *)&proxd_ev_data->tlvs[0], tlvs_len, + BCM_XTLV_OPTION_ALIGN32, rtt_unpack_xtlv_cbfn); + if (ret != BCME_OK) { + DHD_RTT_ERR(("%s : Failed to unpack xtlv for an event\n", + __FUNCTION__)); + goto exit; + } + /* fill out the results from the configuration param */ + rtt_result->report.type = RTT_TWO_WAY; + DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num)); + rtt_result->report_len = RTT_REPORT_SIZE; + rtt_result->detail_len = sizeof(rtt_result->rtt_detail); - /* rssi (0.5db) */ - rtt_report->rssi = ABS((wl_proxd_rssi_t)ltoh16_ua(&p_sample_avg->rssi)) * 2; +exit: + return ret; - /* rx rate */ - ratespec = ltoh32_ua(&p_sample_avg->ratespec); - rtt_report->rx_rate = dhd_rtt_convert_rate_to_host(ratespec); +} - /* tx rate */ - if (flags & WL_PROXD_RESULT_FLAG_VHTACK) { - rtt_report->tx_rate = 
dhd_rtt_convert_rate_to_host(0x2010010); +static int +dhd_rtt_handle_directed_rtt_burst_end(dhd_pub_t *dhd, struct ether_addr *peer_addr, + wl_proxd_event_t *proxd_ev_data, int tlvs_len, rtt_result_t *rtt_result, bool is_nan) +{ + rtt_status_info_t *rtt_status; + rtt_results_header_t *rtt_results_header = NULL; + bool is_new = TRUE; + int ret = BCME_OK; + int err_at = 0; + + rtt_status = GET_RTTSTATE(dhd); + is_new = !dhd_rtt_get_report_header(rtt_status, + &rtt_results_header, peer_addr); + + if (tlvs_len > 0) { + if (is_new) { + /* allocate new header for rtt_results */ + rtt_results_header = (rtt_results_header_t *)MALLOCZ(rtt_status->dhd->osh, + sizeof(rtt_results_header_t)); + if (!rtt_results_header) { + ret = BCME_NORESOURCE; + err_at = 1; + goto exit; + } + /* Initialize the head of list for rtt result */ + INIT_LIST_HEAD(&rtt_results_header->result_list); + /* same src and header len */ + (void)memcpy_s(&rtt_results_header->peer_mac, ETHER_ADDR_LEN, + peer_addr, ETHER_ADDR_LEN); + list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache); + } + + ret = dhd_rtt_parse_result_event(proxd_ev_data, tlvs_len, rtt_result); + if ((ret == BCME_OK) && ((!is_nan) || + dhd_rtt_is_valid_measurement(rtt_result))) { + /* + * Add to list, if non-nan RTT (legacy) or + * valid measurement in nan rtt case + */ + list_add_tail(&rtt_result->list, &rtt_results_header->result_list); + rtt_results_header->result_cnt++; + rtt_results_header->result_tot_len += rtt_result->report_len + + rtt_result->detail_len; + } else { + err_at = 2; + if (ret == BCME_OK) { + /* Case for nan rtt invalid measurement */ + ret = BCME_ERROR; + err_at = 3; + } + goto exit; + } } else { - rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0xc); + ret = BCME_ERROR; + err_at = 4; + goto exit; } - /* rtt_sd */ - rtt.tmu = ltoh16_ua(&p_sample_avg->rtt.tmu); - rtt.intvl = ltoh32_ua(&p_sample_avg->rtt.intvl); - rtt_report->rtt = (wifi_timespan)FTM_INTVL2NSEC(&rtt) * 1000; /* nano -> pico 
seconds */ - rtt_report->rtt_sd = ltoh16_ua(&p_data_info->sd_rtt); /* nano -> 0.1 nano */ - DHD_RTT(("rtt_report->rtt : %llu\n", rtt_report->rtt)); - DHD_RTT(("rtt_report->rssi : %d (0.5db)\n", rtt_report->rssi)); +exit: + if (ret != BCME_OK) { + DHD_RTT_ERR(("dhd_rtt_handle_directed_rtt_burst_end: failed, " + " ret = %d, err_at = %d\n", ret, err_at)); + if (rtt_results_header) { + list_del(&rtt_results_header->list); + kfree(rtt_results_header); + rtt_results_header = NULL; + } + } + return ret; +} + +#ifdef WL_NAN +static void +dhd_rtt_nan_range_report(struct bcm_cfg80211 *cfg, + rtt_result_t *rtt_result) +{ + wl_nan_ev_rng_rpt_ind_t range_res; + + UNUSED_PARAMETER(range_res); + if (!dhd_rtt_is_valid_measurement(rtt_result)) { + /* Drop Invalid Measurements for NAN RTT report */ + DHD_RTT(("dhd_rtt_nan_range_report: Drop Invalid Measurements\n")); + goto exit; + } + bzero(&range_res, sizeof(range_res)); + range_res.indication = 0; + range_res.dist_mm = rtt_result->report.distance; + /* same src and header len, ignoring ret val here */ + (void)memcpy_s(&range_res.peer_m_addr, ETHER_ADDR_LEN, + &rtt_result->report.addr, ETHER_ADDR_LEN); + wl_cfgnan_process_range_report(cfg, &range_res); + +exit: + return; +} + +static int +dhd_rtt_handle_nan_burst_end(dhd_pub_t *dhd, struct ether_addr *peer_addr, + wl_proxd_event_t *proxd_ev_data, int tlvs_len) +{ + struct net_device *ndev = NULL; + struct bcm_cfg80211 *cfg = NULL; + nan_ranging_inst_t *rng_inst = NULL; + rtt_status_info_t *rtt_status = NULL; + rtt_result_t *rtt_result = NULL; + bool is_geofence = FALSE; + int ret = BCME_OK; + + ndev = dhd_linux_get_primary_netdev(dhd); + cfg = wiphy_priv(ndev->ieee80211_ptr->wiphy); + + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", ret); + NAN_MUTEX_LOCK(); + mutex_lock(&rtt_status->rtt_mutex); - /* average distance */ - if (avg_dist != FTM_INVALID) { - rtt_report->distance = (avg_dist >> 8) * 1000; /* meter -> mm */ - rtt_report->distance += 
(avg_dist & 0xff) * 1000 / 256; - } else { - rtt_report->distance = FTM_INVALID; + if ((cfg->nan_enable == FALSE) || + ETHER_ISNULLADDR(peer_addr)) { + DHD_RTT_ERR(("Received Burst End with NULL ether addr, " + "or nan disable, nan_enable = %d\n", cfg->nan_enable)); + ret = BCME_UNSUPPORTED; + goto exit; } - /* time stamp */ - /* get the time elapsed from boot time */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) - get_monotonic_boottime(&ts); - rtt_report->ts = (uint64)TIMESPEC_TO_US(ts); -#endif /* LINUX_VER >= 2.6.39 */ - if (proxd_status == WL_PROXD_E_REMOTE_FAIL) { - /* retry time after failure */ - p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl); - p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu); - rtt_report->retry_after_duration = FTM_INTVL2SEC(&p_time); /* s -> s */ - DHD_RTT((">\tretry_after: %d%s\n", - ltoh32_ua(&p_data_info->u.retry_after.intvl), - ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.retry_after.tmu)))); + rng_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr); + if (rng_inst) { + is_geofence = (rng_inst->range_type + == RTT_TYPE_NAN_GEOFENCE); } else { - /* burst duration */ - p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl); - p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu); - rtt_report->burst_duration = FTM_INTVL2MSEC(&p_time); /* s -> ms */ - DHD_RTT((">\tburst_duration: %d%s\n", - ltoh32_ua(&p_data_info->u.burst_duration.intvl), - ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.burst_duration.tmu)))); - DHD_RTT(("rtt_report->burst_duration : %d\n", rtt_report->burst_duration)); + DHD_RTT_ERR(("Received Burst End without Ranging Instance\n")); + ret = BCME_ERROR; + goto exit; } - /* display detail if available */ - num_rtt = ltoh16_ua(&p_data_info->num_rtt); - if (num_rtt > 0) { - DHD_RTT((">\tnum rtt: %d samples\n", num_rtt)); - p_sample = &p_data_info->rtt[1]; - for (i = 0; i < num_rtt; i++) { - snr = 0; - bitflips = 0; - tof_phy_error = 0; - tof_phy_tgt_error = 0; - tof_target_snr = 
0; - tof_target_bitflips = 0; - rssi = 0; - dist = 0; - num_ftm = p_data_info->num_ftm; - /* FTM frames 1,4,7,11 have valid snr, rssi and bitflips */ - if ((i % num_ftm) == 1) { - rssi = (wl_proxd_rssi_t) ltoh16_ua(&p_sample->rssi); - snr = (wl_proxd_snr_t) ltoh16_ua(&p_sample->snr); - bitflips = (wl_proxd_bitflips_t) ltoh16_ua(&p_sample->bitflips); - tof_phy_error = - (wl_proxd_phy_error_t) - ltoh32_ua(&p_sample->tof_phy_error); - tof_phy_tgt_error = - (wl_proxd_phy_error_t) - ltoh32_ua(&p_sample->tof_tgt_phy_error); - tof_target_snr = - (wl_proxd_snr_t) - ltoh16_ua(&p_sample->tof_tgt_snr); - tof_target_bitflips = - (wl_proxd_bitflips_t) - ltoh16_ua(&p_sample->tof_tgt_bitflips); - dist = ltoh32_ua(&p_sample->distance); - chanspec = ltoh32_ua(&p_sample->chanspec); + + /* allocate rtt_results for new results */ + rtt_result = (rtt_result_t *)MALLOCZ(dhd->osh, sizeof(rtt_result_t)); + if (!rtt_result) { + ret = BCME_NORESOURCE; + goto exit; + } + + if (is_geofence) { + ret = dhd_rtt_parse_result_event(proxd_ev_data, tlvs_len, rtt_result); + if (ret != BCME_OK) { + DHD_RTT_ERR(("dhd_rtt_handle_nan_burst_end: " + "dhd_rtt_parse_result_event failed\n")); + goto exit; + } + } else { + if (RTT_IS_STOPPED(rtt_status)) { + /* Ignore the Proxd event */ + DHD_RTT((" event handler rtt is stopped \n")); + if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) { + DHD_RTT(("Device is target/Responder. Recv the event. 
\n")); } else { - rssi = -1; - snr = 0; - bitflips = 0; - dist = 0; - tof_target_bitflips = 0; - tof_target_snr = 0; - tof_phy_tgt_error = 0; + ret = BCME_UNSUPPORTED; + goto exit; } - DHD_RTT((">\t sample[%d]: id=%d rssi=%d snr=0x%x bitflips=%d" - " tof_phy_error %x tof_phy_tgt_error %x target_snr=0x%x" - " target_bitflips=%d dist=%d rtt=%d%s status %s Type %s" - " coreid=%d chanspec=0x%08x\n", - i, p_sample->id, rssi, snr, - bitflips, tof_phy_error, tof_phy_tgt_error, - tof_target_snr, - tof_target_bitflips, dist, - ltoh32_ua(&p_sample->rtt.intvl), - ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample->rtt.tmu)), - ftm_status_value_to_logstr(ltoh32_ua(&p_sample->status)), - ftm_frame_types[i % num_ftm], p_sample->coreid, - chanspec)); - p_sample++; } + ret = dhd_rtt_handle_directed_rtt_burst_end(dhd, peer_addr, + proxd_ev_data, tlvs_len, rtt_result, TRUE); + if (ret != BCME_OK) { + goto exit; + } + } - return err; + +exit: + mutex_unlock(&rtt_status->rtt_mutex); + if (ret == BCME_OK) { + dhd_rtt_nan_range_report(cfg, rtt_result); + } + if (rtt_result && + ((ret != BCME_OK) || is_geofence)) { + kfree(rtt_result); + rtt_result = NULL; + } + NAN_MUTEX_UNLOCK(); + return ret; } +#endif /* WL_NAN */ int dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) @@ -2319,34 +3513,15 @@ dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) wl_proxd_ftm_session_status_t session_status; const ftm_strmap_entry_t *p_loginfo; rtt_result_t *rtt_result; - gfp_t kflags; #ifdef WL_CFG80211 - int idx; rtt_status_info_t *rtt_status; - rtt_target_info_t *rtt_target_info; - struct rtt_noti_callback *iter; - rtt_results_header_t *entry, *next, *rtt_results_header = NULL; - rtt_result_t *next2; + rtt_results_header_t *rtt_results_header = NULL; bool is_new = TRUE; #endif /* WL_CFG80211 */ DHD_RTT(("Enter %s \n", __FUNCTION__)); NULL_CHECK(dhd, "dhd is NULL", ret); -#ifdef WL_CFG80211 - rtt_status = GET_RTTSTATE(dhd); - NULL_CHECK(rtt_status, 
"rtt_status is NULL", ret); - - if (RTT_IS_STOPPED(rtt_status)) { - /* Ignore the Proxd event */ - DHD_RTT((" event handler rtt is stopped \n")); - if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) { - DHD_RTT(("Device is target/Responder. Recv the event. \n")); - } else { - return ret; - } - } -#endif /* WL_CFG80211 */ if (ntoh32_ua((void *)&event->datalen) < OFFSETOF(wl_proxd_event_t, tlvs)) { DHD_RTT(("%s: wrong datalen:%d\n", __FUNCTION__, ntoh32_ua((void *)&event->datalen))); @@ -2354,43 +3529,37 @@ dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) } event_type = ntoh32_ua((void *)&event->event_type); if (event_type != WLC_E_PROXD) { - DHD_ERROR((" failed event \n")); + DHD_RTT_ERR((" failed event \n")); return -EINVAL; } if (!event_data) { - DHD_ERROR(("%s: event_data:NULL\n", __FUNCTION__)); + DHD_RTT_ERR(("%s: event_data:NULL\n", __FUNCTION__)); return -EINVAL; } p_event = (wl_proxd_event_t *) event_data; version = ltoh16(p_event->version); if (version < WL_PROXD_API_VERSION) { - DHD_ERROR(("ignore non-ftm event version = 0x%0x < WL_PROXD_API_VERSION (0x%x)\n", + DHD_RTT_ERR(("ignore non-ftm event version = 0x%0x < WL_PROXD_API_VERSION (0x%x)\n", version, WL_PROXD_API_VERSION)); return ret; } -#ifdef WL_CFG80211 - if (!in_atomic()) { - mutex_lock(&rtt_status->rtt_mutex); - } -#endif /* WL_CFG80211 */ - event_type = (wl_proxd_event_type_t) ltoh16(p_event->type); - kflags = in_softirq()? 
GFP_ATOMIC : GFP_KERNEL; + event_type = (wl_proxd_event_type_t) ltoh16(p_event->type); DHD_RTT(("event_type=0x%x, ntoh16()=0x%x, ltoh16()=0x%x\n", p_event->type, ntoh16(p_event->type), ltoh16(p_event->type))); p_loginfo = ftm_get_event_type_loginfo(event_type); if (p_loginfo == NULL) { - DHD_ERROR(("receive an invalid FTM event %d\n", event_type)); + DHD_RTT_ERR(("receive an invalid FTM event %d\n", event_type)); ret = -EINVAL; - goto exit; /* ignore this event */ + return ret; /* ignore this event */ } /* get TLVs len, skip over event header */ if (ltoh16(p_event->len) < OFFSETOF(wl_proxd_event_t, tlvs)) { - DHD_ERROR(("invalid FTM event length:%d\n", ltoh16(p_event->len))); + DHD_RTT_ERR(("invalid FTM event length:%d\n", ltoh16(p_event->len))); ret = -EINVAL; - goto exit; + return ret; } tlvs_len = ltoh16(p_event->len) - OFFSETOF(wl_proxd_event_t, tlvs); DHD_RTT(("receive '%s' event: version=0x%x len=%d method=%d sid=%d tlvs_len=%d\n", @@ -2401,23 +3570,36 @@ dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) ltoh16(p_event->sid), tlvs_len)); #ifdef WL_CFG80211 - rtt_target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx]; -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif - /* find a rtt_report_header for this mac address */ - list_for_each_entry(entry, &rtt_status->rtt_results_cache, list) { - if (!memcmp(&entry->peer_mac, &event->addr, ETHER_ADDR_LEN)) { - /* found a rtt_report_header for peer_mac in the list */ - is_new = FALSE; - rtt_results_header = entry; - break; - } +#ifdef WL_NAN + if ((event_type == WL_PROXD_EVENT_BURST_END) && + dhd_rtt_is_nan_peer(dhd, &event->addr)) { + DHD_RTT(("WL_PROXD_EVENT_BURST_END for NAN RTT\n")); + ret = dhd_rtt_handle_nan_burst_end(dhd, &event->addr, p_event, tlvs_len); + return ret; } -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif 
+#endif /* WL_NAN */ + + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", ret); + mutex_lock(&rtt_status->rtt_mutex); + + if (RTT_IS_STOPPED(rtt_status)) { + /* Ignore the Proxd event */ + DHD_RTT((" event handler rtt is stopped \n")); + if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) { + DHD_RTT(("Device is target/Responder. Recv the event. \n")); + } else { + ret = BCME_NOTREADY; + goto exit; + } + } +#endif /* WL_CFG80211 */ + +#ifdef WL_CFG80211 + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + is_new = !dhd_rtt_get_report_header(rtt_status, + &rtt_results_header, &event->addr); + GCC_DIAGNOSTIC_POP(); #endif /* WL_CFG80211 */ switch (event_type) { case WL_PROXD_EVENT_SESSION_CREATE: @@ -2430,52 +3612,30 @@ dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) DHD_RTT(("WL_PROXD_EVENT_BURST_START\n")); break; case WL_PROXD_EVENT_BURST_END: - DHD_RTT(("WL_PROXD_EVENT_BURST_END\n")); -#ifdef WL_CFG80211 - if (is_new) { - /* allocate new header for rtt_results */ - rtt_results_header = kzalloc(sizeof(rtt_results_header_t), kflags); - if (!rtt_results_header) { - ret = -ENOMEM; - goto exit; - } - /* Initialize the head of list for rtt result */ - INIT_LIST_HEAD(&rtt_results_header->result_list); - rtt_results_header->peer_mac = event->addr; - list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache); + DHD_RTT(("WL_PROXD_EVENT_BURST_END for Legacy RTT\n")); + /* allocate rtt_results for new legacy rtt results */ + rtt_result = (rtt_result_t *)MALLOCZ(dhd->osh, sizeof(rtt_result_t)); + if (!rtt_result) { + ret = -ENOMEM; + goto exit; } -#endif /* WL_CFG80211 */ - if (tlvs_len > 0) { - /* allocate rtt_results for new results */ - rtt_result = kzalloc(sizeof(rtt_result_t), kflags); - if (!rtt_result) { - ret = -ENOMEM; - goto exit; - } - /* unpack TLVs and invokes the cbfn to print the event content TLVs */ - ret = bcm_unpack_xtlv_buf((void *) &(rtt_result->report), - (uint8 *)&p_event->tlvs[0], 
tlvs_len, - BCM_XTLV_OPTION_ALIGN32, rtt_unpack_xtlv_cbfn); - if (ret != BCME_OK) { - DHD_ERROR(("%s : Failed to unpack xtlv for an event\n", - __FUNCTION__)); - goto exit; - } -#ifdef WL_CFG80211 - /* fill out the results from the configuration param */ - rtt_result->report.ftm_num = rtt_target_info->num_frames_per_burst; - rtt_result->report.type = RTT_TWO_WAY; - DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num)); - rtt_result->report_len = RTT_REPORT_SIZE; - - list_add_tail(&rtt_result->list, &rtt_results_header->result_list); - rtt_results_header->result_cnt++; - rtt_results_header->result_tot_len += rtt_result->report_len; -#endif /* WL_CFG80211 */ + ret = dhd_rtt_handle_directed_rtt_burst_end(dhd, &event->addr, + p_event, tlvs_len, rtt_result, FALSE); + if (rtt_result && (ret != BCME_OK)) { + kfree(rtt_result); + rtt_result = NULL; + goto exit; } break; case WL_PROXD_EVENT_SESSION_END: - DHD_RTT(("WL_PROXD_EVENT_SESSION_END\n")); + DHD_RTT(("WL_PROXD_EVENT_SESSION_END\n")); + if (dhd_rtt_is_nan_peer(dhd, &event->addr)) { + /* + * Nothing to do for session end for nan peer + * All taken care in burst end and nan rng rep + */ + break; + } #ifdef WL_CFG80211 if (!RTT_IS_ENABLED(rtt_status)) { DHD_RTT(("Ignore the session end evt\n")); @@ -2488,7 +3648,7 @@ dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) (uint8 *)&p_event->tlvs[0], tlvs_len, BCM_XTLV_OPTION_ALIGN32, rtt_unpack_xtlv_cbfn); if (ret != BCME_OK) { - DHD_ERROR(("%s : Failed to unpack xtlv for an event\n", + DHD_RTT_ERR(("%s : Failed to unpack xtlv for an event\n", __FUNCTION__)); goto exit; } @@ -2496,90 +3656,10 @@ dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) #ifdef WL_CFG80211 /* In case of no result for the peer device, make fake result for error case */ if (is_new) { - /* allocate new header for rtt_results */ - rtt_results_header = kzalloc(sizeof(rtt_results_header_t), GFP_KERNEL); - if (!rtt_results_header) { - ret = 
-ENOMEM; - goto exit; - } - /* Initialize the head of list for rtt result */ - INIT_LIST_HEAD(&rtt_results_header->result_list); - rtt_results_header->peer_mac = event->addr; - list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache); - - /* allocate rtt_results for new results */ - rtt_result = kzalloc(sizeof(rtt_result_t), kflags); - if (!rtt_result) { - ret = -ENOMEM; - kfree(rtt_results_header); - goto exit; - } - /* fill out the results from the configuration param */ - rtt_result->report.ftm_num = rtt_target_info->num_frames_per_burst; - rtt_result->report.type = RTT_TWO_WAY; - DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num)); - rtt_result->report_len = RTT_REPORT_SIZE; - rtt_result->report.status = RTT_STATUS_FAIL_NO_RSP; - rtt_result->report.addr = rtt_target_info->addr; - rtt_result->report.distance = FTM_INVALID; - list_add_tail(&rtt_result->list, &rtt_results_header->result_list); - rtt_results_header->result_cnt++; - rtt_results_header->result_tot_len += rtt_result->report_len; - } - /* find next target to trigger RTT */ - for (idx = (rtt_status->cur_idx + 1); - idx < rtt_status->rtt_config.rtt_target_cnt; idx++) { - /* skip the disabled device */ - if (rtt_status->rtt_config.target_info[idx].disable) { - continue; - } else { - /* set the idx to cur_idx */ - rtt_status->cur_idx = idx; - break; - } - } - if (idx < rtt_status->rtt_config.rtt_target_cnt) { - /* restart to measure RTT from next device */ - DHD_ERROR(("restart to measure rtt\n")); - schedule_work(&rtt_status->work); - } else { - DHD_RTT(("RTT_STOPPED\n")); - rtt_status->status = RTT_STOPPED; - schedule_work(&rtt_status->work); - /* notify the completed information to others */ -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif - list_for_each_entry(iter, &rtt_status->noti_fn_list, list) { - iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache); - } - /* remove the 
rtt results in cache */ - if (!list_empty(&rtt_status->rtt_results_cache)) { - /* Iterate rtt_results_header list */ - list_for_each_entry_safe(entry, next, - &rtt_status->rtt_results_cache, list) { - list_del(&entry->list); - /* Iterate rtt_result list */ - list_for_each_entry_safe(rtt_result, next2, - &entry->result_list, list) { - list_del(&rtt_result->list); - kfree(rtt_result); - } - kfree(entry); - } - } -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif - /* reinitialize the HEAD */ - INIT_LIST_HEAD(&rtt_status->rtt_results_cache); - /* clear information for rtt_config */ - rtt_status->rtt_config.rtt_target_cnt = 0; - memset(rtt_status->rtt_config.target_info, 0, - TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT)); - rtt_status->cur_idx = 0; + dhd_rtt_create_failure_result(rtt_status, &event->addr); } + DHD_RTT(("\n Not Nan peer..proceed to notify result and restart\n")); + dhd_rtt_handle_rtt_session_end(dhd); #endif /* WL_CFG80211 */ break; case WL_PROXD_EVENT_SESSION_RESTART: @@ -2607,7 +3687,7 @@ dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) DHD_RTT(("WL_PROXD_EVENT_COLLECT\n")); if (tlvs_len > 0) { void *buffer = NULL; - if (!(buffer = kzalloc(tlvs_len, kflags))) { + if (!(buffer = (void *)MALLOCZ(dhd->osh, tlvs_len))) { ret = -ENOMEM; goto exit; } @@ -2617,7 +3697,7 @@ dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) BCM_XTLV_OPTION_NONE, rtt_unpack_xtlv_cbfn); kfree(buffer); if (ret != BCME_OK) { - DHD_ERROR(("%s : Failed to unpack xtlv for event %d\n", + DHD_RTT_ERR(("%s : Failed to unpack xtlv for event %d\n", __FUNCTION__, event_type)); goto exit; } @@ -2627,7 +3707,7 @@ dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) DHD_RTT(("WL_PROXD_EVENT_MF_STATS\n")); if (tlvs_len > 0) { void *buffer = NULL; - if (!(buffer = kzalloc(tlvs_len, kflags))) { + if (!(buffer = (void *)MALLOCZ(dhd->osh, tlvs_len))) { ret = -ENOMEM; goto 
exit; } @@ -2637,7 +3717,7 @@ dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) BCM_XTLV_OPTION_NONE, rtt_unpack_xtlv_cbfn); kfree(buffer); if (ret != BCME_OK) { - DHD_ERROR(("%s : Failed to unpack xtlv for event %d\n", + DHD_RTT_ERR(("%s : Failed to unpack xtlv for event %d\n", __FUNCTION__, event_type)); goto exit; } @@ -2645,14 +3725,12 @@ dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) break; default: - DHD_ERROR(("WLC_E_PROXD: not supported EVENT Type:%d\n", event_type)); + DHD_RTT_ERR(("WLC_E_PROXD: not supported EVENT Type:%d\n", event_type)); break; } exit: #ifdef WL_CFG80211 - if (!in_atomic()) { - mutex_unlock(&rtt_status->rtt_mutex); - } + mutex_unlock(&rtt_status->rtt_mutex); #endif /* WL_CFG80211 */ return ret; @@ -2664,21 +3742,14 @@ dhd_rtt_work(struct work_struct *work) { rtt_status_info_t *rtt_status; dhd_pub_t *dhd; -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif + + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); rtt_status = container_of(work, rtt_status_info_t, work); -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif - if (rtt_status == NULL) { - DHD_ERROR(("%s : rtt_status is NULL\n", __FUNCTION__)); - return; - } + GCC_DIAGNOSTIC_POP(); + dhd = rtt_status->dhd; if (dhd == NULL) { - DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__)); + DHD_RTT_ERR(("%s : dhd is NULL\n", __FUNCTION__)); return; } (void) dhd_rtt_start(dhd); @@ -2759,7 +3830,7 @@ dhd_rtt_avail_channel(dhd_pub_t *dhd, wifi_channel_info *channel_info) ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ); } } else { - DHD_ERROR(("Failed to get the chanspec \n")); + DHD_RTT_ERR(("Failed to get the chanspec \n")); } return err; } @@ -2803,18 +3874,18 @@ dhd_rtt_enable_responder(dhd_pub_t *dhd, wifi_channel_info *channel_info) wf_chspec_ntoa(chanspec, chanbuf))); err = 
wldev_iovar_setint(dev, "chanspec", chanspec); if (err) { - DHD_ERROR(("Failed to set the chanspec \n")); + DHD_RTT_ERR(("Failed to set the chanspec \n")); } } rtt_status->pm = PM_OFF; err = wldev_ioctl_get(dev, WLC_GET_PM, &rtt_status->pm, sizeof(rtt_status->pm)); DHD_RTT(("Current PM value read %d\n", rtt_status->pm)); if (err) { - DHD_ERROR(("Failed to get the PM value \n")); + DHD_RTT_ERR(("Failed to get the PM value \n")); } else { err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm)); if (err) { - DHD_ERROR(("Failed to set the PM \n")); + DHD_RTT_ERR(("Failed to set the PM \n")); rtt_status->pm_restore = FALSE; } else { rtt_status->pm_restore = TRUE; @@ -2823,7 +3894,7 @@ dhd_rtt_enable_responder(dhd_pub_t *dhd, wifi_channel_info *channel_info) if (!RTT_IS_ENABLED(rtt_status)) { err = dhd_rtt_ftm_enable(dhd, TRUE); if (err) { - DHD_ERROR(("Failed to enable FTM (%d)\n", err)); + DHD_RTT_ERR(("Failed to enable FTM (%d)\n", err)); goto exit; } DHD_RTT(("FTM enabled \n")); @@ -2841,14 +3912,14 @@ dhd_rtt_enable_responder(dhd_pub_t *dhd, wifi_channel_info *channel_info) exit: if (err) { rtt_status->status = RTT_STOPPED; - DHD_ERROR(("rtt is stopped %s \n", __FUNCTION__)); + DHD_RTT_ERR(("rtt is stopped %s \n", __FUNCTION__)); dhd_rtt_ftm_enable(dhd, FALSE); DHD_RTT(("restoring the PM value \n")); if (rtt_status->pm_restore) { pm = PM_FAST; err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm)); if (err) { - DHD_ERROR(("Failed to restore PM \n")); + DHD_RTT_ERR(("Failed to restore PM \n")); } else { rtt_status->pm_restore = FALSE; } @@ -2871,7 +3942,7 @@ dhd_rtt_cancel_responder(dhd_pub_t *dhd) DHD_RTT(("Enter %s \n", __FUNCTION__)); err = dhd_rtt_ftm_enable(dhd, FALSE); if (err) { - DHD_ERROR(("failed to disable FTM (%d)\n", err)); + DHD_RTT_ERR(("failed to disable FTM (%d)\n", err)); } rtt_status->status = RTT_STOPPED; if (rtt_status->pm_restore) { @@ -2879,7 +3950,7 @@ dhd_rtt_cancel_responder(dhd_pub_t *dhd) DHD_RTT(("pm_restore =%d \n", 
rtt_status->pm_restore)); err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm)); if (err) { - DHD_ERROR(("Failed to restore PM \n")); + DHD_RTT_ERR(("Failed to restore PM \n")); } else { rtt_status->pm_restore = FALSE; } @@ -2897,23 +3968,28 @@ dhd_rtt_init(dhd_pub_t *dhd) int32 drv_up = 1; int32 version; rtt_status_info_t *rtt_status; + ftm_config_param_info_t ftm_params[FTM_MAX_PARAMS]; + int ftm_param_cnt = 0; + NULL_CHECK(dhd, "dhd is NULL", err); dhd->rtt_supported = FALSE; if (dhd->rtt_state) { return err; } - dhd->rtt_state = kzalloc(sizeof(rtt_status_info_t), GFP_KERNEL); + dhd->rtt_state = (rtt_status_info_t *)MALLOCZ(dhd->osh, + sizeof(rtt_status_info_t)); if (dhd->rtt_state == NULL) { err = BCME_NOMEM; - DHD_ERROR(("%s : failed to create rtt_state\n", __FUNCTION__)); + DHD_RTT_ERR(("%s : failed to create rtt_state\n", __FUNCTION__)); return err; } bzero(dhd->rtt_state, sizeof(rtt_status_info_t)); rtt_status = GET_RTTSTATE(dhd); rtt_status->rtt_config.target_info = - kzalloc(TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT), GFP_KERNEL); + (rtt_target_info_t *)MALLOCZ(dhd->osh, + TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT)); if (rtt_status->rtt_config.target_info == NULL) { - DHD_ERROR(("%s failed to allocate the target info for %d\n", + DHD_RTT_ERR(("%s failed to allocate the target info for %d\n", __FUNCTION__, RTT_MAX_TARGET_CNT)); err = BCME_NOMEM; goto exit; @@ -2924,7 +4000,7 @@ dhd_rtt_init(dhd_pub_t *dhd) ret = dhd_rtt_get_version(dhd, &version); if (ret == BCME_OK && (version == WL_PROXD_API_VERSION)) { - DHD_ERROR(("%s : FTM is supported\n", __FUNCTION__)); + DHD_RTT_ERR(("%s : FTM is supported\n", __FUNCTION__)); dhd->rtt_supported = TRUE; /* rtt_status->rtt_capa.proto |= RTT_CAP_ONE_WAY; */ rtt_status->rtt_capa.proto |= RTT_CAP_FTM_WAY; @@ -2943,18 +4019,32 @@ dhd_rtt_init(dhd_pub_t *dhd) rtt_status->rtt_capa.bw |= RTT_BW_80; } else { if ((ret != BCME_OK) || (version == 0)) { - DHD_ERROR(("%s : FTM is not supported\n", __FUNCTION__)); + DHD_RTT_ERR(("%s : FTM 
is not supported\n", __FUNCTION__)); } else { - DHD_ERROR(("%s : FTM version mismatch between HOST (%d) and FW (%d)\n", + DHD_RTT_ERR(("%s : FTM version mismatch between HOST (%d) and FW (%d)\n", __FUNCTION__, WL_PROXD_API_VERSION, version)); } } /* cancel all of RTT request once we got the cancel request */ rtt_status->all_cancel = TRUE; mutex_init(&rtt_status->rtt_mutex); + mutex_init(&rtt_status->rtt_work_mutex); + mutex_init(&rtt_status->geofence_mutex); INIT_LIST_HEAD(&rtt_status->noti_fn_list); INIT_LIST_HEAD(&rtt_status->rtt_results_cache); INIT_WORK(&rtt_status->work, dhd_rtt_work); + /* initialize proxd timer */ + INIT_DELAYED_WORK(&rtt_status->proxd_timeout, dhd_rtt_timeout_work); +#ifdef WL_NAN + /* initialize proxd retry timer */ + INIT_DELAYED_WORK(&rtt_status->rtt_retry_timer, dhd_rtt_retry_work); +#endif /* WL_NAN */ + /* Global proxd config */ + ftm_params[ftm_param_cnt].event_mask = ((1 << WL_PROXD_EVENT_BURST_END) | + (1 << WL_PROXD_EVENT_SESSION_END)); + ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_EVENT_MASK; + dhd_rtt_ftm_config(dhd, 0, FTM_CONFIG_CAT_GENERAL, + ftm_params, ftm_param_cnt); exit: if (err < 0) { kfree(rtt_status->rtt_config.target_info); @@ -2980,11 +4070,7 @@ dhd_rtt_deinit(dhd_pub_t *dhd) rtt_status->status = RTT_STOPPED; DHD_RTT(("rtt is stopped %s \n", __FUNCTION__)); /* clear evt callback list */ -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif - + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); if (!list_empty(&rtt_status->noti_fn_list)) { list_for_each_entry_safe(iter, iter2, &rtt_status->noti_fn_list, list) { list_del(&iter->list); @@ -3003,9 +4089,11 @@ dhd_rtt_deinit(dhd_pub_t *dhd) kfree(rtt_header); } } -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif + GCC_DIAGNOSTIC_POP(); + + if (delayed_work_pending(&rtt_status->rtt_retry_timer)) { + 
cancel_delayed_work(&rtt_status->rtt_retry_timer); + } kfree(rtt_status->rtt_config.target_info); kfree(dhd->rtt_state); dhd->rtt_state = NULL; diff --git a/bcmdhd.100.10.315.x/dhd_rtt.h b/bcmdhd.100.10.315.x/dhd_rtt.h index df644ca..14d8926 100644 --- a/bcmdhd.100.10.315.x/dhd_rtt.h +++ b/bcmdhd.100.10.315.x/dhd_rtt.h @@ -1,7 +1,7 @@ /* * Broadcom Dongle Host Driver (DHD), RTT * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -38,11 +38,18 @@ #define DEFAULT_RETRY_CNT 6 #define DEFAULT_FTM_FREQ 5180 #define DEFAULT_FTM_CNTR_FREQ0 5210 +#define RTT_MAX_GEOFENCE_TARGET_CNT 8 #define TARGET_INFO_SIZE(count) (sizeof(rtt_target_info_t) * count) #define TARGET_TYPE(target) (target->type) +#define RTT_IS_ENABLED(rtt_status) (rtt_status->status == RTT_ENABLED) +#define RTT_IS_STOPPED(rtt_status) (rtt_status->status == RTT_STOPPED) + +#define GEOFENCE_RTT_LOCK(rtt_status) mutex_lock(&(rtt_status)->geofence_mutex) +#define GEOFENCE_RTT_UNLOCK(rtt_status) mutex_unlock(&(rtt_status)->geofence_mutex) + #ifndef BIT #define BIT(x) (1 << (x)) #endif // endif @@ -63,6 +70,13 @@ #define WL_RATE_54M 108 /* in 500kbps units */ #define GET_RTTSTATE(dhd) ((rtt_status_info_t *)dhd->rtt_state) +#ifdef WL_NAN +/* RTT Retry Timer Interval */ +#define DHD_RTT_RETRY_TIMER_INTERVAL_MS 3000u +#endif /* WL_NAN */ + +#define DHD_RTT_INVALID_TARGET_INDEX -1 + enum rtt_role { RTT_INITIATOR = 0, RTT_TARGET = 1 @@ -109,8 +123,8 @@ typedef enum rtt_reason { RTT_STATUS_FAIL_SCHEDULE = 11, // request could not be scheduled RTT_STATUS_FAIL_BUSY_TRY_LATER = 12, // responder cannot collaborate at time of request RTT_STATUS_INVALID_REQ = 13, // bad request args - RTT_STATUS_NO_WIFI = 14, // WiFi not enabled - // Responder overrides param info, cannot range with new params + RTT_STATUS_NO_WIFI = 14, // WiFi not enabled Responder 
overrides param info + // cannot range with new params RTT_STATUS_FAIL_FTM_PARAM_OVERRIDE = 15 } rtt_reason_t; @@ -149,6 +163,13 @@ enum rtt_rate_bw { RTT_RATE_160M }; +typedef enum ranging_type { + RTT_TYPE_INVALID = 0, + RTT_TYPE_LEGACY = 1, + RTT_TYPE_NAN_DIRECTED = 2, + RTT_TYPE_NAN_GEOFENCE = 3 +} ranging_type_t; + #define FTM_MAX_NUM_BURST_EXP 14 #define HAS_11MC_CAP(cap) (cap & RTT_CAP_FTM_WAY) #define HAS_ONEWAY_CAP(cap) (cap & RTT_CAP_ONE_WAY) @@ -210,7 +231,8 @@ typedef struct rtt_target_info { * in a single frame */ uint32 num_frames_per_burst; - /* num of frames in each RTT burst + /* + * num of frames in each RTT burst * for single side, measurement result num = frame number * for 2 side RTT, measurement result num = frame number - 1 */ @@ -230,35 +252,86 @@ typedef struct rtt_target_info { * at the end of the burst_duration it requested. */ uint32 burst_duration; + uint32 burst_timeout; uint8 preamble; /* 1 - Legacy, 2 - HT, 4 - VHT */ uint8 bw; /* 5, 10, 20, 40, 80, 160 */ } rtt_target_info_t; +typedef struct rtt_goefence_target_info { + bool valid; + struct ether_addr peer_addr; +} rtt_geofence_target_info_t; + typedef struct rtt_config_params { int8 rtt_target_cnt; rtt_target_info_t *target_info; } rtt_config_params_t; +typedef struct rtt_geofence_cfg { + int8 geofence_target_cnt; + bool rtt_in_progress; + bool role_concurr_state; + int8 cur_target_idx; + rtt_geofence_target_info_t geofence_target_info[RTT_MAX_GEOFENCE_TARGET_CNT]; + int geofence_rtt_interval; +#ifdef RTT_GEOFENCE_CONT + bool geofence_cont; +#endif /* RTT_GEOFENCE_CONT */ +} rtt_geofence_cfg_t; + +/* + * Keep Adding more reasons + * going forward if needed + */ +enum rtt_schedule_reason { + RTT_SCHED_HOST_TRIGGER = 1, /* On host command for directed RTT */ + RTT_SCHED_SUB_MATCH = 2, /* on Sub Match for svc with range req */ + RTT_SCHED_DIR_TRIGGER_FAIL = 3, /* On failure of Directed RTT Trigger */ + RTT_SCHED_DP_END = 4, /* ON NDP End event from fw */ + RTT_SCHED_DP_REJECTED = 5, 
/* On receving reject dp event from fw */ + RTT_SCHED_RNG_RPT_DIRECTED = 6, /* On Ranging report for directed RTT */ + RTT_SCHED_RNG_TERM = 7, /* On Range Term Indicator */ + RTT_SHCED_HOST_DIRECTED_TERM = 8, /* On host terminating directed RTT sessions */ + RTT_SCHED_RNG_RPT_GEOFENCE = 9, /* On Ranging report for geofence RTT */ + RTT_SCHED_RTT_RETRY_GEOFENCE = 10, /* On Geofence Retry */ + RTT_SCHED_RNG_TERM_PEND_ROLE_CHANGE = 11 /* On Rng Term, while pending role change */ +}; + +/* + * Keep Adding more invalid RTT states + * going forward if needed + */ +enum rtt_invalid_state { + RTT_STATE_VALID = 0, /* RTT state is valid */ + RTT_STATE_INV_REASON_NDP_EXIST = 1 /* RTT state invalid as ndp exists */ +}; + typedef struct rtt_status_info { - dhd_pub_t *dhd; - int8 status; /* current status for the current entry */ - int8 txchain; /* current device tx chain */ - int pm; /* to save current value of pm */ - int8 pm_restore; /* flag to reset the old value of pm */ - int8 cur_idx; /* current entry to do RTT */ - bool all_cancel; /* cancel all request once we got the cancel requet */ - uint32 flags; /* indicate whether device is configured as initiator or target */ - struct capability { + dhd_pub_t *dhd; + int8 status; /* current status for the current entry */ + int8 txchain; /* current device tx chain */ + int pm; /* to save current value of pm */ + int8 pm_restore; /* flag to reset the old value of pm */ + int8 cur_idx; /* current entry to do RTT */ + bool all_cancel; /* cancel all request once we got the cancel requet */ + uint32 flags; /* indicate whether device is configured as initiator or target */ + struct capability { int32 proto :8; int32 feature :8; int32 preamble :8; int32 bw :8; } rtt_capa; /* rtt capability */ - struct mutex rtt_mutex; - rtt_config_params_t rtt_config; - struct work_struct work; - struct list_head noti_fn_list; - struct list_head rtt_results_cache; /* store results for RTT */ + struct mutex rtt_mutex; + struct mutex rtt_work_mutex; + 
struct mutex geofence_mutex; + rtt_config_params_t rtt_config; + rtt_geofence_cfg_t geofence_cfg; + struct work_struct work; + struct list_head noti_fn_list; + struct list_head rtt_results_cache; /* store results for RTT */ + int rtt_sched_reason; /* rtt_schedule_reason: what scheduled RTT */ + struct delayed_work proxd_timeout; /* Proxd Timeout work */ + struct delayed_work rtt_retry_timer; /* Timer for retry RTT after all targets done */ } rtt_status_info_t; typedef struct rtt_report { @@ -305,12 +378,17 @@ typedef struct rtt_results_header { struct list_head list; struct list_head result_list; } rtt_results_header_t; - +struct rtt_result_detail { + uint8 num_ota_meas; + uint32 result_flags; +}; /* rtt_result to link all of rtt_report */ typedef struct rtt_result { struct list_head list; struct rtt_report report; int32 report_len; /* total length of rtt_report */ + struct rtt_result_detail rtt_detail; + int32 detail_len; } rtt_result_t; /* RTT Capabilities */ @@ -365,6 +443,57 @@ dhd_rtt_idx_to_burst_duration(uint idx); int dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params); +#ifdef WL_NAN +void dhd_rtt_initialize_geofence_cfg(dhd_pub_t *dhd); +#ifdef RTT_GEOFENCE_CONT +void dhd_rtt_set_geofence_cont_ind(dhd_pub_t *dhd, bool geofence_cont); + +void dhd_rtt_get_geofence_cont_ind(dhd_pub_t *dhd, bool* geofence_cont); +#endif /* RTT_GEOFENCE_CONT */ + +#ifdef RTT_GEOFENCE_INTERVAL +void dhd_rtt_set_geofence_rtt_interval(dhd_pub_t *dhd, int interval); +#endif /* RTT_GEOFENCE_INTERVAL */ + +void dhd_rtt_set_role_concurrency_state(dhd_pub_t *dhd, bool state); + +bool dhd_rtt_get_role_concurrency_state(dhd_pub_t *dhd); + +int8 dhd_rtt_get_geofence_target_cnt(dhd_pub_t *dhd); + +void dhd_rtt_set_geofence_rtt_state(dhd_pub_t *dhd, bool state); + +bool dhd_rtt_get_geofence_rtt_state(dhd_pub_t *dhd); + +rtt_geofence_target_info_t* +dhd_rtt_get_geofence_target_head(dhd_pub_t *dhd); + +rtt_geofence_target_info_t* +dhd_rtt_get_geofence_current_target(dhd_pub_t *dhd); 
+ +rtt_geofence_target_info_t* +dhd_rtt_get_geofence_target(dhd_pub_t *dhd, struct ether_addr* peer_addr, + int8 *index); + +int +dhd_rtt_add_geofence_target(dhd_pub_t *dhd, rtt_geofence_target_info_t *target); + +int +dhd_rtt_remove_geofence_target(dhd_pub_t *dhd, struct ether_addr *peer_addr); + +int +dhd_rtt_delete_geofence_target_list(dhd_pub_t *dhd); + +int +dhd_rtt_delete_nan_session(dhd_pub_t *dhd); +#endif /* WL_NAN */ + +uint8 +dhd_rtt_invalid_states(struct net_device *ndev, struct ether_addr *peer_addr); + +void +dhd_rtt_schedule_rtt_work_thread(dhd_pub_t *dhd, int sched_reason); + int dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt); @@ -394,4 +523,14 @@ dhd_rtt_init(dhd_pub_t *dhd); int dhd_rtt_deinit(dhd_pub_t *dhd); + +#ifdef WL_CFG80211 +int dhd_rtt_handle_nan_rtt_session_end(dhd_pub_t *dhd, + struct ether_addr *peer); + +void dhd_rtt_move_geofence_cur_target_idx_to_next(dhd_pub_t *dhd); + +int8 dhd_rtt_get_geofence_cur_target_idx(dhd_pub_t *dhd); +#endif /* WL_CFG80211 */ + #endif /* __DHD_RTT_H__ */ diff --git a/bcmdhd.100.10.315.x/dhd_sdio.c b/bcmdhd.100.10.315.x/dhd_sdio.c old mode 100755 new mode 100644 index 6833114..46c55fe --- a/bcmdhd.100.10.315.x/dhd_sdio.c +++ b/bcmdhd.100.10.315.x/dhd_sdio.c @@ -1,7 +1,7 @@ /* * DHD Bus Module for SDIO * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: dhd_sdio.c 771911 2018-07-12 12:35:33Z $ + * $Id: dhd_sdio.c 825481 2019-06-14 10:06:03Z $ */ #include @@ -191,7 +191,7 @@ extern unsigned int system_hw_rev; /* Device console log buffer state */ #define CONSOLE_LINE_MAX 192 -#define CONSOLE_BUFFER_MAX 2024 +#define CONSOLE_BUFFER_MAX 8192 typedef struct dhd_console { uint count; /* Poll interval msec counter */ uint log_addr; /* Log struct address (fixed) */ @@ -348,6 +348,10 @@ typedef struct dhd_bus { bool usebufpool; int32 txinrx_thres; /* num of in-queued pkts */ int32 dotxinrx; /* tx first in dhdsdio_readframes */ +#ifdef BCMSDIO_RXLIM_POST + bool rxlim_en; + uint32 rxlim_addr; +#endif /* BCMSDIO_RXLIM_POST */ #ifdef SDTEST /* external loopback */ bool ext_loop; @@ -723,6 +727,7 @@ static int dhd_serialconsole(dhd_bus_t *bus, bool get, bool enable, int *bcmerro #if defined(DHD_FW_COREDUMP) static int dhdsdio_mem_dump(dhd_bus_t *bus); +static int dhdsdio_get_mem_dump(dhd_bus_t *bus); #endif /* DHD_FW_COREDUMP */ static int dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap); static int dhdsdio_download_state(dhd_bus_t *bus, bool enter); @@ -943,7 +948,8 @@ dhdsdio_sr_cap(dhd_bus_t *bus) (bus->sih->chip == BCM4362_CHIP_ID) || (bus->sih->chip == BCM43012_CHIP_ID) || (bus->sih->chip == BCM43014_CHIP_ID) || - (bus->sih->chip == BCM43751_CHIP_ID)) { + (bus->sih->chip == BCM43751_CHIP_ID) || + (bus->sih->chip == BCM43752_CHIP_ID)) { core_capext = TRUE; } else { core_capext = bcmsdh_reg_read(bus->sdh, @@ -1021,7 +1027,8 @@ dhdsdio_sr_init(dhd_bus_t *bus) CHIPID(bus->sih->chip) == BCM43012_CHIP_ID || CHIPID(bus->sih->chip) == BCM4362_CHIP_ID || CHIPID(bus->sih->chip) == BCM43014_CHIP_ID || - CHIPID(bus->sih->chip) == BCM43751_CHIP_ID) + CHIPID(bus->sih->chip) == BCM43751_CHIP_ID || + CHIPID(bus->sih->chip) == BCM43752_CHIP_ID) 
dhdsdio_devcap_set(bus, SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC); if (bus->sih->chip == BCM43012_CHIP_ID) { @@ -1400,12 +1407,10 @@ dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok) DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err)); } -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) else if (ht_avail_error == HT_AVAIL_ERROR_MAX) { bus->dhd->hang_reason = HANG_REASON_HT_AVAIL_ERROR; dhd_os_send_hang_message(bus->dhd); } -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */ return BCME_ERROR; } else { ht_avail_error = 0; @@ -1940,6 +1945,7 @@ void dhdsdio_reset_bt_use_count(struct dhd_bus *bus) } #endif /* BT_OVER_SDIO */ +#ifdef USE_DYNAMIC_F2_BLKSIZE int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size) { int func_blk_size = function_num; @@ -1961,13 +1967,14 @@ int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size) bcmerr = dhd_bus_iovar_op(dhd, "sd_blocksize", NULL, 0, &func_blk_size, sizeof(int32), IOV_SET); if (bcmerr != BCME_OK) { - DHD_ERROR(("%s: Set F2 Block size error\n", __FUNCTION__)); + DHD_ERROR(("%s: Set F%d Block size error\n", __FUNCTION__, function_num)); return BCME_ERROR; } } return BCME_OK; } +#endif /* USE_DYNAMIC_F2_BLKSIZE */ #if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(FORCE_WOWLAN) void @@ -2037,7 +2044,7 @@ dhd_bus_txdata(struct dhd_bus *bus, void *pkt) prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK)); /* move from dhdsdio_sendfromq(), try to orphan skb early */ - if (bus->dhd->conf->orphan_move) + if (bus->dhd->conf->orphan_move == 1) PKTORPHAN(pkt, bus->dhd->conf->tsq); /* Check for existing queue, current flow-control, pending event, or pending clock */ @@ -2618,6 +2625,10 @@ dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes) #ifdef DHD_LOSSLESS_ROAMING uint8 *pktdata; struct ether_header *eh; +#ifdef BDC + struct bdc_header *bdc_header; + uint8 data_offset; +#endif // endif #endif /* DHD_LOSSLESS_ROAMING */ DHD_TRACE(("%s: Enter\n", __FUNCTION__)); @@ 
-2662,7 +2673,9 @@ dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes) pktdata = (uint8 *)PKTDATA(osh, pkts[i]); #ifdef BDC /* Skip BDC header */ - pktdata += BDC_HEADER_LEN + ((struct bdc_header *)pktdata)->dataOffset; + bdc_header = (struct bdc_header *)pktdata; + data_offset = bdc_header->dataOffset; + pktdata += BDC_HEADER_LEN + (data_offset << 2); #endif // endif eh = (struct ether_header *)pktdata; if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) { @@ -2671,6 +2684,11 @@ dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes) /* Restore to original priority for 802.1X packet */ if (prio == PRIO_8021D_NC) { PKTSETPRIO(pkts[i], dhd->prio_8021x); +#ifdef BDC + /* Restore to original priority in BDC header */ + bdc_header->priority = + (dhd->prio_8021x & BDC_PRIORITY_MASK); +#endif // endif } } #endif /* DHD_LOSSLESS_ROAMING */ @@ -2882,8 +2900,13 @@ dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen) } else { bus->dhd->txcnt_timeout++; if (!bus->dhd->hang_was_sent) { - DHD_ERROR(("%s: ctrl_frame_stat == TRUE txcnt_timeout=%d, bus->tx_max %d, bus->tx_seq %d\n", - __FUNCTION__, bus->dhd->txcnt_timeout, bus->tx_max, bus->tx_seq)); + DHD_ERROR(("%s: ctrl_frame_stat == TRUE txcnt_timeout=%d\n", + __FUNCTION__, bus->dhd->txcnt_timeout)); +#ifdef BCMSDIO_RXLIM_POST + DHD_ERROR(("%s: rxlim_en=%d, rxlim enable=%d, rxlim_addr=%d\n", + __FUNCTION__, + bus->dhd->conf->rxlim_en, bus->rxlim_en, bus->rxlim_addr)); +#endif /* BCMSDIO_RXLIM_POST */ } #ifdef DHD_FW_COREDUMP /* Collect socram dump */ @@ -2971,7 +2994,7 @@ dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen) return -EIO; /* Wait until control frame is available */ - timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, false); + timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen); dhd_os_sdlock(bus->dhd); rxlen = bus->rxlen; @@ -2992,7 +3015,7 @@ dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen) dhd_sched_dpc(bus->dhd); /* Wait until control frame is available */ - timeleft = 
dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, true); + timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen); dhd_os_sdlock(bus->dhd); rxlen = bus->rxlen; @@ -3551,6 +3574,21 @@ dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh) sh->console_addr = ltoh32(sh->console_addr); sh->msgtrace_addr = ltoh32(sh->msgtrace_addr); +#ifdef BCMSDIO_RXLIM_POST + if (sh->flags & SDPCM_SHARED_RXLIM_POST) { + if (bus->dhd->conf->rxlim_en) + bus->rxlim_en = !!sh->msgtrace_addr; + bus->rxlim_addr = sh->msgtrace_addr; + DHD_INFO(("%s: rxlim_en=%d, rxlim enable=%d, rxlim_addr=%d\n", + __FUNCTION__, + bus->dhd->conf->rxlim_en, bus->rxlim_en, bus->rxlim_addr)); + sh->flags &= ~SDPCM_SHARED_RXLIM_POST; + } else { + bus->rxlim_en = 0; + DHD_INFO(("%s: FW has no rx limit post support\n", __FUNCTION__)); + } +#endif /* BCMSDIO_RXLIM_POST */ + if ((sh->flags & SDPCM_SHARED_VERSION_MASK) == 3 && SDPCM_SHARED_VERSION == 1) return BCME_OK; @@ -3838,21 +3876,32 @@ dhd_bus_mem_dump(dhd_pub_t *dhdp) return dhdsdio_mem_dump(bus); } +int +dhd_bus_get_mem_dump(dhd_pub_t *dhdp) +{ + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + return dhdsdio_get_mem_dump(dhdp->bus); +} + static int -dhdsdio_mem_dump(dhd_bus_t *bus) +dhdsdio_get_mem_dump(dhd_bus_t *bus) { - int ret = 0; - int size; /* Full mem size */ + int ret = BCME_ERROR; + int size = bus->ramsize; /* Full mem size */ uint32 start = bus->dongle_ram_base; /* Start address */ uint read_size = 0; /* Read size of each iteration */ - uint8 *buf = NULL, *databuf = NULL; + uint8 *p_buf = NULL, *databuf = NULL; /* Get full mem size */ - size = bus->ramsize; - buf = dhd_get_fwdump_buf(bus->dhd, size); - if (!buf) { - DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size)); - return -1; + p_buf = dhd_get_fwdump_buf(bus->dhd, size); + if (!p_buf) { + DHD_ERROR(("%s: Out of memory (%d bytes)\n", + __FUNCTION__, size)); + return BCME_ERROR; } dhd_os_sdlock(bus->dhd); @@ -3861,12 +3910,11 @@ 
dhdsdio_mem_dump(dhd_bus_t *bus) /* Read mem content */ DHD_ERROR(("Dump dongle memory\n")); - databuf = buf; - while (size) - { + databuf = p_buf; + while (size) { read_size = MIN(MEMBLOCK, size); - if ((ret = dhdsdio_membytes(bus, FALSE, start, databuf, read_size))) - { + ret = dhdsdio_membytes(bus, FALSE, start, databuf, read_size); + if (ret) { DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret)); ret = BCME_ERROR; break; @@ -3885,10 +3933,31 @@ dhdsdio_mem_dump(dhd_bus_t *bus) dhd_os_sdunlock(bus->dhd); - /* schedule a work queue to perform actual memdump. dhd_mem_dump() performs the job */ - if (!ret) { - /* buf, actually soc_ram free handled in dhd_{free,clear} */ - dhd_schedule_memdump(bus->dhd, buf, bus->ramsize); + return ret; +} + +static int +dhdsdio_mem_dump(dhd_bus_t *bus) +{ + dhd_pub_t *dhdp; + int ret = BCME_ERROR; + + dhdp = bus->dhd; + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return ret; + } + + ret = dhdsdio_get_mem_dump(bus); + if (ret) { + DHD_ERROR(("%s: failed to get mem dump, err=%d\n", + __FUNCTION__, ret)); + } else { + /* schedule a work queue to perform actual memdump. 
+ * dhd_mem_dump() performs the job + */ + dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length); + /* soc_ram free handled in dhd_{free,clear} */ } return ret; @@ -4612,6 +4681,12 @@ dhdsdio_write_vars(dhd_bus_t *bus) return bcmerror; } +bool +dhd_bus_is_multibp_capable(struct dhd_bus *bus) +{ + return MULTIBP_CAP(bus->sih); +} + static int dhdsdio_download_state(dhd_bus_t *bus, bool enter) { @@ -4872,10 +4947,11 @@ dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, if (bcmsdh_iovar_op(bus->sdh, "sd_blocksize", &fnum, sizeof(int32), &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) { bus->blocksize = 0; - DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize")); + DHD_ERROR(("%s: fail on fn %d %s get\n", + __FUNCTION__, fnum, "sd_blocksize")); } else { - DHD_INFO(("%s: noted %s update, value now %d\n", - __FUNCTION__, "sd_blocksize", bus->blocksize)); + DHD_INFO(("%s: noted fn %d %s update, value now %d\n", + __FUNCTION__, fnum, "sd_blocksize", bus->blocksize)); dhdsdio_tune_fifoparam(bus); } @@ -4943,6 +5019,7 @@ dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex) if ((bus->dhd->busstate == DHD_BUS_DOWN) || bus->dhd->hang_was_sent) { /* if Firmware already hangs disbale any interrupt */ + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); bus->dhd->busstate = DHD_BUS_DOWN; bus->hostintmask = 0; bcmsdh_intr_disable(bus->sdh); @@ -4987,6 +5064,7 @@ dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex) /* Change our idea of bus state */ DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); bus->dhd->busstate = DHD_BUS_DOWN; DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); } @@ -5085,6 +5163,9 @@ dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) #else /* BCMSPI */ uint8 saveclk; #endif /* BCMSPI */ +#if defined(SDIO_ISR_THREAD) + int intr_extn; +#endif DHD_TRACE(("%s: Enter\n", __FUNCTION__)); @@ -5170,6 +5251,16 @@ dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) DHD_ERROR(("%s: enable 0x%02x, ready 
0x%02x (waited %uus)\n", __FUNCTION__, enable, ready, tmo.elapsed)); +#if defined(SDIO_ISR_THREAD) + if (dhdp->conf->intr_extn) { + intr_extn = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_INTR_EXTN, NULL); + if (intr_extn & 0x1) { + intr_extn |= 0x2; + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_INTR_EXTN, intr_extn, NULL); + } + } +#endif + /* If F2 successfully enabled, set core and enable interrupts */ if (ready == enable) { /* Make sure we're talking to the core. */ @@ -5325,8 +5416,10 @@ dhdsdio_rxfail(dhd_bus_t *bus, bool abort, bool rtx) fail: /* If we can't reach the device, signal failure */ - if (err || bcmsdh_regfail(sdh)) + if (err || bcmsdh_regfail(sdh)) { + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); bus->dhd->busstate = DHD_BUS_DOWN; + } } static void @@ -5664,7 +5757,7 @@ dhdsdio_rxglom(dhd_bus_t *bus, uint8 rxseq) /* Check window for sanity */ if ((uint8)(txmax - bus->tx_seq) > 0x70) { - DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + DHD_INFO(("%s: got unlikely tx max %d with tx_seq %d\n", __FUNCTION__, txmax, bus->tx_seq)); txmax = bus->tx_max; } @@ -6265,12 +6358,12 @@ dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished) if ((uint8)(txmax - bus->tx_seq) > 0x70) { #ifdef BCMSPI if ((bus->bus == SPI_BUS) && !(dstatus & STATUS_F2_RX_READY)) { - DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + DHD_INFO(("%s: got unlikely tx max %d with tx_seq %d\n", __FUNCTION__, txmax, bus->tx_seq)); txmax = bus->tx_seq + 2; } else { #endif /* BCMSPI */ - DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + DHD_INFO(("%s: got unlikely tx max %d with tx_seq %d\n", __FUNCTION__, txmax, bus->tx_seq)); txmax = bus->tx_max; #ifdef BCMSPI @@ -6430,7 +6523,7 @@ dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished) /* Check window for sanity */ if ((uint8)(txmax - bus->tx_seq) > 0x70) { - DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + DHD_INFO(("%s: got unlikely tx max %d with 
tx_seq %d\n", __FUNCTION__, txmax, bus->tx_seq)); txmax = bus->tx_max; } @@ -6766,6 +6859,7 @@ dhdsdio_dpc(dhd_bus_t *bus) devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); if (err) { DHD_ERROR(("%s: error reading DEVCTL: %d\n", __FUNCTION__, err)); + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); bus->dhd->busstate = DHD_BUS_DOWN; } else { ASSERT(devctl & SBSDIO_DEVCTL_CA_INT_ONLY); @@ -6776,6 +6870,7 @@ dhdsdio_dpc(dhd_bus_t *bus) clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); if (err) { DHD_ERROR(("%s: error reading CSR: %d\n", __FUNCTION__, err)); + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); bus->dhd->busstate = DHD_BUS_DOWN; } @@ -6786,6 +6881,7 @@ dhdsdio_dpc(dhd_bus_t *bus) if (err) { DHD_ERROR(("%s: error reading DEVCTL: %d\n", __FUNCTION__, err)); + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); bus->dhd->busstate = DHD_BUS_DOWN; } devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; @@ -6793,6 +6889,7 @@ dhdsdio_dpc(dhd_bus_t *bus) if (err) { DHD_ERROR(("%s: error writing DEVCTL: %d\n", __FUNCTION__, err)); + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); bus->dhd->busstate = DHD_BUS_DOWN; } bus->clkstate = CLK_AVAIL; @@ -6969,6 +7066,19 @@ clkwait: } #endif /* defined(OOB_INTR_ONLY) && !defined(HW_OOB) */ +#ifdef BCMSDIO_RXLIM_POST + if (!DATAOK(bus) && bus->rxlim_en) { + uint8 rxlim = 0; + if (0 == dhdsdio_membytes(bus, FALSE, bus->rxlim_addr, (uint8 *)&rxlim, 1)) { + if (bus->tx_max != rxlim) { + DHD_INFO(("%s: bus->tx_max/rxlim=%d/%d\n", __FUNCTION__, + bus->tx_max, rxlim)); + bus->tx_max = rxlim; + } + } + } +#endif /* BCMSDIO_RXLIM_POST */ + #ifdef PROP_TXSTATUS dhd_wlfc_commit_packets(bus->dhd, (f_commitpkt_t)dhd_bus_txdata, (void *)bus, NULL, FALSE); #endif // endif @@ -7021,6 +7131,7 @@ clkwait: } else { DHD_ERROR(("%s: failed backplane access over SDIO, halting operation\n", __FUNCTION__)); + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); bus->dhd->busstate = 
DHD_BUS_DOWN; bus->intstatus = 0; } @@ -7110,8 +7221,6 @@ dhdsdio_isr(void *arg) dhd_bus_t *bus = (dhd_bus_t*)arg; bcmsdh_info_t *sdh; - DHD_TRACE(("%s: Enter\n", __FUNCTION__)); - if (!bus) { DHD_ERROR(("%s : bus is null pointer , exit \n", __FUNCTION__)); return; @@ -7874,11 +7983,12 @@ dhdsdio_chipmatch(uint16 chipid) if (chipid == BCM4369_CHIP_ID) return TRUE; - if (chipid == BCM4362_CHIP_ID) return TRUE; if (chipid == BCM43751_CHIP_ID) return TRUE; + if (chipid == BCM43752_CHIP_ID) + return TRUE; return FALSE; } @@ -8062,7 +8172,8 @@ dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot, #endif /* GET_CUSTOM_MAC_ENABLE */ /* Ok, have the per-port tell the stack we're open for business */ - if (dhd_register_if(bus->dhd, 0, TRUE) != 0) { + if (dhd_attach_net(bus->dhd, TRUE) != 0) + { DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__)); goto fail; } @@ -8172,11 +8283,25 @@ dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva, value = F0_BLOCK_SIZE; else value = (cis[fn][25]<<8) | cis[fn][24] | (fn<<16); + /* Get block size from sd */ + if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fn, sizeof(int32), + &size, sizeof(int32), FALSE) != BCME_OK) { + size = 0; + DHD_ERROR(("%s: fail on fn %d %s get\n", + __FUNCTION__, fn, "sd_blocksize")); + } else { + DHD_INFO(("%s: Initial value for fn %d %s is %d\n", + __FUNCTION__, fn, "sd_blocksize", size)); + } + if (size != 0 && size < value) { + value = size; + } + value = fn << 16 | value; if (bcmsdh_iovar_op(sdh, "sd_blocksize", NULL, 0, &value, sizeof(value), TRUE) != BCME_OK) { bus->blocksize = 0; - DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, - "sd_blocksize")); + DHD_ERROR(("%s: fail on fn %d %s set\n", __FUNCTION__, + fn, "sd_blocksize")); } #endif #ifdef DHD_DEBUG @@ -8282,6 +8407,9 @@ dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva, case BCM43751_CHIP_ID: bus->dongle_ram_base = CR4_43751_RAM_BASE; break; + case BCM43752_CHIP_ID: + 
bus->dongle_ram_base = CR4_43752_RAM_BASE; + break; case BCM4369_CHIP_ID: bus->dongle_ram_base = CR4_4369_RAM_BASE; break; @@ -8426,6 +8554,7 @@ dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh) bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL); #endif /* !BCMSPI */ + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); bus->dhd->busstate = DHD_BUS_DOWN; bus->sleeping = FALSE; bus->rxflow = FALSE; @@ -8466,10 +8595,10 @@ dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh) if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fnum, sizeof(int32), &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) { bus->blocksize = 0; - DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize")); + DHD_ERROR(("%s: fail on fn %d %s get\n", __FUNCTION__, fnum, "sd_blocksize")); } else { - DHD_INFO(("%s: Initial value for %s is %d\n", - __FUNCTION__, "sd_blocksize", bus->blocksize)); + DHD_INFO(("%s: Initial value for fn %d %s is %d\n", + __FUNCTION__, fnum, "sd_blocksize", bus->blocksize)); dhdsdio_tune_fifoparam(bus); } @@ -8527,38 +8656,6 @@ dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, return ret; } -void -dhd_set_path_params(struct dhd_bus *bus) -{ - /* External conf takes precedence if specified */ - dhd_conf_preinit(bus->dhd); - - if (bus->dhd->conf_path[0] == '\0') { - dhd_conf_set_path(bus->dhd, "config.txt", bus->dhd->conf_path, bus->nv_path); - } - if (bus->dhd->clm_path[0] == '\0') { - dhd_conf_set_path(bus->dhd, "clm.blob", bus->dhd->clm_path, bus->fw_path); - } -#ifdef CONFIG_PATH_AUTO_SELECT - dhd_conf_set_conf_name_by_chip(bus->dhd, bus->dhd->conf_path); -#endif - - dhd_conf_read_config(bus->dhd, bus->dhd->conf_path); - - dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path); - dhd_conf_set_nv_name_by_chip(bus->dhd, bus->nv_path); - dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path); - - dhd_conf_set_fw_name_by_mac(bus->dhd, bus->sdh, bus->sih, bus->fw_path); - dhd_conf_set_nv_name_by_mac(bus->dhd, bus->sdh, 
bus->sih, bus->nv_path); - - printf("Final fw_path=%s\n", bus->fw_path); - printf("Final nv_path=%s\n", bus->nv_path); - printf("Final clm_path=%s\n", bus->dhd->clm_path); - printf("Final conf_path=%s\n", bus->dhd->conf_path); - -} - void dhd_set_bus_params(struct dhd_bus *bus) { @@ -8603,7 +8700,7 @@ dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh) /* Download the firmware */ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); - dhd_set_path_params(bus); + dhd_conf_set_path_params(bus->dhd, bus->sdh, bus->sih, bus->fw_path, bus->nv_path); dhd_set_bus_params(bus); ret = _dhdsdio_download_firmware(bus); @@ -8784,10 +8881,14 @@ dhdsdio_suspend(void *context) } DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + /* stop all interface network queue. */ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON); bus->dhd->busstate = DHD_BUS_SUSPEND; if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) { DHD_ERROR(("Tx Request is not ended\n")); bus->dhd->busstate = DHD_BUS_DATA; + /* resume all interface network queue. */ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF); DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); return -EBUSY; } @@ -8814,6 +8915,8 @@ dhdsdio_suspend(void *context) DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); if (ret) { bus->dhd->busstate = DHD_BUS_DATA; + /* resume all interface network queue. */ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF); } DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd); dhd_os_busbusy_wake(bus->dhd); @@ -8846,6 +8949,8 @@ dhdsdio_resume(void *context) DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd); bus->dhd->busstate = DHD_BUS_DATA; dhd_os_busbusy_wake(bus->dhd); + /* resume all interface network queue. 
*/ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF); DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); return 0; @@ -9457,11 +9562,13 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE, TRUE); bus->dhd->dongle_reset = TRUE; + DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__)); bus->dhd->up = FALSE; dhd_txglom_enable(dhdp, FALSE); dhd_os_sdunlock(dhdp); DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); bus->dhd->busstate = DHD_BUS_DOWN; DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); @@ -9472,7 +9579,7 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) } else { /* App must have restored power to device before calling */ - printf("\n\n%s: == WLAN ON ==\n", __FUNCTION__); + printf("\n\n%s: == Power ON ==\n", __FUNCTION__); if (bus->dhd->dongle_reset) { /* Turn on WLAN */ @@ -9487,6 +9594,7 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) bus->cl_devid)) { DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); bus->dhd->busstate = DHD_BUS_DOWN; DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); diff --git a/bcmdhd.100.10.315.x/dhd_static_buf.c b/bcmdhd.100.10.315.x/dhd_static_buf.c index 2352ba8..77ad69b 100644 --- a/bcmdhd.100.10.315.x/dhd_static_buf.c +++ b/bcmdhd.100.10.315.x/dhd_static_buf.c @@ -1,535 +1,464 @@ -#include -#include -#include -#include -#include -#include -#include - -#define DHD_STATIC_VERSION_STR "100.10.315.2" - -#define BCMDHD_SDIO -#define BCMDHD_PCIE - -enum dhd_prealloc_index { - DHD_PREALLOC_PROT = 0, -#if defined(BCMDHD_SDIO) - DHD_PREALLOC_RXBUF = 1, - DHD_PREALLOC_DATABUF = 2, -#endif - DHD_PREALLOC_OSL_BUF = 3, - DHD_PREALLOC_SKB_BUF = 4, - DHD_PREALLOC_WIPHY_ESCAN0 = 5, - DHD_PREALLOC_WIPHY_ESCAN1 = 6, - DHD_PREALLOC_DHD_INFO = 7, - DHD_PREALLOC_DHD_WLFC_INFO = 8, -#ifdef BCMDHD_PCIE - DHD_PREALLOC_IF_FLOW_LKUP = 9, -#endif - DHD_PREALLOC_MEMDUMP_BUF = 10, - DHD_PREALLOC_MEMDUMP_RAM = 11, - 
DHD_PREALLOC_DHD_WLFC_HANGER = 12, - DHD_PREALLOC_PKTID_MAP = 13, - DHD_PREALLOC_PKTID_MAP_IOCTL = 14, - DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15, - DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16, - DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17, - DHD_PREALLOC_STAT_REPORT_BUF = 18, - DHD_PREALLOC_WL_ESCAN_INFO = 19, - DHD_PREALLOC_FW_VERBOSE_RING = 20, - DHD_PREALLOC_FW_EVENT_RING = 21, - DHD_PREALLOC_DHD_EVENT_RING = 22, - DHD_PREALLOC_NAN_EVENT_RING = 23, - DHD_PREALLOC_MAX -}; - -#define STATIC_BUF_MAX_NUM 20 -#define STATIC_BUF_SIZE (PAGE_SIZE*2) - -#define DHD_PREALLOC_PROT_SIZE (16 * 1024) -#define DHD_PREALLOC_RXBUF_SIZE (24 * 1024) -#define DHD_PREALLOC_DATABUF_SIZE (64 * 1024) -#define DHD_PREALLOC_OSL_BUF_SIZE (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE) -#define DHD_PREALLOC_WIPHY_ESCAN0_SIZE (64 * 1024) -#define DHD_PREALLOC_DHD_INFO_SIZE (32 * 1024) -#define DHD_PREALLOC_MEMDUMP_RAM_SIZE (1290 * 1024) -#define DHD_PREALLOC_DHD_WLFC_HANGER_SIZE (73 * 1024) -#define DHD_PREALLOC_WL_ESCAN_INFO_SIZE (66 * 1024) -#ifdef CONFIG_64BIT -#define DHD_PREALLOC_IF_FLOW_LKUP_SIZE (20 * 1024 * 2) -#else -#define DHD_PREALLOC_IF_FLOW_LKUP_SIZE (20 * 1024) -#endif -#define FW_VERBOSE_RING_SIZE (256 * 1024) -#define FW_EVENT_RING_SIZE (64 * 1024) -#define DHD_EVENT_RING_SIZE (64 * 1024) -#define NAN_EVENT_RING_SIZE (64 * 1024) - -#if defined(CONFIG_64BIT) -#define WLAN_DHD_INFO_BUF_SIZE (24 * 1024) -#define WLAN_DHD_WLFC_BUF_SIZE (64 * 1024) -#define WLAN_DHD_IF_FLOW_LKUP_SIZE (64 * 1024) -#else -#define WLAN_DHD_INFO_BUF_SIZE (16 * 1024) -#define WLAN_DHD_WLFC_BUF_SIZE (64 * 1024) -#define WLAN_DHD_IF_FLOW_LKUP_SIZE (20 * 1024) -#endif /* CONFIG_64BIT */ -#define WLAN_DHD_MEMDUMP_SIZE (800 * 1024) - -#define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1) -#define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2) -#define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4) - -#define DHD_SKB_1PAGE_BUF_NUM 8 -#ifdef BCMDHD_PCIE -#define DHD_SKB_2PAGE_BUF_NUM 64 -#elif defined(BCMDHD_SDIO) -#define DHD_SKB_2PAGE_BUF_NUM 8 -#endif -#define 
DHD_SKB_4PAGE_BUF_NUM 1 - -/* The number is defined in linux_osl.c - * WLAN_SKB_1_2PAGE_BUF_NUM => STATIC_PKT_1_2PAGE_NUM - * WLAN_SKB_BUF_NUM => STATIC_PKT_MAX_NUM - */ -#define WLAN_SKB_1_2PAGE_BUF_NUM ((DHD_SKB_1PAGE_BUF_NUM) + \ - (DHD_SKB_2PAGE_BUF_NUM)) -#define WLAN_SKB_BUF_NUM ((WLAN_SKB_1_2PAGE_BUF_NUM) + (DHD_SKB_4PAGE_BUF_NUM)) - -void *wlan_static_prot = NULL; -void *wlan_static_rxbuf = NULL; -void *wlan_static_databuf = NULL; -void *wlan_static_osl_buf = NULL; -void *wlan_static_scan_buf0 = NULL; -void *wlan_static_scan_buf1 = NULL; -void *wlan_static_dhd_info_buf = NULL; -void *wlan_static_dhd_wlfc_info_buf = NULL; -void *wlan_static_if_flow_lkup = NULL; -void *wlan_static_dhd_memdump_ram_buf = NULL; -void *wlan_static_dhd_wlfc_hanger_buf = NULL; -void *wlan_static_wl_escan_info_buf = NULL; -void *wlan_static_fw_verbose_ring_buf = NULL; -void *wlan_static_fw_event_ring_buf = NULL; -void *wlan_static_dhd_event_ring_buf = NULL; -void *wlan_static_nan_event_ring_buf = NULL; - -static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM]; - -void *dhd_wlan_mem_prealloc(int section, unsigned long size) -{ - pr_err("%s: sectoin %d, %ld\n", __func__, section, size); - if (section == DHD_PREALLOC_PROT) - return wlan_static_prot; - -#if defined(BCMDHD_SDIO) - if (section == DHD_PREALLOC_RXBUF) - return wlan_static_rxbuf; - - if (section == DHD_PREALLOC_DATABUF) - return wlan_static_databuf; -#endif /* BCMDHD_SDIO */ - - if (section == DHD_PREALLOC_SKB_BUF) - return wlan_static_skb; - - if (section == DHD_PREALLOC_WIPHY_ESCAN0) - return wlan_static_scan_buf0; - - if (section == DHD_PREALLOC_WIPHY_ESCAN1) - return wlan_static_scan_buf1; - - if (section == DHD_PREALLOC_OSL_BUF) { - if (size > DHD_PREALLOC_OSL_BUF_SIZE) { - pr_err("request OSL_BUF(%lu) > %ld\n", - size, DHD_PREALLOC_OSL_BUF_SIZE); - return NULL; - } - return wlan_static_osl_buf; - } - - if (section == DHD_PREALLOC_DHD_INFO) { - if (size > DHD_PREALLOC_DHD_INFO_SIZE) { - pr_err("request DHD_INFO 
size(%lu) > %d\n", - size, DHD_PREALLOC_DHD_INFO_SIZE); - return NULL; - } - return wlan_static_dhd_info_buf; - } - if (section == DHD_PREALLOC_DHD_WLFC_INFO) { - if (size > WLAN_DHD_WLFC_BUF_SIZE) { - pr_err("request DHD_WLFC_INFO size(%lu) > %d\n", - size, WLAN_DHD_WLFC_BUF_SIZE); - return NULL; - } - return wlan_static_dhd_wlfc_info_buf; - } -#ifdef BCMDHD_PCIE - if (section == DHD_PREALLOC_IF_FLOW_LKUP) { - if (size > DHD_PREALLOC_IF_FLOW_LKUP_SIZE) { - pr_err("request DHD_IF_FLOW_LKUP size(%lu) > %d\n", - size, DHD_PREALLOC_IF_FLOW_LKUP_SIZE); - return NULL; - } - - return wlan_static_if_flow_lkup; - } -#endif /* BCMDHD_PCIE */ - if (section == DHD_PREALLOC_MEMDUMP_RAM) { - if (size > DHD_PREALLOC_MEMDUMP_RAM_SIZE) { - pr_err("request DHD_PREALLOC_MEMDUMP_RAM_SIZE(%lu) > %d\n", - size, DHD_PREALLOC_MEMDUMP_RAM_SIZE); - return NULL; - } - - return wlan_static_dhd_memdump_ram_buf; - } - if (section == DHD_PREALLOC_DHD_WLFC_HANGER) { - if (size > DHD_PREALLOC_DHD_WLFC_HANGER_SIZE) { - pr_err("request DHD_WLFC_HANGER size(%lu) > %d\n", - size, DHD_PREALLOC_DHD_WLFC_HANGER_SIZE); - return NULL; - } - return wlan_static_dhd_wlfc_hanger_buf; - } - if (section == DHD_PREALLOC_WL_ESCAN_INFO) { - if (size > DHD_PREALLOC_WL_ESCAN_INFO_SIZE) { - pr_err("request DHD_PREALLOC_WL_ESCAN_INFO_SIZE(%lu) > %d\n", - size, DHD_PREALLOC_WL_ESCAN_INFO_SIZE); - return NULL; - } - - return wlan_static_wl_escan_info_buf; - } - if (section == DHD_PREALLOC_FW_VERBOSE_RING) { - if (size > FW_VERBOSE_RING_SIZE) { - pr_err("request DHD_PREALLOC_FW_VERBOSE_RING(%lu) > %d\n", - size, FW_VERBOSE_RING_SIZE); - return NULL; - } - - return wlan_static_fw_verbose_ring_buf; - } - if (section == DHD_PREALLOC_FW_EVENT_RING) { - if (size > FW_EVENT_RING_SIZE) { - pr_err("request DHD_PREALLOC_FW_EVENT_RING(%lu) > %d\n", - size, FW_EVENT_RING_SIZE); - return NULL; - } - - return wlan_static_fw_event_ring_buf; - } - if (section == DHD_PREALLOC_DHD_EVENT_RING) { - if (size > DHD_EVENT_RING_SIZE) { - 
pr_err("request DHD_PREALLOC_DHD_EVENT_RING(%lu) > %d\n", - size, DHD_EVENT_RING_SIZE); - return NULL; - } - - return wlan_static_dhd_event_ring_buf; - } - if (section == DHD_PREALLOC_NAN_EVENT_RING) { - if (size > NAN_EVENT_RING_SIZE) { - pr_err("request DHD_PREALLOC_NAN_EVENT_RING(%lu) > %d\n", - size, NAN_EVENT_RING_SIZE); - return NULL; - } - - return wlan_static_nan_event_ring_buf; - } - if ((section < 0) || (section > DHD_PREALLOC_MAX)) - pr_err("request section id(%d) is out of max index %d\n", - section, DHD_PREALLOC_MAX); - - pr_err("%s: failed to alloc section %d, size=%ld\n", - __func__, section, size); - - return NULL; -} -EXPORT_SYMBOL(dhd_wlan_mem_prealloc); - -static int dhd_init_wlan_mem(void) -{ - int i; - int j; - printk(KERN_ERR "%s(): %s\n", __func__, DHD_STATIC_VERSION_STR); - - for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) { - wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE); - if (!wlan_static_skb[i]) { - goto err_skb_alloc; - } - pr_err("%s: sectoin %d skb[%d], size=%ld\n", __func__, - DHD_PREALLOC_SKB_BUF, i, DHD_SKB_1PAGE_BUFSIZE); - } - - for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) { - wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE); - if (!wlan_static_skb[i]) { - goto err_skb_alloc; - } - pr_err("%s: sectoin %d skb[%d], size=%ld\n", __func__, - DHD_PREALLOC_SKB_BUF, i, DHD_SKB_2PAGE_BUFSIZE); - } - -#if defined(BCMDHD_SDIO) - wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE); - if (!wlan_static_skb[i]) - goto err_skb_alloc; - pr_err("%s: sectoin %d skb[%d], size=%ld\n", __func__, - DHD_PREALLOC_SKB_BUF, i, DHD_SKB_4PAGE_BUFSIZE); -#endif /* BCMDHD_SDIO */ - - wlan_static_prot = kmalloc(DHD_PREALLOC_PROT_SIZE, GFP_KERNEL); - if (!wlan_static_prot) - goto err_mem_alloc; - pr_err("%s: sectoin %d, size=%d\n", __func__, - DHD_PREALLOC_PROT, DHD_PREALLOC_PROT_SIZE); - -#if defined(BCMDHD_SDIO) - wlan_static_rxbuf = kmalloc(DHD_PREALLOC_RXBUF_SIZE, GFP_KERNEL); - if (!wlan_static_rxbuf) - goto 
err_mem_alloc; - pr_err("%s: sectoin %d, size=%d\n", __func__, - DHD_PREALLOC_RXBUF, DHD_PREALLOC_RXBUF_SIZE); - - wlan_static_databuf = kmalloc(DHD_PREALLOC_DATABUF_SIZE, GFP_KERNEL); - if (!wlan_static_databuf) - goto err_mem_alloc; - pr_err("%s: sectoin %d, size=%d\n", __func__, - DHD_PREALLOC_DATABUF, DHD_PREALLOC_DATABUF_SIZE); -#endif /* BCMDHD_SDIO */ - - wlan_static_osl_buf = kmalloc(DHD_PREALLOC_OSL_BUF_SIZE, GFP_KERNEL); - if (!wlan_static_osl_buf) - goto err_mem_alloc; - pr_err("%s: sectoin %d, size=%ld\n", __func__, - DHD_PREALLOC_OSL_BUF, DHD_PREALLOC_OSL_BUF_SIZE); - - wlan_static_scan_buf0 = kmalloc(DHD_PREALLOC_WIPHY_ESCAN0_SIZE, GFP_KERNEL); - if (!wlan_static_scan_buf0) - goto err_mem_alloc; - pr_err("%s: sectoin %d, size=%d\n", __func__, - DHD_PREALLOC_WIPHY_ESCAN0, DHD_PREALLOC_WIPHY_ESCAN0_SIZE); - - wlan_static_dhd_info_buf = kmalloc(DHD_PREALLOC_DHD_INFO_SIZE, GFP_KERNEL); - if (!wlan_static_dhd_info_buf) - goto err_mem_alloc; - pr_err("%s: sectoin %d, size=%d\n", __func__, - DHD_PREALLOC_DHD_INFO, DHD_PREALLOC_DHD_INFO_SIZE); - - wlan_static_dhd_wlfc_info_buf = kmalloc(WLAN_DHD_WLFC_BUF_SIZE, GFP_KERNEL); - if (!wlan_static_dhd_wlfc_info_buf) - goto err_mem_alloc; - pr_err("%s: sectoin %d, size=%d\n", __func__, - DHD_PREALLOC_DHD_WLFC_INFO, WLAN_DHD_WLFC_BUF_SIZE); - -#ifdef BCMDHD_PCIE - wlan_static_if_flow_lkup = kmalloc(DHD_PREALLOC_IF_FLOW_LKUP_SIZE, GFP_KERNEL); - if (!wlan_static_if_flow_lkup) - goto err_mem_alloc; - pr_err("%s: sectoin %d, size=%d\n", __func__, - DHD_PREALLOC_IF_FLOW_LKUP, DHD_PREALLOC_IF_FLOW_LKUP_SIZE); -#endif /* BCMDHD_PCIE */ - - wlan_static_dhd_memdump_ram_buf = kmalloc(DHD_PREALLOC_MEMDUMP_RAM_SIZE, GFP_KERNEL); - if (!wlan_static_dhd_memdump_ram_buf) - goto err_mem_alloc; - pr_err("%s: sectoin %d, size=%d\n", __func__, - DHD_PREALLOC_MEMDUMP_RAM, DHD_PREALLOC_MEMDUMP_RAM_SIZE); - - wlan_static_dhd_wlfc_hanger_buf = kmalloc(DHD_PREALLOC_DHD_WLFC_HANGER_SIZE, GFP_KERNEL); - if (!wlan_static_dhd_wlfc_hanger_buf) 
- goto err_mem_alloc; - pr_err("%s: sectoin %d, size=%d\n", __func__, - DHD_PREALLOC_DHD_WLFC_HANGER, DHD_PREALLOC_DHD_WLFC_HANGER_SIZE); - - wlan_static_wl_escan_info_buf = kmalloc(DHD_PREALLOC_WL_ESCAN_INFO_SIZE, GFP_KERNEL); - if (!wlan_static_wl_escan_info_buf) - goto err_mem_alloc; - pr_err("%s: sectoin %d, size=%d\n", __func__, - DHD_PREALLOC_WL_ESCAN_INFO, DHD_PREALLOC_WL_ESCAN_INFO_SIZE); - - wlan_static_fw_verbose_ring_buf = kmalloc(FW_VERBOSE_RING_SIZE, GFP_KERNEL); - if (!wlan_static_fw_verbose_ring_buf) - goto err_mem_alloc; - pr_err("%s: sectoin %d, size=%d\n", __func__, - DHD_PREALLOC_FW_VERBOSE_RING, FW_VERBOSE_RING_SIZE); - - wlan_static_fw_event_ring_buf = kmalloc(FW_EVENT_RING_SIZE, GFP_KERNEL); - if (!wlan_static_fw_event_ring_buf) - goto err_mem_alloc; - pr_err("%s: sectoin %d, size=%d\n", __func__, - DHD_PREALLOC_FW_EVENT_RING, FW_EVENT_RING_SIZE); - - wlan_static_dhd_event_ring_buf = kmalloc(DHD_EVENT_RING_SIZE, GFP_KERNEL); - if (!wlan_static_dhd_event_ring_buf) - goto err_mem_alloc; - pr_err("%s: sectoin %d, size=%d\n", __func__, - DHD_PREALLOC_DHD_EVENT_RING, DHD_EVENT_RING_SIZE); - - wlan_static_nan_event_ring_buf = kmalloc(NAN_EVENT_RING_SIZE, GFP_KERNEL); - if (!wlan_static_nan_event_ring_buf) - goto err_mem_alloc; - pr_err("%s: sectoin %d, size=%d\n", __func__, - DHD_PREALLOC_NAN_EVENT_RING, NAN_EVENT_RING_SIZE); - - return 0; - -err_mem_alloc: - - if (wlan_static_prot) - kfree(wlan_static_prot); - -#if defined(BCMDHD_SDIO) - if (wlan_static_rxbuf) - kfree(wlan_static_rxbuf); - - if (wlan_static_databuf) - kfree(wlan_static_databuf); -#endif /* BCMDHD_SDIO */ - - if (wlan_static_osl_buf) - kfree(wlan_static_osl_buf); - - if (wlan_static_scan_buf0) - kfree(wlan_static_scan_buf0); - - if (wlan_static_scan_buf1) - kfree(wlan_static_scan_buf1); - - if (wlan_static_dhd_info_buf) - kfree(wlan_static_dhd_info_buf); - - if (wlan_static_dhd_wlfc_info_buf) - kfree(wlan_static_dhd_wlfc_info_buf); - -#ifdef BCMDHD_PCIE - if 
(wlan_static_if_flow_lkup) - kfree(wlan_static_if_flow_lkup); -#endif /* BCMDHD_PCIE */ - - if (wlan_static_dhd_memdump_ram_buf) - kfree(wlan_static_dhd_memdump_ram_buf); - - if (wlan_static_dhd_wlfc_hanger_buf) - kfree(wlan_static_dhd_wlfc_hanger_buf); - - if (wlan_static_wl_escan_info_buf) - kfree(wlan_static_wl_escan_info_buf); - -#ifdef BCMDHD_PCIE - if (wlan_static_fw_verbose_ring_buf) - kfree(wlan_static_fw_verbose_ring_buf); - - if (wlan_static_fw_event_ring_buf) - kfree(wlan_static_fw_event_ring_buf); - - if (wlan_static_dhd_event_ring_buf) - kfree(wlan_static_dhd_event_ring_buf); - - if (wlan_static_nan_event_ring_buf) - kfree(wlan_static_nan_event_ring_buf); -#endif /* BCMDHD_PCIE */ - - pr_err("%s: Failed to mem_alloc for WLAN\n", __func__); - - i = WLAN_SKB_BUF_NUM; - -err_skb_alloc: - pr_err("%s: Failed to skb_alloc for WLAN\n", __func__); - for (j = 0; j < i; j++) - dev_kfree_skb(wlan_static_skb[j]); - - return -ENOMEM; -} - -static int __init -dhd_static_buf_init(void) -{ - dhd_init_wlan_mem(); - - return 0; -} - -static void __exit -dhd_static_buf_exit(void) -{ - int i; - - pr_err("%s()\n", __FUNCTION__); - - for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) { - if (wlan_static_skb[i]) - dev_kfree_skb(wlan_static_skb[i]); - } - - for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) { - if (wlan_static_skb[i]) - dev_kfree_skb(wlan_static_skb[i]); - } - -#if defined(BCMDHD_SDIO) - if (wlan_static_skb[i]) - dev_kfree_skb(wlan_static_skb[i]); -#endif /* BCMDHD_SDIO */ - - if (wlan_static_prot) - kfree(wlan_static_prot); - -#if defined(BCMDHD_SDIO) - if (wlan_static_rxbuf) - kfree(wlan_static_rxbuf); - - if (wlan_static_databuf) - kfree(wlan_static_databuf); -#endif /* BCMDHD_SDIO */ - - if (wlan_static_osl_buf) - kfree(wlan_static_osl_buf); - - if (wlan_static_scan_buf0) - kfree(wlan_static_scan_buf0); - - if (wlan_static_scan_buf1) - kfree(wlan_static_scan_buf1); - - if (wlan_static_dhd_info_buf) - kfree(wlan_static_dhd_info_buf); - - if 
(wlan_static_dhd_wlfc_info_buf) - kfree(wlan_static_dhd_wlfc_info_buf); - -#ifdef BCMDHD_PCIE - if (wlan_static_if_flow_lkup) - kfree(wlan_static_if_flow_lkup); -#endif /* BCMDHD_PCIE */ - - if (wlan_static_dhd_memdump_ram_buf) - kfree(wlan_static_dhd_memdump_ram_buf); - - if (wlan_static_dhd_wlfc_hanger_buf) - kfree(wlan_static_dhd_wlfc_hanger_buf); - - if (wlan_static_wl_escan_info_buf) - kfree(wlan_static_wl_escan_info_buf); - -#ifdef BCMDHD_PCIE - if (wlan_static_fw_verbose_ring_buf) - kfree(wlan_static_fw_verbose_ring_buf); - - if (wlan_static_fw_event_ring_buf) - kfree(wlan_static_fw_event_ring_buf); - - if (wlan_static_dhd_event_ring_buf) - kfree(wlan_static_dhd_event_ring_buf); - - if (wlan_static_nan_event_ring_buf) - kfree(wlan_static_nan_event_ring_buf); -#endif - - return; -} - -module_init(dhd_static_buf_init); - -module_exit(dhd_static_buf_exit); +/* + * drivers/amlogic/wifi/dhd_static_buf.c + * + * Copyright (C) 2017 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + */ + +#define pr_fmt(fmt) "Wifi: %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DHD_STATIC_VERSION_STR "100.10.545.3" + +#define BCMDHD_SDIO +#define BCMDHD_PCIE + +enum dhd_prealloc_index { + DHD_PREALLOC_PROT = 0, +#if defined(BCMDHD_SDIO) + DHD_PREALLOC_RXBUF = 1, + DHD_PREALLOC_DATABUF = 2, +#endif + DHD_PREALLOC_OSL_BUF = 3, + DHD_PREALLOC_SKB_BUF = 4, + DHD_PREALLOC_WIPHY_ESCAN0 = 5, + DHD_PREALLOC_WIPHY_ESCAN1 = 6, + DHD_PREALLOC_DHD_INFO = 7, + DHD_PREALLOC_DHD_WLFC_INFO = 8, +#ifdef BCMDHD_PCIE + DHD_PREALLOC_IF_FLOW_LKUP = 9, +#endif + DHD_PREALLOC_MEMDUMP_BUF = 10, + DHD_PREALLOC_MEMDUMP_RAM = 11, + DHD_PREALLOC_DHD_WLFC_HANGER = 12, + DHD_PREALLOC_PKTID_MAP = 13, + DHD_PREALLOC_PKTID_MAP_IOCTL = 14, + DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15, + DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16, + DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17, + DHD_PREALLOC_STAT_REPORT_BUF = 18, + DHD_PREALLOC_WL_WEXT_INFO = 19, + DHD_PREALLOC_FW_VERBOSE_RING = 20, + DHD_PREALLOC_FW_EVENT_RING = 21, + DHD_PREALLOC_DHD_EVENT_RING = 22, + DHD_PREALLOC_NAN_EVENT_RING = 23, + DHD_PREALLOC_MAX +}; + +#define STATIC_BUF_MAX_NUM 20 +#define STATIC_BUF_SIZE (PAGE_SIZE*2) + +#define DHD_PREALLOC_PROT_SIZE (16 * 1024) +#define DHD_PREALLOC_RXBUF_SIZE (24 * 1024) +#define DHD_PREALLOC_DATABUF_SIZE (64 * 1024) +#define DHD_PREALLOC_OSL_BUF_SIZE (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE) +#define DHD_PREALLOC_WIPHY_ESCAN0_SIZE (64 * 1024) +#define DHD_PREALLOC_DHD_INFO_SIZE (32 * 1024) +#define DHD_PREALLOC_MEMDUMP_RAM_SIZE (1290 * 1024) +#define DHD_PREALLOC_DHD_WLFC_HANGER_SIZE (73 * 1024) +#define DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE (1024 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB) +#define DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE (8 * 1024) +#define DHD_PREALLOC_WL_WEXT_INFO_SIZE (70 * 1024) +#ifdef CONFIG_64BIT +#define DHD_PREALLOC_IF_FLOW_LKUP_SIZE (20 * 1024 * 2) +#else +#define DHD_PREALLOC_IF_FLOW_LKUP_SIZE (20 * 1024) +#endif 
+#define FW_VERBOSE_RING_SIZE (256 * 1024) +#define FW_EVENT_RING_SIZE (64 * 1024) +#define DHD_EVENT_RING_SIZE (64 * 1024) +#define NAN_EVENT_RING_SIZE (64 * 1024) + +#if defined(CONFIG_64BIT) +#define WLAN_DHD_INFO_BUF_SIZE (24 * 1024) +#define WLAN_DHD_WLFC_BUF_SIZE (64 * 1024) +#define WLAN_DHD_IF_FLOW_LKUP_SIZE (64 * 1024) +#else +#define WLAN_DHD_INFO_BUF_SIZE (16 * 1024) +#define WLAN_DHD_WLFC_BUF_SIZE (64 * 1024) +#define WLAN_DHD_IF_FLOW_LKUP_SIZE (20 * 1024) +#endif /* CONFIG_64BIT */ +#define WLAN_DHD_MEMDUMP_SIZE (800 * 1024) + +#define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1) +#define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2) +#define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4) + +#define DHD_SKB_1PAGE_BUF_NUM 8 +#define DHD_SKB_2PAGE_BUF_NUM 64 +#define DHD_SKB_4PAGE_BUF_NUM 1 + +/* The number is defined in linux_osl.c + * WLAN_SKB_1_2PAGE_BUF_NUM => STATIC_PKT_1_2PAGE_NUM + * WLAN_SKB_BUF_NUM => STATIC_PKT_MAX_NUM + */ +#define WLAN_SKB_1_2PAGE_BUF_NUM ((DHD_SKB_1PAGE_BUF_NUM) + \ + (DHD_SKB_2PAGE_BUF_NUM)) +#define WLAN_SKB_BUF_NUM ((WLAN_SKB_1_2PAGE_BUF_NUM) + (DHD_SKB_4PAGE_BUF_NUM)) + +void *wlan_static_prot; +void *wlan_static_rxbuf; +void *wlan_static_databuf; +void *wlan_static_osl_buf; +void *wlan_static_scan_buf0; +void *wlan_static_scan_buf1; +void *wlan_static_dhd_info_buf; +void *wlan_static_dhd_wlfc_info_buf; +void *wlan_static_if_flow_lkup; +void *wlan_static_dhd_memdump_ram_buf; +void *wlan_static_dhd_wlfc_hanger_buf; +void *wlan_static_dhd_log_dump_buf; +void *wlan_static_dhd_log_dump_buf_ex; +void *wlan_static_wl_escan_info_buf; +void *wlan_static_fw_verbose_ring_buf; +void *wlan_static_fw_event_ring_buf; +void *wlan_static_dhd_event_ring_buf; +void *wlan_static_nan_event_ring_buf; + +static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM]; + +void *bcmdhd_mem_prealloc(int section, unsigned long size) +{ + pr_info("sectoin %d, size %ld\n", section, size); + if (section == DHD_PREALLOC_PROT) + return wlan_static_prot; + +#if defined(BCMDHD_SDIO) + if 
(section == DHD_PREALLOC_RXBUF) + return wlan_static_rxbuf; + + if (section == DHD_PREALLOC_DATABUF) + return wlan_static_databuf; +#endif /* BCMDHD_SDIO */ + + if (section == DHD_PREALLOC_SKB_BUF) + return wlan_static_skb; + + if (section == DHD_PREALLOC_WIPHY_ESCAN0) + return wlan_static_scan_buf0; + + if (section == DHD_PREALLOC_WIPHY_ESCAN1) + return wlan_static_scan_buf1; + + if (section == DHD_PREALLOC_OSL_BUF) { + if (size > DHD_PREALLOC_OSL_BUF_SIZE) { + pr_err("request OSL_BUF(%lu) > %ld\n", + size, DHD_PREALLOC_OSL_BUF_SIZE); + return NULL; + } + return wlan_static_osl_buf; + } + + if (section == DHD_PREALLOC_DHD_INFO) { + if (size > DHD_PREALLOC_DHD_INFO_SIZE) { + pr_err("request DHD_INFO size(%lu) > %d\n", + size, DHD_PREALLOC_DHD_INFO_SIZE); + return NULL; + } + return wlan_static_dhd_info_buf; + } + if (section == DHD_PREALLOC_DHD_WLFC_INFO) { + if (size > WLAN_DHD_WLFC_BUF_SIZE) { + pr_err("request DHD_WLFC_INFO size(%lu) > %d\n", + size, WLAN_DHD_WLFC_BUF_SIZE); + return NULL; + } + return wlan_static_dhd_wlfc_info_buf; + } +#ifdef BCMDHD_PCIE + if (section == DHD_PREALLOC_IF_FLOW_LKUP) { + if (size > DHD_PREALLOC_IF_FLOW_LKUP_SIZE) { + pr_err("request DHD_IF_FLOW_LKUP size(%lu) > %d\n", + size, DHD_PREALLOC_IF_FLOW_LKUP_SIZE); + return NULL; + } + + return wlan_static_if_flow_lkup; + } +#endif /* BCMDHD_PCIE */ + if (section == DHD_PREALLOC_MEMDUMP_RAM) { + if (size > DHD_PREALLOC_MEMDUMP_RAM_SIZE) { + pr_err("request DHD_PREALLOC_MEMDUMP_RAM_SIZE(%lu) > %d\n", + size, DHD_PREALLOC_MEMDUMP_RAM_SIZE); + return NULL; + } + + return wlan_static_dhd_memdump_ram_buf; + } + if (section == DHD_PREALLOC_DHD_WLFC_HANGER) { + if (size > DHD_PREALLOC_DHD_WLFC_HANGER_SIZE) { + pr_err("request DHD_WLFC_HANGER size(%lu) > %d\n", + size, DHD_PREALLOC_DHD_WLFC_HANGER_SIZE); + return NULL; + } + return wlan_static_dhd_wlfc_hanger_buf; + } + if (section == DHD_PREALLOC_DHD_LOG_DUMP_BUF) { + if (size > DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE) { + pr_err("request 
DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE(%lu) > %d\n", + size, DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE); + return NULL; + } + + return wlan_static_dhd_log_dump_buf; + } + if (section == DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX) { + if (size > DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE) { + pr_err("request DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE(%lu) > %d\n", + size, DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE); + return NULL; + } + + return wlan_static_dhd_log_dump_buf_ex; + } + if (section == DHD_PREALLOC_WL_WEXT_INFO) { + if (size > DHD_PREALLOC_WL_WEXT_INFO_SIZE) { + pr_err("request DHD_PREALLOC_WL_WEXT_INFO_SIZE(%lu) > %d\n", + size, DHD_PREALLOC_WL_WEXT_INFO_SIZE); + return NULL; + } + + return wlan_static_wl_escan_info_buf; + } + if (section == DHD_PREALLOC_FW_VERBOSE_RING) { + if (size > FW_VERBOSE_RING_SIZE) { + pr_err("request DHD_PREALLOC_FW_VERBOSE_RING(%lu) > %d\n", + size, FW_VERBOSE_RING_SIZE); + return NULL; + } + + return wlan_static_fw_verbose_ring_buf; + } + if (section == DHD_PREALLOC_FW_EVENT_RING) { + if (size > FW_EVENT_RING_SIZE) { + pr_err("request DHD_PREALLOC_FW_EVENT_RING(%lu) > %d\n", + size, FW_EVENT_RING_SIZE); + return NULL; + } + + return wlan_static_fw_event_ring_buf; + } + if (section == DHD_PREALLOC_DHD_EVENT_RING) { + if (size > DHD_EVENT_RING_SIZE) { + pr_err("request DHD_PREALLOC_DHD_EVENT_RING(%lu) > %d\n", + size, DHD_EVENT_RING_SIZE); + return NULL; + } + + return wlan_static_dhd_event_ring_buf; + } + if (section == DHD_PREALLOC_NAN_EVENT_RING) { + if (size > NAN_EVENT_RING_SIZE) { + pr_err("request DHD_PREALLOC_NAN_EVENT_RING(%lu) > %d\n", + size, NAN_EVENT_RING_SIZE); + return NULL; + } + + return wlan_static_nan_event_ring_buf; + } + if ((section < 0) || (section > DHD_PREALLOC_MAX)) + pr_err("request section id(%d) is out of max index %d\n", + section, DHD_PREALLOC_MAX); + + pr_err("%s: failed to alloc section %d, size=%ld\n", + __func__, section, size); + + return NULL; +} +EXPORT_SYMBOL(bcmdhd_mem_prealloc); + +int bcmdhd_init_wlan_mem(void) +{ + int 
i; + int j; + pr_info("%s(): %s\n", __func__, DHD_STATIC_VERSION_STR); + + for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) { + wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE); + if (!wlan_static_skb[i]) + goto err_skb_alloc; + } + + for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) { + wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE); + if (!wlan_static_skb[i]) + goto err_skb_alloc; + } + +#if defined(BCMDHD_SDIO) + wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE); + if (!wlan_static_skb[i]) + goto err_skb_alloc; +#endif /* BCMDHD_SDIO */ + + wlan_static_prot = kmalloc(DHD_PREALLOC_PROT_SIZE, GFP_KERNEL); + if (!wlan_static_prot) + goto err_mem_alloc; + +#if defined(BCMDHD_SDIO) + wlan_static_rxbuf = kmalloc(DHD_PREALLOC_RXBUF_SIZE, GFP_KERNEL); + if (!wlan_static_rxbuf) + goto err_mem_alloc; + + wlan_static_databuf = kmalloc(DHD_PREALLOC_DATABUF_SIZE, GFP_KERNEL); + if (!wlan_static_databuf) + goto err_mem_alloc; +#endif /* BCMDHD_SDIO */ + + wlan_static_osl_buf = kmalloc(DHD_PREALLOC_OSL_BUF_SIZE, GFP_KERNEL); + if (!wlan_static_osl_buf) + goto err_mem_alloc; + + wlan_static_scan_buf0 = kmalloc(DHD_PREALLOC_WIPHY_ESCAN0_SIZE, GFP_KERNEL); + if (!wlan_static_scan_buf0) + goto err_mem_alloc; + + wlan_static_dhd_info_buf = kmalloc(DHD_PREALLOC_DHD_INFO_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_info_buf) + goto err_mem_alloc; + + wlan_static_dhd_wlfc_info_buf = kmalloc(WLAN_DHD_WLFC_BUF_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_wlfc_info_buf) + goto err_mem_alloc; + +#ifdef BCMDHD_PCIE + wlan_static_if_flow_lkup = kmalloc(DHD_PREALLOC_IF_FLOW_LKUP_SIZE, GFP_KERNEL); + if (!wlan_static_if_flow_lkup) + goto err_mem_alloc; +#endif /* BCMDHD_PCIE */ + + wlan_static_dhd_memdump_ram_buf = kmalloc(DHD_PREALLOC_MEMDUMP_RAM_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_memdump_ram_buf) + goto err_mem_alloc; + + wlan_static_dhd_wlfc_hanger_buf = kmalloc(DHD_PREALLOC_DHD_WLFC_HANGER_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_wlfc_hanger_buf) 
+ goto err_mem_alloc; + + wlan_static_dhd_log_dump_buf = kmalloc(DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_log_dump_buf) + goto err_mem_alloc; + + wlan_static_dhd_log_dump_buf_ex = kmalloc(DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_log_dump_buf_ex) + goto err_mem_alloc; + + wlan_static_wl_escan_info_buf = kmalloc(DHD_PREALLOC_WL_WEXT_INFO_SIZE, GFP_KERNEL); + if (!wlan_static_wl_escan_info_buf) + goto err_mem_alloc; + + wlan_static_fw_verbose_ring_buf = kmalloc(FW_VERBOSE_RING_SIZE, GFP_KERNEL); + if (!wlan_static_fw_verbose_ring_buf) + goto err_mem_alloc; + + wlan_static_fw_event_ring_buf = kmalloc(FW_EVENT_RING_SIZE, GFP_KERNEL); + if (!wlan_static_fw_event_ring_buf) + goto err_mem_alloc; + + wlan_static_dhd_event_ring_buf = kmalloc(DHD_EVENT_RING_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_event_ring_buf) + goto err_mem_alloc; + + wlan_static_nan_event_ring_buf = kmalloc(NAN_EVENT_RING_SIZE, GFP_KERNEL); + if (!wlan_static_nan_event_ring_buf) + goto err_mem_alloc; + + pr_info("bcmdhd_init_wlan_mem prealloc ok\n"); + return 0; + +err_mem_alloc: + + if (wlan_static_prot) + kfree(wlan_static_prot); + +#if defined(BCMDHD_SDIO) + if (wlan_static_rxbuf) + kfree(wlan_static_rxbuf); + + if (wlan_static_databuf) + kfree(wlan_static_databuf); +#endif /* BCMDHD_SDIO */ + + if (wlan_static_osl_buf) + kfree(wlan_static_osl_buf); + + if (wlan_static_scan_buf0) + kfree(wlan_static_scan_buf0); + + if (wlan_static_scan_buf1) + kfree(wlan_static_scan_buf1); + + if (wlan_static_dhd_info_buf) + kfree(wlan_static_dhd_info_buf); + + if (wlan_static_dhd_wlfc_info_buf) + kfree(wlan_static_dhd_wlfc_info_buf); + +#ifdef BCMDHD_PCIE + if (wlan_static_if_flow_lkup) + kfree(wlan_static_if_flow_lkup); +#endif /* BCMDHD_PCIE */ + + if (wlan_static_dhd_memdump_ram_buf) + kfree(wlan_static_dhd_memdump_ram_buf); + + if (wlan_static_dhd_wlfc_hanger_buf) + kfree(wlan_static_dhd_wlfc_hanger_buf); + + if (wlan_static_dhd_log_dump_buf) + 
kfree(wlan_static_dhd_log_dump_buf); + + if (wlan_static_dhd_log_dump_buf_ex) + kfree(wlan_static_dhd_log_dump_buf_ex); + + if (wlan_static_wl_escan_info_buf) + kfree(wlan_static_wl_escan_info_buf); + +#ifdef BCMDHD_PCIE + if (wlan_static_fw_verbose_ring_buf) + kfree(wlan_static_fw_verbose_ring_buf); + + if (wlan_static_fw_event_ring_buf) + kfree(wlan_static_fw_event_ring_buf); + + if (wlan_static_dhd_event_ring_buf) + kfree(wlan_static_dhd_event_ring_buf); + + if (wlan_static_nan_event_ring_buf) + kfree(wlan_static_nan_event_ring_buf); +#endif /* BCMDHD_PCIE */ + + pr_err("%s: Failed to mem_alloc for WLAN\n", __func__); + + i = WLAN_SKB_BUF_NUM; + +err_skb_alloc: + pr_err("%s: Failed to skb_alloc for WLAN\n", __func__); + for (j = 0; j < i; j++) + dev_kfree_skb(wlan_static_skb[j]); + + return -ENOMEM; +} +EXPORT_SYMBOL(bcmdhd_init_wlan_mem); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("AMLOGIC"); +MODULE_DESCRIPTION("wifi device tree driver"); diff --git a/bcmdhd.100.10.315.x/dhd_wlfc.c b/bcmdhd.100.10.315.x/dhd_wlfc.c index 09eab7d..a7f034e 100644 --- a/bcmdhd.100.10.315.x/dhd_wlfc.c +++ b/bcmdhd.100.10.315.x/dhd_wlfc.c @@ -1,7 +1,7 @@ /* * DHD PROP_TXSTATUS Module. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -3580,7 +3580,7 @@ dhd_wlfc_init(dhd_pub_t *dhd) dhd_os_wlfc_block(dhd); if (dhd->wlfc_enabled) { - DHD_ERROR(("%s():%d, Already enabled!\n", __FUNCTION__, __LINE__)); + DHD_INFO(("%s():%d, Already enabled!\n", __FUNCTION__, __LINE__)); dhd_os_wlfc_unblock(dhd); return BCME_OK; } @@ -3604,7 +3604,7 @@ dhd_wlfc_init(dhd_pub_t *dhd) Leaving the message for now, it should be removed after a while; once the tlv situation is stable. 
*/ - DHD_ERROR(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n", + DHD_INFO(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n", dhd->wlfc_enabled?"enabled":"disabled", tlv)); } @@ -3614,7 +3614,7 @@ dhd_wlfc_init(dhd_pub_t *dhd) ret = dhd_wl_ioctl_get_intiovar(dhd, "wlfc_mode", &fw_caps, WLC_GET_VAR, FALSE, 0); if (!ret) { - DHD_ERROR(("%s: query wlfc_mode succeed, fw_caps=0x%x\n", __FUNCTION__, fw_caps)); + DHD_INFO(("%s: query wlfc_mode succeed, fw_caps=0x%x\n", __FUNCTION__, fw_caps)); if (WLFC_IS_OLD_DEF(fw_caps)) { #ifdef BCMDBUS @@ -3645,7 +3645,7 @@ dhd_wlfc_init(dhd_pub_t *dhd) } } - DHD_ERROR(("dhd_wlfc_init(): wlfc_mode=0x%x, ret=%d\n", dhd->wlfc_mode, ret)); + DHD_INFO(("dhd_wlfc_init(): wlfc_mode=0x%x, ret=%d\n", dhd->wlfc_mode, ret)); #ifdef LIMIT_BORROW dhd->wlfc_borrow_allowed = TRUE; #endif // endif @@ -3810,8 +3810,6 @@ dhd_wlfc_deinit(dhd_pub_t *dhd) dhd->proptxstatus_mode = hostreorder ? WLFC_ONLY_AMPDU_HOSTREORDER : WLFC_FCMODE_NONE; - DHD_ERROR(("%s: wlfc_mode=0x%x, tlv=%d\n", __FUNCTION__, dhd->wlfc_mode, tlv)); - dhd_os_wlfc_unblock(dhd); if (dhd->plat_deinit) diff --git a/bcmdhd.100.10.315.x/dhd_wlfc.h b/bcmdhd.100.10.315.x/dhd_wlfc.h index 918de38..88b0650 100644 --- a/bcmdhd.100.10.315.x/dhd_wlfc.h +++ b/bcmdhd.100.10.315.x/dhd_wlfc.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/dngl_stats.h b/bcmdhd.100.10.315.x/dngl_stats.h index 6bbe9fd..a570f97 100644 --- a/bcmdhd.100.10.315.x/dngl_stats.h +++ b/bcmdhd.100.10.315.x/dngl_stats.h @@ -2,7 +2,7 @@ * Common stats definitions for clients of dongle * ports * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/dngl_wlhdr.h b/bcmdhd.100.10.315.x/dngl_wlhdr.h index 6c297c8..ca367ad 100644 --- a/bcmdhd.100.10.315.x/dngl_wlhdr.h +++ b/bcmdhd.100.10.315.x/dngl_wlhdr.h @@ -1,7 +1,7 @@ /* * Dongle WL Header definitions * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/frag.c b/bcmdhd.100.10.315.x/frag.c index c2fde6b..de6f98c 100644 --- a/bcmdhd.100.10.315.x/frag.c +++ b/bcmdhd.100.10.315.x/frag.c @@ -2,7 +2,7 @@ * IE/TLV fragmentation/defragmentation support for * Broadcom 802.11bang Networking Device Driver * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -73,7 +73,7 @@ bcm_tlv_dot11_defrag(const void *buf, uint buf_len, uint8 id, bool id_ext, } /* adjust buf_len to length after ie including it */ - buf_len -= ((const uint8 *)ie - (const uint8 *)buf); + buf_len -= (uint)(((const uint8 *)ie - (const uint8 *)buf)); /* update length from fragments, okay if no next ie */ while ((ie = bcm_next_tlv(ie, &buf_len)) && diff --git a/bcmdhd.100.10.315.x/frag.h b/bcmdhd.100.10.315.x/frag.h index 5ab2092..b415047 100644 --- a/bcmdhd.100.10.315.x/frag.h +++ b/bcmdhd.100.10.315.x/frag.h @@ -2,7 +2,7 @@ * IE/TLV (de)fragmentation declarations/definitions for * Broadcom 802.11abgn Networking Device Driver * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/hnd_pktpool.c b/bcmdhd.100.10.315.x/hnd_pktpool.c index 93ad389..3bbe7dc 100644 --- a/bcmdhd.100.10.315.x/hnd_pktpool.c +++ b/bcmdhd.100.10.315.x/hnd_pktpool.c @@ -1,7 +1,7 @@ /* * HND generic packet pool operation primitives * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/hnd_pktq.c b/bcmdhd.100.10.315.x/hnd_pktq.c index dabeff0..5d88483 100644 --- a/bcmdhd.100.10.315.x/hnd_pktq.c +++ b/bcmdhd.100.10.315.x/hnd_pktq.c @@ -1,7 +1,7 @@ /* * HND generic pktq operation primitives * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/hndlhl.c b/bcmdhd.100.10.315.x/hndlhl.c index 73973cb..f563e71 100644 --- a/bcmdhd.100.10.315.x/hndlhl.c +++ b/bcmdhd.100.10.315.x/hndlhl.c @@ -2,7 +2,7 @@ * Misc utility routines for accessing lhl specific features * of the SiliconBackplane-based Broadcom chips. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/hndmem.c b/bcmdhd.100.10.315.x/hndmem.c index 6c8a513..7ad6323 100644 --- a/bcmdhd.100.10.315.x/hndmem.c +++ b/bcmdhd.100.10.315.x/hndmem.c @@ -1,7 +1,7 @@ /* * Utility routines for configuring different memories in Broadcom chips. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/hndpmu.c b/bcmdhd.100.10.315.x/hndpmu.c index 3a6da91..4cf392d 100644 --- a/bcmdhd.100.10.315.x/hndpmu.c +++ b/bcmdhd.100.10.315.x/hndpmu.c @@ -2,7 +2,7 @@ * Misc utility routines for accessing PMU corerev specific features * of the SiliconBackplane-based Broadcom chips. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: hndpmu.c 714395 2017-08-04 08:22:31Z $ + * $Id: hndpmu.c 783841 2018-10-09 06:24:16Z $ */ /** diff --git a/bcmdhd.100.10.315.x/include/802.11.h b/bcmdhd.100.10.315.x/include/802.11.h index 77c51d2..5f8be4e 100644 --- a/bcmdhd.100.10.315.x/include/802.11.h +++ b/bcmdhd.100.10.315.x/include/802.11.h @@ -1,7 +1,7 @@ /* * Fundamental types and constants relating to 802.11 * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: 802.11.h 765782 2018-06-05 10:47:00Z $ + * $Id: 802.11.h 814166 2019-04-10 06:14:49Z $ */ #ifndef _802_11_H_ @@ -257,7 +257,7 @@ BWL_PRE_PACKED_STRUCT struct dot11_bcn_prb { uint16 capability; } BWL_POST_PACKED_STRUCT; #define DOT11_BCN_PRB_LEN 12 /* 802.11 beacon/probe frame fixed length */ -#define DOT11_BCN_PRB_FIXED_LEN 12 /* 802.11 beacon/probe frame fixed length */ +#define DOT11_BCN_PRB_FIXED_LEN 12u /* 802.11 beacon/probe frame fixed length */ BWL_PRE_PACKED_STRUCT struct dot11_auth { uint16 alg; /* algorithm */ @@ -565,6 +565,7 @@ typedef struct dot11_extcap_ie dot11_extcap_ie_t; #define DOT11_EXTCAP_LEN_WNM_NOTIFICATION 6 #define DOT11_EXTCAP_LEN_TDLS_WBW 8 #define DOT11_EXTCAP_LEN_OPMODE_NOTIFICATION 8 +#define DOT11_EXTCAP_LEN_TWT 10u /* TDLS Capabilities */ #define DOT11_TDLS_CAP_TDLS 37 /* TDLS support */ @@ -1473,6 +1474,8 @@ typedef struct ccx_qfl_ie ccx_qfl_ie_t; #define DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID 73 /* d11 mgmt OBSS Intolerant Channel list */ #define DOT11_MNG_HT_OBSS_ID 74 /* d11 mgmt OBSS HT info */ #define DOT11_MNG_MMIE_ID 76 /* d11 mgmt MIC IE */ +#define DOT11_MNG_NONTRANS_BSSID_CAP_ID 83 /* 11k nontransmitted BSSID capability */ +#define DOT11_MNG_MULTIPLE_BSSIDINDEX_ID 85 /* 11k multiple BSSID index */ #define DOT11_MNG_FMS_DESCR_ID 86 /* 11v FMS descriptor */ #define DOT11_MNG_FMS_REQ_ID 87 /* 11v FMS request id */ #define DOT11_MNG_FMS_RESP_ID 88 /* 11v FMS response id */ @@ -1533,8 +1536,8 @@ typedef struct ccx_qfl_ie ccx_qfl_ie_t; #define DOT11_MNG_HE_CAP_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_HE_CAP_ID) #define EXT_MNG_HE_OP_ID 36u /* HE Operation IE, 11ax */ #define DOT11_MNG_HE_OP_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_HE_OP_ID) -#define EXT_MNG_RAPS_ID 37u /* OFDMA Random Access Parameter Set */ -#define DOT11_MNG_RAPS_ID (DOT11_MNG_ID_EXT_ID + 
EXT_MNG_RAPS_ID) +#define EXT_MNG_UORA_ID 37u /* UORA Parameter Set */ +#define DOT11_MNG_UORA_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_UORA_ID) #define EXT_MNG_MU_EDCA_ID 38u /* MU EDCA Parameter Set */ #define DOT11_MNG_MU_EDCA_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_MU_EDCA_ID) #define EXT_MNG_SRPS_ID 39u /* Spatial Reuse Parameter Set */ @@ -1568,6 +1571,8 @@ typedef struct ccx_qfl_ie ccx_qfl_ie_t; #define FILS_HLP_CONTAINER_EXT_ID FILS_EXTID_MNG_HLP_CONTAINER_ID #define DOT11_ESP_EXT_ID OCE_EXTID_MNG_ESP_ID #define FILS_REQ_PARAMS_EXT_ID FILS_EXTID_MNG_REQ_PARAMS +#define EXT_MNG_RAPS_ID 37u /* OFDMA Random Access Parameter Set */ +#define DOT11_MNG_RAPS_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_RAPS_ID) /* End of deprecated definitions */ #define DOT11_MNG_IE_ID_EXT_MATCH(_ie, _id) (\ @@ -1678,6 +1683,8 @@ typedef struct ccx_qfl_ie ccx_qfl_ie_t; #define DOT11_EXT_CAP_TIMBC 18 /* BSS Transition Management support bit position */ #define DOT11_EXT_CAP_BSSTRANS_MGMT 19 +/* Multiple BSSID support position */ +#define DOT11_EXT_CAP_MULTIBSSID 22 /* Direct Multicast Service */ #define DOT11_EXT_CAP_DMS 26 /* Interworking support bit position */ @@ -1698,13 +1705,15 @@ typedef struct ccx_qfl_ie ccx_qfl_ie_t; #define DOT11_EXT_CAP_FTM_INITIATOR 71 /* tentative 11mcd3.0 */ #define DOT11_EXT_CAP_FILS 72 /* FILS Capability */ /* TWT support */ -#define DOT11_EXT_CAP_TWT_REQUESTER 75 -#define DOT11_EXT_CAP_TWT_RESPONDER 76 +#define DOT11_EXT_CAP_TWT_REQUESTER 77 +#define DOT11_EXT_CAP_TWT_RESPONDER 78 +#define DOT11_EXT_CAP_OBSS_NB_RU_OFDMA 79 +#define DOT11_EXT_CAP_EMBSS_ADVERTISE 80 /* TODO: Update DOT11_EXT_CAP_MAX_IDX to reflect the highest offset. * Note: DOT11_EXT_CAP_MAX_IDX must only be used in attach path. * It will cause ROM invalidation otherwise. */ -#define DOT11_EXT_CAP_MAX_IDX 76 +#define DOT11_EXT_CAP_MAX_IDX 80 #define DOT11_EXT_CAP_MAX_BIT_IDX 95 /* !!!update this please!!! 
*/ @@ -1997,6 +2006,7 @@ BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_req { } BWL_POST_PACKED_STRUCT; typedef struct dot11_bsstrans_req dot11_bsstrans_req_t; #define DOT11_BSSTRANS_REQ_LEN 7 /* Fixed length */ +#define DOT11_BSSTRANS_REQ_FIXED_LEN 7u /* Fixed length */ /* BSS Mgmt Transition Request Mode Field - 802.11v */ #define DOT11_BSSTRANS_REQMODE_PREF_LIST_INCL 0x01 @@ -2890,7 +2900,7 @@ BWL_PRE_PACKED_STRUCT struct dot11_rmreq_bcn { struct ether_addr bssid; } BWL_POST_PACKED_STRUCT; typedef struct dot11_rmreq_bcn dot11_rmreq_bcn_t; -#define DOT11_RMREQ_BCN_LEN 18 +#define DOT11_RMREQ_BCN_LEN 18u BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn { uint8 reg; @@ -2945,6 +2955,16 @@ typedef struct dot11_rmrep_last_bcn_rpt_ind_req dot11_rmrep_last_bcn_rpt_ind_req #define DOT11_RMREP_BCN_LAST_RPT_IND 164 #define DOT11_RMREP_BCN_FRM_BODY_LEN_MAX 224 /* 802.11k-2008 7.3.2.22.6 */ +/* Refer IEEE P802.11-REVmd/D1.0 9.4.2.21.7 Beacon report */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn_frm_body_fragmt_id { + uint8 id; /* DOT11_RMREP_BCN_FRM_BODY_FRAG_ID */ + uint8 len; /* length of remaining fields */ + /* More fragments(B15), fragment Id(B8-B14), Bcn rpt instance ID (B0 - B7) */ + uint16 frag_info_rpt_id; +} BWL_POST_PACKED_STRUCT; + +typedef struct dot11_rmrep_bcn_frm_body_fragmt_id dot11_rmrep_bcn_frm_body_fragmt_id_t; + BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn_frm_body_frag_id { uint8 id; /* DOT11_RMREP_BCN_FRM_BODY_FRAG_ID */ uint8 len; /* length of remaining fields */ @@ -2953,9 +2973,15 @@ BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn_frm_body_frag_id { } BWL_POST_PACKED_STRUCT; typedef struct dot11_rmrep_bcn_frm_body_frag_id dot11_rmrep_bcn_frm_body_frag_id_t; -#define DOT11_RMREP_BCNRPT_FRAG_ID_DATA_LEN 2 +#define DOT11_RMREP_BCNRPT_FRAG_ID_DATA_LEN 2u #define DOT11_RMREP_BCNRPT_FRAG_ID_SE_LEN sizeof(dot11_rmrep_bcn_frm_body_frag_id_t) -#define DOT11_RMREP_BCNRPT_FRAG_ID_NUM_SHIFT 1 +#define DOT11_RMREP_BCNRPT_FRAG_ID_NUM_SHIFT 1u +#define 
DOT11_RMREP_BCNRPT_FRAGMT_ID_SE_LEN sizeof(dot11_rmrep_bcn_frm_body_fragmt_id_t) +#define DOT11_RMREP_BCNRPT_BCN_RPT_ID_MASK 0x00FFu +#define DOT11_RMREP_BCNRPT_FRAGMT_ID_NUM_SHIFT 8u +#define DOT11_RMREP_BCNRPT_FRAGMT_ID_NUM_MASK 0x7F00u +#define DOT11_RMREP_BCNRPT_MORE_FRAG_SHIFT 15u +#define DOT11_RMREP_BCNRPT_MORE_FRAG_MASK 0x8000u BWL_PRE_PACKED_STRUCT struct dot11_rmrep_last_bcn_rpt_ind { uint8 id; /* DOT11_RMREP_BCN_LAST_RPT_IND */ @@ -3782,6 +3808,34 @@ enum { BRCM_FTM_VS_COLLECT_SUBTYPE = 2, /* FTM Collect debug protocol */ }; +/* + * This BRCM_PROP_OUI types is intended for use in events to embed additional + * data, and would not be expected to appear on the air -- but having an IE + * format allows IE frame data with extra data in events in that allows for + * more flexible parsing. + */ +#define BRCM_EVT_WL_BSS_INFO 64 + +/** + * Following is the generic structure for brcm_prop_ie (uses BRCM_PROP_OUI). + * DPT uses this format with type set to DPT_IE_TYPE + */ +BWL_PRE_PACKED_STRUCT struct brcm_prop_ie_s { + uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */ + uint8 len; /* IE length */ + uint8 oui[3]; + uint8 type; /* type of this IE */ + uint16 cap; /* DPT capabilities */ +} BWL_POST_PACKED_STRUCT; +typedef struct brcm_prop_ie_s brcm_prop_ie_t; + +#define BRCM_PROP_IE_LEN 6 /* len of fixed part of brcm_prop ie */ + +#define DPT_IE_TYPE 2 + +#define BRCM_SYSCAP_IE_TYPE 3 +#define WET_TUNNEL_IE_TYPE 3 + /* brcm syscap_ie cap */ #define BRCM_SYSCAP_WET_TUNNEL 0x0100 /* Device with WET_TUNNEL support */ @@ -4381,30 +4435,43 @@ typedef struct vht_features_ie_hdr vht_features_ie_hdr_t; VHT_MCS_SS_SUPPORTED(2, mcsMap) ? 2 : \ VHT_MCS_SS_SUPPORTED(1, mcsMap) ? 1 : 0 +#ifdef IBSS_RMC +/* customer's OUI */ +#define RMC_PROP_OUI "\x00\x16\x32" +#endif // endif + /* ************* WPA definitions. 
************* */ #define WPA_OUI "\x00\x50\xF2" /* WPA OUI */ #define WPA_OUI_LEN 3 /* WPA OUI length */ #define WPA_OUI_TYPE 1 #define WPA_VERSION 1 /* WPA version */ #define WPA_VERSION_LEN 2 /* WPA version length */ + +/* ************* WPA2 definitions. ************* */ #define WPA2_OUI "\x00\x0F\xAC" /* WPA2 OUI */ #define WPA2_OUI_LEN 3 /* WPA2 OUI length */ #define WPA2_VERSION 1 /* WPA2 version */ #define WPA2_VERSION_LEN 2 /* WAP2 version length */ -#define MAX_RSNE_SUPPORTED_VERSION WPA2_VERSION /* Max supported version */ +#define MAX_RSNE_SUPPORTED_VERSION WPA2_VERSION /* Max supported version */ /* ************* WPS definitions. ************* */ #define WPS_OUI "\x00\x50\xF2" /* WPS OUI */ #define WPS_OUI_LEN 3 /* WPS OUI length */ #define WPS_OUI_TYPE 4 +/* ************* TPC definitions. ************* */ +#define TPC_OUI "\x00\x50\xF2" /* TPC OUI */ +#define TPC_OUI_LEN 3 /* TPC OUI length */ +#define TPC_OUI_TYPE 8 +#define WFA_OUI_TYPE_TPC 8 /* deprecated */ + /* ************* WFA definitions. 
************* */ #define WFA_OUI "\x50\x6F\x9A" /* WFA OUI */ -#define WFA_OUI_LEN 3 /* WFA OUI length */ +#define WFA_OUI_LEN 3 /* WFA OUI length */ #define WFA_OUI_TYPE_P2P 9 #ifdef WL_LEGACY_P2P -#define APPLE_OUI "\x00\x17\xF2" /* MACOSX OUI */ +#define APPLE_OUI "\x00\x17\xF2" /* MACOSX OUI */ #define APPLE_OUI_LEN 3 #define APPLE_OUI_TYPE_P2P 5 #endif /* WL_LEGACY_P2P */ @@ -4419,7 +4486,6 @@ typedef struct vht_features_ie_hdr vht_features_ie_hdr_t; #define P2P_OUI_TYPE APPLE_OUI_TYPE_P2P #endif /* !WL_LEGACY_P2P */ -#define WFA_OUI_TYPE_TPC 8 #ifdef WLTDLS #define WFA_OUI_TYPE_TPQ 4 /* WFD Tunneled Probe ReQuest */ #define WFA_OUI_TYPE_TPS 5 /* WFD Tunneled Probe ReSponse */ @@ -4514,11 +4580,20 @@ typedef struct dot11_mdid_ie dot11_mdid_ie_t; /* length of data portion of Mobility Domain IE */ #define DOT11_MDID_IE_DATA_LEN 3 - +#define DOT11_MDID_LEN 2 #define FBT_MDID_CAP_OVERDS 0x01 /* Fast Bss transition over the DS support */ #define FBT_MDID_CAP_RRP 0x02 /* Resource request protocol support */ -/** Fast Bss Transition IE */ +/* Fast Bss Transition IE */ +#ifdef FT_IE_VER_V2 +typedef BWL_PRE_PACKED_STRUCT struct dot11_ft_ie_v2 { + uint8 id; + uint8 len; + uint16 mic_control; + /* dynamic offset to following mic[], anonce[], snonce[] */ +} BWL_POST_PACKED_STRUCT dot11_ft_ie_v2; +typedef struct dot11_ft_ie_v2 dot11_ft_ie_t; +#else BWL_PRE_PACKED_STRUCT struct dot11_ft_ie { uint8 id; uint8 len; /* At least equal to DOT11_FT_IE_FIXED_LEN (82) */ @@ -4534,7 +4609,20 @@ typedef struct dot11_ft_ie dot11_ft_ie_t; * optional parameters, which if present, could raise the FT IE length to 255. 
*/ #define DOT11_FT_IE_FIXED_LEN 82 - +#endif /* FT_IE_VER_V2 */ + +#ifdef FT_IE_VER_V2 +#define DOT11_FT_IE_LEN(mic_len) (sizeof(dot11_ft_ie_v2) + mic_len + EAPOL_WPA_KEY_NONCE_LEN *2) +#define FT_IE_MIC(pos) ((uint8 *)pos + sizeof(dot11_ft_ie_v2)) +#define FT_IE_ANONCE(pos, mic_len) ((uint8 *)pos + sizeof(dot11_ft_ie_v2) + mic_len) +#define FT_IE_SNONCE(pos, mic_len) ((uint8 *)pos + sizeof(dot11_ft_ie_v2) + mic_len + \ + EAPOL_WPA_KEY_NONCE_LEN) +#else +#define DOT11_FT_IE_LEN(mic_len) sizeof(dot11_ft_ie) +#define FT_IE_MIC(pos) ((uint8 *)&pos->mic) +#define FT_IE_ANONCE(pos, mic_len) ((uint8 *)&pos->anonce) +#define FT_IE_SNONCE(pos, mic_len) ((uint8 *)&pos->snonce) +#endif /* FT_IE_VER_V2 */ #define TIE_TYPE_RESERVED 0 #define TIE_TYPE_REASSOC_DEADLINE 1 #define TIE_TYPE_KEY_LIEFTIME 2 @@ -4980,6 +5068,17 @@ BWL_PRE_PACKED_STRUCT struct dot11_ftm_vs_ie { } BWL_POST_PACKED_STRUCT; typedef struct dot11_ftm_vs_ie dot11_ftm_vs_ie_t; +/* same as payload of dot11_ftm_vs_ie. +* This definition helps in having struct access +* of pay load while building FTM VS IE from other modules(NAN) +*/ +BWL_PRE_PACKED_STRUCT struct dot11_ftm_vs_ie_pyld { + uint8 sub_type; /* BRCM_FTM_IE_TYPE (or Customer) */ + uint8 version; + ftm_vs_tlv_t tlvs[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_vs_ie_pyld dot11_ftm_vs_ie_pyld_t; + /* ftm vs api version */ #define BCM_FTM_VS_PARAMS_VERSION 0x01 @@ -5253,6 +5352,11 @@ typedef enum { #define BCM_AIBSS_IE_TYPE 56 +#define SSE_OUI "\x00\x00\xF0" +#define VENDOR_ENTERPRISE_STA_OUI_TYPE 0x22 +#define MAX_VSIE_DISASSOC (1) +#define DISCO_VSIE_LEN 0x09u + /* This marks the end of a packed structure section. */ #include diff --git a/bcmdhd.100.10.315.x/include/802.11e.h b/bcmdhd.100.10.315.x/include/802.11e.h index 73dec3c..7a0e2d4 100644 --- a/bcmdhd.100.10.315.x/include/802.11e.h +++ b/bcmdhd.100.10.315.x/include/802.11e.h @@ -1,7 +1,7 @@ /* * 802.11e protocol header file * - * Copyright (C) 1999-2018, Broadcom. 
+ * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: 802.11e.h 700076 2017-05-17 14:42:22Z $ + * $Id: 802.11e.h 785355 2018-10-18 05:32:56Z $ */ #ifndef _802_11e_H_ diff --git a/bcmdhd.100.10.315.x/include/802.11s.h b/bcmdhd.100.10.315.x/include/802.11s.h index 96121d8..6ceabb7 100644 --- a/bcmdhd.100.10.315.x/include/802.11s.h +++ b/bcmdhd.100.10.315.x/include/802.11s.h @@ -1,7 +1,7 @@ /* * Fundamental types and constants relating to 802.11s Mesh * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/802.1d.h b/bcmdhd.100.10.315.x/include/802.1d.h index efc60b8..31a251f 100644 --- a/bcmdhd.100.10.315.x/include/802.1d.h +++ b/bcmdhd.100.10.315.x/include/802.1d.h @@ -1,7 +1,7 @@ /* * Fundamental types and constants relating to 802.1D * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/802.3.h b/bcmdhd.100.10.315.x/include/802.3.h index b96b362..dee50a9 100644 --- a/bcmdhd.100.10.315.x/include/802.3.h +++ b/bcmdhd.100.10.315.x/include/802.3.h @@ -1,7 +1,7 @@ /* * Fundamental constants relating to 802.3 * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/aidmp.h b/bcmdhd.100.10.315.x/include/aidmp.h index aae2178..0007f83 100644 --- a/bcmdhd.100.10.315.x/include/aidmp.h +++ b/bcmdhd.100.10.315.x/include/aidmp.h @@ -1,7 +1,7 @@ /* * Broadcom AMBA Interconnect definitions. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcm_cfg.h b/bcmdhd.100.10.315.x/include/bcm_cfg.h index f77dace..07e2087 100644 --- a/bcmdhd.100.10.315.x/include/bcm_cfg.h +++ b/bcmdhd.100.10.315.x/include/bcm_cfg.h @@ -1,7 +1,7 @@ /* * BCM common config options * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcm_mpool_pub.h b/bcmdhd.100.10.315.x/include/bcm_mpool_pub.h index 90a14e2..755a51b 100644 --- a/bcmdhd.100.10.315.x/include/bcm_mpool_pub.h +++ b/bcmdhd.100.10.315.x/include/bcm_mpool_pub.h @@ -35,7 +35,7 @@ * and instrumentation on top of the heap, without modifying the heap * allocation implementation. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcm_ring.h b/bcmdhd.100.10.315.x/include/bcm_ring.h index 3736175..beb9660 100644 --- a/bcmdhd.100.10.315.x/include/bcm_ring.h +++ b/bcmdhd.100.10.315.x/include/bcm_ring.h @@ -6,7 +6,7 @@ * * NOTE: A ring of size N, may only hold N-1 elements. 
* - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -101,7 +101,7 @@ * private L1 data cache. * +---------------------------------------------------------------------------- * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcmarp.h b/bcmdhd.100.10.315.x/include/bcmarp.h new file mode 100644 index 0000000..4ebb21d --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmarp.h @@ -0,0 +1,90 @@ +/* + * Fundamental constants relating to ARP Protocol + * + * Copyright (C) 1999-2019, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmarp.h 701633 2017-05-25 23:07:17Z $ + */ + +#ifndef _bcmarp_h_ +#define _bcmarp_h_ + +#ifndef _TYPEDEFS_H_ +#include +#endif // endif +#include + +/* This marks the start of a packed structure section. */ +#include + +#define ARP_OPC_OFFSET 6 /* option code offset */ +#define ARP_SRC_ETH_OFFSET 8 /* src h/w address offset */ +#define ARP_SRC_IP_OFFSET 14 /* src IP address offset */ +#define ARP_TGT_ETH_OFFSET 18 /* target h/w address offset */ +#define ARP_TGT_IP_OFFSET 24 /* target IP address offset */ + +#define ARP_OPC_REQUEST 1 /* ARP request */ +#define ARP_OPC_REPLY 2 /* ARP reply */ + +#define ARP_DATA_LEN 28 /* ARP data length */ + +#define HTYPE_ETHERNET 1 /* htype for ethernet */ +BWL_PRE_PACKED_STRUCT struct bcmarp { + uint16 htype; /* Header type (1 = ethernet) */ + uint16 ptype; /* Protocol type (0x800 = IP) */ + uint8 hlen; /* Hardware address length (Eth = 6) */ + uint8 plen; /* Protocol address length (IP = 4) */ + uint16 oper; /* ARP_OPC_... 
*/ + uint8 src_eth[ETHER_ADDR_LEN]; /* Source hardware address */ + uint8 src_ip[IPV4_ADDR_LEN]; /* Source protocol address (not aligned) */ + uint8 dst_eth[ETHER_ADDR_LEN]; /* Destination hardware address */ + uint8 dst_ip[IPV4_ADDR_LEN]; /* Destination protocol address */ +} BWL_POST_PACKED_STRUCT; + +/* Ethernet header + Arp message */ +BWL_PRE_PACKED_STRUCT struct bcmetharp { + struct ether_header eh; + struct bcmarp arp; +} BWL_POST_PACKED_STRUCT; + +/* IPv6 Neighbor Advertisement */ +#define NEIGHBOR_ADVERTISE_SRC_IPV6_OFFSET 8 /* src IPv6 address offset */ +#define NEIGHBOR_ADVERTISE_TYPE_OFFSET 40 /* type offset */ +#define NEIGHBOR_ADVERTISE_CHECKSUM_OFFSET 42 /* check sum offset */ +#define NEIGHBOR_ADVERTISE_FLAGS_OFFSET 44 /* R,S and O flags offset */ +#define NEIGHBOR_ADVERTISE_TGT_IPV6_OFFSET 48 /* target IPv6 address offset */ +#define NEIGHBOR_ADVERTISE_OPTION_OFFSET 64 /* options offset */ +#define NEIGHBOR_ADVERTISE_TYPE 136 +#define NEIGHBOR_SOLICITATION_TYPE 135 + +#define OPT_TYPE_SRC_LINK_ADDR 1 +#define OPT_TYPE_TGT_LINK_ADDR 2 + +#define NEIGHBOR_ADVERTISE_DATA_LEN 72 /* neighbor advertisement data length */ +#define NEIGHBOR_ADVERTISE_FLAGS_VALUE 0x60 /* R=0, S=1 and O=1 */ + +/* This marks the end of a packed structure section. */ +#include + +#endif /* !defined(_bcmarp_h_) */ diff --git a/bcmdhd.100.10.315.x/include/bcmbloom.h b/bcmdhd.100.10.315.x/include/bcmbloom.h index 62e8b01..bb41cdc 100644 --- a/bcmdhd.100.10.315.x/include/bcmbloom.h +++ b/bcmdhd.100.10.315.x/include/bcmbloom.h @@ -1,7 +1,7 @@ /* * Bloom filter support * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcmcdc.h b/bcmdhd.100.10.315.x/include/bcmcdc.h index f642142..8ec17ca 100644 --- a/bcmdhd.100.10.315.x/include/bcmcdc.h +++ b/bcmdhd.100.10.315.x/include/bcmcdc.h @@ -4,7 +4,7 @@ * * Definitions subject to change without notice. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcmdefs.h b/bcmdhd.100.10.315.x/include/bcmdefs.h index a29c46e..c6440d8 100644 --- a/bcmdhd.100.10.315.x/include/bcmdefs.h +++ b/bcmdhd.100.10.315.x/include/bcmdefs.h @@ -1,7 +1,7 @@ /* * Misc system wide definitions * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmdefs.h 700870 2017-05-22 19:05:22Z $ + * $Id: bcmdefs.h 788740 2018-11-13 21:45:01Z $ */ #ifndef _bcmdefs_h_ @@ -51,8 +51,8 @@ * Define these diagnostic macros to help suppress cast-qual warning * until all the work can be done to fix the casting issues. 
*/ -#if defined(__GNUC__) && defined(STRICT_GCC_WARNINGS) && (__GNUC__ > 4 || (__GNUC__ == \ - 4 && __GNUC_MINOR__ >= 6)) +#if (defined(__GNUC__) && defined(STRICT_GCC_WARNINGS) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6))) #define GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST() \ _Pragma("GCC diagnostic push") \ _Pragma("GCC diagnostic ignored \"-Wcast-qual\"") @@ -63,6 +63,21 @@ #define GCC_DIAGNOSTIC_POP() #endif /* Diagnostic macros not defined */ +/* Support clang for MACOSX compiler */ +#ifdef __clang__ +#define CLANG_DIAGNOSTIC_PUSH_SUPPRESS_CAST() \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wcast-qual\"") +#define CLANG_DIAGNOSTIC_PUSH_SUPPRESS_FORMAT() \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wformat-nonliteral\"") +#define CLANG_DIAGNOSTIC_POP() \ + _Pragma("clang diagnostic pop") +#else +#define CLANG_DIAGNOSTIC_PUSH_SUPPRESS_CAST() +#define CLANG_DIAGNOSTIC_PUSH_SUPPRESS_FORMAT() +#define CLANG_DIAGNOSTIC_POP() +#endif // endif /* Compile-time assert can be used in place of ASSERT if the expression evaluates * to a constant at compile time. */ diff --git a/bcmdhd.100.10.315.x/include/bcmdevs.h b/bcmdhd.100.10.315.x/include/bcmdevs.h index 30b2a87..247400d 100644 --- a/bcmdhd.100.10.315.x/include/bcmdevs.h +++ b/bcmdhd.100.10.315.x/include/bcmdevs.h @@ -1,7 +1,7 @@ /* * Broadcom device-specific manifest constants. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmdevs.h 760196 2018-04-30 10:57:59Z $ + * $Id: bcmdevs.h 825481 2019-06-14 10:06:03Z $ */ #ifndef _BCMDEVS_H @@ -279,6 +279,9 @@ #define BCM43751_D11AX_ID 0x4490 /* 43751 802.11ax dualband device */ #define BCM43751_D11AX2G_ID 0x4491 /* 43751 802.11ax 2.4G device */ #define BCM43751_D11AX5G_ID 0x4492 /* 43751 802.11ax 5G device */ +#define BCM43752_D11AX_ID 0x4490 /* 43752 802.11ax dualband device */ +#define BCM43752_D11AX2G_ID 0x4491 /* 43752 802.11ax 2.4G device */ +#define BCM43752_D11AX5G_ID 0x4492 /* 43752 802.11ax 5G device */ #define BCM4364_D11AC_ID 0x4464 /* 4364 802.11ac dualband device */ #define BCM4364_D11AC2G_ID 0x446a /* 4364 802.11ac 2.4G device */ @@ -530,6 +533,7 @@ #define BCM4377_CHIP_ID 0x4377 /* 4377/ chipcommon chipid */ #define BCM4362_CHIP_ID 0x4362 /* 4362 chipcommon chipid */ #define BCM43751_CHIP_ID 0xAAE7 /* 43751 chipcommon chipid */ +#define BCM43752_CHIP_ID 0xAAE8 /* 43752 chipcommon chipid */ #define BCM4347_CHIP(chipid) ((CHIPID(chipid) == BCM4347_CHIP_ID) || \ (CHIPID(chipid) == BCM4357_CHIP_ID) || \ diff --git a/bcmdhd.100.10.315.x/include/bcmdhcp.h b/bcmdhd.100.10.315.x/include/bcmdhcp.h index aad986c..8d97386 100644 --- a/bcmdhd.100.10.315.x/include/bcmdhcp.h +++ b/bcmdhd.100.10.315.x/include/bcmdhcp.h @@ -1,7 +1,7 @@ /* * Fundamental constants relating to DHCP Protocol * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcmendian.h b/bcmdhd.100.10.315.x/include/bcmendian.h index c9a95ae..4ce0595 100644 --- a/bcmdhd.100.10.315.x/include/bcmendian.h +++ b/bcmdhd.100.10.315.x/include/bcmendian.h @@ -1,7 +1,7 @@ /* * Byte order utilities * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmendian.h 633810 2016-04-25 16:46:55Z $ + * $Id: bcmendian.h 788572 2018-11-13 03:52:19Z $ * * This file by default provides proper behavior on little-endian architectures. * On big-endian architectures, IL_BIGENDIAN should be defined. @@ -108,10 +108,10 @@ #define load64_ua(a) ltoh64_ua(a) #define store64_ua(a, v) htol64_ua_store(v, a) -#define _LTOH16_UA(cp) ((cp)[0] | ((cp)[1] << 8)) -#define _LTOH32_UA(cp) ((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24)) -#define _NTOH16_UA(cp) (((cp)[0] << 8) | (cp)[1]) -#define _NTOH32_UA(cp) (((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3]) +#define _LTOH16_UA(cp) (uint16)((cp)[0] | ((cp)[1] << 8)) +#define _LTOH32_UA(cp) (uint32)((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24)) +#define _NTOH16_UA(cp) (uint16)(((cp)[0] << 8) | (cp)[1]) +#define _NTOH32_UA(cp) (uint32)(((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3]) #define _LTOH64_UA(cp) ((uint64)(cp)[0] | ((uint64)(cp)[1] << 8) | \ ((uint64)(cp)[2] << 16) | ((uint64)(cp)[3] << 24) | \ diff --git a/bcmdhd.100.10.315.x/include/bcmeth.h b/bcmdhd.100.10.315.x/include/bcmeth.h index 761ea44..e2189e4 100644 --- a/bcmdhd.100.10.315.x/include/bcmeth.h +++ b/bcmdhd.100.10.315.x/include/bcmeth.h @@ -1,7 +1,7 @@ /* * Broadcom Ethernettype protocol definitions * - * 
Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcmevent.h b/bcmdhd.100.10.315.x/include/bcmevent.h index c26093b..1dbb7b5 100644 --- a/bcmdhd.100.10.315.x/include/bcmevent.h +++ b/bcmdhd.100.10.315.x/include/bcmevent.h @@ -3,7 +3,7 @@ * * Dependencies: bcmeth.h * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -26,7 +26,7 @@ * * <> * - * $Id: bcmevent.h 768756 2018-06-21 05:44:28Z $ + * $Id: bcmevent.h 822151 2019-05-28 18:37:23Z $ * */ @@ -276,27 +276,33 @@ typedef union bcm_event_msg_u { #define WLC_E_INVALID_IE 162 /* Received invalid IE */ #define WLC_E_MODE_SWITCH 163 /* Mode switch event */ #define WLC_E_PKT_FILTER 164 /* Packet filter event */ -#define WLC_E_DMA_TXFLUSH_COMPLETE 165 /* TxFlush done before changing - * tx/rxchain - */ +#define WLC_E_DMA_TXFLUSH_COMPLETE 165 /* TxFlush done before changing tx/rxchain */ #define WLC_E_FBT 166 /* FBT event */ -#define WLC_E_PFN_SCAN_BACKOFF 167 /* PFN SCAN Backoff event */ +#define WLC_E_PFN_SCAN_BACKOFF 167 /* PFN SCAN Backoff event */ #define WLC_E_PFN_BSSID_SCAN_BACKOFF 168 /* PFN BSSID SCAN BAckoff event */ #define WLC_E_AGGR_EVENT 169 /* Aggregated event */ #define WLC_E_TVPM_MITIGATION 171 /* Change in mitigation applied by TVPM */ -#define WLC_E_SCAN_START 172 /* Deprecated */ -#define WLC_E_SCAN 172 /* Scan event */ -#define WLC_E_MBO 173 /* MBO event */ -#define WLC_E_PHY_CAL 174 /* Phy calibration start indication to host */ -#define WLC_E_RPSNOA 175 /* Radio power save start/end indication to host */ -#define WLC_E_ADPS 176 /* ADPS event */ +#define WLC_E_SCAN_START 172 /* Deprecated */ +#define WLC_E_SCAN 172 
/* Scan event */ +#define WLC_E_MBO 173 /* MBO event */ +#define WLC_E_PHY_CAL 174 /* Phy calibration start indication to host */ +#define WLC_E_RPSNOA 175 /* Radio power save start/end indication to host */ +#define WLC_E_ADPS 176 /* ADPS event */ #define WLC_E_SLOTTED_BSS_PEER_OP 177 /* Per peer SCB delete */ -#define WLC_E_HWA 178 /* HWA events */ -#define WLC_E_GTK_KEYROT_NO_CHANSW 179 /* Avoid Chanswitch while GTK key rotation */ +#define WLC_E_HWA 178 /* HWA events */ +#define WLC_E_GTK_KEYROT_NO_CHANSW 179 /* Avoid Chanswitch while GTK key rotation */ #define WLC_E_ONBODY_STATUS_CHANGE 180 /* Indication of onbody status change */ -#define WLC_E_LAST 181 /* highest val + 1 for range checking */ -#if (WLC_E_LAST > 181) -#error "WLC_E_LAST: Invalid value for last event; must be <= 181." +#define WLC_E_BCNRECV_ABORTED 181 /* Fake AP bcnrecv aborted roam event */ +#define WLC_E_PMK_INFO 182 /* PMK,PMKID information event */ +#define WLC_E_BSSTRANS 183 /* BSS Transition request / Response */ +#define WLC_E_WA_LQM 184 /* link quality monitoring */ +#define WLC_E_ACTION_FRAME_OFF_CHAN_DWELL_COMPLETE 185 /* action frame off channel + * dwell time complete + */ +#define WLC_E_WSEC 186 /* wsec keymgmt event */ +#define WLC_E_LAST 187 /* highest val + 1 for range checking */ +#if (WLC_E_LAST > 187) +#error "WLC_E_LAST: Invalid value for last event; must be <= 187." 
#endif /* WLC_E_LAST */ /* define an API for getting the string name of an event */ @@ -335,6 +341,8 @@ void wl_event_to_network_order(wl_event_msg_t * evt); #define WLC_E_STATUS_SLOTTED_PEER_ADD 17 /* Slotted scb for peer addition status */ #define WLC_E_STATUS_SLOTTED_PEER_DEL 18 /* Slotted scb for peer deletion status */ #define WLC_E_STATUS_RXBCN 19 /* Rx Beacon event for FAKEAP feature */ +#define WLC_E_STATUS_RXBCN_ABORT 20 /* Rx Beacon abort event for FAKEAP feature */ +#define WLC_E_STATUS_LOWPOWER_ON_LOWSPAN 21 /* LOWPOWER scan request during LOWSPAN */ #define WLC_E_STATUS_INVALID 0xff /* Invalid status code to init variables. */ /* 4-way handshake event type */ @@ -364,8 +372,6 @@ void wl_event_to_network_order(wl_event_msg_t * evt); #define WLC_E_REASON_INFRA_ROAM 2 #define WLC_E_REASON_INFRA_DISASSOC 3 #define WLC_E_REASON_NO_MODE_CHANGE_NEEDED 4 -#define WLC_E_REASON_AWDL_ENABLE 5 -#define WLC_E_REASON_AWDL_DISABLE 6 /* WLC_E_SDB_TRANSITION event data */ #define WL_MAX_BSSCFG 4 @@ -387,7 +393,6 @@ typedef struct wl_event_sdb_trans { /* reason codes for WLC_E_GTK_KEYROT_NO_CHANSW event */ #define WLC_E_GTKKEYROT_SCANDELAY 0 /* Delay scan while gtk in progress */ -#define WLC_E_GTKKEYROT_SKIPCHANSW_AWDL 1 /* Avoid chansw by awdl while gtk in progress */ #define WLC_E_GTKKEYROT_SKIPCHANSW_P2P 2 /* Avoid chansw by p2p while gtk in progress */ /* roam reason codes */ @@ -408,9 +413,12 @@ typedef struct wl_event_sdb_trans { #define WLC_E_REASON_BSSTRANS_REQ 11 /* roamed due to BSS Transition request by AP */ #define WLC_E_REASON_LOW_RSSI_CU 12 /* roamed due to low RSSI and Channel Usage */ #define WLC_E_REASON_RADAR_DETECTED 13 /* roamed due to radar detection by STA */ -#define WLC_E_REASON_CSA 14 /* roamed due to CSA from AP */ -#define WLC_E_REASON_ESTM_LOW 15 /* roamed due to ESTM low tput */ -#define WLC_E_REASON_LAST 16 /* NOTE: increment this as you add reasons above */ +#define WLC_E_REASON_CSA 14 /* roamed due to CSA from AP */ +#define 
WLC_E_REASON_ESTM_LOW 15 /* roamed due to ESTM low tput */ +#define WLC_E_REASON_SILENT_ROAM 16 /* roamed due to Silent roam */ +#define WLC_E_REASON_INACTIVITY 17 /* full roam scan due to inactivity */ +#define WLC_E_REASON_ROAM_SCAN_TIMEOUT 18 /* roam scan timer timeout */ +#define WLC_E_REASON_LAST 19 /* NOTE: increment this as you add reasons above */ /* prune reason codes */ #define WLC_E_PRUNE_ENCR_MISMATCH 1 /* encryption mismatch */ @@ -439,6 +447,8 @@ typedef struct wl_event_sdb_trans { #define WLC_E_PRUNE_NO_DIAG_SUPPORT 19 /* prune due to diagnostic mode not supported */ #endif /* BCMCCX */ #define WLC_E_PRUNE_AUTH_RESP_MAC 20 /* suppress auth resp by MAC filter */ +#define WLC_E_PRUNE_ASSOC_RETRY_DELAY 21 /* MBO assoc retry delay */ +#define WLC_E_PRUNE_RSSI_ASSOC_REJ 22 /* OCE RSSI-based assoc rejection */ /* WPA failure reason codes carried in the WLC_E_PSK_SUP event */ #define WLC_E_SUP_OTHER 0 /* Other reason */ @@ -462,6 +472,55 @@ typedef struct wl_event_sdb_trans { #define WLC_E_SUP_GTK_UPDATE_FAIL 18 /* GTK update failure */ #define WLC_E_SUP_TK_UPDATE_FAIL 19 /* TK update failure */ #define WLC_E_SUP_KEY_INSTALL_FAIL 20 /* Buffered key install failure */ +#define WLC_E_SUP_PTK_UPDATE 21 /* PTK update */ +#define WLC_E_SUP_MSG1_PMKID_MISMATCH 22 /* MSG1 PMKID not matched to PMKSA cache list */ + +/* event msg for WLC_E_SUP_PTK_UPDATE */ +typedef struct wlc_sup_ptk_update { + uint16 version; /* 0x0001 */ + uint16 length; /* length of data that follows */ + uint32 tsf_low; /* tsf at which ptk updated by internal supplicant */ + uint32 tsf_high; + uint8 key_id; /* always 0 for PTK update */ + uint8 tid; /* tid for the PN below - PTK refresh is per key */ + uint16 pn_low; + uint32 pn_high; /* local highest PN of any tid of the key when M4 was sent */ +} wlc_sup_ptk_update_t; + +/* sub event of WLC_E_WSEC */ +typedef enum { + WLC_WSEC_EVENT_PTK_PN_SYNC_ERROR = 0x01 +} wl_wsec_event_type_t; + +/* sub event msg - WLC_WSEC_EVENT_PTK_PN_SYNC_ERROR */ 
+struct wlc_wsec_ptk_pn_sync_error_v1 { + uint32 tsf_low; /* tsf at which PN sync error happened */ + uint32 tsf_high; + uint8 key_id; /* always 0 for PTK update */ + uint8 tid; /* tid for the PN below - PTK refresh is per key */ + uint16 PAD1; + uint16 rx_seqn; /* d11 seq number */ + uint16 pn_low; + uint32 pn_high; /* local PN window start for the tid */ + uint16 key_idx; /* key idx in the keymgmt */ + uint16 rx_pn_low; + uint32 rx_pn_high; /* Rx PN window start for the tid */ + uint32 span_time; /* time elapsed since replay */ + uint32 span_pkts; /* pkt count since replay */ +}; + +typedef struct wlc_wsec_ptk_pn_sync_error_v1 wlc_wsec_ptk_pn_sync_error_t; + +/* WLC_E_WSEC event msg */ +typedef struct wlc_wsec_event { + uint16 version; /* 0x0001 */ + uint16 length; /* length of data that follows */ + uint16 type; /* wsec_event_type_t */ + uint16 PAD1; + union { + wlc_wsec_ptk_pn_sync_error_t pn_sync_err; + } data; +} wlc_wsec_event_t; /* Ucode reason codes carried in the WLC_E_MACDBG event */ #define WLC_E_MACDBG_LIST_PSM 0 /* Dump list update for PSM registers */ @@ -532,8 +591,8 @@ typedef struct wl_event_data_natoe { #define WLC_E_IF_ROLE_WDS 2 /* WDS link */ #define WLC_E_IF_ROLE_P2P_GO 3 /* P2P Group Owner */ #define WLC_E_IF_ROLE_P2P_CLIENT 4 /* P2P Client */ -#define WLC_E_IF_ROLE_IBSS 8 /* IBSS */ -#define WLC_E_IF_ROLE_NAN 9 /* NAN */ +#define WLC_E_IF_ROLE_IBSS 8 /* IBSS */ +#define WLC_E_IF_ROLE_NAN 9 /* NAN */ /* WLC_E_RSSI event data */ typedef struct wl_event_data_rssi { @@ -542,6 +601,22 @@ typedef struct wl_event_data_rssi { int32 noise; } wl_event_data_rssi_t; +#define WL_EVENT_WA_LQM_VER 0 /* initial version */ + +#define WL_EVENT_WA_LQM_BASIC 0 /* event sub-types */ +typedef struct { /* payload of subevent in xtlv */ + int32 rssi; + int32 snr; + uint32 tx_rate; + uint32 rx_rate; +} wl_event_wa_lqm_basic_t; + +typedef struct wl_event_wa_lqm { + uint16 ver; /* version */ + uint16 len; /* total length structure */ + uint8 subevent[]; /* sub-event 
data in bcm_xtlv_t format */ +} wl_event_wa_lqm_t; + /* WLC_E_IF flag */ #define WLC_E_IF_FLAGS_BSSCFG_NOIF 0x1 /* no host I/F creation needed */ @@ -550,6 +625,7 @@ typedef struct wl_event_data_rssi { #define WLC_E_LINK_DISASSOC 2 /* Link down because of disassoc */ #define WLC_E_LINK_ASSOC_REC 3 /* Link down because assoc recreate failed */ #define WLC_E_LINK_BSSCFG_DIS 4 /* Link down due to bsscfg down */ +#define WLC_E_LINK_ASSOC_FAIL 5 /* Link down because assoc to new AP during roaming failed */ /* WLC_E_NDIS_LINK event data */ typedef BWL_PRE_PACKED_STRUCT struct ndis_link_parms { @@ -794,6 +870,7 @@ typedef enum wl_nan_events { WL_NAN_EVENT_SLOT_END = 43, /* SLOT_END event */ WL_NAN_EVENT_HOST_ASSIST_REQ = 44, /* Requesting host assist */ WL_NAN_EVENT_RX_MGMT_FRM = 45, /* NAN management frame received */ + WL_NAN_EVENT_DISC_CACHE_TIMEOUT = 46, /* Disc cache timeout */ WL_NAN_EVENT_INVALID /* delimiter for max value */ } nan_app_events_e; @@ -988,11 +1065,6 @@ typedef enum { CHANSW_NAN = 18, /* channel switch due to NAN */ CHANSW_NAN_DISC = 19, /* channel switch due to NAN Disc */ CHANSW_NAN_SCHED = 20, /* channel switch due to NAN Sched */ - CHANSW_AWDL_AW = 21, /* channel switch due to AWDL aw */ - CHANSW_AWDL_SYNC = 22, /* channel switch due to AWDL sync */ - CHANSW_AWDL_CAL = 23, /* channel switch due to AWDL Cal */ - CHANSW_AWDL_PSF = 24, /* channel switch due to AWDL PSF */ - CHANSW_AWDL_OOB_AF = 25, /* channel switch due to AWDL OOB action frame */ CHANSW_TDLS = 26, /* channel switch due to TDLS */ CHANSW_PROXD = 27, /* channel switch due to PROXD */ CHANSW_SLOTTED_BSS = 28, /* channel switch due to slotted bss */ @@ -1111,8 +1183,9 @@ typedef struct { typedef enum wl_mbo_event_type { WL_MBO_E_CELLULAR_NW_SWITCH = 1, + WL_MBO_E_BTM_RCVD = 2, /* ADD before this */ - WL_MBO_E_LAST = 2, /* highest val + 1 for range checking */ + WL_MBO_E_LAST = 3 /* highest val + 1 for range checking */ } wl_mbo_event_type_t; /* WLC_E_MBO event structure version */ @@ 
-1144,6 +1217,31 @@ struct wl_event_mbo_cell_nw_switch { uint32 assoc_time_remain; }; +/* WLC_E_MBO_BTM_RCVD event structure version */ +#define WL_BTM_EVENT_DATA_VER_1 1 +/* Specific btm event type data */ +struct wl_btm_event_type_data { + uint16 version; + uint16 len; + uint8 transition_reason; /* transition reason code */ + uint8 pad[3]; /* pad */ +}; + +/* WLC_E_PRUNE event structure version */ +#define WL_BSSID_PRUNE_EVT_VER_1 1 +/* MBO-OCE params */ +struct wl_bssid_prune_evt_info { + uint16 version; + uint16 len; + uint8 SSID[32]; + uint32 time_remaining; /* Time remaining */ + struct ether_addr BSSID; + uint8 SSID_len; + uint8 reason; /* Reason code */ + int8 rssi_threshold; /* RSSI threshold */ + uint8 pad[3]; /* pad */ +}; + /* WLC_E_HWA Event structure */ typedef struct wl_event_hwa { uint16 version; /* structure version */ @@ -1185,4 +1283,7 @@ typedef struct wl_event_adps { } wl_event_adps_v1_t; typedef wl_event_adps_v1_t wl_event_adps_t; + +#define WLC_USER_E_KEY_UPDATE 1 /* Key add/remove */ + #endif /* _BCMEVENT_H_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmicmp.h b/bcmdhd.100.10.315.x/include/bcmicmp.h new file mode 100644 index 0000000..3731903 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmicmp.h @@ -0,0 +1,89 @@ +/* + * Fundamental constants relating to ICMP Protocol + * + * Copyright (C) 1999-2019, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmicmp.h 700076 2017-05-17 14:42:22Z $ + */ + +#ifndef _bcmicmp_h_ +#define _bcmicmp_h_ + +#ifndef _TYPEDEFS_H_ +#include +#endif // endif + +/* This marks the start of a packed structure section. 
*/ +#include + +#define ICMP_TYPE_ECHO_REQUEST 8 /* ICMP type echo request */ +#define ICMP_TYPE_ECHO_REPLY 0 /* ICMP type echo reply */ + +#define ICMP_CHKSUM_OFFSET 2 /* ICMP body checksum offset */ + +/* ICMP6 error and control message types */ +#define ICMP6_DEST_UNREACHABLE 1 +#define ICMP6_PKT_TOO_BIG 2 +#define ICMP6_TIME_EXCEEDED 3 +#define ICMP6_PARAM_PROBLEM 4 +#define ICMP6_ECHO_REQUEST 128 +#define ICMP6_ECHO_REPLY 129 +#define ICMP_MCAST_LISTENER_QUERY 130 +#define ICMP_MCAST_LISTENER_REPORT 131 +#define ICMP_MCAST_LISTENER_DONE 132 +#define ICMP6_RTR_SOLICITATION 133 +#define ICMP6_RTR_ADVERTISEMENT 134 +#define ICMP6_NEIGH_SOLICITATION 135 +#define ICMP6_NEIGH_ADVERTISEMENT 136 +#define ICMP6_REDIRECT 137 + +#define ICMP6_RTRSOL_OPT_OFFSET 8 +#define ICMP6_RTRADV_OPT_OFFSET 16 +#define ICMP6_NEIGHSOL_OPT_OFFSET 24 +#define ICMP6_NEIGHADV_OPT_OFFSET 24 +#define ICMP6_REDIRECT_OPT_OFFSET 40 + +BWL_PRE_PACKED_STRUCT struct icmp6_opt { + uint8 type; /* Option identifier */ + uint8 length; /* Lenth including type and length */ + uint8 data[0]; /* Variable length data */ +} BWL_POST_PACKED_STRUCT; + +#define ICMP6_OPT_TYPE_SRC_LINK_LAYER 1 +#define ICMP6_OPT_TYPE_TGT_LINK_LAYER 2 +#define ICMP6_OPT_TYPE_PREFIX_INFO 3 +#define ICMP6_OPT_TYPE_REDIR_HDR 4 +#define ICMP6_OPT_TYPE_MTU 5 + +/* These fields are stored in network order */ +BWL_PRE_PACKED_STRUCT struct bcmicmp_hdr { + uint8 type; /* Echo or Echo-reply */ + uint8 code; /* Always 0 */ + uint16 chksum; /* Icmp packet checksum */ +} BWL_POST_PACKED_STRUCT; + +/* This marks the end of a packed structure section. */ +#include + +#endif /* #ifndef _bcmicmp_h_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmiov.h b/bcmdhd.100.10.315.x/include/bcmiov.h index 585914d..28e01fe 100644 --- a/bcmdhd.100.10.315.x/include/bcmiov.h +++ b/bcmdhd.100.10.315.x/include/bcmiov.h @@ -4,7 +4,7 @@ * To be used in firmware and host apps or dhd - reducing code size, * duplication, and maintenance overhead. 
* - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcmip.h b/bcmdhd.100.10.315.x/include/bcmip.h index 9415488..b5940af 100644 --- a/bcmdhd.100.10.315.x/include/bcmip.h +++ b/bcmdhd.100.10.315.x/include/bcmip.h @@ -1,7 +1,7 @@ /* * Fundamental constants relating to IP Protocol * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmip.h 705929 2017-06-20 00:06:46Z $ + * $Id: bcmip.h 785436 2018-10-18 17:54:25Z $ */ #ifndef _bcmip_h_ @@ -212,11 +212,11 @@ ipv6_exthdr_len(uint8 *h, uint8 *proto) if (eh->nexthdr == IPV6_EXTHDR_NONE) return -1; else if (eh->nexthdr == IPV6_EXTHDR_FRAGMENT) - hlen = 8; + hlen = 8U; else if (eh->nexthdr == IPV6_EXTHDR_AUTH) - hlen = (eh->hdrlen + 2) << 2; + hlen = (uint16)((eh->hdrlen + 2U) << 2U); else - hlen = IPV6_EXTHDR_LEN(eh); + hlen = (uint16)IPV6_EXTHDR_LEN(eh); len += hlen; eh = (struct ipv6_exthdr *)(h + len); diff --git a/bcmdhd.100.10.315.x/include/bcmipv6.h b/bcmdhd.100.10.315.x/include/bcmipv6.h index c6f1f05..4955d21 100644 --- a/bcmdhd.100.10.315.x/include/bcmipv6.h +++ b/bcmdhd.100.10.315.x/include/bcmipv6.h @@ -1,7 +1,7 @@ /* * Fundamental constants relating to Neighbor Discovery Protocol * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcmmsgbuf.h b/bcmdhd.100.10.315.x/include/bcmmsgbuf.h index d9ff587..6c9eaa0 100644 --- a/bcmdhd.100.10.315.x/include/bcmmsgbuf.h +++ b/bcmdhd.100.10.315.x/include/bcmmsgbuf.h @@ -4,7 +4,7 @@ * * Definitions subject to change without notice. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -27,7 +27,7 @@ * * <> * - * $Id: bcmmsgbuf.h 764293 2018-05-24 17:44:43Z $ + * $Id: bcmmsgbuf.h 814986 2019-04-15 21:18:21Z $ */ #ifndef _bcmmsgbuf_h_ #define _bcmmsgbuf_h_ @@ -69,6 +69,7 @@ #define H2DRING_DYNAMIC_INFO_MAX_ITEM 32 #define D2HRING_DYNAMIC_INFO_MAX_ITEM 32 +#define D2HRING_EDL_HDR_SIZE 48u #define D2HRING_EDL_ITEMSIZE 2048u #define D2HRING_EDL_MAX_ITEM 256u #define D2HRING_EDL_WATERMARK (D2HRING_EDL_MAX_ITEM >> 5u) @@ -433,6 +434,7 @@ typedef struct pcie_dma_xfer_params { uint8 flags; } pcie_dma_xfer_params_t; +#define BCMPCIE_FLOW_RING_INTF_HP2P 0x1 /** Complete msgbuf hdr for flow ring update from host to dongle */ typedef struct tx_flowring_create_request { cmn_msg_hdr_t msg; @@ -580,8 +582,15 @@ typedef struct info_ring_submit_item { /** Control Completion messages (20 bytes) */ typedef struct compl_msg_hdr { - /** status for the completion */ - int16 status; + union { + /** status for the completion */ + int16 status; + + /* mutually exclusive with pkt fate debug feature */ + struct pktts_compl_hdr { + uint16 d_t4; /* Delta TimeStamp 3: T4-tref */ + } tx_pktts; + }; /** submisison flow ring id which generated this status */ union { uint16 ring_id; @@ -633,6 +642,12 @@ typedef ts_timestamp_t ts_timestamp_ns_64_t; typedef ts_timestamp_t ts_correction_m_t; typedef ts_timestamp_t 
ts_correction_b_t; +typedef struct _pktts { + uint32 tref; /* Ref Clk in uSec (currently, tsf) */ + uint16 d_t2; /* Delta TimeStamp 1: T2-tref */ + uint16 d_t3; /* Delta TimeStamp 2: T3-tref */ +} pktts_t; + /* completion header status codes */ #define BCMPCIE_SUCCESS 0 #define BCMPCIE_NOTFOUND 1 @@ -924,11 +939,23 @@ typedef struct host_rxbuf_cmpl { /** rx status */ uint32 rx_status_0; uint32 rx_status_1; - /** XOR checksum or a magic number to audit DMA done */ - /* This is for rev6 only. For IPC rev7, this is a reserved field */ - dma_done_t marker; - /* timestamp */ - ipc_timestamp_t ts; + + union { /* size per IPC = (3 x uint32) bytes */ + struct { + /* used by Monitor mode */ + uint32 marker; + /* timestamp */ + ipc_timestamp_t ts; + }; + + /* LatTS_With_XORCSUM */ + struct { + /* latency timestamp */ + pktts_t rx_pktts; + /* XOR checksum or a magic number to audit DMA done */ + dma_done_t marker_ext; + }; + }; } host_rxbuf_cmpl_t; typedef union rxbuf_complete_item { @@ -958,11 +985,11 @@ typedef struct host_txbuf_post { struct { /** extended transmit flags */ uint8 ext_flags; - uint8 rsvd1; + uint8 scale_factor; /** user defined rate */ uint8 rate; - uint8 rsvd2; + uint8 exp_time; }; /** XOR checksum or a magic number to audit DMA done */ dma_done_t marker; @@ -980,6 +1007,9 @@ typedef struct host_txbuf_post { #define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_MASK 0x03 /* Exempt uses 2 bits */ #define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_SHIFT 0x02 /* needs to be shifted past other bits */ +#define BCMPCIE_PKT_FLAGS_EPOCH_SHIFT 3u +#define BCMPCIE_PKT_FLAGS_EPOCH_MASK (1u << BCMPCIE_PKT_FLAGS_EPOCH_SHIFT) + #define BCMPCIE_PKT_FLAGS_PRIO_SHIFT 5 #define BCMPCIE_PKT_FLAGS_PRIO_MASK (7 << BCMPCIE_PKT_FLAGS_PRIO_SHIFT) #define BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU 0x00 @@ -1007,23 +1037,33 @@ typedef struct host_txbuf_cmpl { cmn_msg_hdr_t cmn_hdr; /** completion message header */ compl_msg_hdr_t compl_hdr; - union { + + union { /* size per IPC = (3 x uint32) bytes */ + /* Usage 1: 
TxS_With_TimeSync */ + struct { + struct { + union { + /** provided meta data len */ + uint16 metadata_len; + /** provided extended TX status */ + uint16 tx_status_ext; + }; /*Ext_TxStatus */ + + /** WLAN side txstatus */ + uint16 tx_status; + }; /* TxS */ + /* timestamp */ + ipc_timestamp_t ts; + }; /* TxS_with_TS */ + + /* Usage 2: LatTS_With_XORCSUM */ struct { - union { - /** provided meta data len */ - uint16 metadata_len; - /** provided extended TX status */ - uint16 tx_status_ext; - }; - /** WLAN side txstatus */ - uint16 tx_status; + /* latency timestamp */ + pktts_t tx_pktts; + /* XOR checksum or a magic number to audit DMA done */ + dma_done_t marker_ext; }; - /** XOR checksum or a magic number to audit DMA done */ - /* This is for rev6 only. For IPC rev7, this is not used */ - dma_done_t marker; }; - /* timestamp */ - ipc_timestamp_t ts; } host_txbuf_cmpl_t; @@ -1032,6 +1072,20 @@ typedef union txbuf_complete_item { unsigned char check[D2HRING_TXCMPLT_ITEMSIZE]; } txbuf_complete_item_t; +#define PCIE_METADATA_VER 1u + +/* version and length are not part of this structure. + * dhd queries version and length through bus iovar "bus:metadata_info". + */ +struct metadata_txcmpl_v1 { + uint32 tref; /* TSF or Ref Clock in uSecs */ + uint16 d_t2; /* T2-fwt1 delta */ + uint16 d_t3; /* T3-fwt1 delta */ + uint16 d_t4; /* T4-fwt1 delta */ + uint16 rsvd; /* reserved */ +}; +typedef struct metadata_txcmpl_v1 metadata_txcmpl_t; + #define BCMPCIE_D2H_METADATA_HDRLEN 4 #define BCMPCIE_D2H_METADATA_MINLEN (BCMPCIE_D2H_METADATA_HDRLEN + 4) diff --git a/bcmdhd.100.10.315.x/include/bcmnvram.h b/bcmdhd.100.10.315.x/include/bcmnvram.h index 5289244..6b5a75f 100644 --- a/bcmdhd.100.10.315.x/include/bcmnvram.h +++ b/bcmdhd.100.10.315.x/include/bcmnvram.h @@ -1,7 +1,7 @@ /* * NVRAM variable manipulation * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcmpcie.h b/bcmdhd.100.10.315.x/include/bcmpcie.h index fdc1d6e..6fed0a6 100644 --- a/bcmdhd.100.10.315.x/include/bcmpcie.h +++ b/bcmdhd.100.10.315.x/include/bcmpcie.h @@ -3,7 +3,7 @@ * Software-specific definitions shared between device and host side * Explains the shared area between host and dongle * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -26,7 +26,7 @@ * * <> * - * $Id: bcmpcie.h 765083 2018-05-31 02:45:13Z $ + * $Id: bcmpcie.h 821465 2019-05-23 19:50:00Z $ */ #ifndef _bcmpcie_h_ @@ -81,13 +81,6 @@ typedef struct { #define PCIE_SHARED_IDLE_FLOW_RING 0x80000 #define PCIE_SHARED_2BYTE_INDICES 0x100000 -#define PCIE_SHARED2_EXTENDED_TRAP_DATA 0x00000001 /* using flags2 in shared area */ -#define PCIE_SHARED2_TXSTATUS_METADATA 0x00000002 -#define PCIE_SHARED2_BT_LOGGING 0x00000004 /* BT logging support */ -#define PCIE_SHARED2_SNAPSHOT_UPLOAD 0x00000008 /* BT/WLAN snapshot upload support */ -#define PCIE_SHARED2_SUBMIT_COUNT_WAR 0x00000010 /* submission count WAR */ -#define PCIE_SHARED2_FW_SMALL_MEMDUMP 0x00000200 /* FW small memdump */ -#define PCIE_SHARED2_DEBUG_BUF_DEST 0x00002000 /* debug buf dest support */ #define PCIE_SHARED_FAST_DELETE_RING 0x00000020 /* Fast Delete Ring */ #define PCIE_SHARED_EVENT_BUF_POOL_MAX 0x000000c0 /* event buffer pool max bits */ #define PCIE_SHARED_EVENT_BUF_POOL_MAX_POS 6 /* event buffer pool max bit position */ @@ -135,35 +128,39 @@ typedef struct { * field got added and the definition for these flags come here: */ /* WAR: D11 txstatus through unused status field of PCIe completion header */ -#define PCIE_SHARED2_D2H_D11_TX_STATUS 0x40000000 -#define 
PCIE_SHARED2_H2D_D11_TX_STATUS 0x80000000 +#define PCIE_SHARED2_EXTENDED_TRAP_DATA 0x00000001 /* using flags2 in shared area */ +#define PCIE_SHARED2_TXSTATUS_METADATA 0x00000002 +#define PCIE_SHARED2_BT_LOGGING 0x00000004 /* BT logging support */ +#define PCIE_SHARED2_SNAPSHOT_UPLOAD 0x00000008 /* BT/WLAN snapshot upload support */ +#define PCIE_SHARED2_SUBMIT_COUNT_WAR 0x00000010 /* submission count WAR */ +#define PCIE_SHARED2_FAST_DELETE_RING 0x00000020 /* Fast Delete ring support */ +#define PCIE_SHARED2_EVTBUF_MAX_MASK 0x000000C0 /* 0:32, 1:64, 2:128, 3: 256 */ -#define PCIE_SHARED2_EXTENDED_TRAP_DATA 0x00000001 +/* using flags2 to indicate firmware support added to reuse timesync to update PKT txstatus */ +#define PCIE_SHARED2_PKT_TX_STATUS 0x00000100 +#define PCIE_SHARED2_FW_SMALL_MEMDUMP 0x00000200 /* FW small memdump */ +#define PCIE_SHARED2_FW_HC_ON_TRAP 0x00000400 +#define PCIE_SHARED2_HSCB 0x00000800 /* Host SCB support */ -#define PCIE_SHARED2_TXSTATUS_METADATA 0x00000002 +#define PCIE_SHARED2_EDL_RING 0x00001000 /* Support Enhanced Debug Lane */ +#define PCIE_SHARED2_DEBUG_BUF_DEST 0x00002000 /* debug buf dest support */ +#define PCIE_SHARED2_PCIE_ENUM_RESET_FLR 0x00004000 /* BT producer index reset WAR */ +#define PCIE_SHARED2_PKT_TIMESTAMP 0x00008000 /* Timestamp in packet */ -/* BT logging support */ -#define PCIE_SHARED2_BT_LOGGING 0x00000004 -/* BT/WLAN snapshot upload support */ -#define PCIE_SHARED2_SNAPSHOT_UPLOAD 0x00000008 -/* submission count WAR */ -#define PCIE_SHARED2_SUBMIT_COUNT_WAR 0x00000010 +#define PCIE_SHARED2_HP2P 0x00010000u /* HP2P feature */ +#define PCIE_SHARED2_HWA 0x00020000u /* HWA feature */ +#define PCIE_SHARED2_TRAP_ON_HOST_DB7 0x00040000u /* can take a trap on DB7 from host */ -/* Fast Delete ring support */ -#define PCIE_SHARED2_FAST_DELETE_RING 0x00000020 +#define PCIE_SHARED2_DURATION_SCALE 0x00100000u -/* Host SCB support */ -#define PCIE_SHARED2_HSCB 0x00000800 +#define PCIE_SHARED2_D2H_D11_TX_STATUS 0x40000000 
+#define PCIE_SHARED2_H2D_D11_TX_STATUS 0x80000000 #define PCIE_SHARED_D2H_MAGIC 0xFEDCBA09 #define PCIE_SHARED_H2D_MAGIC 0x12345678 -#define PCIE_SHARED2_PKT_TX_STATUS 0x00000100 /* using flags2 to indicate - firmware support added to reuse - timesync to update PKT txstatus - */ -/* Support Enhanced Debug Lane */ -#define PCIE_SHARED2_EDL_RING 0x00001000 +typedef uint16 pcie_hwa_db_index_t; /* 16 bit HWA index (IPC Rev 7) */ +#define PCIE_HWA_DB_INDEX_SZ (2u) /* 2 bytes sizeof(pcie_hwa_db_index_t) */ /** * Message rings convey messages between host and device. They are unidirectional, and are located @@ -198,8 +195,10 @@ typedef struct { #define BCMPCIE_D2H_RING_TYPE_RX_CPL 0x3 #define BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL 0x4 #define BCMPCIE_D2H_RING_TYPE_AC_RX_COMPLETE 0x5 -#define BCMPCIE_D2H_RING_TYPE_BTLOG_CPL 0x6 +#define BCMPCIE_D2H_RING_TYPE_BTLOG_CPL 0x6 #define BCMPCIE_D2H_RING_TYPE_EDL 0x7 +#define BCMPCIE_D2H_RING_TYPE_HPP_TX_CPL 0x8 +#define BCMPCIE_D2H_RING_TYPE_HPP_RX_CPL 0x9 /** * H2D and D2H, WR and RD index, are maintained in the following arrays: @@ -317,6 +316,11 @@ typedef struct ring_info { uint16 max_vdevs; /* max number of virtual interfaces supported */ sh_addr_t ifrm_w_idx_hostaddr; /* Array of all H2D ring's WR indices for IFRM */ + + /* 32bit ptr to arrays of HWA DB indices for all rings in dongle memory */ + uint32 h2d_hwa_db_idx_ptr; /* Array of all H2D ring's HWA DB indices */ + uint32 d2h_hwa_db_idx_ptr; /* Array of all D2H ring's HWA DB indices */ + } ring_info_t; /** @@ -385,6 +389,9 @@ typedef struct { /* location in host memory for offloaded modules */ sh_addr_t hoffload_addr; + uint32 flags3; + uint32 host_cap2; + uint32 host_cap3; } pciedev_shared_t; /* Device F/W provides the following access function: @@ -414,6 +421,10 @@ typedef struct { #define HOSTCAP_EXT_TRAP_DBGBUF 0x04000000 /* Host support for enhanced debug lane */ #define HOSTCAP_EDL_RING 0x10000000 +#define HOSTCAP_PKT_TIMESTAMP 0x20000000 +#define HOSTCAP_PKT_HP2P 
0x40000000 +#define HOSTCAP_HWA 0x80000000 +#define HOSTCAP2_DURATION_SCALE_MASK 0x0000003Fu /* extended trap debug buffer allocation sizes. Note that this buffer can be used for * other trap related purposes also. @@ -450,31 +461,34 @@ typedef struct { /* D2H mail box Data */ #define D2H_DEV_D3_ACK 0x00000001 -#define D2H_DEV_DS_ENTER_REQ 0x00000002 -#define D2H_DEV_DS_EXIT_NOTE 0x00000004 -#define D2HMB_DS_HOST_SLEEP_EXIT_ACK 0x00000008 +#define D2H_DEV_DS_ENTER_REQ 0x00000002 +#define D2H_DEV_DS_EXIT_NOTE 0x00000004 +#define D2HMB_DS_HOST_SLEEP_EXIT_ACK 0x00000008 #define D2H_DEV_IDMA_INITED 0x00000010 +#define D2HMB_DS_HOST_SLEEP_ACK D2H_DEV_D3_ACK +#define D2HMB_DS_DEVICE_SLEEP_ENTER_REQ D2H_DEV_DS_ENTER_REQ +#define D2HMB_DS_DEVICE_SLEEP_EXIT D2H_DEV_DS_EXIT_NOTE + +#define D2H_DEV_MB_MASK (D2H_DEV_D3_ACK | D2H_DEV_DS_ENTER_REQ | \ + D2H_DEV_DS_EXIT_NOTE | D2H_DEV_IDMA_INITED) +#define D2H_DEV_MB_INVALIDATED(x) ((!x) || (x & ~D2H_DEV_MB_MASK)) + +/* trap data codes */ #define D2H_DEV_FWHALT 0x10000000 -#define D2H_DEV_TRAP_PING_HOST_FAILURE 0x08000000 -#define D2H_DEV_EXT_TRAP_DATA 0x20000000 -#define D2H_DEV_TRAP_IN_TRAP 0x40000000 -#define D2H_DEV_TRAP_DUE_TO_BT 0x01000000 +#define D2H_DEV_EXT_TRAP_DATA 0x20000000 +#define D2H_DEV_TRAP_IN_TRAP 0x40000000 +#define D2H_DEV_TRAP_HOSTDB 0x80000000 /* trap as set by host DB */ +#define D2H_DEV_TRAP_DUE_TO_BT 0x01000000 /* Indicates trap due to HMAP violation */ -#define D2H_DEV_TRAP_DUE_TO_HMAP 0x02000000 +#define D2H_DEV_TRAP_DUE_TO_HMAP 0x02000000 /* Indicates whether HMAP violation was Write */ -#define D2H_DEV_TRAP_HMAP_WRITE 0x04000000 +#define D2H_DEV_TRAP_HMAP_WRITE 0x04000000 +#define D2H_DEV_TRAP_PING_HOST_FAILURE 0x08000000 +#define D2H_FWTRAP_MASK 0x0000001F /* Adding maskbits for TRAP information */ -#define D2HMB_DS_HOST_SLEEP_ACK D2H_DEV_D3_ACK -#define D2HMB_DS_DEVICE_SLEEP_ENTER_REQ D2H_DEV_DS_ENTER_REQ -#define D2HMB_DS_DEVICE_SLEEP_EXIT D2H_DEV_DS_EXIT_NOTE #define D2HMB_FWHALT D2H_DEV_FWHALT 
#define D2HMB_TRAP_IN_TRAP D2H_DEV_TRAP_IN_TRAP #define D2HMB_EXT_TRAP_DATA D2H_DEV_EXT_TRAP_DATA -#define D2H_FWTRAP_MASK 0x0000001F /* Adding maskbits for TRAP information */ -#define D2H_DEV_MB_MASK (D2H_DEV_D3_ACK | D2H_DEV_DS_ENTER_REQ | \ - D2H_DEV_DS_EXIT_NOTE | D2H_DEV_IDMA_INITED | D2H_DEV_FWHALT | \ - D2H_FWTRAP_MASK | D2H_DEV_EXT_TRAP_DATA | D2H_DEV_TRAP_IN_TRAP) -#define D2H_DEV_MB_INVALIDATED(x) ((!x) || (x & ~D2H_DEV_MB_MASK)) /* Size of Extended Trap data Buffer */ #define BCMPCIE_EXT_TRAP_DATA_MAXLEN 4096 diff --git a/bcmdhd.100.10.315.x/include/bcmpcispi.h b/bcmdhd.100.10.315.x/include/bcmpcispi.h index 3ca211b..8a8da07 100644 --- a/bcmdhd.100.10.315.x/include/bcmpcispi.h +++ b/bcmdhd.100.10.315.x/include/bcmpcispi.h @@ -1,7 +1,7 @@ /* * Broadcom PCI-SPI Host Controller Register Definitions * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcmperf.h b/bcmdhd.100.10.315.x/include/bcmperf.h index 93cbdd3..4bd807d 100644 --- a/bcmdhd.100.10.315.x/include/bcmperf.h +++ b/bcmdhd.100.10.315.x/include/bcmperf.h @@ -1,7 +1,7 @@ /* * Performance counters software interface. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcmsdbus.h b/bcmdhd.100.10.315.x/include/bcmsdbus.h index ba857cc..d85f144 100644 --- a/bcmdhd.100.10.315.x/include/bcmsdbus.h +++ b/bcmdhd.100.10.315.x/include/bcmsdbus.h @@ -2,7 +2,7 @@ * Definitions for API from sdio common code (bcmsdh) to individual * host controller drivers. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcmsdh.h b/bcmdhd.100.10.315.x/include/bcmsdh.h index 0d74a65..9be228d 100644 --- a/bcmdhd.100.10.315.x/include/bcmsdh.h +++ b/bcmdhd.100.10.315.x/include/bcmsdh.h @@ -3,7 +3,7 @@ * export functions to client drivers * abstract OS and BUS specific details of SDIO * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcmsdh_sdmmc.h b/bcmdhd.100.10.315.x/include/bcmsdh_sdmmc.h index 4265bc6..51e6f89 100644 --- a/bcmdhd.100.10.315.x/include/bcmsdh_sdmmc.h +++ b/bcmdhd.100.10.315.x/include/bcmsdh_sdmmc.h @@ -1,7 +1,7 @@ /* * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcmsdpcm.h b/bcmdhd.100.10.315.x/include/bcmsdpcm.h index c5665f1..855dd68 100644 --- a/bcmdhd.100.10.315.x/include/bcmsdpcm.h +++ b/bcmdhd.100.10.315.x/include/bcmsdpcm.h @@ -2,7 +2,7 @@ * Broadcom SDIO/PCMCIA * Software-specific definitions shared between device and host side * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -284,6 +284,7 @@ typedef volatile struct { #define SDPCM_SHARED_SET_BRPT 0x1000 #define SDPCM_SHARED_PENDING_BRPT 0x2000 #define SDPCM_SHARED_FATAL_LOGBUF_VALID 0x100000 +#define SDPCM_SHARED_RXLIM_POST 0x4000 typedef struct { uint32 flags; diff --git a/bcmdhd.100.10.315.x/include/bcmsdspi.h b/bcmdhd.100.10.315.x/include/bcmsdspi.h index 338066d..1c82867 100644 --- a/bcmdhd.100.10.315.x/include/bcmsdspi.h +++ b/bcmdhd.100.10.315.x/include/bcmsdspi.h @@ -1,7 +1,7 @@ /* * SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcmsdstd.h b/bcmdhd.100.10.315.x/include/bcmsdstd.h index ab79205..8e2dc95 100644 --- a/bcmdhd.100.10.315.x/include/bcmsdstd.h +++ b/bcmdhd.100.10.315.x/include/bcmsdstd.h @@ -1,7 +1,7 @@ /* * 'Standard' SDIO HOST CONTROLLER driver * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcmspi.h b/bcmdhd.100.10.315.x/include/bcmspi.h index 92a7da1..737e479 100644 --- a/bcmdhd.100.10.315.x/include/bcmspi.h +++ b/bcmdhd.100.10.315.x/include/bcmspi.h @@ -1,7 +1,7 @@ /* * Broadcom SPI Low-Level Hardware Driver API * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcmspibrcm.h b/bcmdhd.100.10.315.x/include/bcmspibrcm.h index fe3a0d7..6ce5c88 100644 --- a/bcmdhd.100.10.315.x/include/bcmspibrcm.h +++ b/bcmdhd.100.10.315.x/include/bcmspibrcm.h @@ -1,7 +1,7 @@ /* * SD-SPI Protocol Conversion - BCMSDH->gSPI Translation Layer * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcmsrom_fmt.h b/bcmdhd.100.10.315.x/include/bcmsrom_fmt.h index 2bb66ab..0fcf97a 100644 --- a/bcmdhd.100.10.315.x/include/bcmsrom_fmt.h +++ b/bcmdhd.100.10.315.x/include/bcmsrom_fmt.h @@ -1,7 +1,7 @@ /* * SROM format definition. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcmsrom_tbl.h b/bcmdhd.100.10.315.x/include/bcmsrom_tbl.h index 30e47e8..d2294d1 100644 --- a/bcmdhd.100.10.315.x/include/bcmsrom_tbl.h +++ b/bcmdhd.100.10.315.x/include/bcmsrom_tbl.h @@ -1,7 +1,7 @@ /* * Table that encodes the srom formats for PCI/PCIe NICs. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcmstdlib_s.h b/bcmdhd.100.10.315.x/include/bcmstdlib_s.h index 4c38259..a364f6b 100644 --- a/bcmdhd.100.10.315.x/include/bcmstdlib_s.h +++ b/bcmdhd.100.10.315.x/include/bcmstdlib_s.h @@ -1,7 +1,7 @@ /* * Broadcom Secure Standard Library. 
* - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -30,6 +30,7 @@ #ifndef _bcmstdlib_s_h_ #define _bcmstdlib_s_h_ +#ifndef BWL_NO_INTERNAL_STDLIB_SUPPORT #if !defined(__STDC_WANT_SECURE_LIB__) && !(defined(__STDC_LIB_EXT1__) && \ defined(__STDC_WANT_LIB_EXT1__)) extern int memmove_s(void *dest, size_t destsz, const void *src, size_t n); @@ -40,5 +41,5 @@ extern int memset_s(void *dest, size_t destsz, int c, size_t n); extern size_t strlcpy(char *dest, const char *src, size_t size); #endif // endif extern size_t strlcat_s(char *dest, const char *src, size_t size); - +#endif /* !BWL_NO_INTERNAL_STDLIB_SUPPORT */ #endif /* _bcmstdlib_s_h_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmtcp.h b/bcmdhd.100.10.315.x/include/bcmtcp.h index dd8b9c9..22d319a 100644 --- a/bcmdhd.100.10.315.x/include/bcmtcp.h +++ b/bcmdhd.100.10.315.x/include/bcmtcp.h @@ -1,7 +1,7 @@ /* * Fundamental constants relating to TCP Protocol * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcmtlv.h b/bcmdhd.100.10.315.x/include/bcmtlv.h index 0218765..421df10 100644 --- a/bcmdhd.100.10.315.x/include/bcmtlv.h +++ b/bcmdhd.100.10.315.x/include/bcmtlv.h @@ -1,7 +1,7 @@ /* * TLV and XTLV support * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -85,6 +85,12 @@ typedef struct bcm_tlv_ext { /* find the next tlv */ bcm_tlv_t *bcm_next_tlv(const bcm_tlv_t *elt, uint *buflen); +/* move buffer/buflen up to the given tlv, or set to NULL/0 on error */ +void bcm_tlv_buffer_advance_to(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen); + +/* move buffer/buflen past the given tlv, or set to NULL/0 on error */ +void bcm_tlv_buffer_advance_past(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen); + /* find the tlv for a given id */ bcm_tlv_t *bcm_parse_tlvs(const void *buf, uint buflen, uint key); @@ -139,6 +145,8 @@ typedef struct bcm_xtlv { #define BCM_XTLV_OPTION_ALIGN32 0x0001 /* 32bit alignment of type.len.data */ #define BCM_XTLV_OPTION_IDU8 0x0002 /* shorter id */ #define BCM_XTLV_OPTION_LENU8 0x0004 /* shorted length */ +#define BCM_XTLV_OPTION_IDBE 0x0008 /* big endian format id */ +#define BCM_XTLV_OPTION_LENBE 0x0010 /* big endian format length */ typedef uint16 bcm_xtlv_opts_t; /* header size. depends on options. Macros names ending w/ _EX are where diff --git a/bcmdhd.100.10.315.x/include/bcmudp.h b/bcmdhd.100.10.315.x/include/bcmudp.h index dc0d488..0da9775 100644 --- a/bcmdhd.100.10.315.x/include/bcmudp.h +++ b/bcmdhd.100.10.315.x/include/bcmudp.h @@ -1,7 +1,7 @@ /* * Fundamental constants relating to UDP Protocol * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/bcmutils.h b/bcmdhd.100.10.315.x/include/bcmutils.h index e750f25..443f7bf 100644 --- a/bcmdhd.100.10.315.x/include/bcmutils.h +++ b/bcmdhd.100.10.315.x/include/bcmutils.h @@ -1,7 +1,7 @@ /* * Misc useful os-independent macros and functions. 
* - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmutils.h 769659 2018-06-27 05:22:10Z $ + * $Id: bcmutils.h 813798 2019-04-08 10:20:21Z $ */ #ifndef _bcmutils_h_ @@ -165,12 +165,22 @@ extern void pktset8021xprio(void *pkt, int prio); #define DSCP_AF21 0x12 #define DSCP_AF22 0x14 #define DSCP_AF23 0x16 +/* CS2: OAM (RFC2474) */ +#define DSCP_CS2 0x10 /* AF3x: Multimedia Streaming (RFC2597) */ #define DSCP_AF31 0x1A #define DSCP_AF32 0x1C #define DSCP_AF33 0x1E +/* CS3: Broadcast Video (RFC2474) */ +#define DSCP_CS3 0x18 +/* VA: VOCIE-ADMIT (RFC5865) */ +#define DSCP_VA 0x2C /* EF: Telephony (RFC3246) */ #define DSCP_EF 0x2E +/* CS6: Network Control (RFC2474) */ +#define DSCP_CS6 0x30 +/* CS7: Network Control (RFC2474) */ +#define DSCP_CS7 0x38 extern uint pktsetprio(void *pkt, bool update_vtag); extern uint pktsetprio_qms(void *pkt, uint8* up_table, bool update_vtag); @@ -580,7 +590,7 @@ int bcmstrnicmp(const char* s1, const char* s2, int cnt); #define SIZE_OF(type, field) sizeof(((type *)0)->field) #ifndef ARRAYSIZE -#define ARRAYSIZE(a) (sizeof(a) / sizeof(a[0])) +#define ARRAYSIZE(a) (uint32)(sizeof(a) / sizeof(a[0])) #endif // endif #ifndef ARRAYLAST /* returns pointer to last array element */ @@ -663,9 +673,9 @@ static INLINE uint32 getbit##NB(void *ptr, uint32 ix) \ return ((*a >> pos) & MSK); \ } -DECLARE_MAP_API(2, 4, 1, 15U, 0x0003) /* setbit2() and getbit2() */ -DECLARE_MAP_API(4, 3, 2, 7U, 0x000F) /* setbit4() and getbit4() */ -DECLARE_MAP_API(8, 2, 3, 3U, 0x00FF) /* setbit8() and getbit8() */ +DECLARE_MAP_API(2, 4, 1, 15U, 0x0003U) /* setbit2() and getbit2() */ +DECLARE_MAP_API(4, 3, 2, 7U, 0x000FU) /* setbit4() and getbit4() */ +DECLARE_MAP_API(8, 2, 3, 3U, 0x00FFU) /* setbit8() and getbit8() */ /* basic mux operation - can be optimized on 
several architectures */ #define MUX(pred, true, false) ((pred) ? (true) : (false)) @@ -838,7 +848,8 @@ extern uint bcmdumpfields(bcmutl_rdreg_rtn func_ptr, void *arg0, uint arg1, stru char *buf, uint32 bufsize); extern uint bcm_bitcount(uint8 *bitmap, uint bytelength); -extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...); +extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...) + __attribute__ ((format (__printf__, 2, 0))); /* power conversion */ extern uint16 bcm_qdbm_to_mw(uint8 qdbm); @@ -920,7 +931,7 @@ C_bcm_count_leading_zeros(uint32 u32arg) while (u32arg) { shifts++; u32arg >>= 1; } - return (32U - shifts); + return (32 - shifts); } /* the format of current TCM layout during boot @@ -1267,6 +1278,7 @@ void counter_printlog(counter_tbl_t *ctr_tbl); #endif // endif #ifdef SHOW_LOGTRACE #define TRACE_LOG_BUF_MAX_SIZE 1700 +#define RTT_LOG_BUF_MAX_SIZE 1700 #define BUF_NOT_AVAILABLE 0 #define NEXT_BUF_NOT_AVAIL 1 #define NEXT_BUF_AVAIL 2 diff --git a/bcmdhd.100.10.315.x/include/brcm_nl80211.h b/bcmdhd.100.10.315.x/include/brcm_nl80211.h index eb04883..c585b0e 100644 --- a/bcmdhd.100.10.315.x/include/brcm_nl80211.h +++ b/bcmdhd.100.10.315.x/include/brcm_nl80211.h @@ -1,7 +1,7 @@ /* * Definitions for nl80211 vendor command/event access to host driver * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: brcm_nl80211.h 768773 2018-06-21 08:38:23Z $ + * $Id: brcm_nl80211.h 787269 2018-11-01 11:46:31Z $ * */ @@ -35,18 +35,13 @@ #define OUI_GOOGLE 0x001A11 enum wl_vendor_subcmd { - BRCM_VENDOR_SCMD_UNSPEC, - BRCM_VENDOR_SCMD_PRIV_STR, - BRCM_VENDOR_SCMD_BCM_STR, - BRCM_VENDOR_SCMD_BCM_PSK -}; - -enum brcm_nl80211_vendor_events { - BRCM_VENDOR_EVENT_UNSPEC, - BRCM_VENDOR_EVENT_PRIV_STR, - BRCM_VENDOR_EVENT_HANGED = 33, - BRCM_VENDOR_EVENT_SAE_KEY = 34, - BRCM_VENDOR_EVENT_BEACON_RECV = 35 + BRCM_VENDOR_SCMD_UNSPEC = 0, + BRCM_VENDOR_SCMD_PRIV_STR = 1, + BRCM_VENDOR_SCMD_BCM_STR = 2, + BRCM_VENDOR_SCMD_BCM_PSK = 3, + BRCM_VENDOR_SCMD_SET_PMK = 4, + BRCM_VENDOR_SCMD_GET_FEATURES = 5, + BRCM_VENDOR_SCMD_MAX = 6 }; struct bcm_nlmsg_hdr { diff --git a/bcmdhd.100.10.315.x/include/dbus.h b/bcmdhd.100.10.315.x/include/dbus.h index e8bd5d7..ff629ef 100644 --- a/bcmdhd.100.10.315.x/include/dbus.h +++ b/bcmdhd.100.10.315.x/include/dbus.h @@ -2,7 +2,7 @@ * Dongle BUS interface Abstraction layer * target serial buses like USB, SDIO, SPI, etc. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/dhd_daemon.h b/bcmdhd.100.10.315.x/include/dhd_daemon.h index 06aa182..b324b51 100644 --- a/bcmdhd.100.10.315.x/include/dhd_daemon.h +++ b/bcmdhd.100.10.315.x/include/dhd_daemon.h @@ -1,7 +1,7 @@ /* * Header file for DHD daemon to handle timeouts * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: dhd_daemon.h 671442 2016-11-22 05:16:18Z $ + * $Id: dhd_daemon.h 802947 2019-02-05 14:53:08Z $ */ #ifndef __BCM_DHDD_H__ @@ -40,7 +40,11 @@ #define NO_TRAP 0 #define DO_TRAP 1 -#define BCM_NL_USER 31 +/* Keep common BCM netlink macros here */ +#define BCM_NL_USER 31 +#define BCM_NL_OXYGEN 30 +#define BCM_NL_TS 29 +/* ====== !! ADD NEW NL socket related defines here !! ====== */ typedef enum notify_dhd_daemon_reason { REASON_COMMAND_TO, diff --git a/bcmdhd.100.10.315.x/include/dhdioctl.h b/bcmdhd.100.10.315.x/include/dhdioctl.h index 458e14f..8e74513 100644 --- a/bcmdhd.100.10.315.x/include/dhdioctl.h +++ b/bcmdhd.100.10.315.x/include/dhdioctl.h @@ -5,7 +5,7 @@ * * Definitions subject to change without notice. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -28,7 +28,7 @@ * * <> * - * $Id: dhdioctl.h 765806 2018-06-05 13:56:08Z $ + * $Id: dhdioctl.h 800512 2019-01-22 09:31:01Z $ */ #ifndef _dhdioctl_h_ @@ -60,6 +60,33 @@ typedef enum { DMA_XFER_FAILED } dma_xfer_status_t; +typedef enum d11_lpbk_type { + M2M_DMA_LPBK = 0, + D11_LPBK = 1, + BMC_LPBK = 2, + M2M_NON_DMA_LPBK = 3, + D11_HOST_MEM_LPBK = 4, + BMC_HOST_MEM_LPBK = 5, + MAX_LPBK = 6 +} dma_xfer_type_t; + +typedef struct dmaxfer_info { + uint16 version; + uint16 length; + dma_xfer_status_t status; + dma_xfer_type_t type; + uint src_delay; + uint dest_delay; + uint should_wait; + uint core_num; + int error_code; + uint32 num_bytes; + uint64 time_taken; + uint64 tput; +} dma_xfer_info_t; + +#define DHD_DMAXFER_VERSION 0x1 + typedef struct tput_test { uint16 version; uint16 length; @@ -239,4 +266,5 @@ typedef struct bt_mem_req { typedef struct 
debug_buf_dest_stat { uint32 stat[DEBUG_BUF_DEST_MAX]; } debug_buf_dest_stat_t; + #endif /* _dhdioctl_h_ */ diff --git a/bcmdhd.100.10.315.x/include/dnglevent.h b/bcmdhd.100.10.315.x/include/dnglevent.h index 4919af7..77c3f50 100644 --- a/bcmdhd.100.10.315.x/include/dnglevent.h +++ b/bcmdhd.100.10.315.x/include/dnglevent.h @@ -1,7 +1,9 @@ /* - * Broadcom Event protocol definitions + * Broadcom Event protocol definitions * - * Copyright (C) 1999-2018, Broadcom. + * Dependencies: bcmeth.h + * + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -21,12 +23,11 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * - * Dependencies: bcmeth.h - * - * $Id: dnglevent.h $ * * <> * + * $Id: dnglevent.h $ + * * ----------------------------------------------------------------------------- * */ @@ -108,8 +109,9 @@ typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_healthcheck { #define HEALTH_CHECK_PCIEDEV_D3ACK_STALL_IND 0x5 #define HEALTH_CHECK_PCIEDEV_NODS_IND 0x6 #define HEALTH_CHECK_PCIEDEV_LINKSPEED_FALLBACK_IND 0x7 +#define HEALTH_CHECK_PCIEDEV_DSACK_STALL_IND 0x8 -#define HC_PCIEDEV_CONFIG_REGLIST_MAX 20 +#define HC_PCIEDEV_CONFIG_REGLIST_MAX 25 typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_pcie_hc { uint16 version; /* HEALTH_CHECK_PCIEDEV_VERSION_1 */ uint16 reserved; diff --git a/bcmdhd.100.10.315.x/include/eapol.h b/bcmdhd.100.10.315.x/include/eapol.h index 5b11964..ace6fda 100644 --- a/bcmdhd.100.10.315.x/include/eapol.h +++ b/bcmdhd.100.10.315.x/include/eapol.h @@ -5,7 +5,7 @@ * IEEE Std 802.1X-2001 * IEEE 802.1X RADIUS Usage Guidelines * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -28,7 +28,7 @@ * * <> * - * $Id: eapol.h 767212 2018-06-13 00:17:23Z $ + * $Id: eapol.h 809460 2019-03-14 00:35:24Z $ */ #ifndef _eapol_h_ @@ -125,15 +125,29 @@ typedef BWL_PRE_PACKED_STRUCT struct { #define EAPOL_WPA_PMK_SHA384_LEN 48u #define EAPOL_WPA_PMK_DEFAULT_LEN 32u #define EAPOL_WPA_KCK_DEFAULT_LEN 16u +#define EAPOL_WPA_KCK_SHA384_LEN 24u #define EAPOL_WPA_KCK_MIC_DEFAULT_LEN 16u +#define EAPOL_WPA_KCK_MIC_SHA384_LEN 24u #define EAPOL_WPA_ENCR_KEY_DEFAULT_LEN 16u +#define EAPOL_WPA_KEK2_SHA256_LEN 16u +#define EAPOL_WPA_KEK2_SHA384_LEN 32u +#define EAPOL_WPA_KCK2_SHA256_LEN 16u +#define EAPOL_WPA_KCK2_SHA384_LEN 24u + #ifndef EAPOL_KEY_HDR_VER_V2 #define EAPOL_WPA_KEY_MIC_LEN 16u /* deprecated */ #define EAPOL_WPA_KEY_LEN 95u /* deprecated */ #endif // endif +#define EAPOL_PTK_KEY_MAX_LEN (EAPOL_WPA_KEY_MAX_MIC_LEN +\ + EAPOL_WPA_ENCR_KEY_MAX_LEN +\ + EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN +\ + EAPOL_WPA_KCK2_SHA384_LEN +\ + EAPOL_WPA_KEK2_SHA384_LEN) + #ifndef EAPOL_KEY_HDR_VER_V2 + /* WPA EAPOL-Key : deprecated */ typedef BWL_PRE_PACKED_STRUCT struct { unsigned char type; /* Key Descriptor Type */ @@ -196,6 +210,8 @@ typedef eapol_wpa_key_header_v2_t eapol_wpa_key_header_t; #define WPA_KEY_SECURE 0x200 #define WPA_KEY_ERROR 0x400 #define WPA_KEY_REQ 0x800 +#define WPA_KEY_ENC_KEY_DATA 0x01000 /* Encrypted Key Data */ +#define WPA_KEY_SMK_MESSAGE 0x02000 /* SMK Message */ #define WPA_KEY_DESC_VER(_ki) ((_ki) & 0x03u) #define WPA_KEY_DESC_V2_OR_V3 WPA_KEY_DESC_V2 diff --git a/bcmdhd.100.10.315.x/include/epivers.h b/bcmdhd.100.10.315.x/include/epivers.h index 0edcbdd..abaa5a7 100644 --- a/bcmdhd.100.10.315.x/include/epivers.h +++ b/bcmdhd.100.10.315.x/include/epivers.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -33,19 +33,19 @@ #define EPI_MINOR_VERSION 10 -#define EPI_RC_NUMBER 315 +#define EPI_RC_NUMBER 545 #define EPI_INCREMENTAL_NUMBER 0 #define EPI_BUILD_NUMBER 0 -#define EPI_VERSION 100, 10, 315, 0 +#define EPI_VERSION 100, 10, 545, 0 -#define EPI_VERSION_NUM 0x640a13b0 +#define EPI_VERSION_NUM 0x640a2210 -#define EPI_VERSION_DEV 100.10.315 +#define EPI_VERSION_DEV 100.10.545 /* Driver Version String, ASCII, 32 chars max */ -#define EPI_VERSION_STR "100.10.315.3 (r771911)" +#define EPI_VERSION_STR "100.10.545.4 (r826445-20190826-1) (amlogic-dbg-20190830-1)" #endif /* _epivers_h_ */ diff --git a/bcmdhd.100.10.315.x/include/etd.h b/bcmdhd.100.10.315.x/include/etd.h index 6b7b87e..764af85 100644 --- a/bcmdhd.100.10.315.x/include/etd.h +++ b/bcmdhd.100.10.315.x/include/etd.h @@ -1,7 +1,7 @@ /* * Extended Trap data component interface file. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: etd.h 751605 2018-03-13 03:32:30Z $ + * $Id: etd.h 813064 2019-04-03 11:29:38Z $ */ #ifndef _ETD_H_ @@ -78,6 +78,10 @@ typedef enum { TAG_TRAP_LOG_DATA = 20, TAG_TRAP_CODE = 21, /* The trap type */ TAG_TRAP_HMAP = 22, /* HMAP violation Address and Info */ + TAG_TRAP_PCIE_ERR_ATTN = 23, /* PCIE error attn log */ + TAG_TRAP_AXI_ERROR = 24, /* AXI Error */ + TAG_TRAP_AXI_HOST_INFO = 25, /* AXI Host log */ + TAG_TRAP_AXI_SR_ERROR = 26, /* AXI SR error log */ TAG_TRAP_LAST /* This must be the last entry */ } hnd_ext_tag_trap_t; @@ -106,6 +110,42 @@ typedef struct hnd_ext_trap_bp_err uint32 itipoobdout; } hnd_ext_trap_bp_err_t; +#define HND_EXT_TRAP_AXISR_INFO_VER_1 1 +typedef struct hnd_ext_trap_axi_sr_err_v1 +{ + uint8 version; + uint8 pad[3]; + uint32 error; + uint32 coreid; + uint32 baseaddr; + uint32 ioctrl; + uint32 iostatus; + uint32 resetctrl; + uint32 resetstatus; + uint32 resetreadid; + uint32 resetwriteid; + uint32 errlogctrl; + uint32 errlogdone; + uint32 errlogstatus; + uint32 errlogaddrlo; + uint32 errlogaddrhi; + uint32 errlogid; + uint32 errloguser; + uint32 errlogflags; + uint32 itipoobaout; + uint32 itipoobbout; + uint32 itipoobcout; + uint32 itipoobdout; + + /* axi_sr_issue_debug */ + uint32 sr_pwr_control; + uint32 sr_corereset_wrapper_main; + uint32 sr_corereset_wrapper_aux; + uint32 sr_main_gci_status_0; + uint32 sr_aux_gci_status_0; + uint32 sr_dig_gci_status_0; +} hnd_ext_trap_axi_sr_err_v1_t; + #define HND_EXT_TRAP_PSMWD_INFO_VER 1 typedef struct hnd_ext_trap_psmwd_v1 { uint16 xtag; @@ -226,11 +266,64 @@ typedef struct hnd_ext_trap_wlc_mem_err_v2 { uint16 txqueue_len[MEM_TRAP_NUM_WLC_TX_QUEUES]; } hnd_ext_trap_wlc_mem_err_v2_t; +#define HND_EXT_TRAP_WLC_MEM_ERR_VER_V3 3 + +typedef struct hnd_ext_trap_wlc_mem_err_v3 { + uint8 version; + uint8 instance; + uint8 
stas_associated; + uint8 aps_associated; + uint8 soft_ap_client_cnt; + uint8 peer_cnt; + uint16 txqueue_len[MEM_TRAP_NUM_WLC_TX_QUEUES]; +} hnd_ext_trap_wlc_mem_err_v3_t; + typedef struct hnd_ext_trap_pcie_mem_err { uint16 d2h_queue_len; uint16 d2h_req_queue_len; } hnd_ext_trap_pcie_mem_err_t; +#define MAX_DMAFIFO_ENTRIES_V1 1 +#define MAX_DMAFIFO_DESC_ENTRIES_V1 2 +#define HND_EXT_TRAP_AXIERROR_SIGNATURE 0xbabebabe +#define HND_EXT_TRAP_AXIERROR_VERSION_1 1 + +/* Structure to collect debug info of descriptor entry for dma channel on encountering AXI Error */ +/* Below three structures are dependant, any change will bump version of all the three */ + +typedef struct hnd_ext_trap_desc_entry_v1 { + uint32 ctrl1; /* descriptor entry at din < misc control bits > */ + uint32 ctrl2; /* descriptor entry at din */ + uint32 addrlo; /* descriptor entry at din
*/ + uint32 addrhi; /* descriptor entry at din
*/ +} dma_dentry_v1_t; + +/* Structure to collect debug info about a dma channel on encountering AXI Error */ +typedef struct hnd_ext_trap_dma_fifo_v1 { + uint8 valid; /* no of valid desc entries filled, non zero = fifo entry valid */ + uint8 direction; /* TX=1, RX=2, currently only using TX */ + uint16 index; /* Index of the DMA channel in system */ + uint32 dpa; /* Expected Address of Descriptor table from software state */ + uint32 desc_lo; /* Low Address of Descriptor table programmed in DMA register */ + uint32 desc_hi; /* High Address of Descriptor table programmed in DMA register */ + uint16 din; /* rxin / txin */ + uint16 dout; /* rxout / txout */ + dma_dentry_v1_t dentry[MAX_DMAFIFO_DESC_ENTRIES_V1]; /* Descriptor Entires */ +} dma_fifo_v1_t; + +typedef struct hnd_ext_trap_axi_error_v1 { + uint8 version; /* version = 1 */ + uint8 dma_fifo_valid_count; /* Number of valid dma_fifo entries */ + uint16 length; /* length of whole structure */ + uint32 signature; /* indicate that its filled with AXI Error data */ + uint32 axi_errorlog_status; /* errlog_status from slave wrapper */ + uint32 axi_errorlog_core; /* errlog_core from slave wrapper */ + uint32 axi_errorlog_lo; /* errlog_lo from slave wrapper */ + uint32 axi_errorlog_hi; /* errlog_hi from slave wrapper */ + uint32 axi_errorlog_id; /* errlog_id from slave wrapper */ + dma_fifo_v1_t dma_fifo[MAX_DMAFIFO_ENTRIES_V1]; +} hnd_ext_trap_axi_error_v1_t; + #define HND_EXT_TRAP_MACSUSP_INFO_VER 1 typedef struct hnd_ext_trap_macsusp { uint16 xtag; @@ -282,6 +375,7 @@ typedef struct hnd_ext_trap_macenab { uint16 PAD; } hnd_ext_trap_macenab_t; +#define HND_EXT_TRAP_PHY_INFO_VER_1 (1) typedef struct hnd_ext_trap_phydbg { uint16 err; uint16 RxFeStatus; @@ -326,7 +420,7 @@ typedef struct { uint16 core_mask; } reg_dump_config_t; -#define HND_EXT_TRAP_PHY_INFO_VER 2 +#define HND_EXT_TRAP_PHY_INFO_VER 2 typedef struct hnd_ext_trap_phydbg_v2 { uint8 version; uint8 len; @@ -361,6 +455,43 @@ typedef struct 
hnd_ext_trap_phydbg_v2 { uint32 additional_regs[1]; } hnd_ext_trap_phydbg_v2_t; +#define HND_EXT_TRAP_PHY_INFO_VER_3 (3) +typedef struct hnd_ext_trap_phydbg_v3 { + uint8 version; + uint8 len; + uint16 err; + uint16 RxFeStatus; + uint16 TxFIFOStatus0; + uint16 TxFIFOStatus1; + uint16 RfseqMode; + uint16 RfseqStatus0; + uint16 RfseqStatus1; + uint16 RfseqStatus_Ocl; + uint16 RfseqStatus_Ocl1; + uint16 OCLControl1; + uint16 TxError; + uint16 bphyTxError; + uint16 TxCCKError; + uint16 TxCtrlWrd0; + uint16 TxCtrlWrd1; + uint16 TxCtrlWrd2; + uint16 TxLsig0; + uint16 TxLsig1; + uint16 TxVhtSigA10; + uint16 TxVhtSigA11; + uint16 TxVhtSigA20; + uint16 TxVhtSigA21; + uint16 txPktLength; + uint16 txPsdulengthCtr; + uint16 gpioClkControl; + uint16 gpioSel; + uint16 pktprocdebug; + uint32 gpioOut[3]; + uint16 HESigURateFlagStatus; + uint16 HESigUsRateFlagStatus; + uint32 additional_regs[1]; +} hnd_ext_trap_phydbg_v3_t; + /* Phy TxErr Dump Structure */ #define HND_EXT_TRAP_PHYTXERR_INFO_VER 1 #define HND_EXT_TRAP_PHYTXERR_INFO_VER_V2 2 @@ -440,6 +571,22 @@ typedef struct hnd_ext_trap_macphytxerr_v2 { uint32 recv_fifo_status[3][2]; /* Rcv Status0 & Rcv Status1 for 3 Rx fifos */ } hnd_ext_trap_macphytxerr_v2_t; +#define HND_EXT_TRAP_PCIE_ERR_ATTN_VER_1 (1u) +#define MAX_AER_HDR_LOG_REGS (4u) +typedef struct hnd_ext_trap_pcie_err_attn_v1 { + uint8 version; + uint8 pad[3]; + uint32 err_hdr_logreg1; + uint32 err_hdr_logreg2; + uint32 err_hdr_logreg3; + uint32 err_hdr_logreg4; + uint32 err_code_logreg; + uint32 err_type; + uint32 err_code_state; + uint32 last_err_attn_ts; + uint32 cfg_tlp_hdr[MAX_AER_HDR_LOG_REGS]; +} hnd_ext_trap_pcie_err_attn_v1_t; + #define MAX_EVENTLOG_BUFFERS 48 typedef struct eventlog_trapdata_info { uint32 num_elements; diff --git a/bcmdhd.100.10.315.x/include/ethernet.h b/bcmdhd.100.10.315.x/include/ethernet.h index 565fc70..9e0b961 100644 --- a/bcmdhd.100.10.315.x/include/ethernet.h +++ b/bcmdhd.100.10.315.x/include/ethernet.h @@ -1,7 +1,7 @@ /* * From 
FreeBSD 2.2.7: Fundamental constants relating to ethernet. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/event_log.h b/bcmdhd.100.10.315.x/include/event_log.h index f051602..9e0b4b0 100644 --- a/bcmdhd.100.10.315.x/include/event_log.h +++ b/bcmdhd.100.10.315.x/include/event_log.h @@ -1,7 +1,7 @@ /* * EVENT_LOG system definitions * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/event_log_payload.h b/bcmdhd.100.10.315.x/include/event_log_payload.h index 2644831..f1f4cf3 100644 --- a/bcmdhd.100.10.315.x/include/event_log_payload.h +++ b/bcmdhd.100.10.315.x/include/event_log_payload.h @@ -4,7 +4,7 @@ * This file describes the payloads of event log entries that are data buffers * rather than formatted string entries. The contents are generally XTLVs. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -27,7 +27,7 @@ * * <> * - * $Id: event_log_payload.h 768232 2018-06-19 05:28:22Z $ + * $Id: event_log_payload.h 825102 2019-06-12 22:26:41Z $ */ #ifndef _EVENT_LOG_PAYLOAD_H_ @@ -38,6 +38,35 @@ #include #include +/** + * A (legacy) timestamp message + */ +typedef struct ts_message { + uint32 timestamp; + uint32 cyclecount; +} ts_msg_t; + +/** + * Enhanced timestamp message + */ +typedef struct enhanced_ts_message { + uint32 version; + /* More data, depending on version */ + uint8 data[]; +} ets_msg_t; + +#define ENHANCED_TS_MSG_VERSION_1 (1u) + +/** + * Enhanced timestamp message, version 1 + */ +typedef struct enhanced_ts_message_v1 { + uint32 version; + uint32 timestamp; /* PMU time, in milliseconds */ + uint32 cyclecount; + uint32 cpu_freq; +} ets_msg_v1_t; + #define EVENT_LOG_XTLV_ID_STR 0 /**< XTLV ID for a string */ #define EVENT_LOG_XTLV_ID_TXQ_SUM 1 /**< XTLV ID for txq_summary_t */ #define EVENT_LOG_XTLV_ID_SCBDATA_SUM 2 /**< XTLV ID for cb_subq_summary_t */ @@ -186,16 +215,19 @@ typedef struct xtlv_uc_txs { /* Scan flags */ #define SCAN_SUM_CHAN_INFO 0x1 /* Scan_sum flags */ -#define BAND5G_SIB_ENAB 0x2 -#define BAND2G_SIB_ENAB 0x4 -#define PARALLEL_SCAN 0x8 -#define SCAN_ABORT 0x10 +#define BAND5G_SIB_ENAB 0x2 +#define BAND2G_SIB_ENAB 0x4 +#define PARALLEL_SCAN 0x8 +#define SCAN_ABORT 0x10 +#define SC_LOWSPAN_SCAN 0x20 +#define SC_SCAN 0x40 /* scan_channel_info flags */ #define ACTIVE_SCAN_SCN_SUM 0x2 #define SCAN_SUM_WLC_CORE0 0x4 #define SCAN_SUM_WLC_CORE1 0x8 -#define HOME_CHAN 0x10 +#define HOME_CHAN 0x10 +#define SCAN_SUM_SCAN_CORE 0x20 typedef struct wl_scan_ssid_info { @@ -292,39 +324,52 @@ typedef struct wl_chansw_event_log_record_v2 { /* Sub-block type for EVENT_LOG_TAG_AMPDU_DUMP */ typedef enum { - WL_AMPDU_STATS_TYPE_RXMCSx1 = 0, /* RX MCS rate (Nss = 1) */ - WL_AMPDU_STATS_TYPE_RXMCSx2 = 1, - 
WL_AMPDU_STATS_TYPE_RXMCSx3 = 2, - WL_AMPDU_STATS_TYPE_RXMCSx4 = 3, - WL_AMPDU_STATS_TYPE_RXVHTx1 = 4, /* RX VHT rate (Nss = 1) */ - WL_AMPDU_STATS_TYPE_RXVHTx2 = 5, - WL_AMPDU_STATS_TYPE_RXVHTx3 = 6, - WL_AMPDU_STATS_TYPE_RXVHTx4 = 7, - WL_AMPDU_STATS_TYPE_TXMCSx1 = 8, /* TX MCS rate (Nss = 1) */ - WL_AMPDU_STATS_TYPE_TXMCSx2 = 9, - WL_AMPDU_STATS_TYPE_TXMCSx3 = 10, - WL_AMPDU_STATS_TYPE_TXMCSx4 = 11, - WL_AMPDU_STATS_TYPE_TXVHTx1 = 12, /* TX VHT rate (Nss = 1) */ - WL_AMPDU_STATS_TYPE_TXVHTx2 = 13, - WL_AMPDU_STATS_TYPE_TXVHTx3 = 14, - WL_AMPDU_STATS_TYPE_TXVHTx4 = 15, - WL_AMPDU_STATS_TYPE_RXMCSSGI = 16, /* RX SGI usage (for all MCS rates) */ - WL_AMPDU_STATS_TYPE_TXMCSSGI = 17, /* TX SGI usage (for all MCS rates) */ - WL_AMPDU_STATS_TYPE_RXVHTSGI = 18, /* RX SGI usage (for all VHT rates) */ - WL_AMPDU_STATS_TYPE_TXVHTSGI = 19, /* TX SGI usage (for all VHT rates) */ - WL_AMPDU_STATS_TYPE_RXMCSPER = 20, /* RX PER (for all MCS rates) */ - WL_AMPDU_STATS_TYPE_TXMCSPER = 21, /* TX PER (for all MCS rates) */ - WL_AMPDU_STATS_TYPE_RXVHTPER = 22, /* RX PER (for all VHT rates) */ - WL_AMPDU_STATS_TYPE_TXVHTPER = 23, /* TX PER (for all VHT rates) */ - WL_AMPDU_STATS_TYPE_RXDENS = 24, /* RX AMPDU density */ - WL_AMPDU_STATS_TYPE_TXDENS = 25, /* TX AMPDU density */ - WL_AMPDU_STATS_TYPE_RXMCSOK = 26, /* RX all MCS rates */ - WL_AMPDU_STATS_TYPE_RXVHTOK = 27, /* RX all VHT rates */ - WL_AMPDU_STATS_TYPE_TXMCSALL = 28, /* TX all MCS rates */ - WL_AMPDU_STATS_TYPE_TXVHTALL = 29, /* TX all VHT rates */ - WL_AMPDU_STATS_TYPE_TXMCSOK = 30, /* TX all MCS rates */ - WL_AMPDU_STATS_TYPE_TXVHTOK = 31, /* TX all VHT rates */ - WL_AMPDU_STATS_MAX_CNTS = 64 + WL_AMPDU_STATS_TYPE_RXMCSx1 = 0, /* RX MCS rate (Nss = 1) */ + WL_AMPDU_STATS_TYPE_RXMCSx2 = 1, + WL_AMPDU_STATS_TYPE_RXMCSx3 = 2, + WL_AMPDU_STATS_TYPE_RXMCSx4 = 3, + WL_AMPDU_STATS_TYPE_RXVHTx1 = 4, /* RX VHT rate (Nss = 1) */ + WL_AMPDU_STATS_TYPE_RXVHTx2 = 5, + WL_AMPDU_STATS_TYPE_RXVHTx3 = 6, + WL_AMPDU_STATS_TYPE_RXVHTx4 = 
7, + WL_AMPDU_STATS_TYPE_TXMCSx1 = 8, /* TX MCS rate (Nss = 1) */ + WL_AMPDU_STATS_TYPE_TXMCSx2 = 9, + WL_AMPDU_STATS_TYPE_TXMCSx3 = 10, + WL_AMPDU_STATS_TYPE_TXMCSx4 = 11, + WL_AMPDU_STATS_TYPE_TXVHTx1 = 12, /* TX VHT rate (Nss = 1) */ + WL_AMPDU_STATS_TYPE_TXVHTx2 = 13, + WL_AMPDU_STATS_TYPE_TXVHTx3 = 14, + WL_AMPDU_STATS_TYPE_TXVHTx4 = 15, + WL_AMPDU_STATS_TYPE_RXMCSSGI = 16, /* RX SGI usage (for all MCS rates) */ + WL_AMPDU_STATS_TYPE_TXMCSSGI = 17, /* TX SGI usage (for all MCS rates) */ + WL_AMPDU_STATS_TYPE_RXVHTSGI = 18, /* RX SGI usage (for all VHT rates) */ + WL_AMPDU_STATS_TYPE_TXVHTSGI = 19, /* TX SGI usage (for all VHT rates) */ + WL_AMPDU_STATS_TYPE_RXMCSPER = 20, /* RX PER (for all MCS rates) */ + WL_AMPDU_STATS_TYPE_TXMCSPER = 21, /* TX PER (for all MCS rates) */ + WL_AMPDU_STATS_TYPE_RXVHTPER = 22, /* RX PER (for all VHT rates) */ + WL_AMPDU_STATS_TYPE_TXVHTPER = 23, /* TX PER (for all VHT rates) */ + WL_AMPDU_STATS_TYPE_RXDENS = 24, /* RX AMPDU density */ + WL_AMPDU_STATS_TYPE_TXDENS = 25, /* TX AMPDU density */ + WL_AMPDU_STATS_TYPE_RXMCSOK = 26, /* RX all MCS rates */ + WL_AMPDU_STATS_TYPE_RXVHTOK = 27, /* RX all VHT rates */ + WL_AMPDU_STATS_TYPE_TXMCSALL = 28, /* TX all MCS rates */ + WL_AMPDU_STATS_TYPE_TXVHTALL = 29, /* TX all VHT rates */ + WL_AMPDU_STATS_TYPE_TXMCSOK = 30, /* TX all MCS rates */ + WL_AMPDU_STATS_TYPE_TXVHTOK = 31, /* TX all VHT rates */ + WL_AMPDU_STATS_TYPE_RX_HE_SUOK = 32, /* DL SU MPDU frame per MCS */ + WL_AMPDU_STATS_TYPE_RX_HE_SU_DENS = 33, /* DL SU AMPDU DENSITY */ + WL_AMPDU_STATS_TYPE_RX_HE_MUMIMOOK = 34, /* DL MUMIMO Frame per MCS */ + WL_AMPDU_STATS_TYPE_RX_HE_MUMIMO_DENS = 35, /* DL MUMIMO AMPDU Density */ + WL_AMPDU_STATS_TYPE_RX_HE_DLOFDMAOK = 36, /* DL OFDMA Frame per MCS */ + WL_AMPDU_STATS_TYPE_RX_HE_DLOFDMA_DENS = 37, /* DL OFDMA AMPDU Density */ + WL_AMPDU_STATS_TYPE_RX_HE_DLOFDMA_HIST = 38, /* DL OFDMA frame RU histogram */ + WL_AMPDU_STATS_TYPE_TX_HE_MCSALL = 39, /* TX HE (SU+MU) frames, all rates */ + 
WL_AMPDU_STATS_TYPE_TX_HE_MCSOK = 40, /* TX HE (SU+MU) frames succeeded */ + WL_AMPDU_STATS_TYPE_TX_HE_MUALL = 41, /* TX MU (UL OFDMA) frames all rates */ + WL_AMPDU_STATS_TYPE_TX_HE_MUOK = 42, /* TX MU (UL OFDMA) frames succeeded */ + WL_AMPDU_STATS_TYPE_TX_HE_RUBW = 43, /* TX UL RU by BW histogram */ + WL_AMPDU_STATS_TYPE_TX_HE_PADDING = 44, /* TX padding total (single value) */ + WL_AMPDU_STATS_MAX_CNTS = 64 } wl_ampdu_stat_enum_t; typedef struct { uint16 type; /* AMPDU statistics sub-type */ @@ -343,6 +388,14 @@ typedef struct { uint32 aggr_dist[WL_AMPDU_STATS_MAX_CNTS + 1]; } wl_ampdu_stats_aggrsz_t; +/* Sub-block type for WL_IFSTATS_XTLV_HE_TXMU_STATS */ +typedef enum { + /* Reserve 0 to avoid potential concerns */ + WL_HE_TXMU_STATS_TYPE_TIME = 1, /* per-dBm, total usecs transmitted */ + WL_HE_TXMU_STATS_TYPE_PAD_TIME = 2, /* per-dBm, padding usecs transmitted */ +} wl_he_txmu_stat_enum_t; +#define WL_IFSTATS_HE_TXMU_MAX 32u + /* Sub-block type for EVENT_LOG_TAG_MSCHPROFILE */ #define WL_MSCH_PROFILER_START 0 /* start event check */ #define WL_MSCH_PROFILER_EXIT 1 /* exit event check */ @@ -796,4 +849,168 @@ typedef struct phy_periodic_log_v1 { phy_periodic_log_core_t phy_perilog_core[1]; } phy_periodic_log_v1_t; +/* Note: The version 2 is reserved for 4357 only. Future chips must not use this version. */ + +#define MAX_CORE_4357 (2u) +#define PHYCAL_LOG_VER2 (2u) +#define PHY_PERIODIC_LOG_VER2 (2u) + +typedef struct { + uint32 txallfrm; /**< total number of frames sent, incl. 
Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 rxstrt; /**< number of received frames with a good PLCP */ + uint32 rxbadplcp; /**< number of parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */ + uint32 rxf0ovfl; /** < Rx FIFO0 overflow counters information */ + uint32 rxf1ovfl; /** < Rx FIFO1 overflow counters information */ + uint32 rxdtocast; /**< number of received DATA frames (good FCS and no matching RA) */ + uint32 rxtoolate; /**< receive too late */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ +} phy_periodic_counters_v2_t; + +/* Note: The version 2 is reserved for 4357 only. All future chips must not use this version. */ + +typedef struct phycal_log_core_v2 { + uint16 ofdm_txa; /* OFDM Tx IQ Cal a coeff */ + uint16 ofdm_txb; /* OFDM Tx IQ Cal b coeff */ + uint16 ofdm_txd; /* contain di & dq */ + uint16 rxa; /* Rx IQ Cal A coeffecient */ + uint16 rxb; /* Rx IQ Cal B coeffecient */ + uint8 baseidx; /* TPC Base index */ + uint8 pad; + int32 rxs; /* FDIQ Slope coeffecient */ +} phycal_log_core_v2_t; + +/* Note: The version 2 is reserved for 4357 only. All future chips must not use this version. 
*/ + +typedef struct phycal_log_v2 { + uint8 version; /* Logging structure version */ + uint16 length; /* Length of the entire structure */ + uint8 pad; + phycal_log_cmn_t phycal_log_cmn; /* Logging common structure */ + phycal_log_core_v2_t phycal_log_core[MAX_CORE_4357]; +} phycal_log_v2_t; + +/* Note: The version 2 is reserved for 4357 only. All future chips must not use this version. */ + +typedef struct phy_periodic_log_v2 { + uint8 version; /* Logging structure version */ + uint16 length; /* Length of the entire structure */ + uint8 pad; + phy_periodic_log_cmn_t phy_perilog_cmn; + phy_periodic_counters_v2_t counters_peri_log; + phy_periodic_log_core_t phy_perilog_core[MAX_CORE_4357]; +} phy_periodic_log_v2_t; + +/* Event log payload for enhanced roam log */ +typedef enum { + ROAM_LOG_SCANSTART = 1, /* EVT log for roam scan start */ + ROAM_LOG_SCAN_CMPLT = 2, /* EVT log for roam scan completeted */ + ROAM_LOG_ROAM_CMPLT = 3, /* EVT log for roam done */ + ROAM_LOG_NBR_REQ = 4, /* EVT log for Neighbor REQ */ + ROAM_LOG_NBR_REP = 5, /* EVT log for Neighbor REP */ + ROAM_LOG_BCN_REQ = 6, /* EVT log for BCNRPT REQ */ + ROAM_LOG_BCN_REP = 7, /* EVT log for BCNRPT REP */ + PRSV_PERIODIC_ID_MAX +} prsv_periodic_id_enum_t; + +typedef struct prsv_periodic_log_hdr { + uint8 version; + uint8 id; + uint16 length; +} prsv_periodic_log_hdr_t; + +#define ROAM_LOG_VER_1 (1u) +#define ROAM_LOG_TRIG_VER (1u) +#define ROAM_SSID_LEN (32u) +typedef struct roam_log_trig_v1 { + prsv_periodic_log_hdr_t hdr; + int8 rssi; + uint8 current_cu; + uint8 pad[2]; + uint reason; + int result; + union { + struct { + uint rcvd_reason; + } prt_roam; + struct { + uint8 req_mode; + uint8 token; + uint16 nbrlist_size; + uint32 disassoc_dur; + uint32 validity_dur; + uint32 bss_term_dur; + } bss_trans; + }; +} roam_log_trig_v1_t; + +#define ROAM_LOG_RPT_SCAN_LIST_SIZE 3 +#define ROAM_LOG_INVALID_TPUT 0xFFFFFFFFu +typedef struct roam_scan_ap_info { + int8 rssi; + uint8 pad[3]; + uint32 score; + uint16 
chanspec; + struct ether_addr addr; + uint32 estm_tput; +} roam_scan_ap_info_t; + +typedef struct roam_log_scan_cmplt_v1 { + prsv_periodic_log_hdr_t hdr; + uint8 full_scan; + uint8 scan_count; + uint8 scan_list_size; + uint8 pad; + int32 score_delta; + roam_scan_ap_info_t cur_info; + roam_scan_ap_info_t scan_list[ROAM_LOG_RPT_SCAN_LIST_SIZE]; +} roam_log_scan_cmplt_v1_t; + +typedef struct roam_log_cmplt_v1 { + prsv_periodic_log_hdr_t hdr; + uint status; + uint reason; + uint16 chanspec; + struct ether_addr addr; + uint8 pad[3]; + uint8 retry; +} roam_log_cmplt_v1_t; + +typedef struct roam_log_nbrrep { + prsv_periodic_log_hdr_t hdr; + uint channel_num; +} roam_log_nbrrep_v1_t; + +typedef struct roam_log_nbrreq { + prsv_periodic_log_hdr_t hdr; + uint token; +} roam_log_nbrreq_v1_t; + +typedef struct roam_log_bcnrptreq { + prsv_periodic_log_hdr_t hdr; + int32 result; + uint8 reg; + uint8 channel; + uint8 mode; + uint8 bssid_wild; + uint8 ssid_len; + uint8 pad; + uint16 duration; + uint8 ssid[ROAM_SSID_LEN]; +} roam_log_bcnrpt_req_v1_t; + +typedef struct roam_log_bcnrptrep { + prsv_periodic_log_hdr_t hdr; + uint32 count; +} roam_log_bcnrpt_rep_v1_t; + #endif /* _EVENT_LOG_PAYLOAD_H_ */ diff --git a/bcmdhd.100.10.315.x/include/event_log_set.h b/bcmdhd.100.10.315.x/include/event_log_set.h index 44051a9..d559c5d 100644 --- a/bcmdhd.100.10.315.x/include/event_log_set.h +++ b/bcmdhd.100.10.315.x/include/event_log_set.h @@ -1,7 +1,7 @@ /* * EVENT_LOG system definitions * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,21 +24,12 @@ * * <> * - * $Id: event_log_set.h 771154 2018-07-09 05:46:33Z $ + * $Id: event_log_set.h 818566 2019-05-08 04:01:45Z $ */ #ifndef _EVENT_LOG_SET_H_ #define _EVENT_LOG_SET_H_ -#ifndef NUM_EVENT_LOG_SETS -/* Set a maximum number of sets here. 
It is not dynamic for - * efficiency of the EVENT_LOG calls. Old branches could define - * this to an appropriat enumber in their makefiles to reduce - * ROM invalidation - */ -#define NUM_EVENT_LOG_SETS (24) -#endif // endif - /* Set assignments */ #define EVENT_LOG_SET_BUS (0u) #define EVENT_LOG_SET_WL (1u) @@ -103,6 +94,27 @@ /* Used for timestamp plotting, TS_LOG() */ #define EVENT_LOG_SET_TS_LOG (23u) +/* BUS preserve chatty */ +#define EVENT_LOG_SET_PRSRV_BUS_CHATTY (24u) + +/* PRESERVE_PREIODIC_LOG_SET */ +/* flush if host is in D0 at every period */ +#define EVENT_LOG_SET_PRSV_PERIODIC (25u) + +#ifndef NUM_EVENT_LOG_SETS +/* Set a maximum number of sets here. It is not dynamic for + * efficiency of the EVENT_LOG calls. Old branches could define + * this to an appropriat enumber in their makefiles to reduce + * ROM invalidation + */ +#ifdef NUM_EVENT_LOG_SETS_V2 +/* for v2, everything has became unsigned */ +#define NUM_EVENT_LOG_SETS (26u) +#else /* NUM_EVENT_LOG_SETS_V2 */ +#define NUM_EVENT_LOG_SETS (26) +#endif /* NUM_EVENT_LOG_SETS_V2 */ +#endif /* NUM_EVENT_LOG_SETS */ + /* send delayed logs when >= 50% of buffer is full */ #ifndef ECOUNTERS_DELAYED_FLUSH_PERCENTAGE #define ECOUNTERS_DELAYED_FLUSH_PERCENTAGE (50) diff --git a/bcmdhd.100.10.315.x/include/event_log_tag.h b/bcmdhd.100.10.315.x/include/event_log_tag.h index 5dcead8..e202216 100644 --- a/bcmdhd.100.10.315.x/include/event_log_tag.h +++ b/bcmdhd.100.10.315.x/include/event_log_tag.h @@ -1,7 +1,7 @@ /* * EVENT_LOG system definitions * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: event_log_tag.h 771154 2018-07-09 05:46:33Z $ + * $Id: event_log_tag.h 820429 2019-05-17 22:30:04Z $ */ #ifndef _EVENT_LOG_TAG_H_ @@ -85,10 +85,6 @@ #define EVENT_LOG_TAG_PCI_DBG 52 #define EVENT_LOG_TAG_PCI_DATA 53 #define EVENT_LOG_TAG_PCI_RING 54 -/* EVENT_LOG_TAG_AWDL_TRACE_RANGING will be removed after wlc_ranging merge from IGUANA - * keeping it here to avoid compilation error on trunk - */ -#define EVENT_LOG_TAG_AWDL_TRACE_RANGING 55 #define EVENT_LOG_TAG_RANGING_TRACE 55 #define EVENT_LOG_TAG_WL_ERROR 56 #define EVENT_LOG_TAG_PHY_ERROR 57 @@ -131,11 +127,9 @@ #define EVENT_LOG_TAG_MIMO_PS_INFO 94 #define EVENT_LOG_TAG_BTCX_STATS 95 #define EVENT_LOG_TAG_LEAKY_AP_STATS 96 -#define EVENT_LOG_TAG_AWDL_TRACE_ELECTION 97 #define EVENT_LOG_TAG_MIMO_PS_STATS 98 #define EVENT_LOG_TAG_PWRSTATS_PHY 99 #define EVENT_LOG_TAG_PWRSTATS_SCAN 100 -#define EVENT_LOG_TAG_PWRSTATS_AWDL 101 #define EVENT_LOG_TAG_PWRSTATS_WAKE_V2 102 #define EVENT_LOG_TAG_LQM 103 #define EVENT_LOG_TAG_TRACE_WL_INFO 104 @@ -190,13 +184,9 @@ #define EVENT_LOG_TAG_STATS 153 #define EVENT_LOG_TAG_BAM 154 #define EVENT_LOG_TAG_TXFAIL 155 -#define EVENT_LOG_TAG_AWDL_CONFIG_DBG 156 -#define EVENT_LOG_TAG_AWDL_SYNC_DBG 157 -#define EVENT_LOG_TAG_AWDL_PEER_DBG 158 #define EVENT_LOG_TAG_RANDMAC_INFO 159 #define EVENT_LOG_TAG_RANDMAC_DBG 160 #define EVENT_LOG_TAG_RANDMAC_ERR 161 -#define EVENT_LOG_TAG_AWDL_DFSP_DBG 162 #define EVENT_LOG_TAG_MSCH_CAL 163 #define EVENT_LOG_TAG_MSCH_OPP_CAL 164 #define EVENT_LOG_TAG_MSCH 165 @@ -231,7 +221,6 @@ #define EVENT_LOG_TAG_PHY_ACI_INFO 208 #define EVENT_LOG_TAG_WL_COUNTERS_AUX 209 #define EVENT_LOG_TAG_AMPDU_DUMP_AUX 210 -#define EVENT_LOG_TAG_PWRSTATS_AWDL_AUX 211 #define EVENT_LOG_TAG_PWRSTATS_PHY_AUX 212 #define EVENT_LOG_TAG_PWRSTATS_SCAN_AUX 213 #define 
EVENT_LOG_TAG_PWRSTATS_WAKE_V2_AUX 214 @@ -244,10 +233,27 @@ #define EVENT_LOG_TAG_FILS_ERROR 221 #define EVENT_LOG_TAG_HWA_TXPOST 222 #define EVENT_LOG_TAG_HWA_TXDMA 223 +#define EVENT_LOG_TAG_PPR_ERROR 224 + /* Arbitrator callback log tags */ #define EVENT_LOG_TAG_STF_ARB_CB_TRACE 224 #define EVENT_LOG_TAG_STF_ARB_CB_ERROR 225 #define EVENT_LOG_TAG_PHY_PERIODIC_SEC 226 +#define EVENT_LOG_TAG_RTE_ERROR 227 +#define EVENT_LOG_TAG_CPLT_ERROR 228 +#define EVENT_LOG_TAG_DNGL_ERROR 229 +#define EVENT_LOG_TAG_NVRAM_ERROR 230 +#define EVENT_LOG_TAG_NAC 231 +#define EVENT_LOG_TAG_HP2P_ERR 232 +#define EVENT_LOG_TAG_SB_SCHED_DBG_SYNC 233 +#define EVENT_LOG_TAG_ENHANCED_TS 234 + +/* Available space for new tags for Dingo, Iguana and branches + * prior to Koala only. From Koala onwards, new tags must be greater + * than 255. If a tag is required for Koala and legacy productization branches, + * add that tag here. Tags > 255 will generate extended header. Legacy code + * does not understand extended header. 
+ */ /* Debug tags for making debug builds */ #define EVENT_LOG_TAG_DBG1 251 @@ -333,8 +339,65 @@ /* RTE */ #define EVENT_LOG_TAG_RTE_ERR 312 +/* TX FIFO */ +#define EVENT_LOG_TAG_FIFO_INFO 313 + +/* PKTTS */ +#define EVENT_LOG_TAG_LATENCY_INFO 314 + +/* TDLS */ +#define EVENT_LOG_TAG_WL_TDLS_INFO 315 +#define EVENT_LOG_TAG_WL_TDLS_DBG 316 +#define EVENT_LOG_TAG_WL_TDLS_ERR 317 + +/* MSCH messages */ +#define EVENT_LOG_TAG_MSCH_DATASTRUCT 319 +#define EVENT_LOG_TAG_MSCH_REGISTER 320 +#define EVENT_LOG_TAG_MSCH_CALLBACK 321 +#define EVENT_LOG_TAG_MSCH_ERROR 322 +#define EVENT_LOG_TAG_MSCH_DEBUG 323 +#define EVENT_LOG_TAG_MSCH_INFORM 324 +#define EVENT_LOG_TAG_MSCH_TRACE 325 + +/* bus low power related info messages */ +#define EVENT_LOG_TAG_WL_BUS_LP_INFO 326 +#define EVENT_LOG_TAG_PCI_LP_INFO 327 + +/* SBSS BT-Coex */ +#define EVENT_LOG_TAG_SB_BTCX_INFO 328 + +/* wbus */ +#define EVENT_LOG_TAG_WBUS_ERR 329 +#define EVENT_LOG_TAG_WBUS_INFO 330 +#define EVENT_LOG_TAG_WBUS_SCHED 331 + +/* MODESW */ +#define EVENT_LOG_TAG_MODESW_ERR 332 + +/* LPHS */ +#define EVENT_LOG_TAG_LPHS_ERR 333 + +/* CPU statistics */ +#define EVENT_LOG_TAG_ARM_STAT 334 + +/* Event log tags for SOE */ +#define EVENT_LOG_TAG_SOE_ERROR 335 +#define EVENT_LOG_TAG_SOE_INFO 336 + +/* Event log tags for GCI Shared Memory */ +#define EVENT_LOG_TAG_GCISHM_ERR 337 +#define EVENT_LOG_TAG_GCISHM_INFO 338 + +/* Eevent log tags for Enhanced Roam Log */ +#define EVENT_LOG_TAG_ROAM_ENHANCED_LOG 339 + +/* WL BTCEC */ +#define EVENT_LOG_TAG_BTCEC_ERR 340 +#define EVENT_LOG_TAG_BTCEC_INFO 341 +#define EVENT_LOG_TAG_BTCEC_SCHED 342 + /* EVENT_LOG_TAG_MAX = Set to the same value of last tag, not last tag + 1 */ -#define EVENT_LOG_TAG_MAX 312 +#define EVENT_LOG_TAG_MAX 342 typedef enum wl_el_set_type_def { EVENT_LOG_SET_TYPE_DEFAULT = 0, /* flush the log buffer when it is full - Default option */ diff --git a/bcmdhd.100.10.315.x/include/event_trace.h b/bcmdhd.100.10.315.x/include/event_trace.h index 
da34699..fe9bb35 100644 --- a/bcmdhd.100.10.315.x/include/event_trace.h +++ b/bcmdhd.100.10.315.x/include/event_trace.h @@ -1,7 +1,7 @@ /* * Trace log blocks sent over HBUS * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/fils.h b/bcmdhd.100.10.315.x/include/fils.h index 06b983d..0f033f1 100644 --- a/bcmdhd.100.10.315.x/include/fils.h +++ b/bcmdhd.100.10.315.x/include/fils.h @@ -1,6 +1,6 @@ /* * Fundamental types and constants relating to FILS AUTHENTICATION - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/hnd_armtrap.h b/bcmdhd.100.10.315.x/include/hnd_armtrap.h index 622fa43..184a7dd 100644 --- a/bcmdhd.100.10.315.x/include/hnd_armtrap.h +++ b/bcmdhd.100.10.315.x/include/hnd_armtrap.h @@ -1,7 +1,7 @@ /* * HND arm trap handling. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/hnd_cons.h b/bcmdhd.100.10.315.x/include/hnd_cons.h index c764142..879906d 100644 --- a/bcmdhd.100.10.315.x/include/hnd_cons.h +++ b/bcmdhd.100.10.315.x/include/hnd_cons.h @@ -1,7 +1,7 @@ /* * Console support for RTE - for host use only. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/hnd_debug.h b/bcmdhd.100.10.315.x/include/hnd_debug.h index ad191fb..a16123e 100644 --- a/bcmdhd.100.10.315.x/include/hnd_debug.h +++ b/bcmdhd.100.10.315.x/include/hnd_debug.h @@ -1,7 +1,7 @@ /* * HND Run Time Environment debug info area * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/hnd_pktpool.h b/bcmdhd.100.10.315.x/include/hnd_pktpool.h index 2d9eaf5..caa436c 100644 --- a/bcmdhd.100.10.315.x/include/hnd_pktpool.h +++ b/bcmdhd.100.10.315.x/include/hnd_pktpool.h @@ -1,7 +1,7 @@ /* * HND generic packet pool operation primitives * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/hnd_pktq.h b/bcmdhd.100.10.315.x/include/hnd_pktq.h index 24a3e42..a7af8ef 100644 --- a/bcmdhd.100.10.315.x/include/hnd_pktq.h +++ b/bcmdhd.100.10.315.x/include/hnd_pktq.h @@ -1,7 +1,7 @@ /* * HND generic pktq operation primitives * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/hnd_trap.h b/bcmdhd.100.10.315.x/include/hnd_trap.h index 1e2a4a6..02561f3 100644 --- a/bcmdhd.100.10.315.x/include/hnd_trap.h +++ b/bcmdhd.100.10.315.x/include/hnd_trap.h @@ -1,7 +1,7 @@ /* * HND Trap handling. * - * Copyright (C) 1999-2018, Broadcom. 
+ * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/hndchipc.h b/bcmdhd.100.10.315.x/include/hndchipc.h index 71f26b0..971311d 100644 --- a/bcmdhd.100.10.315.x/include/hndchipc.h +++ b/bcmdhd.100.10.315.x/include/hndchipc.h @@ -1,7 +1,7 @@ /* * HND SiliconBackplane chipcommon support - OS independent. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/hndlhl.h b/bcmdhd.100.10.315.x/include/hndlhl.h index 3fd9563..8e75e39 100644 --- a/bcmdhd.100.10.315.x/include/hndlhl.h +++ b/bcmdhd.100.10.315.x/include/hndlhl.h @@ -1,7 +1,7 @@ /* * HND SiliconBackplane PMU support. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/hndmem.h b/bcmdhd.100.10.315.x/include/hndmem.h index 8706bde..f3f3a23 100644 --- a/bcmdhd.100.10.315.x/include/hndmem.h +++ b/bcmdhd.100.10.315.x/include/hndmem.h @@ -1,7 +1,7 @@ /* * Utility routines for configuring different memories in Broadcom chips. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/hndoobr.h b/bcmdhd.100.10.315.x/include/hndoobr.h new file mode 100644 index 0000000..4e672cc --- /dev/null +++ b/bcmdhd.100.10.315.x/include/hndoobr.h @@ -0,0 +1,75 @@ +/* + * HND OOBR interface header + * + * Copyright (C) 1999-2019, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: hndoobr.h 772387 2018-07-17 00:58:05Z $ + */ + +#ifndef _hndoobr_h_ +#define _hndoobr_h_ + +#include +#include + +/* for 'srcpidx' of hnd_oobr_get_intr_config() */ +#define HND_CORE_MAIN_INTR 0 +#define HND_CORE_ALT_INTR 1 + +uint32 hnd_oobr_get_intstatus(si_t *sih); +int hnd_oobr_get_intr_config(si_t *sih, uint srccidx, uint srcpidx, uint dstcidx, uint *dstpidx); +int hnd_oobr_set_intr_src(si_t *sih, uint dstcidx, uint dstpidx, uint intrnum); +void hnd_oobr_init(si_t *sih); + +#define OOBR_INVALID_PORT 0xFFu + +/* per core source/dest sel reg */ +#define OOBR_INTR_PER_CONFREG 4u /* 4 interrupts per configure reg */ +#define OOBR_INTR_NUM_MASK 0x7Fu +#define OOBR_INTR_EN 0x80u +/* per core config reg */ +#define OOBR_CORECNF_OUTPUT_MASK 0x0000FF00u +#define OOBR_CORECNF_OUTPUT_SHIFT 8u +#define OOBR_CORECNF_INPUT_MASK 0x00FF0000u +#define OOBR_CORECNF_INPUT_SHIFT 16u + +typedef volatile struct hndoobr_percore_reg { + uint32 sourcesel[OOBR_INTR_PER_CONFREG]; /* 0x00 - 0x0c */ + uint32 destsel[OOBR_INTR_PER_CONFREG]; /* 0x10 - 0x1c */ + uint32 reserved[6]; + uint32 config; /* 0x38 */ + uint32 reserved1[17]; /* 0x3c to 0x7c */ +} hndoobr_percore_reg_t; + +/* capability reg */ +#define OOBR_CAP_CORECNT_MASK 0x1fu +typedef volatile struct hndoobr_reg { + uint32 capability; /* 0x00 */ + uint32 reserved[3]; + uint32 intstatus[4]; /* 0x10 - 0x1c */ + uint32 reserved1[56]; /* 0x20 - 0xfc */ + hndoobr_percore_reg_t percore_reg[1]; /* 0x100 */ +} hndoobr_reg_t; + +#endif /* _hndoobr_h_ */ diff --git a/bcmdhd.100.10.315.x/include/hndpmu.h b/bcmdhd.100.10.315.x/include/hndpmu.h index 025f7a6..2abff46 100644 --- a/bcmdhd.100.10.315.x/include/hndpmu.h +++ b/bcmdhd.100.10.315.x/include/hndpmu.h @@ -1,7 +1,7 @@ /* * HND SiliconBackplane PMU support. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/hndsoc.h b/bcmdhd.100.10.315.x/include/hndsoc.h index 46f5acc..35574ea 100644 --- a/bcmdhd.100.10.315.x/include/hndsoc.h +++ b/bcmdhd.100.10.315.x/include/hndsoc.h @@ -1,7 +1,7 @@ /* * Broadcom HND chip & on-chip-interconnect-related definitions. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: hndsoc.h 672520 2016-11-28 23:30:55Z $ + * $Id: hndsoc.h 795345 2018-12-18 16:52:03Z $ */ #ifndef _HNDSOC_H @@ -212,6 +212,7 @@ #define ARMCA7_CORE_ID 0x847 /* ARM CA7 CPU */ #define SYSMEM_CORE_ID 0x849 /* System memory core */ #define HUB_CORE_ID 0x84b /* Hub core ID */ +#define HND_OOBR_CORE_ID 0x85c /* Hnd oob router core ID */ #define APB_BRIDGE_CORE_ID 0x135 /* APB bridge core ID */ #define AXI_CORE_ID 0x301 /* AXI/GPV core ID */ #define EROM_CORE_ID 0x366 /* EROM core ID */ diff --git a/bcmdhd.100.10.315.x/include/linux_osl.h b/bcmdhd.100.10.315.x/include/linux_osl.h index 54d979e..fd4384d 100644 --- a/bcmdhd.100.10.315.x/include/linux_osl.h +++ b/bcmdhd.100.10.315.x/include/linux_osl.h @@ -1,7 +1,7 @@ /* * Linux OS Independent Layer * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: linux_osl.h 749612 2018-03-01 08:51:26Z $ + * $Id: linux_osl.h 815919 2019-04-22 09:06:50Z $ */ #ifndef _linux_osl_h_ @@ -51,7 +51,10 @@ extern int osl_static_mem_deinit(osl_t *osh, void *adapter); extern void osl_set_bus_handle(osl_t *osh, void *bus_handle); extern void* osl_get_bus_handle(osl_t *osh); #ifdef DHD_MAP_LOGGING -extern void osl_dma_map_dump(void); +extern void osl_dma_map_dump(osl_t *osh); +#define OSL_DMA_MAP_DUMP(osh) osl_dma_map_dump(osh) +#else +#define OSL_DMA_MAP_DUMP(osh) do {} while (0) #endif /* DHD_MAP_LOGGING */ /* Global ASSERT type */ @@ -328,14 +331,15 @@ extern int osl_error(int bcmerror); #include /* use current 2.4.x calling conventions */ #include /* for vsn/printf's */ #include /* for mem*, str* */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29) extern uint64 osl_sysuptime_us(void); #define OSL_SYSUPTIME() ((uint32)jiffies_to_msecs(jiffies)) #define OSL_SYSUPTIME_US() osl_sysuptime_us() -#else -#define OSL_SYSUPTIME() ((uint32)jiffies * (1000 / HZ)) -#error "OSL_SYSUPTIME_US() may need to be defined" -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29) */ +extern uint64 osl_localtime_ns(void); +extern void osl_get_localtime(uint64 *sec, uint64 *usec); +extern uint64 osl_systztime_us(void); +#define OSL_LOCALTIME_NS() osl_localtime_ns() +#define OSL_GET_LOCALTIME(sec, usec) osl_get_localtime((sec), (usec)) +#define OSL_SYSTZTIME_US() osl_systztime_us() #define printf(fmt, args...) 
printk(fmt , ## args) #include /* for vsn/printf's */ #include /* for mem*, str* */ @@ -561,9 +565,11 @@ typedef struct sk_buff_head PKT_LIST; #define PKTLIST_UNLINK(x, y) skb_unlink((struct sk_buff *)(y), (struct sk_buff_head *)(x)) #define PKTLIST_FINI(x) skb_queue_purge((struct sk_buff_head *)(x)) -#ifdef REPORT_FATAL_TIMEOUTS +#ifndef _linuxver_h_ +typedef struct timer_list_compat timer_list_compat_t; +#endif /* _linuxver_h_ */ typedef struct osl_timer { - struct timer_list *timer; + timer_list_compat_t *timer; bool set; } osl_timer_t; @@ -573,7 +579,6 @@ extern osl_timer_t * osl_timer_init(osl_t *osh, const char *name, void (*fn)(voi extern void osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic); extern void osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic); extern bool osl_timer_del(osl_t *osh, osl_timer_t *t); -#endif typedef atomic_t osl_atomic_t; #define OSL_ATOMIC_SET(osh, v, x) atomic_set(v, x) @@ -585,4 +590,37 @@ typedef atomic_t osl_atomic_t; #define OSL_ATOMIC_READ(osh, v) atomic_read(v) #define OSL_ATOMIC_ADD(osh, v, x) atomic_add(v, x) +#ifndef atomic_set_mask +#define OSL_ATOMIC_OR(osh, v, x) atomic_or(x, v) +#define OSL_ATOMIC_AND(osh, v, x) atomic_and(x, v) +#else +#define OSL_ATOMIC_OR(osh, v, x) atomic_set_mask(x, v) +#define OSL_ATOMIC_AND(osh, v, x) atomic_clear_mask(~x, v) +#endif // endif + +#include + +typedef struct rb_node osl_rb_node_t; +typedef struct rb_root osl_rb_root_t; + +#define OSL_RB_ENTRY(ptr, type, member) rb_entry(ptr, type, member) +#define OSL_RB_INSERT_COLOR(root, node) rb_insert_color(root, node) +#define OSL_RB_ERASE(node, root) rb_erase(node, root) +#define OSL_RB_FIRST(root) rb_first(root) +#define OSL_RB_LAST(root) rb_last(root) +#define OSL_RB_LINK_NODE(node, parent, rb_link) \ + rb_link_node(node, parent, rb_link) + +extern void *osl_spin_lock_init(osl_t *osh); +extern void osl_spin_lock_deinit(osl_t *osh, void *lock); +extern unsigned long osl_spin_lock(void *lock); +extern 
void osl_spin_unlock(void *lock, unsigned long flags); + +typedef struct osl_timespec { + __kernel_time_t tv_sec; /* seconds */ + __kernel_suseconds_t tv_usec; /* microseconds */ + long tv_nsec; /* nanoseconds */ +} osl_timespec_t; +extern void osl_do_gettimeofday(struct osl_timespec *ts); +extern void osl_get_monotonic_boottime(struct osl_timespec *ts); #endif /* _linux_osl_h_ */ diff --git a/bcmdhd.100.10.315.x/include/linux_pkt.h b/bcmdhd.100.10.315.x/include/linux_pkt.h index 7a84b8c..beb4963 100644 --- a/bcmdhd.100.10.315.x/include/linux_pkt.h +++ b/bcmdhd.100.10.315.x/include/linux_pkt.h @@ -1,7 +1,7 @@ /* * Linux Packet (skb) interface * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/linuxver.h b/bcmdhd.100.10.315.x/include/linuxver.h index 2caff4d..75b16aa 100644 --- a/bcmdhd.100.10.315.x/include/linuxver.h +++ b/bcmdhd.100.10.315.x/include/linuxver.h @@ -2,7 +2,7 @@ * Linux-specific abstractions to gain some independence from linux kernel versions. * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: linuxver.h 767291 2018-06-13 06:35:04Z $ + * $Id: linuxver.h 806092 2019-02-21 08:19:13Z $ */ #ifndef _linuxver_h_ @@ -346,6 +346,42 @@ static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size, #endif /* DMA mapping */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + +typedef struct timer_list timer_list_compat_t; + +#define init_timer_compat(timer_compat, cb, priv) \ + init_timer(timer_compat); \ + (timer_compat)->data = (ulong)priv; \ + (timer_compat)->function = cb +#define timer_set_private(timer_compat, priv) (timer_compat)->data = (ulong)priv +#define timer_expires(timer_compat) (timer_compat)->expires + +#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */ + +typedef struct timer_list_compat { + struct timer_list timer; + void *arg; + void (*callback)(ulong arg); +} timer_list_compat_t; + +extern void timer_cb_compat(struct timer_list *tl); + +#define init_timer_compat(timer_compat, cb, priv) \ + (timer_compat)->arg = priv; \ + (timer_compat)->callback = cb; \ + timer_setup(&(timer_compat)->timer, timer_cb_compat, 0); +#define timer_set_private(timer_compat, priv) (timer_compat)->arg = priv +#define timer_expires(timer_compat) (timer_compat)->timer.expires + +#define del_timer(t) del_timer(&((t)->timer)) +#define del_timer_sync(t) del_timer_sync(&((t)->timer)) +#define timer_pending(t) timer_pending(&((t)->timer)) +#define add_timer(t) add_timer(&((t)->timer)) +#define mod_timer(t, j) mod_timer(&((t)->timer), j) + +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */ + #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43)) #define dev_kfree_skb_any(a) dev_kfree_skb(a) @@ -632,13 +668,13 @@ static inline bool binary_sema_up(tsk_ctl_t *tsk) #define PROC_STOP(tsk_ctl) \ { \ - uint timeout = msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \ + uint timeout = 
(uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \ (tsk_ctl)->terminated = TRUE; \ smp_wmb(); \ up(&((tsk_ctl)->sema)); \ DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ - timeout = wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \ + timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \ if (timeout == 0) \ DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ @@ -653,13 +689,13 @@ static inline bool binary_sema_up(tsk_ctl_t *tsk) #define PROC_STOP_USING_BINARY_SEMA(tsk_ctl) \ { \ - uint timeout = msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \ + uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \ (tsk_ctl)->terminated = TRUE; \ smp_wmb(); \ binary_sema_up(tsk_ctl); \ DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ - timeout = wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \ + timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \ if (timeout == 0) \ DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ @@ -679,13 +715,13 @@ static inline bool binary_sema_up(tsk_ctl_t *tsk) */ #define PROC_FLUSH_USING_BINARY_SEMA(tsk_ctl) \ { \ - uint timeout = msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \ + uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \ (tsk_ctl)->flush_ind = TRUE; \ smp_wmb(); \ binary_sema_up(tsk_ctl); \ DBG_THR(("%s(): thread:%s:%lx wait for flush\n", __FUNCTION__, \ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ - timeout = wait_for_completion_timeout(&((tsk_ctl)->flushed), timeout); \ + timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->flushed), timeout); \ if (timeout == 0) \ DBG_THR(("%s(): thread:%s:%lx flush timeout\n", __FUNCTION__, \ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ @@ -697,6 
+733,11 @@ static inline bool binary_sema_up(tsk_ctl_t *tsk) /* ----------------------- */ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) +/* send_sig declaration moved */ +#include +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) */ + #define KILL_PROC(nr, sig) \ { \ struct task_struct *tsk; \ @@ -825,10 +866,19 @@ not match our unaligned address for < 2.6.24 #endif // endif #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) +#include static inline struct inode *file_inode(const struct file *f) { return f->f_dentry->d_inode; } #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) +#define vfs_write(fp, buf, len, pos) kernel_write(fp, buf, len, pos) +#define vfs_read(fp, buf, len, pos) kernel_read(fp, buf, len, pos) +int kernel_read_compat(struct file *file, loff_t offset, char *addr, unsigned long count); +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */ +#define kernel_read_compat(file, offset, addr, count) kernel_read(file, offset, addr, count) +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */ + #endif /* _linuxver_h_ */ diff --git a/bcmdhd.100.10.315.x/include/lpflags.h b/bcmdhd.100.10.315.x/include/lpflags.h index dad31c5..b270132 100644 --- a/bcmdhd.100.10.315.x/include/lpflags.h +++ b/bcmdhd.100.10.315.x/include/lpflags.h @@ -1,7 +1,7 @@ /* * Chip related low power flags * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/mbo.h b/bcmdhd.100.10.315.x/include/mbo.h index dd638bf..aed093c 100644 --- a/bcmdhd.100.10.315.x/include/mbo.h +++ b/bcmdhd.100.10.315.x/include/mbo.h @@ -1,7 +1,7 @@ /* * Fundamental types and constants relating to WFA MBO * (Multiband Operation) - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/miniopt.h b/bcmdhd.100.10.315.x/include/miniopt.h index 87895cb..4a99e70 100644 --- a/bcmdhd.100.10.315.x/include/miniopt.h +++ b/bcmdhd.100.10.315.x/include/miniopt.h @@ -1,7 +1,7 @@ /* * Command line options parser. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/msf.h b/bcmdhd.100.10.315.x/include/msf.h index 2228dc4..3a782a6 100644 --- a/bcmdhd.100.10.315.x/include/msf.h +++ b/bcmdhd.100.10.315.x/include/msf.h @@ -1,7 +1,7 @@ /* * Common interface to MSF (multi-segment format) definitions. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/msgtrace.h b/bcmdhd.100.10.315.x/include/msgtrace.h index 3e143c5..1c684fd 100644 --- a/bcmdhd.100.10.315.x/include/msgtrace.h +++ b/bcmdhd.100.10.315.x/include/msgtrace.h @@ -1,7 +1,7 @@ /* * Trace messages sent over HBUS * - * Copyright (C) 1999-2018, Broadcom. 
+ * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/nan.h b/bcmdhd.100.10.315.x/include/nan.h index 816f13f..488473c 100644 --- a/bcmdhd.100.10.315.x/include/nan.h +++ b/bcmdhd.100.10.315.x/include/nan.h @@ -2,7 +2,7 @@ * Fundamental types and constants relating to WFA NAN * (Neighbor Awareness Networking) * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: nan.h 758133 2018-04-17 19:07:15Z $ + * $Id: nan.h 818571 2019-05-08 04:36:41Z $ */ #ifndef _NAN_H_ #define _NAN_H_ @@ -292,6 +292,16 @@ typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ibss_attr_s { uint8 avail_bmp[1]; } BWL_POST_PACKED_STRUCT wifi_nan_ibss_attr_t; +/* Country code attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_country_code_attr_s { + /* Attribute ID - 0x0B. */ + uint8 id; + /* Length of the following fields in the attribute */ + uint16 len; + /* Condensed Country String first two octets */ + uint8 country_str[2]; +} BWL_POST_PACKED_STRUCT wifi_nan_country_code_attr_t; + /* Further Availability MAP attr */ typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_favail_attr_s { /* Attribute ID - 0x0A. 
*/ @@ -669,7 +679,7 @@ typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_channel_entry_list_s { #define NAN_CHAN_NUM_ENTRIES_MASK 0xF0 typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_band_entry_s { - uint8 band[0]; + uint8 band[1]; } BWL_POST_PACKED_STRUCT wifi_nan_band_entry_t; /* Type of Availability: committed */ @@ -833,6 +843,8 @@ enum NDL_ATTR_TYPE_STATUS_ACCEPTED) #define NAN_NDL_REJECT(_ndl) (((_ndl)->type_status & NAN_NDL_STATUS_MASK) == \ NDL_ATTR_TYPE_STATUS_REJECTED) +#define NAN_NDL_FRM_STATUS(_ndl) \ + (((_ndl)->type_status & NAN_NDL_STATUS_MASK) >> NAN_NDL_STATUS_SHIFT) #define NDL_ATTR_CTRL_NONE 0 #define NDL_ATTR_CTRL_PEER_ID_PRESENT (1 << NDL_ATTR_CTRL_PEER_ID_PRESENT_SHIFT) @@ -1072,6 +1084,10 @@ typedef BWL_PRE_PACKED_STRUCT struct nan2_pub_act_frame_s { /* Schedule Update */ #define NAN_MGMT_FRM_SUBTYPE_SCHED_UPD 13 +#define NAN_SCHEDULE_AF(_naf_subtype) \ + ((_naf_subtype >= NAN_MGMT_FRM_SUBTYPE_SCHED_REQ) && \ + (_naf_subtype <= NAN_MGMT_FRM_SUBTYPE_SCHED_UPD)) + /* Reason code defines */ #define NAN_REASON_RESERVED 0x0 #define NAN_REASON_UNSPECIFIED 0x1 @@ -1138,11 +1154,24 @@ typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndp_attr_s { NAN_NDP_STATUS_ACCEPT) #define NAN_NDP_REJECT(_ndp) (((_ndp)->type_status & NAN_NDP_STATUS_MASK) == \ NAN_NDP_STATUS_REJECT) + +#define NAN_NDP_FRM_STATUS(_ndp) \ + (((_ndp)->type_status & NAN_NDP_STATUS_MASK) >> NAN_NDP_STATUS_SHIFT) + /* NDP Setup Status */ #define NAN_NDP_SETUP_STATUS_OK 1 #define NAN_NDP_SETUP_STATUS_FAIL 0 #define NAN_NDP_SETUP_STATUS_REJECT 2 +/* NDPE TLV list */ +#define NDPE_TLV_TYPE_IPV6 0x00 +#define NDPE_TLV_TYPE_SVC_INFO 0x01 +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndpe_tlv_s { + uint8 type; /* Operating Class */ + uint16 length; /* Channel Bitmap */ + uint8 data[]; +} BWL_POST_PACKED_STRUCT wifi_nan_ndpe_tlv_t; + /* Rng setup attribute type and status macros */ #define NAN_RNG_TYPE_MASK 0x0F #define NAN_RNG_TYPE_REQUEST 0x0 diff --git a/bcmdhd.100.10.315.x/include/osl.h 
b/bcmdhd.100.10.315.x/include/osl.h index 7853c1b..68cde0d 100644 --- a/bcmdhd.100.10.315.x/include/osl.h +++ b/bcmdhd.100.10.315.x/include/osl.h @@ -1,7 +1,7 @@ /* * OS Abstraction Layer * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: osl.h 768071 2018-06-18 09:23:56Z $ + * $Id: osl.h 813810 2019-04-08 12:25:30Z $ */ #ifndef _osl_h_ @@ -90,6 +90,22 @@ typedef void (*osl_wreg_fn_t)(void *ctx, volatile void *reg, unsigned int val, #define OSL_SYSUPTIME_SUPPORT TRUE #endif /* OSL_SYSUPTIME */ +#ifndef OSL_GET_LOCALTIME +#define OSL_GET_LOCALTIME(sec, usec) \ + do { \ + BCM_REFERENCE(sec); \ + BCM_REFERENCE(usec); \ + } while (0) +#endif /* OSL_GET_LOCALTIME */ + +#ifndef OSL_LOCALTIME_NS +#define OSL_LOCALTIME_NS() (OSL_SYSUPTIME_US() * NSEC_PER_USEC) +#endif /* OSL_LOCALTIME_NS */ + +#ifndef OSL_SYSTZTIME_US +#define OSL_SYSTZTIME_US() OSL_SYSUPTIME_US() +#endif /* OSL_GET_SYSTZTIME */ + #ifndef OSL_SYS_HALT #define OSL_SYS_HALT() do {} while (0) #endif // endif diff --git a/bcmdhd.100.10.315.x/include/osl_decl.h b/bcmdhd.100.10.315.x/include/osl_decl.h index 8b487de..652b319 100644 --- a/bcmdhd.100.10.315.x/include/osl_decl.h +++ b/bcmdhd.100.10.315.x/include/osl_decl.h @@ -1,7 +1,7 @@ /* * osl forward declarations * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/osl_ext.h b/bcmdhd.100.10.315.x/include/osl_ext.h index 024ca5c..da2eb6e 100644 --- a/bcmdhd.100.10.315.x/include/osl_ext.h +++ b/bcmdhd.100.10.315.x/include/osl_ext.h @@ -2,7 +2,7 @@ * OS Abstraction Layer Extension - the APIs defined by the "extension" API * are only supported by a subset of all operating systems. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/p2p.h b/bcmdhd.100.10.315.x/include/p2p.h index 8bb14b5..fc3032f 100644 --- a/bcmdhd.100.10.315.x/include/p2p.h +++ b/bcmdhd.100.10.315.x/include/p2p.h @@ -1,7 +1,7 @@ /* * Fundamental types and constants relating to WFA P2P (aka WiFi Direct) * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/packed_section_end.h b/bcmdhd.100.10.315.x/include/packed_section_end.h index 886f967..3fedac4 100644 --- a/bcmdhd.100.10.315.x/include/packed_section_end.h +++ b/bcmdhd.100.10.315.x/include/packed_section_end.h @@ -15,7 +15,7 @@ * #include * * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -38,7 +38,7 @@ * * <> * - * $Id: packed_section_end.h 666738 2016-10-24 12:16:37Z $ + * $Id: packed_section_end.h 776894 2018-08-16 05:50:57Z $ */ /* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h diff --git a/bcmdhd.100.10.315.x/include/packed_section_start.h b/bcmdhd.100.10.315.x/include/packed_section_start.h index 955cf68..abb2cff 100644 --- a/bcmdhd.100.10.315.x/include/packed_section_start.h +++ b/bcmdhd.100.10.315.x/include/packed_section_start.h @@ -15,7 +15,7 @@ * #include * * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -38,7 +38,7 @@ * * <> * - * $Id: packed_section_start.h 666738 2016-10-24 12:16:37Z $ + * $Id: packed_section_start.h 776894 2018-08-16 05:50:57Z $ */ #ifndef _alignment_test_ diff --git a/bcmdhd.100.10.315.x/include/pcicfg.h b/bcmdhd.100.10.315.x/include/pcicfg.h index 3b4a736..af49144 100644 --- a/bcmdhd.100.10.315.x/include/pcicfg.h +++ b/bcmdhd.100.10.315.x/include/pcicfg.h @@ -1,7 +1,7 @@ /* * pcicfg.h: PCI configuration constants and structures. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: pcicfg.h 756835 2018-04-10 21:24:10Z $ + * $Id: pcicfg.h 795237 2018-12-18 03:26:49Z $ */ #ifndef _h_pcicfg_ @@ -78,6 +78,7 @@ #define PCI_CAP_MSICAP_ID 0x05 #define PCI_CAP_VENDSPEC_ID 0x09 #define PCI_CAP_PCIECAP_ID 0x10 +#define PCI_CAP_MSIXCAP_ID 0x11 /* Data structure to define the Message Signalled Interrupt facility * Valid for PCI and PCIE configurations @@ -135,11 +136,13 @@ typedef struct _pciconfig_cap_pcie { /* PCIE Extended configuration */ #define PCIE_ADV_CORR_ERR_MASK 0x114 +#define PCIE_ADV_CORR_ERR_MASK_OFFSET 0x14 #define CORR_ERR_RE (1 << 0) /* Receiver */ -#define CORR_ERR_BT (1 << 6) /* Bad TLP */ +#define CORR_ERR_BT (1 << 6) /* Bad TLP */ #define CORR_ERR_BD (1 << 7) /* Bad DLLP */ #define CORR_ERR_RR (1 << 8) /* REPLAY_NUM rollover */ #define CORR_ERR_RT (1 << 12) /* Reply timer timeout */ +#define CORR_ERR_AE (1 << 13) /* Adviosry Non-Fital Error Mask */ #define ALL_CORR_ERRORS (CORR_ERR_RE | CORR_ERR_BT | CORR_ERR_BD | \ CORR_ERR_RR | CORR_ERR_RT) @@ -248,10 +251,14 @@ typedef struct _pcie_enhanced_caphdr { #define PCI_PHY_DBG_CLKREG_3 0x1e1c /* Bit settings for PCIE_CFG_SUBSYSTEM_CONTROL register */ +#define PCIE_BAR1COHERENTACCEN_BIT 8 +#define PCIE_BAR2COHERENTACCEN_BIT 9 #define PCIE_SSRESET_STATUS_BIT 13 #define PCIE_SSRESET_DISABLE_BIT 14 #define PCIE_SSRESET_DIS_ENUM_RST_BIT 15 +#define PCIE_BARCOHERENTACCEN_MASK 0x300 + /* Bit settings for PCI_UC_ERR_STATUS register */ #define PCI_UC_ERR_URES (1 << 20) /* Unsupported Request Error Status */ #define PCI_UC_ERR_ECRCS (1 << 19) /* ECRC Error Status */ diff --git a/bcmdhd.100.10.315.x/include/pcie_core.h b/bcmdhd.100.10.315.x/include/pcie_core.h index ef1fb02..18905a0 100644 --- a/bcmdhd.100.10.315.x/include/pcie_core.h +++ b/bcmdhd.100.10.315.x/include/pcie_core.h @@ -1,7 +1,7 @@ /* * BCM43XX PCIE core 
hardware definitions. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: pcie_core.h 770722 2018-07-04 11:24:12Z $ + * $Id: pcie_core.h 792442 2018-12-05 00:20:53Z $ */ #ifndef _PCIE_CORE_H #define _PCIE_CORE_H @@ -322,6 +322,7 @@ typedef volatile struct sbpcieregs { uint32 clk_ctl_st; /* 0xAE0 */ uint32 PAD[1]; /* 0xAE4 */ uint32 powerctl; /* 0xAE8 */ + uint32 PAD[5]; /* 0xAEC-0xAFF */ } dar; /* corerev > = 64 */ struct { @@ -351,8 +352,34 @@ typedef volatile struct sbpcieregs { uint32 erraddr; /* 0xA64 */ uint32 mbox_int; /* 0xA68 */ uint32 fis_ctrl; /* 0xA6C */ + uint32 PAD[36]; /* 0xA70-0xAFF */ } dar_64; } u1; + uint32 PAD[64]; /* 0xB00-0xBFF */ + /* Function Control/Status Registers for corerev >= 64 */ + /* 0xC00 - 0xCFF */ + struct { + uint32 control; /* 0xC00 */ + uint32 iostatus; /* 0xC04 */ + uint32 capability; /* 0xC08 */ + uint32 PAD[1]; /* 0xC0C */ + uint32 intstatus; /* 0xC10 */ + uint32 intmask; /* 0xC14 */ + uint32 pwr_intstatus; /* 0xC18 */ + uint32 pwr_intmask; /* 0xC1C */ + uint32 msi_vector; /* 0xC20 */ + uint32 msi_intmask; /* 0xC24 */ + uint32 msi_intstatus; /* 0xC28 */ + uint32 msi_pend_cnt; /* 0xC2C */ + uint32 mbox_intstatus; /* 0xC30 */ + uint32 mbox_intmask; /* 0xC34 */ + uint32 ltr_state; /* 0xC38 */ + uint32 PAD[1]; /* 0xC3C */ + uint32 intr_vector; /* 0xC40 */ + uint32 intr_addrlow; /* 0xC44 */ + uint32 intr_addrhigh; /* 0xC48 */ + uint32 PAD[45]; /* 0xC4C-0xCFF */ + } ftn_ctrl; } sbpcieregs_t; #define PCIE_CFG_DA_OFFSET 0x400 /* direct access register offset for configuration space */ @@ -375,6 +402,12 @@ typedef volatile struct sbpcieregs { #define PCIE_IDMA_MODE_EN(rev) (REV_GE_64(rev) ? 
0x1 : 0x800000) /* implicit M2M DMA mode */ #define PCIE_TL_CLK_DETCT 0x4000000 /* enable TL clk detection */ +/* Function control (corerev > 64) */ +#define PCIE_CPLCA_ENABLE 0x01 +/* 1: send CPL with CA on BP error, 0: send CPLD with SC and data is FFFF */ +#define PCIE_DLY_PERST_TO_COE 0x02 +/* when set, PERST is holding asserted until sprom-related register updates has completed */ + #define PCIE_CFGADDR 0x120 /* offsetof(configaddr) */ #define PCIE_CFGDATA 0x124 /* offsetof(configdata) */ #define PCIE_SWPME_FN0 0x10000 @@ -919,8 +952,8 @@ typedef volatile struct sbpcieregs { #define PCIH2D_MailBox_2 0x160 /* for dma channel2 which will be used for Implicit DMA */ #define PCIH2D_DB1_2 0x164 #define PCID2H_MailBox_2 0x168 +#define PCIE_CLK_CTRL 0x1E0 #define PCIE_PWR_CTRL 0x1E8 -#define PCIE_CLK_CTRL 0x1E8 #define PCIControl(rev) (REV_GE_64(rev) ? 0xC00 : 0x00) /* for corerev < 64 idma_en is in PCIControl regsiter */ @@ -928,6 +961,9 @@ typedef volatile struct sbpcieregs { #define PCIMailBoxInt(rev) (REV_GE_64(rev) ? 0xC30 : 0x48) #define PCIMailBoxMask(rev) (REV_GE_64(rev) ? 0xC34 : 0x4C) #define PCIFunctionIntstatus(rev) (REV_GE_64(rev) ? 0xC10 : 0x20) +#define PCIFunctionIntmask(rev) (REV_GE_64(rev) ? 0xC14 : 0x24) +#define PCIPowerIntstatus(rev) (REV_GE_64(rev) ? 0xC18 : 0x1A4) +#define PCIPowerIntmask(rev) (REV_GE_64(rev) ? 0xC1C : 0x1A8) #define PCIDARClkCtl(rev) (REV_GE_64(rev) ? 0xA08 : 0xAE0) #define PCIDARPwrCtl(rev) (REV_GE_64(rev) ? 0xA0C : 0xAE8) #define PCIDARFunctionIntstatus(rev) (REV_GE_64(rev) ? 0xA10 : 0xA20) diff --git a/bcmdhd.100.10.315.x/include/rte_ioctl.h b/bcmdhd.100.10.315.x/include/rte_ioctl.h index cbeef7d..18900ab 100644 --- a/bcmdhd.100.10.315.x/include/rte_ioctl.h +++ b/bcmdhd.100.10.315.x/include/rte_ioctl.h @@ -1,7 +1,7 @@ /* * HND Run Time Environment ioctl. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/sbchipc.h b/bcmdhd.100.10.315.x/include/sbchipc.h index e4e25ac..abd7aaf 100644 --- a/bcmdhd.100.10.315.x/include/sbchipc.h +++ b/bcmdhd.100.10.315.x/include/sbchipc.h @@ -5,9 +5,9 @@ * JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer, * GPIO interface, extbus, and support for serial and parallel flashes. * - * $Id: sbchipc.h 763883 2018-05-22 17:57:56Z $ + * $Id: sbchipc.h 825481 2019-06-14 10:06:03Z $ * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -613,6 +613,8 @@ typedef volatile struct { #define PMU_PLL_CONTROL_DATA 0x664 #define CC_SROM_CTRL 0x190 +#define CC_SROM_ADDRESS 0x194u +#define CC_SROM_DATA 0x198u #ifdef SROM16K_4364_ADDRSPACE #define CC_SROM_OTP 0xa000 /* SROM/OTP address space */ #else @@ -3135,42 +3137,6 @@ created for 4369 #define PMU_4369_MACCORE_0_RES_REQ_MASK 0x3FCBF7FF #define PMU_4369_MACCORE_1_RES_REQ_MASK 0x7FFB3647 -/* 4367 related */ -#define RES4367_ABUCK 0 -#define RES4367_CBUCK 1 -#define RES4367_MISCLDO_PU 2 -#define RES4367_VBOOST 3 -#define RES4367_LDO3P3_PU 4 -#define RES4367_LAST_LPO_AVAIL 5 -#define RES4367_XTAL_PU 6 -#define RES4367_XTAL_STABLE 7 -#define RES4367_PWRSW_DIG 8 -#define RES4367_SR_DIG 9 -#define RES4367_SPARE10 10 -#define RES4367_PWRSW_AUX 11 -#define RES4367_SR_AUX 12 -#define RES4367_SPARE2 13 -#define RES4367_PWRSW_MAIN 14 -#define RES4367_SR_MAIN 15 -#define RES4367_ARMPLL_PWRUP 16 -#define RES4367_DIG_CORE_RDY 17 -#define RES4367_CORE_RDY_AUX 18 -#define RES4367_ALP_AVAIL 19 -#define RES4367_RADIO_AUX_PU 20 -#define RES4367_MINIPMU_AUX_PU 21 -#define RES4367_CORE_RDY_MAIN 22 -#define RES4367_RADIO_MAIN_PU 23 -#define 
RES4367_MINIPMU_MAIN_PU 24 -#define RES4367_PCIE_RET 25 -#define RES4367_COLD_START_WAIT 26 -#define RES4367_ARMPLL_HTAVAIL 27 -#define RES4367_HT_AVAIL 28 -#define RES4367_MACPHY_AUX_CLK_AVAIL 29 -#define RES4367_MACPHY_MAIN_CLK_AVAIL 30 -#define RES4367_RESERVED_31 31 - -#define CST4367_SPROM_PRESENT (1 << 17) - /* 43430 PMU resources based on pmu_params.xls */ #define RES43430_LPLDO_PU 0 #define RES43430_BG_PU 1 @@ -3319,6 +3285,7 @@ created for 4369 #define CR4_4369_RAM_BASE (0x170000) #define CR4_4377_RAM_BASE (0x170000) #define CR4_43751_RAM_BASE (0x170000) +#define CR4_43752_RAM_BASE (0x170000) #define CA7_4367_RAM_BASE (0x200000) #define CR4_4378_RAM_BASE (0x352000) @@ -4624,14 +4591,22 @@ created for 4369 #define SRPWR_DMN3_MACMAIN (3) /* MAC/Phy Main */ #define SRPWR_DMN3_MACMAIN_SHIFT (SRPWR_DMN3_MACMAIN) /* MAC/Phy Main */ #define SRPWR_DMN3_MACMAIN_MASK (1 << SRPWR_DMN3_MACMAIN_SHIFT) /* MAC/Phy Main */ -#define SRPWR_DMN_ALL_MASK (0xF) + +#define SRPWR_DMN4_MACSCAN (4) /* MAC/Phy Scan */ +#define SRPWR_DMN4_MACSCAN_SHIFT (SRPWR_DMN4_MACSCAN) /* MAC/Phy Scan */ +#define SRPWR_DMN4_MACSCAN_MASK (1 << SRPWR_DMN4_MACSCAN_SHIFT) /* MAC/Phy Scan */ + +/* all power domain mask */ +#define SRPWR_DMN_ALL_MASK(sih) si_srpwr_domain_all_mask(sih) #define SRPWR_REQON_SHIFT (8) /* PowerOnRequest[11:8] */ -#define SRPWR_REQON_MASK (SRPWR_DMN_ALL_MASK << SRPWR_REQON_SHIFT) +#define SRPWR_REQON_MASK(sih) (SRPWR_DMN_ALL_MASK(sih) << SRPWR_REQON_SHIFT) + #define SRPWR_STATUS_SHIFT (16) /* ExtPwrStatus[19:16], RO */ -#define SRPWR_STATUS_MASK (SRPWR_DMN_ALL_MASK << SRPWR_STATUS_SHIFT) -#define SRPWR_DMN_SHIFT (28) /* PowerDomain[31:28], RO */ -#define SRPWR_DMN_MASK (SRPWR_DMN_ALL_MASK << SRPWR_DMN_SHIFT) +#define SRPWR_STATUS_MASK(sih) (SRPWR_DMN_ALL_MASK(sih) << SRPWR_STATUS_SHIFT) + +#define SRPWR_DMN_ID_SHIFT (28) /* PowerDomain[31:28], RO */ +#define SRPWR_DMN_ID_MASK (0xF) /* PMU Precision Usec Timer */ #define PMU_PREC_USEC_TIMER_ENABLE 0x1 diff --git 
a/bcmdhd.100.10.315.x/include/sbconfig.h b/bcmdhd.100.10.315.x/include/sbconfig.h index d38364e..05c337a 100644 --- a/bcmdhd.100.10.315.x/include/sbconfig.h +++ b/bcmdhd.100.10.315.x/include/sbconfig.h @@ -1,7 +1,7 @@ /* * Broadcom SiliconBackplane hardware register definitions. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/sbgci.h b/bcmdhd.100.10.315.x/include/sbgci.h index 121f04a..3fd6dca 100644 --- a/bcmdhd.100.10.315.x/include/sbgci.h +++ b/bcmdhd.100.10.315.x/include/sbgci.h @@ -1,7 +1,7 @@ /* * SiliconBackplane GCI core hardware definitions * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/sbhndarm.h b/bcmdhd.100.10.315.x/include/sbhndarm.h new file mode 100644 index 0000000..1aa4607 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/sbhndarm.h @@ -0,0 +1,141 @@ +/* + * Broadcom SiliconBackplane ARM definitions + * + * Copyright (C) 1999-2019, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: sbhndarm.h 799498 2019-01-16 06:02:27Z $ + */ + +#ifndef _sbhndarm_h_ +#define _sbhndarm_h_ + +#ifndef _LANGUAGE_ASSEMBLY + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + +/* cortex-m3 */ +typedef volatile struct { + uint32 corecontrol; /* 0x0 */ + uint32 corestatus; /* 0x4 */ + uint32 PAD[1]; + uint32 biststatus; /* 0xc */ + uint32 nmiisrst; /* 0x10 */ + uint32 nmimask; /* 0x14 */ + uint32 isrmask; /* 0x18 */ + uint32 PAD[1]; + uint32 resetlog; /* 0x20 */ + uint32 gpioselect; /* 0x24 */ + uint32 gpioenable; /* 0x28 */ + uint32 PAD[1]; + uint32 bpaddrlo; /* 0x30 */ + uint32 bpaddrhi; /* 0x34 */ + uint32 bpdata; /* 0x38 */ + uint32 bpindaccess; /* 0x3c */ + uint32 ovlidx; /* 0x40 */ + uint32 ovlmatch; /* 0x44 */ + uint32 ovladdr; /* 0x48 */ + uint32 PAD[13]; + uint32 bwalloc; /* 0x80 */ + uint32 PAD[3]; + uint32 cyclecnt; /* 0x90 */ + uint32 inttimer; /* 0x94 */ + uint32 intmask; /* 0x98 */ + uint32 intstatus; /* 0x9c */ + uint32 PAD[80]; + uint32 clk_ctl_st; /* 0x1e0 */ + uint32 PAD[1]; + uint32 powerctl; /* 0x1e8 */ +} cm3regs_t; +#define ARM_CM3_REG(regs, reg) (&((cm3regs_t *)regs)->reg) + +/* cortex-R4 */ +typedef volatile struct { + uint32 corecontrol; /* 0x0 */ + uint32 corecapabilities; /* 0x4 */ + uint32 corestatus; /* 0x8 */ + uint32 biststatus; /* 0xc */ + uint32 nmiisrst; /* 0x10 */ + uint32 nmimask; /* 0x14 */ + uint32 isrmask; /* 0x18 */ + uint32 swintreg; /* 0x1C */ + uint32 intstatus; /* 0x20 */ + uint32 intmask; /* 0x24 */ + uint32 cyclecnt; /* 0x28 */ + uint32 inttimer; /* 0x2c */ + uint32 gpioselect; /* 0x30 */ + uint32 gpioenable; /* 0x34 */ + uint32 PAD[2]; + uint32 bankidx; /* 0x40 */ + uint32 bankinfo; /* 0x44 */ + uint32 bankstbyctl; /* 0x48 */ + uint32 bankpda; /* 0x4c */ + uint32 PAD[6]; + uint32 tcampatchctrl; /* 0x68 */ + uint32 tcampatchtblbaseaddr; /* 0x6c */ + uint32 
tcamcmdreg; /* 0x70 */ + uint32 tcamdatareg; /* 0x74 */ + uint32 tcambankxmaskreg; /* 0x78 */ + uint32 PAD[89]; + uint32 clk_ctl_st; /* 0x1e0 */ + uint32 PAD[1]; + uint32 powerctl; /* 0x1e8 */ +} cr4regs_t; +#define ARM_CR4_REG(regs, reg) (&((cr4regs_t *)regs)->reg) + +/* cortex-A7 */ +typedef volatile struct { + uint32 corecontrol; /* 0x0 */ + uint32 corecapabilities; /* 0x4 */ + uint32 corestatus; /* 0x8 */ + uint32 tracecontrol; /* 0xc */ + uint32 PAD[8]; + uint32 gpioselect; /* 0x30 */ + uint32 gpioenable; /* 0x34 */ + uint32 PAD[106]; + uint32 clk_ctl_st; /* 0x1e0 */ + uint32 PAD[1]; + uint32 powerctl; /* 0x1e8 */ +} ca7regs_t; +#define ARM_CA7_REG(regs, reg) (&((ca7regs_t *)regs)->reg) + +#if defined(__ARM_ARCH_7M__) +#define ARMREG(regs, reg) ARM_CM3_REG(regs, reg) +#endif /* __ARM_ARCH_7M__ */ + +#if defined(__ARM_ARCH_7R__) +#define ARMREG(regs, reg) ARM_CR4_REG(regs, reg) +#endif /* __ARM_ARCH_7R__ */ + +#if defined(__ARM_ARCH_7A__) +#define ARMREG(regs, reg) ARM_CA7_REG(regs, reg) +#endif /* __ARM_ARCH_7A__ */ + +#endif /* _LANGUAGE_ASSEMBLY */ + +#endif /* _sbhndarm_h_ */ diff --git a/bcmdhd.100.10.315.x/include/sbhnddma.h b/bcmdhd.100.10.315.x/include/sbhnddma.h index 88a8844..862a1dc 100644 --- a/bcmdhd.100.10.315.x/include/sbhnddma.h +++ b/bcmdhd.100.10.315.x/include/sbhnddma.h @@ -2,7 +2,7 @@ * Generic Broadcom Home Networking Division (HND) DMA engine HW interface * This supports the following chips: BCM42xx, 44xx, 47xx . * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/sbpcmcia.h b/bcmdhd.100.10.315.x/include/sbpcmcia.h index 9dec294..5e42248 100644 --- a/bcmdhd.100.10.315.x/include/sbpcmcia.h +++ b/bcmdhd.100.10.315.x/include/sbpcmcia.h @@ -1,7 +1,7 @@ /* * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions. 
* - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/sbsdio.h b/bcmdhd.100.10.315.x/include/sbsdio.h index 942fc4b..38fee22 100644 --- a/bcmdhd.100.10.315.x/include/sbsdio.h +++ b/bcmdhd.100.10.315.x/include/sbsdio.h @@ -4,7 +4,7 @@ * * SDIO core support 1bit, 4 bit SDIO mode as well as SPI mode. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/sbsdpcmdev.h b/bcmdhd.100.10.315.x/include/sbsdpcmdev.h index 78bd979..fba8ad7 100644 --- a/bcmdhd.100.10.315.x/include/sbsdpcmdev.h +++ b/bcmdhd.100.10.315.x/include/sbsdpcmdev.h @@ -2,7 +2,7 @@ * Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific * device core support * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/sbsocram.h b/bcmdhd.100.10.315.x/include/sbsocram.h index c78479e..6cb2414 100644 --- a/bcmdhd.100.10.315.x/include/sbsocram.h +++ b/bcmdhd.100.10.315.x/include/sbsocram.h @@ -1,7 +1,7 @@ /* * BCM47XX Sonics SiliconBackplane embedded ram core * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/sbsysmem.h b/bcmdhd.100.10.315.x/include/sbsysmem.h index 7c5ad99..bd347cc 100644 --- a/bcmdhd.100.10.315.x/include/sbsysmem.h +++ b/bcmdhd.100.10.315.x/include/sbsysmem.h @@ -1,7 +1,7 @@ /* * SiliconBackplane System Memory core * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/sdio.h b/bcmdhd.100.10.315.x/include/sdio.h index d26c45b..90eddcc 100644 --- a/bcmdhd.100.10.315.x/include/sdio.h +++ b/bcmdhd.100.10.315.x/include/sdio.h @@ -2,7 +2,7 @@ * SDIO spec header file * Protocol and standard (common) device definitions * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/sdioh.h b/bcmdhd.100.10.315.x/include/sdioh.h index 90e7208..907b3f6 100644 --- a/bcmdhd.100.10.315.x/include/sdioh.h +++ b/bcmdhd.100.10.315.x/include/sdioh.h @@ -2,7 +2,7 @@ * SDIO Host Controller Spec header file * Register map and definitions for the Standard Host Controller * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/sdiovar.h b/bcmdhd.100.10.315.x/include/sdiovar.h index 9c1a94e..8dea70a 100644 --- a/bcmdhd.100.10.315.x/include/sdiovar.h +++ b/bcmdhd.100.10.315.x/include/sdiovar.h @@ -2,7 +2,7 @@ * Structure used by apps whose drivers access SDIO drivers. * Pulled out separately so dhdu and wlu can both use it. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/sdspi.h b/bcmdhd.100.10.315.x/include/sdspi.h index d16c788..23f32b2 100644 --- a/bcmdhd.100.10.315.x/include/sdspi.h +++ b/bcmdhd.100.10.315.x/include/sdspi.h @@ -1,7 +1,7 @@ /* * SD-SPI Protocol Standard * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/siutils.h b/bcmdhd.100.10.315.x/include/siutils.h index a4251b8..670e5e5 100644 --- a/bcmdhd.100.10.315.x/include/siutils.h +++ b/bcmdhd.100.10.315.x/include/siutils.h @@ -2,7 +2,7 @@ * Misc utility routines for accessing the SOC Interconnects * of Broadcom HNBU chips. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: siutils.h 769534 2018-06-26 21:19:11Z $ + * $Id: siutils.h 798061 2019-01-04 23:27:15Z $ */ #ifndef _siutils_h_ @@ -209,8 +209,10 @@ typedef void (*gci_gpio_handler_t)(uint32 stat, void *arg); #define ARMCR4_TCBANB_SHIFT 0 #define SICF_CPUHALT (0x0020) -#define ARMCR4_BSZ_MASK 0x3f -#define ARMCR4_BSZ_MULT 8192 +#define ARMCR4_BSZ_MASK 0x7f +#define ARMCR4_BUNITSZ_MASK 0x200 +#define ARMCR4_BSZ_8K 8192 +#define ARMCR4_BSZ_1K 1024 #define SI_BPIND_1BYTE 0x1 #define SI_BPIND_2BYTE 0x3 #define SI_BPIND_4BYTE 0xF @@ -261,6 +263,7 @@ extern uint si_numd11coreunits(si_t *sih); extern uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit); extern volatile void *si_setcoreidx(si_t *sih, uint coreidx); extern volatile void *si_setcore(si_t *sih, uint coreid, uint coreunit); +extern uint32 si_oobr_baseaddr(si_t *sih, bool second); extern volatile void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val); extern void si_restore_core(si_t *sih, uint coreid, uint intr_val); extern int si_numaddrspaces(si_t *sih); @@ -759,6 +762,7 @@ extern uint32 si_srpwr_request(si_t *sih, uint32 mask, uint32 val); extern uint32 si_srpwr_stat_spinwait(si_t *sih, uint32 mask, uint32 val); extern uint32 si_srpwr_stat(si_t *sih); extern uint32 si_srpwr_domain(si_t *sih); +extern uint32 si_srpwr_domain_all_mask(si_t *sih); /* SR Power Control */ /* No capabilities bit so using chipid for now */ @@ -786,6 +790,7 @@ extern uint32 si_srpwr_domain(si_t *sih); * ARM, TCM, Main, Aux * Host needs to power up */ +#define MULTIBP_CAP(sih) (FALSE) #define MULTIBP_ENAB(sih) ((sih) && (sih)->_multibp_enable) uint32 si_enum_base(uint devid); @@ -798,4 +803,7 @@ void ai_dump_APB_Bridge_registers(si_t *sih); void si_clrirq_idx(si_t *sih, uint core_idx); +/* return if scan core is present */ +bool 
si_scan_core_present(si_t *sih); + #endif /* _siutils_h_ */ diff --git a/bcmdhd.100.10.315.x/include/spid.h b/bcmdhd.100.10.315.x/include/spid.h index 6d41222..dc3897e 100644 --- a/bcmdhd.100.10.315.x/include/spid.h +++ b/bcmdhd.100.10.315.x/include/spid.h @@ -1,7 +1,7 @@ /* * SPI device spec header file * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/trxhdr.h b/bcmdhd.100.10.315.x/include/trxhdr.h index 77a7f09..9a299a9 100644 --- a/bcmdhd.100.10.315.x/include/trxhdr.h +++ b/bcmdhd.100.10.315.x/include/trxhdr.h @@ -1,7 +1,7 @@ /* * TRX image file header format. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/typedefs.h b/bcmdhd.100.10.315.x/include/typedefs.h index 397a343..c862f49 100644 --- a/bcmdhd.100.10.315.x/include/typedefs.h +++ b/bcmdhd.100.10.315.x/include/typedefs.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/vlan.h b/bcmdhd.100.10.315.x/include/vlan.h index 9f5fcdd..6862e3e 100644 --- a/bcmdhd.100.10.315.x/include/vlan.h +++ b/bcmdhd.100.10.315.x/include/vlan.h @@ -1,7 +1,7 @@ /* * 802.1Q VLAN protocol definitions * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/wlfc_proto.h b/bcmdhd.100.10.315.x/include/wlfc_proto.h index 4684634..0e10e97 100644 --- a/bcmdhd.100.10.315.x/include/wlfc_proto.h +++ b/bcmdhd.100.10.315.x/include/wlfc_proto.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/wlioctl.h b/bcmdhd.100.10.315.x/include/wlioctl.h index dbda76b..6dc718d 100644 --- a/bcmdhd.100.10.315.x/include/wlioctl.h +++ b/bcmdhd.100.10.315.x/include/wlioctl.h @@ -6,7 +6,7 @@ * * Definitions subject to change without notice. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -29,7 +29,7 @@ * * <> * - * $Id: wlioctl.h 771856 2018-07-12 05:11:56Z $ + * $Id: wlioctl.h 824900 2019-06-12 05:42:13Z $ */ #ifndef _wlioctl_h_ @@ -55,6 +55,12 @@ #include #include +#define SSSR_NEW_API + +/* Include bcmerror.h for error codes or aliases */ +#ifdef BCMUTILS_ERR_CODES +#include +#endif /* BCMUTILS_ERR_CODES */ /* NOTE re: Module specific error codes. * @@ -67,7 +73,7 @@ * * The error codes -4096 ... -5119 are reserved for firmware signing. 
* - * Next available (inclusive) range: [-7*1024 + 1, -6*1024] + * Next available (inclusive) range: [-8*1024 + 1, -7*1024] * * End Note */ @@ -604,6 +610,7 @@ typedef struct wl_bsscolor_info { uint8 bsscolor; /**= n_recent) */ + uint8 n_recent; /* Number of samples denoted as recent */ + uint8 w_recent; /* Total weight for the recent samples (as percentage) */ +} wl_wa_basic_params_t; + +typedef struct { + uint16 ver; + uint16 len; + uint8 subcmd[]; /* sub-cmd in bcm_xtlv_t */ +} wl_wa_cmd_t; + /** data structure used in 'dfs_status' wl interface, which is used to query dfs status */ typedef struct { uint32 state; /**< noted by WL_DFS_CACSTATE_XX. */ @@ -2561,15 +2847,21 @@ typedef struct { #define WL_TXPPR_VERSION 1 #define WL_TXPPR_LENGTH (sizeof(wl_txppr_t)) #define TX_POWER_T_VERSION 45 -/** number of ppr serialization buffers, it should be reg, board and target */ -#define WL_TXPPR_SER_BUF_NUM (3) + /* curpower ppr types */ enum { PPRTYPE_TARGETPOWER = 1, PPRTYPE_BOARDLIMITS = 2, - PPRTYPE_REGLIMITS = 3 + PPRTYPE_REGLIMITS = 3, + PPRTYPE_RU_REGLIMITS = 4, + PPRTYPE_RU_BOARDLIMITS = 5, + PPRTYPE_RU_TARGETPOWER = 6, + PPRTYPE_LAST }; +/** number of ppr serialization buffers, it should be reg, board and target */ +#define WL_TXPPR_SER_BUF_NUM (PPRTYPE_LAST - 1) + typedef struct chanspec_txpwr_max { chanspec_t chanspec; /**< chanspec */ uint8 txpwr_max; /**< max txpwr in all the rates */ @@ -2721,7 +3013,6 @@ enum { #define WL_MIMO_PS_STATUS_HW_STATE_NONE 0 #define WL_MIMO_PS_STATUS_HW_STATE_LTECOEX (0x1 << 0) #define WL_MIMO_PS_STATUS_HW_STATE_MIMOPS_BSS (0x1 << 1) -#define WL_MIMO_PS_STATUS_HW_STATE_AWDL_BSS (0x1 << 2) #define WL_MIMO_PS_STATUS_HW_STATE_SCAN (0x1 << 3) #define WL_MIMO_PS_STATUS_HW_STATE_TXPPR (0x1 << 4) #define WL_MIMO_PS_STATUS_HW_STATE_PWRTHOTTLE (0x1 << 5) @@ -2971,7 +3262,7 @@ typedef struct wl_psbw_status_v1 { /* Bits for disable_reasons */ #define WL_PSBW_DISA_HOST 0x00000001 /* Host has disabled through psbw_cfg */ #define 
WL_PSBW_DISA_AP20M 0x00000002 /* AP is operating on 20 MHz */ -#define WL_PSBW_DISA_SLOTTED_BSS 0x00000004 /* AWDL or NAN active */ +#define WL_PSBW_DISA_SLOTTED_BSS 0x00000004 /* slot_bss active */ #define WL_PSBW_DISA_NOT_PMFAST 0x00000008 /* Not PM_FAST */ #define WL_PSBW_DISA_BASICRATESET 0x00000010 /* BasicRateSet is empty */ #define WL_PSBW_DISA_NOT_D3 0x00000020 /* PCIe not in D3 */ @@ -3099,18 +3390,17 @@ typedef struct wl_bsstrans_roamthrottle { #define NFIFO 6 /**< # tx/rx fifopairs */ +#ifndef NFIFO_EXT #if defined(BCM_AQM_DMA_DESC) && !defined(BCM_AQM_DMA_DESC_DISABLED) -#if defined(WL_MU_TX) && !defined(WL_MU_TX_DISABLED) -#define NFIFO_EXT 32 /* 6 traditional FIFOs + 2 rsvd + 24 MU FIFOs */ -#else #define NFIFO_EXT 10 /* 4EDCA + 4 TWT + 1 Mcast/Bcast + 1 Spare */ -#endif // endif #elif defined(WL11AX_TRIGGERQ) && !defined(WL11AX_TRIGGERQ_DISABLED) #define NFIFO_EXT 10 #else #define NFIFO_EXT NFIFO #endif /* BCM_AQM_DMA_DESC && !BCM_AQM_DMA_DESC_DISABLED */ +#endif /* NFIFO_EXT */ +/* When new reason codes are added to list, Please update wl_reinit_names also */ /* Reinit reason codes */ enum { WL_REINIT_RC_NONE = 0, @@ -3172,7 +3462,13 @@ enum { WL_REINIT_RC_LAST /* This must be the last entry */ }; +#define WL_REINIT_RC_INVALID 255 + #define NREINITREASONCOUNT 8 +/* NREINITREASONCOUNT is 8 in other branches. + * Any change to this will break wl tool compatibility with other branches + * #define NREINITREASONCOUNT WL_REINIT_RC_LAST + */ #define REINITRSNIDX(_x) (((_x) < WL_REINIT_RC_LAST) ? (_x) : 0) @@ -3187,10 +3483,11 @@ enum { /* First two uint16 are version and lenght fields. So offset of the first counter will be 4 */ #define FIRST_COUNTER_OFFSET 0x04 +/* need for now due to src/wl/ndis automerged to other branches. e.g. 
BISON */ #define WLC_WITH_XTLV_CNT /* Number of xtlv info as required to calculate subcounter offsets */ -#define WL_CNT_XTLV_ID_NUM 10 +#define WL_CNT_XTLV_ID_NUM 12 #define WL_TLV_IOV_VER 1 /** @@ -3203,6 +3500,7 @@ enum wl_cnt_xtlv_id { WL_CNT_XTLV_WLC_RINIT_RSN = 0x101, /**< WLC layer reinitreason extension */ WL_CNT_XTLV_WLC_HE = 0x102, /* he counters */ WL_CNT_XTLV_WLC_SECVLN = 0x103, /* security vulnerabilities counters */ + WL_CNT_XTLV_WLC_HE_OMI = 0x104, /* he omi counters */ WL_CNT_XTLV_CNTV_LE10_UCODE = 0x200, /**< wl counter ver < 11 UCODE MACSTAT */ WL_CNT_XTLV_LT40_UCODE_V1 = 0x300, /**< corerev < 40 UCODE MACSTAT */ WL_CNT_XTLV_GE40_UCODE_V1 = 0x400, /**< corerev >= 40 UCODE MACSTAT */ @@ -3218,12 +3516,43 @@ enum wl_periodic_slice_state_xtlv_id { WL_STATE_COMPACT_HE_COUNTERS = 0x3 }; +/* Sub tlvs for chan_counters */ +enum wl_periodic_chan_xtlv_id { + WL_CHAN_GENERIC_COUNTERS = 0x1, + WL_CHAN_PERIODIC_COUNTERS = 0x2 +}; + +#ifdef WLC_CHAN_ECNTR_TEST +#define WL_CHAN_PERIODIC_CNTRS_VER_1 1 +typedef struct wlc_chan_periodic_cntr +{ + uint16 version; + uint16 pad; + uint32 rxstrt; +} wlc_chan_periodic_cntr_t; +#endif /* WLC_CHAN_ECNTR_TEST */ + +#define WL_CHANCNTR_HDR_VER_1 1 +typedef struct wlc_chan_cntr_hdr_v1 +{ + uint16 version; + uint16 pad; + chanspec_t chanspec; /* Dont add any fields above this */ + uint16 pad1; + uint32 total_time; + uint32 chan_entry_cnt; +} wlc_chan_cntr_hdr_v1_t; + /* tlv IDs uniquely identifies periodic state component */ enum wl_periodic_if_state_xtlv_id { WL_STATE_IF_COMPACT_STATE = 0x1, WL_STATE_IF_ADPS_STATE = 0x02 }; +enum wl_periodic_tdls_if_state_xtlv_id { + WL_STATE_IF_TDLS_STATE = 0x1 +}; + #define TDMTX_CNT_VERSION_V1 1 #define TDMTX_CNT_VERSION_V2 2 @@ -3368,6 +3697,7 @@ enum { #define WL_CNT_SECVLN_STRUCT_SZ ((uint32)sizeof(wl_secvln_cnt_t)) +#define WL_CNT_HE_OMI_STRUCT_SZ ((uint32)sizeof(wl_he_omi_cnt_wlc_v1_t)) #define INVALID_CNT_VAL (uint32)(-1) #define WL_XTLV_CNTBUF_MAX_SIZE 
((uint32)(OFFSETOF(wl_cnt_info_t, data)) + \ @@ -3377,6 +3707,10 @@ enum { #define WL_CNTBUF_MAX_SIZE MAX(WL_XTLV_CNTBUF_MAX_SIZE, (uint32)sizeof(wl_cnt_ver_11_t)) +/* Please refer to the twiki for counters addition/deletion. + * http://hwnbu-twiki.sj.broadcom.com/bin/view/Mwgroup/WlCounters#Counter_Edition + */ + /** Top structure of counters IOVar buffer */ typedef struct { uint16 version; /**< see definition of WL_CNT_T_VERSION */ @@ -3659,7 +3993,15 @@ typedef struct { uint32 last_tx_toss_rsn; /* reason because of which last tx pkt tossed */ uint32 last_rx_toss_rsn; /* reason because of which last rx pkt tossed */ uint32 pmk_badlen_cnt; /* number of invalid pmk len */ - + uint32 txbar_notx; /* number of TX BAR not sent (maybe supressed or muted) */ + uint32 txbar_noack; /* number of TX BAR sent, but not acknowledged by peer */ + uint32 rxfrag_agedout; /**< # of aged out rx fragmentation */ + + /* Do not remove or rename in the middle of this struct. + * All counter variables have to be of uint32. 
+ * Please follow the instruction in + * http://hwnbu-twiki.sj.broadcom.com/bin/view/Mwgroup/WlCounters#Counter_Edition + */ } wl_cnt_wlc_t; /* he counters Version 1 */ @@ -3710,7 +4052,9 @@ typedef struct wl_he_cnt_wlc_v2 { } wl_he_cnt_wlc_v2_t; /* he counters Version 3 */ +#define WL_RU_TYPE_MAX 6 #define HE_COUNTERS_V3 (3) + typedef struct wl_he_cnt_wlc_v3 { uint16 version; uint16 len; @@ -3741,13 +4085,137 @@ typedef struct wl_he_cnt_wlc_v3 { uint32 he_null_fifo_empty; /**< null AMPDU's in response to basic trigger * because of no frames in fifo's */ + uint32 he_myAID_cnt; + uint32 he_rxtrig_bfm_cnt; + uint32 he_rxtrig_mubar; + uint32 rxheru[WL_RU_TYPE_MAX]; /**< HE of rx pkts */ + uint32 txheru[WL_RU_TYPE_MAX]; + uint32 he_mgmt_tbppdu; + uint32 he_cs_req_tx_cancel; + uint32 he_wrong_nss; + uint32 he_trig_unsupp_rate; + uint32 he_rxtrig_nfrp; + uint32 he_rxtrig_bqrp; + uint32 he_rxtrig_gcrmubar; } wl_he_cnt_wlc_v3_t; +/* he counters Version 4 */ +#define HE_COUNTERS_V4 (4) +typedef struct wl_he_cnt_wlc_v4 { + uint16 version; + uint16 len; + uint32 he_rxtrig_myaid; /**< rxed valid trigger frame with myaid */ + uint32 he_rxtrig_rand; /**< rxed valid trigger frame with random aid */ + uint32 he_colormiss_cnt; /**< for bss color mismatch cases */ + uint32 he_txmampdu; /**< for multi-TID AMPDU transmission */ + uint32 he_txmtid_back; /**< for multi-TID BACK transmission */ + uint32 he_rxmtid_back; /**< reception of multi-TID BACK */ + uint32 he_rxmsta_back; /**< reception of multi-STA BACK */ + uint32 he_txfrag; /**< transmission of Dynamic fragmented packets */ + uint32 he_rxdefrag; /**< reception of dynamic fragmented packets */ + uint32 he_txtrig; /**< transmission of trigger frames */ + uint32 he_rxtrig_basic; /**< reception of basic trigger frame */ + uint32 he_rxtrig_murts; /**< reception of MU-RTS trigger frame */ + uint32 he_rxtrig_bsrp; /**< reception of BSR poll trigger frame */ + uint32 he_rxtsrt_hemuppdu_cnt; /**< rxing HE MU PPDU */ + uint32 
he_physu_rx; /**< reception of SU frame */ + uint32 he_phyru_rx; /**< reception of RU frame */ + uint32 he_txtbppdu; /**< increments on transmission of every TB PPDU */ + uint32 he_null_tbppdu; /**< null TB PPDU's sent as a response to basic trigger frame */ + uint32 he_rxstrt_hesuppdu_cnt; /**< rxing SU PPDU */ + uint32 he_rxstrt_hesureppdu_cnt; /**< rxing Range Extension(RE) SU PPDU */ + uint32 he_null_zero_agg; /**< null AMPDU's transmitted in response to basic trigger + * because of zero aggregation + */ + uint32 he_null_bsrp_rsp; /**< null AMPDU's txed in response to BSR poll */ + uint32 he_null_fifo_empty; /**< null AMPDU's in response to basic trigger + * because of no frames in fifo's + */ + uint32 he_myAID_cnt; + uint32 he_rxtrig_bfm_cnt; + uint32 he_rxtrig_mubar; + uint32 rxheru[WL_RU_TYPE_MAX]; /**< HE of rx pkts */ + uint32 txheru[WL_RU_TYPE_MAX]; + uint32 he_mgmt_tbppdu; + uint32 he_cs_req_tx_cancel; + uint32 he_wrong_nss; + uint32 he_trig_unsupp_rate; + uint32 he_rxtrig_nfrp; + uint32 he_rxtrig_bqrp; + uint32 he_rxtrig_gcrmubar; + uint32 he_rxtrig_basic_htpack; /**< triggers received with HTP ack policy */ + uint32 he_rxtrig_ed_cncl; /**< count of cancelled packets + * becasue of cs_req in trigger frame + */ + uint32 he_rxtrig_suppr_null_tbppdu; /**< count of null frame sent becasue of + * suppression scenarios + */ + uint32 he_ulmu_disable; /**< number of UL MU disable scenario's handled in ucode */ + uint32 he_ulmu_data_disable; /**= 80) */ @@ -4044,7 +4514,9 @@ typedef struct { uint32 PAD[6]; uint32 rxerr_stat; uint32 ctx_fifo_full; - uint32 PAD[38]; /* PAD added for counter elements to be added soon */ + uint32 PAD0[9]; + uint32 ctmode_ufc_cnt; + uint32 PAD1[28]; /* PAD added for counter elements to be added soon */ } wl_cnt_ge80mcst_v1_t; typedef struct { @@ -4053,6 +4525,7 @@ typedef struct { } wl_cnt_ge80_txfunfl_v1_t; /** MACSTAT counters for "wl counter" version <= 10 */ +/* With ucode before its macstat cnts cleaned up */ typedef struct { /* 
MAC counters: 32-bit version of d11.h's macstat_t */ uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS, @@ -4137,6 +4610,7 @@ typedef struct { uint32 rxdrop20s; /**< drop secondary cnt */ uint32 rxtoolate; /**< receive too late */ uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ + /* All counter variables have to be of uint32. */ } wl_cnt_v_le10_mcst_t; #define MAX_RX_FIFO 3 @@ -4447,6 +4921,12 @@ typedef struct { uint32 rxbcast; /* BroadcastReceivedFrameCount */ uint32 rxdropped; /* rx dropped pkts (derived: sum of others) */ + /* This structure is deprecated and used only for ver <= 11. + * All counter variables have to be of uint32. + * Please refer to the following twiki before editing. + * http://hwnbu-twiki.sj.broadcom.com/bin/view/ + * Mwgroup/WlCounters#wlc_layer_counters_non_xTLV + */ } wl_cnt_ver_11_t; typedef struct { @@ -4811,8 +5291,10 @@ typedef struct { * fifo because a probe response could not be sent out within * the time limit defined in M_PRS_MAXTIME */ - uint32 rxnack; - uint32 frmscons; + uint32 rxnack; /**< Number of NACKS received (Afterburner) */ + uint32 frmscons; /**< Number of frames completed without transmission because of an + * Afterburner re-queue + */ uint32 txnack; /**< obsolete */ uint32 rxback; /**< blockack rxcnt */ uint32 txback; /**< blockack txcnt */ @@ -4914,6 +5396,7 @@ typedef struct { uint32 rxmpdu_stbc; /**< count for stbc received */ uint32 rxdrop20s; /**< drop secondary cnt */ + /* All counter variables have to be of uint32. 
*/ } wl_cnt_ver_6_t; #define WL_DELTA_STATS_T_VERSION 2 /**< current version of wl_delta_stats_t struct */ @@ -5076,6 +5559,7 @@ typedef struct wl_mkeep_alive_pkt { #define WL_MKEEP_ALIVE_VERSION 1 #define WL_MKEEP_ALIVE_FIXED_LEN OFFSETOF(wl_mkeep_alive_pkt_t, data) +/* 1/2 second precision since idle time is a seconds counter anyway */ #define WL_MKEEP_ALIVE_PRECISION 500 #define WL_MKEEP_ALIVE_PERIOD_MASK 0x7FFFFFFF #define WL_MKEEP_ALIVE_IMMEDIATE 0x80000000 @@ -5306,6 +5790,10 @@ typedef struct wl_maccapture_params { uint32 stop_ptr; /* Stop address to store */ uint8 optn_bmp; /* Options */ uint8 PAD[3]; + /* Don't change the order after this nor + * add anything in betw. Code uses offsets to populate + * registers + */ uint32 tr_mask; /* Trigger Mask */ uint32 tr_val; /* Trigger Value */ uint32 s_mask; /* Store Mode Mask */ @@ -6360,6 +6848,9 @@ typedef struct wl_pkt_filter { wl_apf_program_t apf_program; /* apf program */ wl_pkt_filter_pattern_timeout_t pattern_timeout; /* Pattern timeout event filter */ } u; + /* Do NOT add structure members after the filter definitions, since they + * may include variable length arrays. + */ } wl_pkt_filter_t; /** IOVAR "tcp_keep_set" parameter. Used to install tcp keep_alive stuff. */ @@ -6753,7 +7244,8 @@ typedef struct wl_rssi_monitor_evt { * used for report in struct cca_chan_qual_event_t. So the ID values beyond 8-bit are used * for reporting purpose only. */ -#define WL_CHAN_QUAL_FULL_CCA (0x100 | WL_CHAN_QUAL_CCA) +#define WL_CHAN_QUAL_FULL_CCA (0x100u | WL_CHAN_QUAL_CCA) /* CCA: ibss vs. obss */ +#define WL_CHAN_QUAL_FULLPM_CCA (0x200u | WL_CHAN_QUAL_CCA) /* CCA: me vs. notme, PM vs. 
!PM */ #define MAX_CHAN_QUAL_LEVELS 8 @@ -6782,6 +7274,7 @@ typedef struct wl_action_obss_coex_req { /** IOVar parameter block for small MAC address array with type indicator */ #define WL_IOV_MAC_PARAM_LEN 4 +/** This value is hardcoded to be 16 and MUST match PKTQ_MAX_PREC value defined elsewhere */ #define WL_IOV_PKTQ_LOG_PRECS 16 #include @@ -6804,6 +7297,11 @@ typedef struct { } wl_iov_mac_full_params_t; /** Parameter block for PKTQ_LOG statistics */ +/* NOTE: this structure cannot change! It is exported to wlu as a binary format + * A new format revision number must be created if the interface changes + * The latest is v05; previous v01...v03 are no longer supported, v04 has + * common base with v05 +*/ #define PKTQ_LOG_COUNTERS_V4 \ /* packets requested to be stored */ \ uint32 requested; \ @@ -7046,6 +7544,8 @@ typedef struct nbr_element { #define NBR_ADD_DYNAMIC 1 #define WL_RRM_NBR_RPT_VER 1 + +#define WL_NBR_RPT_FLAG_BSS_PREF_FROM_AP 0x01 /** 11k Neighbor Report element */ typedef struct nbr_rpt_elem { uint8 version; @@ -7224,6 +7724,7 @@ typedef BWL_PRE_PACKED_STRUCT struct wl_pwrstats { #define WLC_PMD_CHK_UNALIGN_TBTT 0x100 #define WLC_PMD_APSD_STA_UP 0x200 #define WLC_PMD_TX_PEND_WAR 0x400 /* obsolete, can be reused */ +#define WLC_PMD_NAN_AWAKE 0x400 /* Reusing for NAN */ #define WLC_PMD_GPTIMER_STAY_AWAKE 0x800 #define WLC_PMD_PM2_RADIO_SOFF_PEND 0x2000 #define WLC_PMD_NON_PRIM_STA_UP 0x4000 @@ -7393,6 +7894,7 @@ typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_status_v2 { /* Below are latest definitions from PHO25178RC100_BRANCH_6_50 */ /* wl_pwr_pm_awake_stats_v1_t is used for WL_PWRSTATS_TYPE_PM_AWAKE */ +/* Use regs from d11.h instead of raw addresses for */ /* (at least) the chip independent registers */ typedef struct ucode_dbg_ext { uint32 x120; @@ -8000,6 +8502,26 @@ typedef BWL_PRE_PACKED_STRUCT struct { } BWL_POST_PACKED_STRUCT tx_pwr_rpt_t; #include +typedef struct tx_pwr_ru_rate_info { + uint16 version; + uint16 ru_alloc; + uint16 
mcs; + uint16 nss; + uint16 num_he_ltf_syms; + uint16 ldpc; + uint16 gi; + uint16 txmode; + uint16 dcm; + uint16 tx_chain; +} tx_pwr_ru_rate_info_t; + +#define TX_PWR_RU_RATE_INFO_VER 1 + +/* TLV ID for curpower report, ID < 63 is reserved for ppr module */ +typedef enum tx_pwr_tlv_id { + TX_PWR_RPT_RU_RATE_INFO_ID = 64 +} tx_pwr_tlv_id_t; + #include typedef BWL_PRE_PACKED_STRUCT struct { struct ipv4_addr ipv4_addr; @@ -8208,6 +8730,44 @@ typedef struct wlc_btc_stats_v2 { uint16 rsvd; /* pad to align struct to 32bit bndry */ } wlc_btc_stats_v2_t; +/* Durations for each bt task in millisecond */ +#define WL_BTCX_DURSTATS_VER_1 (1u) +typedef struct wlc_btcx_durstats_v1 { + uint16 version; /* version number of struct */ + uint16 valid; /* validity of this struct */ + uint32 stats_update_timestamp; /* tStamp when data is updated */ + uint16 bt_acl_dur; /* acl duration in ms */ + uint16 bt_sco_dur; /* sco duration in ms */ + uint16 bt_esco_dur; /* esco duration in ms */ + uint16 bt_a2dp_dur; /* a2dp duration in ms */ + uint16 bt_sniff_dur; /* sniff duration in ms */ + uint16 bt_pscan_dur; /* page scan duration in ms */ + uint16 bt_iscan_dur; /* inquiry scan duration in ms */ + uint16 bt_page_dur; /* paging duration in ms */ + uint16 bt_inquiry_dur; /* inquiry duration in ms */ + uint16 bt_mss_dur; /* mss duration in ms */ + uint16 bt_park_dur; /* park duration in ms */ + uint16 bt_rssiscan_dur; /* rssiscan duration in ms */ + uint16 bt_iscan_sco_dur; /* inquiry scan sco duration in ms */ + uint16 bt_pscan_sco_dur; /* page scan sco duration in ms */ + uint16 bt_tpoll_dur; /* tpoll duration in ms */ + uint16 bt_sacq_dur; /* sacq duration in ms */ + uint16 bt_sdata_dur; /* sdata duration in ms */ + uint16 bt_rs_listen_dur; /* rs listen duration in ms */ + uint16 bt_rs_burst_dur; /* rs brust duration in ms */ + uint16 bt_ble_adv_dur; /* ble adv duration in ms */ + uint16 bt_ble_scan_dur; /* ble scan duration in ms */ + uint16 bt_ble_init_dur; /* ble init duration in ms */ + 
uint16 bt_ble_conn_dur; /* ble connection duration in ms */ + uint16 bt_task_lmp_dur; /* lmp duration in ms */ + uint16 bt_esco_retran_dur; /* esco retransmission duration in ms */ + uint16 bt_task26_dur; /* task26 duration in ms */ + uint16 bt_task27_dur; /* task27 duration in ms */ + uint16 bt_task28_dur; /* task28 duration in ms */ + uint16 bt_task_pred_dur; /* prediction task duration in ms */ + uint16 bt_multihid_dur; /* multihid duration in ms */ +} wlc_btcx_durstats_v1_t; + #define WL_IPFO_ROUTE_TBL_FIXED_LEN 4 #define WL_MAX_IPFO_ROUTE_TBL_ENTRY 64 @@ -8231,7 +8791,7 @@ typedef struct wlc_btc_stats_v2 { #define LOGRRC_FIX_LEN 8 #define IOBUF_ALLOWED_NUM_OF_LOGREC(type, len) ((len - LOGRRC_FIX_LEN)/sizeof(type)) /* BCMWAPI_WAI */ -#define IV_LEN 16 +#define IV_LEN 16 /* same as SMS4_WPI_PN_LEN */ struct wapi_sta_msg_t { uint16 msg_type; @@ -9653,10 +10213,10 @@ enum wl_nan_txs_reason_codes { /* For NAN TX status */ typedef struct wl_nan_event_txs { - uint8 status; /* For TX status, success or failure */ - uint8 reason_code; /* to identify reason when status is failure */ - uint16 host_seq; /* seq num to keep track of pkts sent by host */ - uint8 type; /* frame type */ + uint8 status; /* For TX status, success or failure */ + uint8 reason_code; /* to identify reason when status is failure */ + uint16 host_seq; /* seq num to keep track of pkts sent by host */ + uint8 type; /* wl_nan_frame_type_t */ uint8 pad; uint16 opt_tlvs_len; uint8 opt_tlvs[]; @@ -9668,6 +10228,13 @@ typedef struct wl_nan_event_sd_txs { uint8 req_id; /* Requestor instance id */ } wl_nan_event_sd_txs_t; +/* nanho fsm tlv WL_NAN_XTLV_NANHO_OOB_TXS(0x0b0a) */ +typedef struct wl_nan_event_nanho_txs { + uint32 fsm_id; /* nho fsm id */ + uint16 seq_id; /* nho seq id */ + uint16 pad; +} wl_nan_event_nanho_txs_t; + /* Subscribe or Publish instance Terminated */ /* WL_NAN_EVENT_TERMINATED */ @@ -9698,6 +10265,21 @@ typedef struct wl_nan_ev_receive { uint8 attr_list[0]; /* attributes payload */ } 
wl_nan_ev_receive_t; +/* WL_NAN_EVENT_DISC_CACHE_TIMEOUT */ +#define WL_NAN_DISC_CACHE_EXPIRY_ENTRIES_MAX 8 + +typedef struct wl_nan_disc_expired_cache_entry { + uint8 l_sub_id; /* local sub instance_id */ + uint8 r_pub_id; /* remote-matched pub instance_id */ + struct ether_addr r_nmi_addr; /* remote-matched pub nmi addr */ +} wl_nan_disc_expired_cache_entry_t; + +typedef struct wl_nan_ev_disc_cache_timeout { + uint16 count; /* no. of expired cache entries */ + uint16 pad; + wl_nan_disc_expired_cache_entry_t cache_exp_list[]; +} wl_nan_ev_disc_cache_timeout_t; + /* For NAN event mask extention */ #define WL_NAN_EVMASK_EXTN_VER 1 #define WL_NAN_EVMASK_EXTN_LEN 16 /* 16*8 = 128 masks supported */ @@ -9812,6 +10394,8 @@ typedef enum wl_nan_tlv { WL_NAN_XTLV_SD_NDP_SPEC_INFO = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0F), WL_NAN_XTLV_SD_NDPE_TLV_LIST = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x10), WL_NAN_XTLV_SD_NDL_QOS_UPD = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x11), + WL_NAN_XTLV_SD_DISC_CACHE_TIMEOUT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x12), + WL_NAN_XTLV_SD_PEER_NMI = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x13), WL_NAN_XTLV_SYNC_BCN_RX = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x01), WL_NAN_XTLV_EV_MR_CHANGED = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x02), @@ -9834,7 +10418,10 @@ typedef enum wl_nan_tlv { WL_NAN_XTLV_DAM_NA_ATTR = NAN_CMD(WL_NAN_CMD_DAM_COMP_ID, 0x01), /* na attr */ WL_NAN_XTLV_HOST_ASSIST_REQ = NAN_CMD(WL_NAN_CMD_DAM_COMP_ID, 0x02), /* host assist */ - WL_NAN_XTLV_GEN_FW_CAP = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x01), /* fw cap */ + /* wl_nan_fw_cap_t */ + WL_NAN_XTLV_GEN_FW_CAP = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x01), + /* wl_nan_fw_cap_v2_t */ + WL_NAN_XTLV_GEN_FW_CAP_V2 = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x02), WL_NAN_XTLV_SCHED_INFO = NAN_CMD(WL_NAN_CMD_SCHED_COMP_ID, 0x01), @@ -9849,7 +10436,13 @@ typedef enum wl_nan_tlv { WL_NAN_XTLV_NANHO_BLOB = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x04), WL_NAN_XTLV_NANHO_NDP_STATE = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x05), 
WL_NAN_XTLV_NANHO_FRM_TPLT = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x06), - WL_NAN_XTLV_NANHO_OOB_NAF = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x07) + WL_NAN_XTLV_NANHO_OOB_NAF = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x07), + WL_NAN_XTLV_NANHO_LOG_ERR_CTRL = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x08), + WL_NAN_XTLV_NANHO_LOG_DBG_CTRL = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x09), + WL_NAN_XTLV_NANHO_OOB_TXS = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x0A), + WL_NAN_XTLV_NANHO_DCAP_ATTR = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x0B), + WL_NAN_XTLV_NANHO_ELEM_ATTR = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x0C), + WL_NAN_XTLV_NANHO_SEC_SA = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x0D) } wl_nan_tlv_t; /* Sub Module ID's for NAN */ @@ -9935,7 +10528,9 @@ enum wl_nan_sub_cmd_xtlv_id { WL_NAN_CMD_SD_FUP_TRANSMIT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0B), WL_NAN_CMD_SD_CONNECTION = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0C), WL_NAN_CMD_SD_SHOW = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0D), - WL_NAN_CMD_SD_MAX = WL_NAN_CMD_SD_SHOW, + WL_NAN_CMD_SD_DISC_CACHE_TIMEOUT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0E), + WL_NAN_CMD_SD_DISC_CACHE_CLEAR = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0F), + WL_NAN_CMD_SD_MAX = WL_NAN_CMD_SD_DISC_CACHE_CLEAR, /* nan time sync sub-commands */ @@ -9975,6 +10570,8 @@ enum wl_nan_sub_cmd_xtlv_id { WL_NAN_CMD_RANGE_AUTO = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x02), WL_NAN_CMD_RANGE_RESPONSE = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x03), WL_NAN_CMD_RANGE_CANCEL = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x04), + WL_NAN_CMD_RANGE_IDLE_COUNT = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x05), + WL_NAN_CMD_RANGE_CANCEL_EXT = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x06), /* nan debug sub-commands */ WL_NAN_CMD_DBG_SCAN_PARAMS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x01), @@ -10005,10 +10602,18 @@ enum wl_nan_sub_cmd_xtlv_id { WL_NAN_CMD_NSR2_MAX = WL_NAN_CMD_NSR2, /* Host offload sub-commands */ - WL_NAN_CMD_NANHO_UPDATE = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x01), - WL_NAN_CMD_NANHO_FRM_TPLT = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x02), - 
WL_NAN_CMD_NANHO_OOB_NAF = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x03), - WL_NAN_CMD_NANHO_MAX = WL_NAN_CMD_NANHO_OOB_NAF + WL_NAN_CMD_NANHO_UPDATE = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x01), /* WILL BE REMOVED */ + WL_NAN_CMD_NANHO_INFO = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x01), + WL_NAN_CMD_NANHO_FRM_TPLT = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x02), /* unused */ + WL_NAN_CMD_NANHO_OOB_NAF = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x03), /* unused */ + WL_NAN_CMD_NANHO_LOG_CTRL = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x04), + WL_NAN_CMD_NANHO_VER = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x05), + WL_NAN_CMD_NANHO_MAX = WL_NAN_CMD_NANHO_VER, + + /* Add submodules below, and update WL_NAN_CMD_MAX */ + + /* ROML check for this enum should use WL_NAN_CMD_MAX */ + WL_NAN_CMD_MAX = WL_NAN_CMD_NANHO_MAX }; /* @@ -10079,6 +10684,10 @@ typedef struct wl_nan_mac_stats { uint32 naf_rx; /* NAN AF rx */ uint32 sdf_tx; /* SDF tx */ uint32 sdf_rx; /* SDF rx */ + uint32 cnt_sync_bcn_rx_tu[3]; /* delta bw */ + uint32 cnt_bcn_tx_out_dw; /* TX sync beacon outside dw */ + uint32 cnt_role_am_dw; /* anchor master role due to dw */ + uint32 cnt_am_hop_err; /* wrong hopcount set for AM */ } wl_nan_mac_stats_t; /* NAN Sched stats */ @@ -10285,9 +10894,20 @@ typedef uint8 wl_nan_mr_changed_t; #define WL_NAN_AMR_CHANGED 1 #define WL_NAN_IMR_CHANGED 2 +/* + * The macro BCMUTILS_ERR_CODES is defined only + * when using the common header file(a new approach) bcmutils/include/bcmerror.h. + * Otherwise, use the error codes from this file. + */ +#ifndef BCMUTILS_ERR_CODES + /** status - TBD BCME_ vs NAN status - range reserved for BCME_ */ enum { /* add new status here... 
*/ + WL_NAN_E_INVALID_TOKEN = -2135, /* invalid token or mismatch */ + WL_NAN_E_INVALID_ATTR = -2134, /* generic invalid attr error */ + WL_NAN_E_INVALID_NDL_ATTR = -2133, /* invalid NDL attribute */ + WL_NAN_E_SCB_NORESOURCE = -2132, /* no more peer scb available */ WL_NAN_E_PEER_NOTAVAIL = -2131, WL_NAN_E_SCB_EXISTS = -2130, WL_NAN_E_INVALID_PEER_NDI = -2129, @@ -10373,10 +10993,13 @@ enum { WL_NAN_E_INVALID_MAC = -2049, WL_NAN_E_BAD_INSTANCE = -2048, /* NAN status code reserved from -2048 to -3071 */ + /* Do NOT add new status below -2048 */ WL_NAN_E_ERROR = -1, WL_NAN_E_OK = 0 }; +#endif /* BCMUTILS_ERR_CODES */ + /* Error codes used in vendor specific attribute in Data Path Termination frames */ enum { WL_NAN_DPEND_E_OK = 0, @@ -10397,7 +11020,8 @@ enum { WL_NAN_DPEND_E_INVALID_NDL_QOS = 15, WL_NAN_DPEND_E_INVALID_SEC_PARAMS = 16, WL_NAN_DPEND_E_REJECT_AVAIL = 17, - WL_NAN_DPEND_E_REJECT_NDL = 18 + WL_NAN_DPEND_E_REJECT_NDL = 18, + WL_NAN_DPEND_E_SCB_NORESOURCE = 19 }; typedef int32 wl_nan_status_t; @@ -10557,7 +11181,7 @@ typedef struct wl_nan_conf_status { uint8 election_mode; /* Election mode, host or firmware */ uint8 hop_count; /* Current Hop count */ uint8 imr[8]; /* Immediate Master Rank */ - uint8 pad[4]; + uint8 pad[4]; /* remove after precommit */ uint16 opt_tlvs_len; uint8 opt_tlvs[]; } wl_nan_conf_status_t; @@ -10620,6 +11244,35 @@ typedef struct wl_nan_election_metric_state { */ typedef struct ether_addr wl_nan_cluster_id_t; +#define NHO_SEC_NCS_SK_REPLAY_CNT_LEN 8u +/* kck */ +#define NHO_SEC_NCS_SK_256_KCK_LEN 24u /* refer nan2 r21 7.1.4.1 */ +/* kek */ +#define NHO_SEC_NCS_SK_256_KEK_LEN 32u /* refer nan2 r21 7.1.4.1 */ +/* tk */ +#define NHO_SEC_NCS_SK_256_TK_LEN 32u /* refer nan2 r21 section 7 */ + +#define NHO_SEC_NCS_SK_MAX_KEY_LEN (NHO_SEC_NCS_SK_256_KCK_LEN \ + + NHO_SEC_NCS_SK_256_KEK_LEN \ + + NHO_SEC_NCS_SK_256_TK_LEN) + +#define NHO_SEC_KEY_INSTALL_FLAG (1 << 0) +#define NHO_SEC_KEY_UNINSTALL_FLAG (1 << 1) + +/* 
WL_NAN_XTLV_NANHO_SEC_SA */ +typedef struct nanho_sec_sa { + int8 csid; /* Cipher suite ID to identify the security type */ + uint8 kck_len; /* KCK len in key_buf */ + uint8 kek_len; /* KEK len in key_buf */ + uint8 tk_len; /* Transient key len in key_buf */ + uint16 flags; + uint16 pad; + struct ether_addr laddr; /* local mac addr */ + struct ether_addr raddr; /* remote mac addr */ + uint8 key_krc[NHO_SEC_NCS_SK_REPLAY_CNT_LEN]; /* Key Replay ctr */ + uint8 key_buf[NHO_SEC_NCS_SK_MAX_KEY_LEN]; /* PTK = KCK + KEK + TK */ +} nanho_sec_sa_t; + /* * WL_NAN_CMD_ELECTION_MERGE * 0 - disable cluster merge @@ -10662,34 +11315,6 @@ typedef struct nan_sync_master { uint32 ambtt; } nan_sync_master_t; -/* -* NAN Sync TLV(NSTLV): -* To keep NAN/AWDL concurrency time sync. -* It is generated at hybrid device, and propogated by AWDL only device. -* It contains the information needed to run NAN election -*/ -#include -typedef BWL_PRE_PACKED_STRUCT struct awdl_nan_sync_tlv { - uint16 hop_count; /* total hop_count */ - struct ether_addr src_addr; /* macaddr of the hybrid originator of nstlv */ - struct ether_addr cluster_id; /* NAN cluster ID of hybrid originator of nstlv */ - uint32 nan_tsf_h; /* NAN cluster TSF of the hybrid originator of nstlv */ - uint32 nan_tsf_l; - uint8 master_preference; - uint8 random_factor; - uint8 amr[WL_NAN_MASTER_RANK_LEN]; - uint8 orig_hop_count; /* hop_count of the origin hybrid NAN device */ - uint32 ambtt; /* Anchor Master Beacon Transmission Time */ - uint8 opt_xtlv_len; /* xtlv len */ -} BWL_POST_PACKED_STRUCT awdl_nan_sync_tlv_t; - -typedef BWL_PRE_PACKED_STRUCT struct wl_awdl_nan_sync_tlv { - uint8 type; /* 23 for NTLV */ - uint16 param_len; - awdl_nan_sync_tlv_t ntlv; -} BWL_POST_PACKED_STRUCT wl_awdl_nan_sync_tlv_t; -#include - /* NAN advertiser structure */ /* TODO RSDB: add chspec to indicates core corresponds correct core */ typedef struct nan_adv_entry { @@ -10710,8 +11335,8 @@ typedef struct nan_adv_entry { #define NAN_VIRTUAL_PEER_BIT 
0x80 typedef enum { - NAC_CNT_NTLV_AF_TX = 0, /* count of AWDL AF containing NTLV tx */ - NAC_CNT_NTLV_AF_RX, /* count of AWDL AF containing NTLV rx */ + NAC_CNT_NTLV_AF_TX = 0, /* count of AF containing NTLV tx */ + NAC_CNT_NTLV_AF_RX, /* count of AF containing NTLV rx */ NAC_CNT_NTLV_TMERR_TX, /* count of NTLV tx timing error */ NAC_CNT_NTLV_TMERR_RX, /* count of NTLV rx timing error */ NAC_CNT_NTLV_TM_MISMATCH, /* count of TopMaster mismatch in Rx NTLV processing */ @@ -10956,6 +11581,9 @@ typedef struct wl_nan_sd_transmit { uint8 opt_tlv[]; /* optional tlvs in bcm_xtlv_t type */ } wl_nan_sd_transmit_t; +/* disc cache timeout for a cache entry */ +typedef uint16 wl_nan_disc_cache_timeout_t; + /* * WL_NAN_CMD_SYNC_TSRESERVE */ @@ -11202,17 +11830,31 @@ typedef struct wl_nan_ev_p2p_avail { * discovery interface event structures * */ +enum wl_nan_oob_af_flags { + WL_NAN_OOB_AF_FLAG_SEND_EVENT = 0x0001, /* send tx status event */ + WL_NAN_OOB_AF_FLAG_FLUSH_PCB = 0x0002, /* flush PCB */ + WL_NAN_OOB_AF_FLAG_ADD_DCAP = 0x0004, /* add dev cap attr into NAF body */ + WL_NAN_OOB_AF_FLAG_ADD_ELMT = 0x0008, /* add elmt container attr into NAF body */ + WL_NAN_OOB_AF_FLAG_MFP_REQUIRED = 0x0010 /* MFP required */ +}; +typedef uint16 wl_nan_oob_af_flags_t; + /* mandatory parameters for OOB action frame */ -/* single-shot when bitmap and offset are set to 0; periodic otherwise */ typedef struct wl_nan_oob_af_params_s { - /* bitmap for the 32 timeslots in 512TU dw interval */ - uint32 ts_map; - /* offset from start of dw, in us */ - uint32 tx_offset; + uint8 fup_lcl_id; /* local instance ID of follow-up SDF */ + uint8 fup_peer_id; /* peer instance ID of follow-up SDF */ + uint8 sdf_type; /* represented by service control type NAN_SC_XXX */ + uint8 unused_uint8; + uint32 unused_uint32; struct ether_addr bssid; struct ether_addr dest; uint32 pkt_lifetime; + uint8 n2af_sub_type; /* NAN2 AF sub type */ + uint8 retry_cnt; /* NAF tx retry (not 802.11 re-tx) */ + uint16 token; /* NAN 
host seq num */ + uint16 flags; /* wl_nan_oob_af_flags_t */ + uint32 fsm_id; /* unique fsm id */ uint16 payload_len; uint8 payload[1]; } wl_nan_oob_af_params_t; @@ -11608,12 +12250,13 @@ enum wl_nan_host_assist_reason { WL_NAN_HAST_REASON_NONE = 0, /* reason for host assist request */ - WL_NAN_HAST_REASON_NO_CRB = 1, /* NDL: no common NA */ - WL_NAN_HAST_REASON_NDC = 2, /* NDL: NDC not compliant */ - WL_NAN_HAST_REASON_IMMUT = 3, /* NDL: peer immutable schedule */ - WL_NAN_HAST_REASON_RNG = 4, /* NDL: ranging schedule */ - WL_NAN_HAST_REASON_QOS = 5, /* NDL: QoS not satisfied */ - WL_NAN_HAST_REASON_SVC_NDI_MISSING = 6 /* SD: NDI associated with svc is missing */ + WL_NAN_HAST_REASON_NO_CRB = 1, /* NDL: no common NA */ + WL_NAN_HAST_REASON_NDC = 2, /* NDL: NDC not compliant */ + WL_NAN_HAST_REASON_IMMUT = 3, /* NDL: peer immutable schedule */ + WL_NAN_HAST_REASON_RNG = 4, /* NDL: ranging schedule */ + WL_NAN_HAST_REASON_QOS = 5, /* NDL: QoS not satisfied */ + WL_NAN_HAST_REASON_SVC_NDI_MISSING = 6, /* SD: NDI associated with svc is missing */ + WL_NAN_HAST_REASON_PEER_SCB_NORESOURCE = 7 /* NDP: no more peer scb available */ }; typedef uint8 wl_nan_host_assist_reason_t; @@ -11680,6 +12323,25 @@ typedef struct wl_nan_fw_cap { uint8 pad[3]; } wl_nan_fw_cap_t; +/* WL_NAN_XTLV_GEN_FW_CAP_V2 */ +typedef struct wl_nan_fw_cap_v2 { + uint32 flags1; /* nan sub-features compiled in firmware */ + uint32 flags2; /* for more sub-features in future */ + uint8 max_svc_publishes; /* max num of service publish */ + uint8 max_svc_subscribes; /* max num of service subscribe */ + uint8 max_lcl_sched_maps; /* max num of local schedule map */ + uint8 max_lcl_ndc_entries; /* max num of local NDC entry */ + uint8 max_lcl_ndi_interfaces; /* max num of local NDI interface */ + uint8 max_peer_entries; /* max num of peer entry */ + uint8 max_peer_sched_maps; /* max num of peer schedule maps */ + uint8 max_ndp_sessions; /* max num of NDP session */ + uint32 cipher_suites_supported_mask; /* 
bitmask for supported cipher suites */ + uint32 reserved_uint32_1; /* reserved for future sub-features */ + uint32 reserved_uint32_2; /* reserved for future sub-features */ + uint32 reserved_uint32_3; /* reserved for future sub-features */ + uint32 reserved_uint32_4; /* reserved for future sub-features */ +} wl_nan_fw_cap_v2_t; + /* nan cipher suite support mask bits */ #define WL_NAN_CIPHER_SUITE_SHARED_KEY_128_MASK 0x01 #define WL_NAN_CIPHER_SUITE_SHARED_KEY_256_MASK 0x02 @@ -11721,6 +12383,7 @@ typedef struct wl_nan_nsr_ndp_info { /* NAN2.0 Ranging definitions */ /* result indication bit map */ +#define NAN_RANGE_INDICATION_NONE 0 #define NAN_RANGE_INDICATION_CONT (1<<0) #define NAN_RANGE_INDICATION_INGRESS (1<<1) #define NAN_RANGE_INDICATION_EGRESS (1<<2) @@ -11756,6 +12419,19 @@ typedef struct wl_nan_range_resp { #define NAN_RNG_RESP_IOV_LEN 20 +#define NAN_RNG_TERM_FLAG_IMMEDIATE (1u << 0u) /* Do not wait for TXS */ +#define NAN_RNG_TERM_FLAG_SILIENT_TEARDOWN (1u << 1u) /* Do not TX rng_term */ +#define NAN_RNG_TERM_FLAG_EVENT_HOST (1u << 2u) /* Notify event to host */ +#define NAN_RNG_TERM_FLAG_OPT_TLVS (1u << 3u) /* opt tlvs present */ + +typedef struct wl_nan_range_cancel_ext { + wl_nan_range_id range_id; + uint8 flags; + uint8 pad[2]; +} wl_nan_range_cancel_ext_t; + +#define NAN_RNG_CANCEL_IOV_FIXED_LEN 4u + #define NAN_RNG_MAX_IOV_LEN 255 typedef struct wl_nan_ev_rng_req_ind { @@ -11784,11 +12460,21 @@ typedef struct wl_nan_ev_rng_rpt_ind { #define NAN_RNG_RPT_IND_SIZE 12 +/* number of continuous ranging crbs which can be idle, +* after which ranging session will be terminated. +* Default value is 5. 
Set to zero for disabling the +* idle timeout functionality +*/ +typedef uint8 wl_nan_range_idle_count_t; + /* nan ranging termination reason codes */ -#define NAN_RNG_TERM_IDLE_TIMEOUT 1 /* no ftms from peer */ -#define NAN_RNG_TERM_PEER_REQ 2 -#define NAN_RNG_TERM_USER_REQ 3 -#define NAN_RNG_TERM_RNG_RESP_TIMEOUT 4 +#define NAN_RNG_TERM_UNSPECIFIED 0 +#define NAN_RNG_TERM_IDLE_TIMEOUT 1u /* no ftms from peer */ +#define NAN_RNG_TERM_PEER_REQ 2u +#define NAN_RNG_TERM_USER_REQ 3u +#define NAN_RNG_TERM_RNG_RESP_TIMEOUT 4u /* On FSM Timeout, waiting for Resp from peer */ +#define NAN_RNG_TERM_RNG_RESP_REJ 5u /* On range resp, reject from peer */ +#define NAN_RNG_TERM_RNG_TXS_FAIL 6u /* On range req/resp txs fail */ typedef struct wl_nan_ev_rng_term_ind { struct ether_addr peer_m_addr; @@ -11875,109 +12561,6 @@ typedef struct wl_nan_fastdisc_s { #define WL_NAN_FASTDISC_CFG_SIZE 1024 /* arbitrary */ -#ifdef WL_NANHO -/* ****************** NAN Host offload specific strucures ****************** */ - -enum wl_nan_rx_mgmt_frm_type { - WL_NAN_RX_MGMT_FRM_BCN = 0, - WL_NAN_RX_MGMT_FRM_SDF = 1, - WL_NAN_RX_MGMT_FRM_NAF = 2 -}; -typedef uint8 wl_nan_rx_mgmt_frm_type_t; - -/* WL_NAN_EVENT_RX_MGMT_FRM */ -typedef struct wl_nan_event_rx_mgmt_frm { - uint8 frm_type; /* wl_nan_rx_mgmt_frm_type_t */ - uint8 pad; - uint16 frm_len; - uint8 frm[]; -} wl_nan_event_rx_mgmt_frm_t; - -#define WL_NAN_NANHO_UPDATE_MAX_SIZE 2048 /* arbitrary */ - -enum wl_nan_peer_entry_action { - WL_NAN_PEER_ENTRY_ACT_ADD = 0, /* add peer entry */ - WL_NAN_PEER_ENTRY_ACT_REMOVE = 1 /* remove peer entry */ -}; -typedef uint8 wl_nan_peer_entry_action_t; - -/* WL_NAN_XTLV_NANHO_PEER_ENTRY */ -typedef struct wl_nan_peer_entry -{ - struct ether_addr nmi; /* nmi of peer device */ - uint8 action; /* wl_nan_peer_entry_action_t */ - uint8 pad; -} wl_nan_peer_entry_t; - -enum wl_nan_dcaplist_action { - WL_NAN_DCAPLIST_ACT_UPDATE = 0, /* update or add */ - WL_NAN_DCAPLIST_ACT_REMOVE = 1 /* remove (only for peer dcap 
cache entry) */ -}; -typedef uint8 wl_nan_dcaplist_action_t; - -/* WL_NAN_XTLV_NANHO_DCAPLIST */ -typedef struct wl_nan_dev_cap_list -{ - struct ether_addr nmi; /* null for local device */ - uint8 action; /* wl_nan_dcaplist_action_t */ - /* optional fields for WL_NAN_DCAPLIST_ACT_UPDATE */ - uint8 num_maps; - uint8 dcap[]; /* list of nan_dev_cap_t */ -} wl_nan_dev_cap_list_t; - -typedef struct wl_nan_dev_chan_sched { - uint16 num_slots; /* number of slot in schedule */ - uint16 period; /* period of channel schedule (TU) */ - uint8 slot_dur; /* slot duration (TU) */ - uint8 map_id; /* map id (TBD) */ - uint8 pad[2]; - uint8 data[]; - /* chanspec_t chan_sched[num_slot] */ - /* uint8 slot_info[num_slot] */ -} wl_nan_dev_chan_sched_t; - -/* WL_NAN_XTLV_NANHO_DCSLIST */ -typedef struct wl_nan_dev_chan_sched_list { - struct ether_addr nmi; /* null for local device */ - uint8 num_maps; - uint8 pad; - wl_nan_dev_chan_sched_t dcs[]; -} wl_nan_dev_chan_sched_list_t; - -/* WL_NAN_XTLV_NANHO_BLOB */ -typedef struct wl_nan_dev_blob { - struct ether_addr nmi; /* null for local device */ - uint16 blob_len; /* blob len in blob[] buffer */ - uint8 blob_type; - uint8 pad[3]; - uint8 blob[]; -} wl_nan_dev_blob_t; - -typedef struct wl_nan_peer_ndl_state { - struct ether_addr nmi; - uint8 ndl_state; /* nan_peer_ndl_state_t */ - uint8 pad; -} wl_nan_peer_ndl_state_t; - -enum wl_nan_ndp_state_action { - WL_NAN_NDP_STATE_ACT_ESTABLISHED = 0, - WL_NAN_NDP_STATE_ACT_TERMINATED = 1 -}; -typedef uint8 wl_nan_ndp_state_action_t; - -/* WL_NAN_XTLV_NANHO_NDP_STATE */ -typedef struct wl_nan_ndp_state { - struct ether_addr peer_nmi; - struct ether_addr peer_ndi; - struct ether_addr lcl_ndi; - uint8 action; /* wl_nan_ndp_state_action_t */ - uint8 pad; - /* TODO: secured NDP information */ -} wl_nan_ndp_state_t; - -/* *************** end of NAN Host offload specific strucures ************** */ -#endif /* WL_NANHO */ - /* ********************* end of NAN section ******************************** */ /* 
endif WL_NAN */ @@ -12489,6 +13072,7 @@ typedef enum { WL_WSEC_INFO_BSS_ALGO = (WL_WSEC_INFO_BSS_BASE + 4), WL_WSEC_INFO_BSS_KEY_LEN = (WL_WSEC_INFO_BSS_BASE + 5), WL_WSEC_INFO_BSS_ALGOS = (WL_WSEC_INFO_BSS_BASE + 6), + WL_WSEC_INFO_BSS_WPA_AP_RESTRICT = (WL_WSEC_INFO_BSS_BASE + 7), /* add per-BSS properties above */ WL_WSEC_INFO_MAX = 0xffff } wl_wsec_info_type_t; @@ -12512,6 +13096,15 @@ typedef struct wl_wsec_info { uint8 num_tlvs; wl_wsec_info_tlv_t tlvs[1]; /**< tlv data follows */ } wl_wsec_info_t; +#define AP_BLOCK_NONE 0x0000 /* default: No restriction */ +#define AP_ALLOW_WPA2 0x0001 /* allow WPA2PSK AP */ +#define AP_ALLOW_TSN 0x0002 /* WPA3 transition AP */ +#define AP_ALLOW_WPA3_ONLY 0x0004 /* WPA3 only AP */ +#define AP_ALLOW_MAX (AP_ALLOW_WPA2 | AP_ALLOW_TSN | \ + AP_ALLOW_WPA3_ONLY) +typedef struct { + uint32 wpa_ap_restrict; /* set WPA2 / WPA3 AP restriction policy */ +} wl_wsec_info_wpa_ap_restrict_t; /* * randmac definitions @@ -13322,7 +13915,11 @@ typedef enum wl_gpaio_option { GPAIO_DAC_IQ_DC_RDBK, GPAIO_DAC_IQ_DC_RDBK_CLEAR, GPAIO_AFE_LDO_FOR_DAC_DC, - GPAIO_PA5G_VCAS_SOURCE + GPAIO_PA5G_VCAS_SOURCE, + GPAIO_BIQ2_DC_MEAS, + GPAIO_BIQ2_DC_CLEAR, + GPAIO_VBATMONITOR, + GPAIO_PA5G_VCAS_GMDRAIN } wl_gpaio_option_t; /** IO Var Operations - the Value of iov_op In wlc_ap_doiovar */ @@ -13466,6 +14063,30 @@ typedef struct wl_band { } wl_band_t; +#define WL_ROAM_STATS_VER_1 (1u) /**< current version of wl_if_stats structure */ + +/** roam statistics counters */ +typedef struct { + uint16 version; /**< version of the structure */ + uint16 length; /**< length of the entire structure */ + uint32 initial_assoc_time; + uint32 prev_roam_time; + uint32 last_roam_event_type; + uint32 last_roam_event_status; + uint32 last_roam_event_reason; + uint16 roam_success_cnt; + uint16 roam_fail_cnt; + uint16 roam_attempt_cnt; + uint16 max_roam_target_cnt; + uint16 min_roam_target_cnt; + uint16 max_cached_ch_cnt; + uint16 min_cached_ch_cnt; + uint16 
partial_roam_scan_cnt; + uint16 full_roam_scan_cnt; + uint16 most_roam_reason; + uint16 most_roam_reason_cnt; +} wl_roam_stats_v1_t; + #define WL_WLC_VERSION_T_VERSION 1 /**< current version of wlc_version structure */ /** wlc interface version */ @@ -13485,6 +14106,16 @@ typedef struct wl_wlc_version { } wl_wlc_version_t; +#define WL_SCAN_VERSION_T_VERSION 1 /**< current version of scan_version structure */ +/** scan interface version */ +typedef struct wl_scan_version { + uint16 version; /**< version of the structure */ + uint16 length; /**< length of the entire structure */ + + /* scan interface version numbers */ + uint16 scan_ver_major; /**< scan interface major version number */ +} wl_scan_version_t; + /* Highest version of WLC_API_VERSION supported */ #define WLC_API_VERSION_MAJOR_MAX 8 #define WLC_API_VERSION_MINOR_MAX 0 @@ -13545,6 +14176,7 @@ enum { WL_PROXD_SESSION_FLAG_TX_AUTO_BURST = 0x00000200, /**< Same as proxd flags above */ WL_PROXD_SESSION_FLAG_NAN_BSS = 0x00000400, /**< Use NAN BSS, if applicable */ WL_PROXD_SESSION_FLAG_TS1 = 0x00000800, /**< e.g. 
FTM1 - ASAP-capable */ + WL_PROXD_SESSION_FLAG_RANDMAC = 0x00001000, /**< use random mac */ WL_PROXD_SESSION_FLAG_REPORT_FAILURE = 0x00002000, /**< report failure to target */ WL_PROXD_SESSION_FLAG_INITIATOR_RPT = 0x00004000, /**< report distance to target */ WL_PROXD_SESSION_FLAG_NOCHANSWT = 0x00008000, @@ -13647,6 +14279,9 @@ enum { typedef uint16 wl_proxd_session_id_t; +/* Use WL_PROXD_E_* errorcodes from this file if BCMUTILS_ERR_CODES not defined */ +#ifndef BCMUTILS_ERR_CODES + /** status - TBD BCME_ vs proxd status - range reserved for BCME_ */ enum { WL_PROXD_E_LAST = -1056, @@ -13689,6 +14324,8 @@ enum { }; typedef int32 wl_proxd_status_t; +#endif /* BCMUTILS_ERR_CODES */ + /* proxd errors from phy */ #define PROXD_TOF_INIT_ERR_BITS 16 @@ -14086,7 +14723,7 @@ enum { typedef uint32 wl_proxd_debug_mask_t; /** tlv IDs - data length 4 bytes unless overridden by type, alignment 32 bits */ -enum { +typedef enum { WL_PROXD_TLV_ID_NONE = 0, WL_PROXD_TLV_ID_METHOD = 1, WL_PROXD_TLV_ID_FLAGS = 2, @@ -14150,8 +14787,65 @@ enum { WL_PROXD_TLV_ID_COLLECT_CHAN_DATA = 1030, /* wl_proxd_collect_data_t */ WL_PROXD_TLV_ID_MF_STATS_DATA = 1031, /* mf_stats_buffer */ + WL_PROXD_TLV_ID_COLLECT_INLINE_HEADER = 1032, + WL_PROXD_TLV_ID_COLLECT_INLINE_FRAME_INFO = 1033, + WL_PROXD_TLV_ID_COLLECT_INLINE_FRAME_DATA = 1034, + WL_PROXD_TLV_ID_COLLECT_INLINE_RESULTS = 1035, + WL_PROXD_TLV_ID_MAX -}; +} wl_proxd_tlv_types_t; + +#define TOF_COLLECT_INLINE_HEADER_INFO_VER_1 1 + +typedef struct wl_proxd_collect_inline_header_info_v1 +{ + uint16 version; + uint16 pad1; + uint32 ratespec; /* override */ + chanspec_t chanspec; + uint16 num_ftm; + struct ether_addr peer_mac; + struct ether_addr cur_ether_addr; /* source address for Tx */ +} wl_proxd_collect_inline_header_info_v1_t; + +#define TOF_COLLECT_INLINE_RESULTS_VER_1 1 +typedef struct wl_proxd_collect_inline_results_info_v1 +{ + uint16 version; + uint16 pad1; + uint32 meanrtt; + uint32 distance; + uint16 num_rtt; + uint16 pad2; + int32 
status; + uint32 ratespec; +} wl_proxd_collect_inline_results_info_v1_t; + +#define TOF_COLLECT_INLINE_FRAME_INFO_VER_1 1 +typedef struct wl_proxd_collect_inline_frame_info_v1 +{ + uint16 version; + uint16 pad1; + int32 gd; + uint32 T[4]; + uint32 prev_t1; + uint32 prev_t4; + int32 hadj; + int8 rssi; + uint8 pad[3]; +} wl_proxd_collect_inline_frame_info_v1_t; + +#define TOF_COLLECT_INLINE_FRAME_INFO_VER_2 2 +typedef struct wl_proxd_collect_inline_frame_info_v2 +{ + uint16 version; + uint16 pad1; + int32 gd; + uint32 T[4]; + int32 hadj; + int8 rssi; + uint8 pad[3]; +} wl_proxd_collect_inline_frame_info_v2_t; typedef struct wl_proxd_tlv { uint16 id; @@ -14216,7 +14910,7 @@ typedef struct wl_proxd_event { wl_proxd_event_type_t type; wl_proxd_method_t method; wl_proxd_session_id_t sid; - uint8 pad[2]; + uint8 pad[2]; /* This field is used fragmentation purpose */ wl_proxd_tlv_t tlvs[1]; /**< variable */ } wl_proxd_event_t; @@ -14346,7 +15040,7 @@ typedef struct wl_bssload_cfg { #define WL_ESTM_ROAM_DELTA_DEFAULT 10 typedef struct wl_roam_prof_v3 { - int8 roam_flags; /**< bit flags */ + uint8 roam_flags; /**< bit flags */ int8 roam_trigger; /**< RSSI trigger level per profile/RSSI bracket */ int8 rssi_lower; int8 roam_delta; @@ -14518,7 +15212,6 @@ typedef struct wnm_roam_trigger_cfg { typedef enum wl_interface_type { WL_INTERFACE_TYPE_STA = 0, WL_INTERFACE_TYPE_AP = 1, - WL_INTERFACE_TYPE_AWDL = 2, WL_INTERFACE_TYPE_NAN = 3, WL_INTERFACE_TYPE_P2P_GO = 4, WL_INTERFACE_TYPE_P2P_GC = 5, @@ -14601,6 +15294,13 @@ typedef struct wl_interface_create_v3 { #define WL_INTERFACE_INFO_VER_1 1 #define WL_INTERFACE_INFO_VER_2 2 +typedef struct wl_interface_info { + uint16 ver; /* version of this struct */ + struct ether_addr mac_addr; /* MAC address of the interface */ + char ifname[BCM_MSG_IFNAME_MAX]; /* name of interface */ + uint8 bsscfgidx; /* source bsscfg index */ +} wl_interface_info_t; + typedef struct wl_interface_info_v1 { uint16 ver; /**< version of this struct */ struct 
ether_addr mac_addr; /**< MAC address of the interface */ @@ -14971,6 +15671,7 @@ typedef struct wl_txpwrcap_dump_v3 { #define CAP_2G_DEPON_5G (0x10) /* 2G pwr caps depend on other slice 5G subband */ #define CAP_SISO_MIMO (0x20) /* Siso/Mimo Separate Power Caps */ #define CAP_ANT_TX (0x40) /* Separate Power Caps based on cell ant tx value */ +#define CAP_LTE_PQBIT (0x100u) /* QPBit is enabled */ #define CAP_ONOFF_BODY_CCK_OFDM (CAP_ONOFF_BODY | CAP_CCK_OFDM) #define CAP_TXPWR_ALL (CAP_ONOFF_BODY|CAP_CCK_OFDM|CAP_LTE_CELL|\ CAP_SISO_MIMO|CAP_HEAD_BODY|CAP_ANT_TX) @@ -15013,8 +15714,9 @@ typedef struct { int8 pwrs[][TXPWRCAP_MAX_NUM_SUBGRPS][TXPWRCAP_MAX_NUM_ANTENNAS_V3]; } wl_txpwrcap_v2_t; -#define TXPWRCAP_DUMP_VERSION_4 4 -#define TXPWRCAP_DUMP_VERSION_5 5 +#define TXPWRCAP_DUMP_VERSION_4 4u +#define TXPWRCAP_DUMP_VERSION_5 5u +#define TXPWRCAP_DUMP_VERSION_6 6u typedef struct wl_txpwrcap_dump_v4 { uint8 version; @@ -15048,6 +15750,37 @@ typedef struct wl_txpwrcap_dump_v5 { int8 pwrcap[]; /* variable size power caps (wl_txpwrcap_v2_t) */ } wl_txpwrcap_dump_v5_t; +typedef struct wl_txpwrcap_dump_v6 { + uint8 version; + uint8 num_pwrcap; + uint8 current_country[2]; + uint8 current_channel; + uint8 high_cap_state_enabled; + uint8 reserved[2]; + uint8 download_present; + uint8 num_ants; /* number antenna slice */ + uint8 num_cc_groups; /* number cc groups */ + uint8 current_country_cc_group_info_index; + uint8 ant_tx; /* current value of ant_tx */ + uint8 cell_status; /* current value of cell status */ + uint16 capability[TXHDR_MAX_SECTION]; /* capabilities */ + int8 pwrcap[]; /* variable size power caps (wl_txpwrcap_v2_t) */ +} wl_txpwrcap_dump_v6_t; + +#define TXCAPINFO_VERSION_1 1 +typedef struct wl_txpwrcap_ccgrp_info { + uint8 num_cc; + char cc_list[1][2]; /* 2 letters for each country. 
At least one country */ +} wl_txpwrcap_ccgrp_info_t; + +typedef struct { + uint16 version; + uint16 length; /* length in bytes */ + uint8 num_ccgrp; + /* followed by one or more wl_txpwrcap_ccgrp_info_t */ + wl_txpwrcap_ccgrp_info_t ccgrp_data[1]; +} wl_txpwrcap_info_t; + typedef struct wl_txpwrcap_tbl { uint8 num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES]; /* Stores values for valid antennas */ @@ -15346,6 +16079,38 @@ typedef struct { uint32 he_txtbppdu; } wl_compact_he_cnt_wlc_v1_t; +#define WL_PERIODIC_COMPACT_HE_CNTRS_VER_2 (2) +typedef struct { + uint16 version; + uint16 len; + uint32 he_rxtrig_myaid; + uint32 he_rxtrig_rand; + uint32 he_colormiss_cnt; + uint32 he_txmampdu; + uint32 he_txmtid_back; + uint32 he_rxmtid_back; + uint32 he_rxmsta_back; + uint32 he_txfrag; + uint32 he_rxdefrag; + uint32 he_txtrig; + uint32 he_rxtrig_basic; + uint32 he_rxtrig_murts; + uint32 he_rxtrig_bsrp; + uint32 he_rxhemuppdu_cnt; + uint32 he_physu_rx; + uint32 he_phyru_rx; + uint32 he_txtbppdu; + uint32 he_null_tbppdu; + uint32 he_rxhesuppdu_cnt; + uint32 he_rxhesureppdu_cnt; + uint32 he_null_zero_agg; + uint32 he_null_bsrp_rsp; + uint32 he_null_fifo_empty; +} wl_compact_he_cnt_wlc_v2_t; + +/* for future versions of this data structure, can consider wl_txbf_ecounters_t + * which contains the full list of txbf dump counters + */ typedef struct { uint16 version; uint16 coreup; @@ -15399,6 +16164,70 @@ typedef struct { uint32 txdeauthivalclass; } wl_event_based_statistics_v1_t; +#define WL_EVENT_STATISTICS_VER_2 (2) +/* Event based statistics ecounters */ +typedef struct { + uint16 version; + uint16 pad; + struct ether_addr BSSID; /* BSSID of the BSS */ + uint32 txdeauthivalclass; + /* addition for v2 */ + int32 timestamp; /* last deauth time */ + struct ether_addr last_deauth; /* wrong deauth MAC */ + uint16 misdeauth; /* wrong deauth count every 1sec */ + int16 cur_rssi; /* current bss rssi */ + int16 deauth_rssi; /* deauth pkt rssi */ +} wl_event_based_statistics_v2_t; + 
+#define WL_EVENT_STATISTICS_VER_3 (3) +/* Event based statistics ecounters */ +typedef struct { + uint16 version; + uint16 pad; + struct ether_addr BSSID; /* BSSID of the BSS */ + uint16 PAD; + uint32 txdeauthivalclass; + /* addition for v2 */ + int32 timestamp; /* last deauth time */ + struct ether_addr last_deauth; /* wrong deauth MAC */ + uint16 misdeauth; /* wrong deauth count every 1sec */ + int16 cur_rssi; /* current bss rssi */ + int16 deauth_rssi; /* deauth pkt rssi */ + /* addition for v3 (roam statistics) */ + uint32 initial_assoc_time; + uint32 prev_roam_time; + uint32 last_roam_event_type; + uint32 last_roam_event_status; + uint32 last_roam_event_reason; + uint16 roam_success_cnt; + uint16 roam_fail_cnt; + uint16 roam_attempt_cnt; + uint16 max_roam_target_cnt; + uint16 min_roam_target_cnt; + uint16 max_cached_ch_cnt; + uint16 min_cached_ch_cnt; + uint16 partial_roam_scan_cnt; + uint16 full_roam_scan_cnt; + uint16 most_roam_reason; + uint16 most_roam_reason_cnt; +} wl_event_based_statistics_v3_t; + +#define WL_EVENT_STATISTICS_VER_4 (4u) +/* Event based statistics ecounters */ +typedef struct { + uint16 version; + uint16 pad; + struct ether_addr BSSID; /* BSSID of the BSS */ + uint16 PAD; + uint32 txdeauthivalclass; + /* addition for v2 */ + int32 timestamp; /* last deauth time */ + struct ether_addr last_deauth; /* wrong deauth MAC */ + uint16 misdeauth; /* wrong deauth count every 1sec */ + int16 cur_rssi; /* current bss rssi */ + int16 deauth_rssi; /* deauth pkt rssi */ +} wl_event_based_statistics_v4_t; + /* ##### Ecounters v2 section ##### */ #define ECOUNTERS_VERSION_2 2 @@ -15778,23 +16607,23 @@ typedef struct mu_group { } mu_group_t; typedef struct mupkteng_sta { - struct ether_addr ea; + struct ether_addr ea; uint8 PAD[2]; - int32 nrxchain; - int32 idx; + int32 nrxchain; + int32 idx; } mupkteng_sta_t; typedef struct mupkteng_client { - int32 rspec; - int32 idx; - int32 flen; - int32 nframes; + int32 rspec; + int32 idx; + int32 flen; + int32 
nframes; } mupkteng_client_t; typedef struct mupkteng_tx { - mupkteng_client_t client[8]; - int32 nclients; - int32 ntx; + mupkteng_client_t client[8]; + int32 nclients; + int32 ntx; } mupkteng_tx_t; /* @@ -15839,11 +16668,11 @@ enum { /* ifdef WL11ULB */ /* ULB Mode configured via "ulb_mode" IOVAR */ enum { - ULB_MODE_DISABLED = 0, - ULB_MODE_STD_ALONE_MODE = 1, /* Standalone ULB Mode */ - ULB_MODE_DYN_MODE = 2, /* Dynamic ULB Mode */ + ULB_MODE_DISABLED = 0, + ULB_MODE_STD_ALONE_MODE = 1, /* Standalone ULB Mode */ + ULB_MODE_DYN_MODE = 2, /* Dynamic ULB Mode */ /* Add all other enums before this */ - MAX_SUPP_ULB_MODES + MAX_SUPP_ULB_MODES }; /* ULB BWs configured via "ulb_bw" IOVAR during Standalone Mode Only. @@ -15852,17 +16681,18 @@ enum { * 'ULB Operations' Attribute or 'ULB Mode Switch' Attribute) */ typedef enum { - ULB_BW_DISABLED = 0, - ULB_BW_10MHZ = 1, /* Standalone ULB BW in 10 MHz BW */ - ULB_BW_5MHZ = 2, /* Standalone ULB BW in 5 MHz BW */ - ULB_BW_2P5MHZ = 3, /* Standalone ULB BW in 2.5 MHz BW */ + ULB_BW_DISABLED = 0, + ULB_BW_10MHZ = 1, /* Standalone ULB BW in 10 MHz BW */ + ULB_BW_5MHZ = 2, /* Standalone ULB BW in 5 MHz BW */ + ULB_BW_2P5MHZ = 3, /* Standalone ULB BW in 2.5 MHz BW */ /* Add all other enums before this */ - MAX_SUPP_ULB_BW + MAX_SUPP_ULB_BW } ulb_bw_type_t; /* endif WL11ULB */ #define WL_MESH_IOCTL_VERSION 1 #define MESH_IOC_BUFSZ 512 /* sufficient ioc buff size for mesh */ + /* container for mesh iovtls & events */ typedef struct wl_mesh_ioc { uint16 version; /* interface command or event version */ @@ -15988,9 +16818,9 @@ typedef struct { /* values for IOV_MFP arg */ enum { - WL_MFP_NONE = 0, - WL_MFP_CAPABLE, - WL_MFP_REQUIRED + WL_MFP_NONE = 0, + WL_MFP_CAPABLE, + WL_MFP_REQUIRED }; typedef enum { @@ -16004,7 +16834,6 @@ typedef enum { CHANSW_IOVAR = 7, /* channel switch due to IOVAR */ CHANSW_CSA_DFS = 8, /* channel switch due to chan switch announcement from AP */ CHANSW_APCS = 9, /* Channel switch from AP channel select 
module */ - CHANSW_AWDL = 10, /* channel switch due to AWDL */ CHANSW_FBT = 11, /* Channel switch from FBT module for action frame response */ CHANSW_UPDBW = 12, /* channel switch at update bandwidth */ CHANSW_ULB = 13, /* channel switch at ULB */ @@ -16190,12 +17019,15 @@ enum wl_slotted_bss_cmd_id { WL_SLOTTED_BSS_CMD_CHANSEQ = 1, WL_SLOTTED_BSS_CMD_CS_BMP = 2 /* critical slots bitmap */ }; + typedef uint16 chan_seq_type_t; enum chan_seq_type { - CHAN_SEQ_TYPE_AWDL = 1, - CHAN_SEQ_TYPE_SLICE = 2, - CHAN_SEQ_TYPE_NAN = 3 + CHAN_SEQ_TYPE_AWDL = 1, + CHAN_SEQ_TYPE_SLICE = 2, + CHAN_SEQ_TYPE_NAN = 3, /* NAN avail XTLV */ + CHAN_SEQ_TYPE_NANHO = 4 /* NANHO channel schedule XTLV */ }; + typedef uint8 sched_flag_t; enum sched_flag { NO_SDB_SCHED = 0x1, @@ -16240,6 +17072,7 @@ typedef struct slice_chan_seq { #define SLOT_BSS_SLICE_TYPE_DUR_MAX_RANGE 2u #define SLOTTED_BSS_AGGR_EN (1 << 0) /* Bitmap of mode */ #define SLOTTED_BSS_AGGR_LIMIT_DUR (1 << 1) /* Jira 49554 */ +#define SLOTTED_BSS_HE_1024_QAM_SUPPORT (1 << 2) /* MCS10-11 Support */ #define WL_SLICE_CHAN_SEQ_FIXED_LEN OFFSETOF(slice_chan_seq_t, chanspecs) /* Definitions for slotted_bss stats */ @@ -16540,8 +17373,11 @@ typedef struct wl_idauth_counters { #define WLC_UTRACE_LEN (1024u * 4u) // default length #define WLC_UTRACE_LEN_AUX (1024u * 3u) // reduced length to fit smaller AUX BM +#define WLC_UTRACE_LEN_SC (1024u * 3u) // reduced length to fit smaller Scan core BM + #define WLC_UTRACE_READ_END 0 #define WLC_UTRACE_MORE_DATA 1 + typedef struct wl_utrace_capture_args_v1 { uint32 length; uint32 flag; @@ -16609,6 +17445,8 @@ enum { WL_HC_RX_XTLV_ID_VAL_STALL_THRESHOLD = 3, /* stall_threshold */ WL_HC_RX_XTLV_ID_VAL_STALL_SAMPLE_SIZE = 4, /* stall_sample_size */ WL_HC_RX_XTLV_ID_VAL_STALL_FORCE = 5, /* stall test trigger */ + WL_HC_RX_XTLV_ID_VAL_STALL_UC_DECRYPT_FAIL = 6, /* trigger uc decrypt failures */ + WL_HC_RX_XTLV_ID_VAL_STALL_BCMC_DECRYPT_FAIL = 7, /* trigger bcmc decrypt failures */ }; /* Health Check: 
Datapath SCAN IDs */ @@ -16637,17 +17475,30 @@ typedef enum { /* IDs of Health Check report structures for sub types of health checks within WL */ typedef enum wl_hc_dd_type { - WL_HC_DD_PCIE = 0, /* PCIe */ - WL_HC_DD_RX_DMA_STALL = 1, /* RX DMA stall check */ - WL_HC_DD_RX_STALL = 2, /* RX stall check */ - WL_HC_DD_TX_STALL = 3, /* TX stall check */ - WL_HC_DD_SCAN_STALL = 4, /* SCAN stall check */ - WL_HC_DD_PHY = 5, /* PHY health check */ - WL_HC_DD_REINIT = 6, /* Reinit due to other reasons */ - WL_HC_DD_TXQ_STALL = 7, /* TXQ stall */ + WL_HC_DD_PCIE = 0, /* PCIe */ + WL_HC_DD_RX_DMA_STALL = 1, /* RX DMA stall check */ + WL_HC_DD_RX_STALL = 2, /* RX stall check */ + WL_HC_DD_TX_STALL = 3, /* TX stall check */ + WL_HC_DD_SCAN_STALL = 4, /* SCAN stall check */ + WL_HC_DD_PHY = 5, /* PHY health check */ + WL_HC_DD_REINIT = 6, /* Reinit due to other reasons */ + WL_HC_DD_TXQ_STALL = 7, /* TXQ stall */ + WL_HC_DD_RX_STALL_V2 = 8, /* RX stall check v2 */ WL_HC_DD_MAX } wl_hc_dd_type_t; +/* RX stall reason codes sent with wl_rx_hc_info_v2_t */ +typedef enum bcm_rx_hc_stall_reason { + BCM_RX_HC_RESERVED = 0, + BCM_RX_HC_UNSPECIFIED = 1, /* All other. 
Catch all */ + BCM_RX_HC_UNICAST_DECRYPT_FAIL = 2, /* Unicast decrypt fail */ + BCM_RX_HC_BCMC_DECRYPT_FAIL = 3, /* BCMC decrypt fail */ + BCM_RX_HC_UNICAST_REPLAY = 4, /* Unicast replay */ + BCM_RX_HC_BCMC_REPLAY = 5, /* BCMC replay */ + BCM_RX_HC_AMPDU_DUP = 6, /* AMPDU DUP */ + BCM_RX_HC_MAX +} bcm_rx_hc_stall_reason_t; + /* * Health Check report structures for sub types of health checks within WL */ @@ -16674,7 +17525,7 @@ typedef struct { /* Health Check report structure for Rx dropped packet failure check */ typedef struct { - uint16 type; + uint16 type; /* WL_HC_RX_DD_STALL */ uint16 length; uint32 bsscfg_idx; uint32 rx_hc_pkts; @@ -16682,31 +17533,78 @@ typedef struct { uint32 rx_hc_alert_th; } wl_rx_hc_info_t; +/* Health Check report structure for Rx dropped packet failure check */ +typedef struct { + uint16 type; /* WL_HC_RX_DD_STALL_V2 */ + uint16 length; + uint8 if_idx; /* interface index on which issue is reported */ + uint8 ac; /* access category on which this problem is seen */ + uint8 pad[2]; /* Reserved */ + uint32 rx_hc_pkts; + uint32 rx_hc_dropped_all; + uint32 rx_hc_alert_th; + uint32 reason; /* refer to bcm_rx_hc_stall_reason_t above */ + struct ether_addr peer_ea; +} wl_rx_hc_info_v2_t; + /* HE top level command IDs */ enum { - WL_HE_CMD_ENAB = 0, - WL_HE_CMD_FEATURES = 1, - WL_HE_CMD_TWT_SETUP = 2, - WL_HE_CMD_TWT_TEARDOWN = 3, - WL_HE_CMD_TWT_INFO = 4, - WL_HE_CMD_BSSCOLOR = 5, - WL_HE_CMD_PARTIAL_BSSCOLOR = 6, - WL_HE_CMD_CAP = 7, - WL_HE_CMD_STAID = 8, - WL_HE_CMD_RTSDURTHRESH = 10, - WL_HE_CMD_PEDURATION = 11, - WL_HE_CMD_TESTBED_MODE = 12, + WL_HE_CMD_ENAB = 0u, + WL_HE_CMD_FEATURES = 1u, + WL_HE_CMD_TWT_SETUP = 2u, + WL_HE_CMD_TWT_TEARDOWN = 3u, + WL_HE_CMD_TWT_INFO = 4u, + WL_HE_CMD_BSSCOLOR = 5u, + WL_HE_CMD_PARTIAL_BSSCOLOR = 6u, + WL_HE_CMD_CAP = 7u, + WL_HE_CMD_STAID = 8u, + WL_HE_CMD_MUEDCA = 9u, + WL_HE_CMD_RTSDURTHRESH = 10u, + WL_HE_CMD_PEDURATION = 11u, + WL_HE_CMD_TESTBED_MODE = 12u, + WL_HE_CMD_OMI_CONFIG = 13u, + 
WL_HE_CMD_OMI_STATUS = 14u, + WL_HE_CMD_OMI_ULMU_THROTTLE = 15u, + WL_HE_CMD_ULMU_DISABLE_POLICY = 16u, + WL_HE_CMD_ULMU_DISABLE_STATS = 17u, + WL_HE_CMD_OMI_DLMU_RSD_RCM_MPF_MAP = 18u, + WL_HE_CMD_SR_PROHIBIT = 19u, WL_HE_CMD_LAST }; +enum { + WL_HE_MUEDCA_IE = 0, + WL_HE_MUEDCA_SHM = 1, + WL_HE_MUEDCA_LAST +}; + +#ifdef WL11AX + +/* struct for dump MU EDCA IE/SHM paramters */ +typedef struct wl_he_muedca_ie_v1 { + uint16 version; /* structure version */ + uint16 length; /* data length (starting after this field) */ + uint8 mu_qos_info; + he_mu_ac_param_record_t param_ac[AC_COUNT]; +} wl_he_muedca_ie_v1_t; + +typedef wl_he_muedca_ie_v1_t wl_he_muedca_ie_t; + +#define WL_HE_MUEDCA_VER_1 1 + +#endif /* WL11AX */ + /* TWT top level command IDs */ enum { - WL_TWT_CMD_ENAB = 0, - WL_TWT_CMD_SETUP = 1, - WL_TWT_CMD_TEARDOWN = 2, - WL_TWT_CMD_INFO = 3, - WL_TWT_CMD_AUTOSCHED = 4, - WL_TWT_CMD_EARLY_TERM_TIME = 6, + WL_TWT_CMD_ENAB = 0, + WL_TWT_CMD_SETUP = 1, + WL_TWT_CMD_TEARDOWN = 2, + WL_TWT_CMD_INFO = 3, + WL_TWT_CMD_AUTOSCHED = 4, + WL_TWT_CMD_STATS = 5, + WL_TWT_CMD_EARLY_TERM_TIME = 6, + WL_TWT_CMD_RESP_CONFIG = 7, + WL_TWT_CMD_SPPS_ENAB = 8, WL_TWT_CMD_LAST }; @@ -16717,12 +17615,12 @@ enum { /* HEB top level command IDs */ enum { - WL_HEB_CMD_ENAB = 0, - WL_HEB_CMD_NUM_HEB = 1, - WL_HEB_CMD_COUNTERS = 2, - WL_HEB_CMD_CLEAR_COUNTERS = 3, - WL_HEB_CMD_CONFIG = 4, - WL_HEB_CMD_STATUS = 5, + WL_HEB_CMD_ENAB = 0, + WL_HEB_CMD_NUM_HEB = 1, + WL_HEB_CMD_COUNTERS = 2, + WL_HEB_CMD_CLEAR_COUNTERS = 3, + WL_HEB_CMD_CONFIG = 4, + WL_HEB_CMD_STATUS = 5, WL_HEB_CMD_LAST }; @@ -16779,119 +17677,277 @@ typedef struct wl_heb_status_v1 { wl_heb_reg_status_v1_t heb_status[1]; } wl_heb_status_v1_t; +/* HWA */ +#define WL_HWA_VER_1 1 + +/* HWA top level command IDs */ +typedef enum wl_hwa_cmd_type { + WL_HWA_CMD_ENAB = 0, + WL_HWA_CMD_CAPS = 1, + WL_HWA_CMD_COUNTERS = 2, + WL_HWA_CMD_CLRCNTS = 3, + WL_HWA_CMD_REGDUMP = 4, + WL_HWA_CMD_INDUCE_ERR = 5, + WL_HWA_CMD_LAST +} 
wl_hwa_cmd_type_t; + +typedef struct wl_hwa_cnts_info_v1 { + uint16 cnt_rxs_filter; /* #filters added */ + uint16 cnt_rxs_chainable; /* #rxchainable matched */ +} wl_hwa_cnts_info_v1_t; + +/* HWA dump info structures */ +typedef struct wl_hwa_hwcaps_info_v1 { + uint16 up; /* is hwa init'd/deint'd */ + uint16 corerev; /* hwa core revision */ + uint32 submodules_mask; /* mask for hwa submodules that are enabled */ +} wl_hwa_hwcaps_info_v1_t; + +typedef struct wl_hwa_cnts_v1 { + /* structure control */ + uint16 version; /* structure version */ + uint16 length; /* data length (starting after this field) */ + wl_hwa_cnts_info_v1_t hwa_cnts_info[]; /* variable length array with hwa counters */ +} wl_hwa_cnts_v1_t; + +/* All submodules, order is important and define order of initialization. */ +/* Not use enumeration here because these defines are also used in macro */ +#define HWA_SUBMODULES_COMMON 0 /**< Common */ +#define HWA_SUBMODULES_TXPOST 1u /**< TxPost 3a */ +#define HWA_SUBMODULES_RXPOSTFILL 2u /**< RxPost and Fill 1a/1b */ +#define HWA_SUBMODULES_TXDMA 3u /**< TxDMA 3b */ +#define HWA_SUBMODULES_TXS 4u /**< TxStatus 4a */ +#define HWA_SUBMODULES_BUFMGR 5u /**< Buffer Manager, RX and TX. Do this last */ +#define HWA_SUBMODULES_CPL 6u /**< Completion 2b/4b */ +#define HWA_SUBMODULES_RXS 7u /**< RxStatus 2a */ +#define HWA_SUBMODULES_NUM 8u /**< number of submodules */ + +#define HWA_SUBMODULES_ALL 0xFF /* Bitmaps for all submodules */ +#ifdef HWA +#define HWA_SUBMODULE_MASK(submodule) (1u << (submodule)) +#else +#define HWA_SUBMODULE_MASK(submodule) (0) +#endif /* HWA */ +/* + * NOTES: + * wl_twt_sdesc_t is used to support both broadcast TWT and individual TWT. 
+ * Value in bit[0:2] in 'flow_id' field is interpreted differently: + * - flow id for individual TWT (when WL_TWT_FLOW_FLAG_BROADCAST bit is NOT set + * in 'flow_flags' field) + * - flow id as defined in Table 8-248l1 for broadcast TWT (when + * WL_TWT_FLOW_FLAG_BROADCAST bit is set) + * In latter case other bits could be used to differentiate different flows + * in order to support multiple broadcast TWTs with the same flow id. + */ + /* TWT Setup descriptor */ -typedef struct { +typedef struct wl_twt_sdesc { /* Setup Command. */ - uint8 setup_cmd; /* See TWT_SETUP_CMD_XXXX in 802.11ah.h, - * valid when bcast_twt is FALSE. - */ - /* Flow attributes */ - uint8 flow_flags; /* See WL_TWT_FLOW_FLAG_XXXX below */ - uint8 flow_id; /* must be between 0 and 7 */ - /* Target Wake Time */ + uint8 setup_cmd; /* See TWT_SETUP_CMD_XXXX in 802.11ah.h */ + uint8 flow_flags; /* Flow attributes. See WL_TWT_FLOW_FLAG_XXXX below */ + uint8 flow_id; /* must be between 0 and 7. Set 0xFF for auto assignment */ + uint8 bid; /* must be between 0 and 31. Set 0xFF for auto assignment */ + uint8 channel; /* Twt channel - Not used for now */ + uint8 negotiation_type; /* Negotiation Type: See macros TWT_NEGO_TYPE_X */ + uint8 frame_recomm; /* frame recommendation for broadcast TWTs - Not used for now */ uint8 wake_type; /* See WL_TWT_TIME_TYPE_XXXX below */ uint32 wake_time_h; /* target wake time - BSS TSF (us) */ uint32 wake_time_l; uint32 wake_dur; /* target wake duration in unit of microseconds */ uint32 wake_int; /* target wake interval */ + uint32 btwt_persistence; /* Broadcast TWT Persistence */ + uint32 wake_int_max; /* max wake interval(uS) for TWT */ + uint8 duty_cycle_min; /* min duty cycle for TWT(Percentage) */ + uint8 pad; + /* deprecated - to be removed */ + uint16 li; - uint16 bid; /* must be between 0 and 255. 
Set 0xFFFF for auto assignment */ - uint16 li; /* Listen interval: Units in number of beacon intervals */ - uint8 channel; /* twt channel */ - uint8 pad[3]; } wl_twt_sdesc_t; /* Flow flags */ -#define WL_TWT_FLOW_FLAG_BROADCAST (1 << 0) -#define WL_TWT_FLOW_FLAG_IMPLICIT (1 << 1) -#define WL_TWT_FLOW_FLAG_UNANNOUNCED (1 << 2) -#define WL_TWT_FLOW_FLAG_TRIGGER (1 << 3) -#define WL_TWT_FLOW_FLAG_WAKE_TBTT_NEGO (1 << 4) -#define WL_TWT_FLOW_FLAG_REQUEST (1 << 5) +#define WL_TWT_FLOW_FLAG_UNANNOUNCED (1u << 0u) +#define WL_TWT_FLOW_FLAG_TRIGGER (1u << 1u) +#define WL_TWT_FLOW_FLAG_REQUEST (1u << 2u) +#define WL_TWT_FLOW_FLAG_PROTECT (1u << 3u) +#define WL_TWT_FLOW_FLAG_RESPONDER_PM (1u << 4u) +#define WL_TWT_FLOW_FLAG_UNSOLICITED (1u << 5u) + +/* Deprecated - To be removed */ +#define WL_TWT_FLOW_FLAG_BROADCAST (1u << 5u) +#define WL_TWT_FLOW_FLAG_WAKE_TBTT_NEGO (1u << 6u) +#define WL_TWT_FLOW_FLAG_IMPLICIT (1u << 7u) /* Flow id */ -#define WL_TWT_FLOW_ID_FID 0x07 /* flow id */ -#define WL_TWT_FLOW_ID_GID_MASK 0x70 /* group id - broadcast TWT only */ -#define WL_TWT_FLOW_ID_GID_SHIFT 4 +#define WL_TWT_FLOW_ID_FID 0x07u /* flow id */ +#define WL_TWT_FLOW_ID_GID_MASK 0x70u /* group id - broadcast TWT only */ +#define WL_TWT_FLOW_ID_GID_SHIFT 4u -#define WL_TWT_INV_BCAST_ID 0xFFFFu +#define WL_TWT_INV_BCAST_ID 0xFFu #define WL_TWT_INV_FLOW_ID 0xFFu -#define WL_TWT_DIALOG_TOKEN_AUTO 0xFFFF +/* auto flow_id */ +#define WL_TWT_SETUP_FLOW_ID_AUTO 0xFFu +/* auto broadcast ID */ +#define WL_TWT_SETUP_BCAST_ID_AUTO 0xFFu +/* Infinite persistence for broadcast schedule */ +#define WL_TWT_INFINITE_BTWT_PERSIST 0xFFFFFFFFu + +/* should be larger than what chip supports */ +#define WL_TWT_STATS_MAX_BTWT 4u +#define WL_TWT_STATS_MAX_ITWT 4u /* Wake type */ /* TODO: not yet finalized */ -#define WL_TWT_TIME_TYPE_BSS 0 /* The time specified in wake_time_h/l is +#define WL_TWT_TIME_TYPE_BSS 0u /* The time specified in wake_time_h/l is * the BSS TSF time. 
*/ -#define WL_TWT_TIME_TYPE_OFFSET 1 /* The time specified in wake_time_h/l is an offset +#define WL_TWT_TIME_TYPE_OFFSET 1u /* The time specified in wake_time_h/l is an offset * of the TSF time when the iovar is processed. */ +#define WL_TWT_TIME_TYPE_AUTO 2u /* The target wake time is chosen internally by the FW */ -#define WL_TWT_SETUP_VER 0 +#define WL_TWT_SETUP_VER 0u /* HE TWT Setup command */ -typedef struct { +typedef struct wl_twt_setup { /* structure control */ uint16 version; /* structure version */ uint16 length; /* data length (starting after this field) */ /* peer address */ struct ether_addr peer; /* leave it all 0s' for AP */ - /* session id */ - uint16 dialog; /* an arbitrary number to identify the seesion */ + uint8 pad[2]; /* setup descriptor */ wl_twt_sdesc_t desc; + + /* deprecated - to be removed */ + uint16 dialog; + uint8 pad1[2]; } wl_twt_setup_t; -#define WL_TWT_TEARDOWN_VER 0 +/* deprecated -to be removed */ +#define WL_TWT_DIALOG_TOKEN_AUTO 0xFFFF + +#define WL_TWT_TEARDOWN_VER 0u + +/* twt teardown descriptor */ +typedef struct wl_twt_teardesc { + uint8 negotiation_type; + uint8 flow_id; /* must be between 0 and 7 */ + uint8 bid; /* must be between 0 and 31 */ + bool alltwt; /* all twt teardown - 0 or 1 */ +} wl_twt_teardesc_t; /* HE TWT Teardown command */ -typedef struct { +typedef struct wl_twt_teardown { /* structure control */ uint16 version; /* structure version */ uint16 length; /* data length (starting after this field) */ /* peer address */ struct ether_addr peer; /* leave it all 0s' for AP */ - /* flow attributes */ - uint8 flow_flags; /* See WL_TWT_FLOW_FLAG_XXXX above. 
- * (only BROADCAST) is applicable) - */ - uint8 flow_id; /* must be between 0 and 7 */ - uint16 bid; /* must be between 0 and 255 */ + wl_twt_teardesc_t teardesc; /* Teardown descriptor */ + + /* deprecated - to be removed */ + uint8 flow_flags; + uint8 flow_id; + uint8 bid; + uint8 pad; } wl_twt_teardown_t; /* twt information descriptor */ -typedef struct { +typedef struct wl_twt_infodesc { uint8 flow_flags; /* See WL_TWT_INFO_FLAG_XXX below */ uint8 flow_id; - uint8 wake_type; /* See WL_TWT_TIME_TYPE_XXXX below */ - uint8 pad[1]; + uint8 pad[2]; uint32 next_twt_h; uint32 next_twt_l; -} wl_twt_idesc_t; + /* deprecated - to be removed */ + uint8 wake_type; + uint8 pad1[3]; +} wl_twt_infodesc_t; /* Flow flags */ +#define WL_TWT_INFO_FLAG_ALL_TWT (1u << 0u) /* All TWT */ +#define WL_TWT_INFO_FLAG_RESUME (1u << 1u) /* 1 is TWT Resume, 0 is TWT Suspend */ + +/* deprecated - to be removed */ #define WL_TWT_INFO_FLAG_RESP_REQ (1 << 0) /* Response Requested */ #define WL_TWT_INFO_FLAG_NEXT_TWT_REQ (1 << 1) /* Next TWT Request */ #define WL_TWT_INFO_FLAG_BTWT_RESCHED (1 << 2) /* Broadcast Reschedule */ -#define WL_TWT_INFO_FLAG_RESUME (1 << 4) /* 1 is TWT Resume, 0 is TWT Suspend */ +typedef wl_twt_infodesc_t wl_twt_idesc_t; -#define WL_TWT_INFO_VER 0 +#define WL_TWT_INFO_VER 0u /* HE TWT Information command */ -typedef struct { +typedef struct wl_twt_info { /* structure control */ uint16 version; /* structure version */ uint16 length; /* data length (starting after this field) */ /* peer address */ struct ether_addr peer; /* leave it all 0s' for AP */ uint8 pad[2]; - /* Temporary change. 
to be removed */ + wl_twt_infodesc_t infodesc; /* information descriptor */ + /* deprecated - to be removed */ wl_twt_idesc_t desc; - /* information descriptor */ - wl_twt_idesc_t idesc; } wl_twt_info_t; +#define WL_TWT_PEER_STATS_VERSION_1 1u +typedef struct wl_twt_peer_stats_v1 { + uint16 version; + uint16 length; + struct ether_addr peer; + uint8 PAD[2]; + uint8 id; + uint8 flow_flags; + uint8 PAD[2]; + uint32 sp_seq; /* sequence number of the service period */ + uint32 tx_ucast_pkts; + uint32 tx_pkts_min; + uint32 tx_pkts_max; + uint32 tx_pkts_avg; + uint32 tx_failures; + uint32 rx_ucast_pkts; + uint32 rx_pkts_min; + uint32 rx_pkts_max; + uint32 rx_pkts_avg; + uint32 rx_pkts_retried; +} wl_twt_peer_stats_v1_t; + +#define WL_TWT_STATS_VERSION_1 1 +typedef struct wl_twt_stats_v1 { + uint16 version; + uint16 length; + uint32 num_stats; /* number of peer stats in the peer_stats_list */ + wl_twt_peer_stats_v1_t peer_stats_list[]; +} wl_twt_stats_v1_t; + +#define WL_TWT_STATS_CMD_VERSION_1 1 +#define WL_TWT_STATS_CMD_FLAGS_RESET (1u << 0u) +/* HE TWT stats command */ +typedef struct wl_twt_stats_cmd_v1 { + uint16 version; + uint16 length; + struct ether_addr peer; + uint8 PAD[2]; + uint16 flags; /* see WL_TWT_STATS_CMD_FLAGS */ + uint8 num_fid; + uint8 num_bid; + uint8 fid_list[WL_TWT_STATS_MAX_ITWT]; + uint8 bid_list[WL_TWT_STATS_MAX_BTWT]; +} wl_twt_stats_cmd_v1_t; + +#define WL_TWT_RESP_CFG_VER 0u + +#define WL_TWT_CMD_RESP_CFG_TYPE_ALTERNATE 0u +#define WL_TWT_CMD_RESP_CFG_TYPE_DICTATE 1u +/* HE TWT resp command */ +typedef struct wl_twt_resp_cfg { + /* structure control */ + uint16 version; /* Structure version */ + uint16 length; /* Data length (starting after this field) */ + uint8 dc_max; /* Max supported duty cycle for single TWT */ + uint8 resp_type; /* Resp. 
type(Alt/dict) if duty cycle>max duty cycle */ +} wl_twt_resp_cfg_t; + /* Current version for wlc_clm_power_limits_req_t structure and flags */ #define WLC_CLM_POWER_LIMITS_REQ_VERSION 1 /* "clm_power_limits" iovar request structure */ @@ -16958,6 +18014,7 @@ enum wl_mbo_cmd_ids { WL_MBO_CMD_NBR_INFO_CACHE = 11, WL_MBO_CMD_ANQPO_SUPPORT = 12, WL_MBO_CMD_DBG_EVENT_CHECK = 13, + WL_MBO_CMD_EVENT_MASK = 14, /* Add before this !! */ WL_MBO_CMD_LAST }; @@ -16973,9 +18030,14 @@ enum wl_mbo_xtlv_id { WL_MBO_XTLV_SUB_ELEM_TYPE = 0x8, WL_MBO_XTLV_BTQ_TRIG_START_OFFSET = 0x9, WL_MBO_XTLV_BTQ_TRIG_RSSI_DELTA = 0xa, - WL_MBO_XTLV_ANQP_CELL_SUPP = 0xb + WL_MBO_XTLV_ANQP_CELL_SUPP = 0xb, + WL_MBO_XTLV_BIT_MASK = 0xc }; +/* event bit mask flags for MBO */ +#define MBO_EVT_BIT_MASK_CELLULAR_SWITCH 0x0001 /* Evt bit mask to enab cellular switch */ +#define MBO_EVT_BIT_MASK_BTM_REQ_RCVD 0x0002 /* Evt bit mask to enab BTM req rcvd */ + typedef struct wl_mbo_counters { /* No of transition req recvd */ uint16 trans_req_rcvd; @@ -17190,8 +18252,6 @@ typedef struct wlc_leaked_infra_guard_marker { #define WL_LEAKED_GUARD_TIME_NONE 0 /* Not in any guard time */ #define WL_LEAKED_GUARD_TIME_FRTS (0x01 << 0) /* Normal FRTS power save */ #define WL_LEAKED_GUARD_TIME_SCAN (0x01 << 1) /* Channel switch due to scanning */ -#define WL_LEAKED_GUARD_TIME_AWDL_PSF (0x01 << 2) /* Channel switch due to AWDL PSF */ -#define WL_LEAKED_GUARD_TIME_AWDL_AW (0x01 << 3) /* Channel switch due to AWDL AW */ #define WL_LEAKED_GUARD_TIME_INFRA_STA (0x01 << 4) /* generic type infra sta channel switch */ #define WL_LEAKED_GUARD_TIME_TERMINATED (0x01 << 7) /* indicate a GT is terminated early */ @@ -17438,7 +18498,7 @@ typedef struct sssr_reg_info_v2 { } base_regs; struct { uint32 resetctrl; - uint32 itopoobb; + uint32 extrsrcreq; } wrapper_regs; } arm_regs; struct { @@ -17448,7 +18508,7 @@ typedef struct sssr_reg_info_v2 { uint32 clockcontrolstatus_val; } base_regs; struct { - uint32 itopoobb; + uint32 extrsrcreq; 
} wrapper_regs; } pcie_regs; struct { @@ -17460,7 +18520,7 @@ typedef struct sssr_reg_info_v2 { } base_regs; struct { uint32 resetctrl; - uint32 itopoobb; + uint32 extrsrcreq; uint32 ioctrl; uint32 ioctrl_resetseq_val[SSSR_D11_RESET_SEQ_STEPS]; } wrapper_regs; @@ -17477,6 +18537,13 @@ typedef sssr_reg_info_v0_t sssr_reg_info_t; #define SSSR_REG_INFO_VER SSSR_REG_INFO_VER_0 #endif // endif +/* A wrapper structure for all versions of SSSR register information structures */ +typedef union sssr_reg_info { + sssr_reg_info_v0_t rev0; + sssr_reg_info_v1_t rev1; + sssr_reg_info_v2_t rev2; +} sssr_reg_info_cmn_t; + /* ADaptive Power Save(ADPS) structure definition */ #define WL_ADPS_IOV_MAJOR_VER 1 #define WL_ADPS_IOV_MINOR_VER 0 @@ -17565,6 +18632,40 @@ typedef struct wlc_btc_2gchain_dis { uint8 flag; } wlc_btc_2gchain_dis_t; +/* TDLS structure definition */ +#define WL_TDLS_T_VERSION_V1 1 +typedef struct wl_tdls_dump_summary_v1 { + uint16 version; + uint16 length; /* length of the entire structure */ + uint32 txsetupreq; /* tdls setup req sent */ + uint32 txsetupresp; /* tdls setup resp sent */ + uint32 txsetupcfm; /* tdls setup confirm sent */ + uint32 txteardown; /* tdls teardwon frames sent */ + uint32 txptireq; /* tdls pti req frames sent */ + uint32 txptiresp; /* tdls pti resp frames sent */ + uint32 txchswreq; /* tdls chsw req frames sent */ + uint32 txchswresp; /* tdls chsw resp frame sent */ + uint32 rxsetupreq; /* tdls setup req rcvd */ + uint32 rxdsetupresp; /* tdls setup resp rcvd */ + uint32 rxsetupcfm; /* tdls setup confirm rcvd */ + uint32 rxteardown; /* tdls teardown frames rcvd */ + uint32 rxptireq; /* tdls pti req frames rcvd */ + uint32 rxptiresp; /* tdls pti resp frames rcvd */ + uint32 rxchswreq; /* tdls chsw req frames rcvd */ + uint32 rxchswresp; /* tdls chsw resp frames rcvd */ + uint32 discard; /* frames discarded due to full buffer */ + uint32 ubuffered; /* frames buffered by TDLS txmod */ + uint32 buf_reinserted; /* frames reinserted */ + uint32 
idletime; /* time since no traffic on tdls link */ + uint32 uptime; /* time since tdls link connected */ + uint32 tx_cnt; /* frames txed over tdls link */ + uint32 rx_cnt; /* frames rcvd over tdls link */ + uint32 blist_cnt; /* number of tdls black list */ + uint32 scb_flags; /* connected tdls scb flags */ + struct ether_addr peer_addr; /* connected peer addr */ + uint8 padding[2]; +} wl_tdls_dump_summary_v1_t; + #define WLC_BTC_2GCHAIN_DIS_REASSOC 0x1 #define WLC_BTC_2GCHAIN_DIS_VER1 0x1 #define WLC_BTC_2GCHAIN_DIS_VER1_LEN 6 @@ -17592,6 +18693,25 @@ typedef struct wl_btc_wifi_prot_m1_m4 { /* --- End BTCX WiFi Protection --- */ +/* --- BTCX ULMU disable (btc_ulmu_config iovar) --- */ + +/* Version number */ +#define WL_BTC_ULMU_CONFIG_VER_1 1 +typedef struct wl_btc_ulmu_config_v1 { + uint16 version; /* btc_ulmu_config version */ + uint16 len; /* Total length */ + uint32 ulmu_bt_task_bm; /* BT Task bimtap for ULMU disable */ + uint32 ulmu_bt_period_th; /* BT period thresh for ULMU disable */ +} wl_btc_ulmu_config_v1_t; + +/* --- End BTCX ULMU config --- */ + +#define RPSNOA_IOV_MAJOR_VER 1 +#define RPSNOA_IOV_MINOR_VER 1 +#define RPSNOA_IOV_MAJOR_VER_SHIFT 8 +#define RPSNOA_IOV_VERSION \ + ((RPSNOA_IOV_MAJOR_VER << RPSNOA_IOV_MAJOR_VER_SHIFT)| RPSNOA_IOV_MINOR_VER) + enum wl_rpsnoa_cmd_ids { WL_RPSNOA_CMD_ENABLE = 1, WL_RPSNOA_CMD_STATUS, @@ -17650,6 +18770,10 @@ enum wl_ifstats_xtlv_id { WL_IFSTATS_XTLV_MAC_ADDR = 3, WL_IFSTATS_XTLV_REPORT_CMD = 4, /* Comes in an iovar */ WL_IFSTATS_XTLV_BUS_PCIE = 5, + WL_STATS_XTLV_BUS_PCIE_TX_HISTOGRAMS = 6, + WL_STATS_XTLV_BUS_PCIE_TX_QUEUE_DEPTH = 7, + /* history of blocks freed most recently */ + WL_STATS_XTLV_FBINFO_STATS = 8, /* Report data across all SCBs using ecounters */ /* STA_info ecounters */ @@ -17665,6 +18789,12 @@ enum wl_ifstats_xtlv_id { WL_IFSTATS_XTLV_SCB_ECOUNTERS = 0x103, /* Global NAN stats */ WL_IFSTATS_XTLV_NAN_STATS = 0x104, + WL_IFSTATS_XTLV_CHAN_STATS = 0x105, + /* TDLS state */ + 
WL_IFSTATS_XTLV_IF_TDLS_STATE = 0x106, + WL_IFSTATS_XTLV_KEY_PLUMB_INFO = 0x107, + /* HE TX related stats */ + WL_IFSTATS_XTLV_HE_TXMU_STATS = 0x108, /* Per-slice information * Per-interface reporting could also include slice specific data @@ -17694,6 +18824,16 @@ enum wl_ifstats_xtlv_id { WL_SLICESTATS_XTLV_PERIODIC_STATE = 0x30D, WL_SLICESTATS_XTLV_HIST_TX_STATS = 0x30E, WL_SLICESTATS_XTLV_HIST_RX_STATS = 0x30F, + /* TX histograms */ + WL_STATS_XTLV_WL_SLICE_TX_HISTOGRAMS = 0x310, + /* TX queue depth */ + WL_STATS_XTLV_WL_SLICE_TX_QUEUE_DEPTH = 0x311, + /* Latency instrumentation debug */ + WL_STATS_XTLV_WL_QUEUE_STOP = 0x312, + /* Beamforming counters */ + WL_IFSTATS_XTLV_WL_SLICE_TXBF = 0x313, + /* Per-slice BTCOEX task duration stats */ + WL_IFSTATS_XTLV_WL_SLICE_BTCOEX_TSKDUR_STATS = 0x314, /* Per-interface */ /* XTLV container for reporting */ WL_IFSTATS_XTLV_IF = 0x501, @@ -17706,15 +18846,23 @@ enum wl_ifstats_xtlv_id { /* AMPDU stats on per-IF */ WL_IFSTATS_XTLV_AMPDU_DUMP = 0x505, WL_IFSTATS_XTLV_IF_SPECIFIC = 0x506, - WL_IFSTATS_XTLV_WL_PWRSTATS_AWDL = 0x507, WL_IFSTATS_XTLV_IF_LQM = 0x508, /* Interface specific state capture in periodic fashion */ WL_IFSTATS_XTLV_IF_PERIODIC_STATE = 0x509, /* Event statistics on per-IF */ WL_IFSTATS_XTLV_IF_EVENT_STATS = 0x50A, + /* Infra HE specific */ + WL_IFSTATS_XTLV_INFRA_SPECIFIC_HE = 0x50B, + /* Roam statistics */ + WL_IFSTATS_XTLV_ROAM_STATS_PERIODIC = 0x50C, + WL_IFSTATS_XTLV_ROAM_STATS_EVENT = 0x50D, /* ecounters for nan */ /* nan slot stats */ - WL_IFSTATS_XTLV_NAN_SLOT_STATS = 0x601 + WL_IFSTATS_XTLV_NAN_SLOT_STATS = 0x601, + /* Ecounters for NDP session status */ + WL_STATS_XTLV_NDP_SESSION_STATUS = 0x602, + /* NAN disc frame status ecounters */ + WL_STATS_XTLV_NAN_DISC_FRM_STATUS = 0x603 }; /* current version of wl_stats_report_t structure for request */ @@ -17779,6 +18927,34 @@ typedef struct wl_infra_stats { uint32 tbtt; } wl_if_infra_stats_t; +#define WL_INFRA_STATS_HE_VERSION_V1 (1u) +/* Associated 
stats type: WL_IFSTATS_INFRA_SPECIFIC_HE */ +typedef struct wl_infra_stats_he { + uint16 version; /**< version of the structure */ + uint16 length; + uint32 PAD; /**< Explicit padding */ + + /* DL SU MPDUs and total number of bytes */ + uint64 dlsu_mpdudata; + uint64 dlsu_mpdu_bytes; + + /* DL MUMIMO MPDUs and total number of bytes */ + uint64 dlmumimo_mpdudata; + uint64 dlmumimo_mpdu_bytes; + + /* DL OFDMA MPDUs and total number of bytes */ + uint64 dlofdma_mpdudata; + uint64 dlofdma_mpdu_bytes; + + /* UL SU MPDUs and total number of bytes */ + uint64 ulsu_mpdudata; + uint64 ulsu_mpdu_bytes; + + /* ULOFDMA MPSUs and total number of bytes */ + uint64 ulofdma_mpdudata; + uint64 ulofdma_mpdu_bytes; +} wl_if_infra_stats_he_t; + #define LTECOEX_STATS_VER 1 typedef struct wlc_ltecoex_stats { @@ -18055,6 +19231,7 @@ typedef struct wl_scb_ecounters_v2 { */ #define WL_NAN_SLOT_ECOUNTERS_VERSION_1 1 +#define WL_NAN_SLOT_ECOUNTERS_VERSION_2 2 typedef struct wl_nan_slot_ecounters_v1 { uint16 version; /* version field */ @@ -18065,6 +19242,55 @@ typedef struct wl_nan_slot_ecounters_v1 { nan_sched_stats_t sched; /* sched stats */ wl_nan_mac_stats_t mac; /* mac stats */ } wl_nan_slot_ecounters_v1_t; + +typedef struct wl_nan_slot_ecounters_v2 { + uint16 version; /* version field */ + uint16 length; /* struct length starting from version */ + uint32 chan[NAN_MAX_BANDS]; /* cur nan slot chanspec of both bands */ + uint16 cur_slot_idx; /* cur nan slot index */ + uint16 pad; + nan_sched_stats_t sched; /* sched stats */ + wl_nan_mac_stats_t mac; /* mac stats */ + /* for v2 */ + uint16 bcn_rx_drop_rssi; /* Beacon received but ignored due to weak rssi */ + uint16 bcn_rx_drop_rssi_5g; /* 5G Beacon received but ignored due to weak rssi */ + uint16 cnt_rssi_close; /* cnt of beacon rssi > rssi_close received */ + uint16 cnt_rssi_close_5g; /* cnt of 5G beacon rssi > rssi_close received */ + uint16 cnt_rssi_mid; /* cnt of beacon rssi > rssi_middle received */ + uint16 cnt_rssi_mid_5g; /* cnt 
of 5G beacon rssi > rssi_middle received */ + uint16 bcn_txfail; /* Beacon sending failure count */ + uint16 bcn_txfail_5g; /* sending 5G beacon failure count */ +} wl_nan_slot_ecounters_v2_t; + +/* WL_STATS_XTLV_NDP_SESSION_STATUS for ecounters */ +#define WL_NAN_SESSION_STATUS_EC_VERSION_1 1 +typedef struct wl_nan_ndp_session_status_v1_s { + uint16 version; /* version field */ + uint16 length; /* struct length starting from version */ + uint8 role; /* Role of NAN device */ + uint8 ndp_id; /* local NDP ID */ + uint8 state; /* NDP state */ + uint8 nan_sec_csid; /* security csid */ + struct ether_addr lndi_addr; /* Local NDI addr */ + struct ether_addr pnmi_addr; /* Peer NMI addr */ + struct ether_addr pndi_addr; /* Peer NDI addr */ + uint8 dpe_state; /* DPE state to know where timeout/dpend has come */ + uint8 pad; +} wl_nan_ndp_session_status_v1_t; + +/* WL_STATS_XTLV_NAN_DISC_FRM_STATUS for ecounters */ +#define WL_NAN_DISC_FRM_STATUS_EC_VERSION_1 1 +typedef struct wl_nan_disc_frame_status_v1_s { + uint16 version; /* version field */ + uint16 length; /* struct length starting from version */ + uint8 type; /* wl_nan_frame_type_t */ + uint8 status; /* For TX status, success or failure */ + uint8 reason_code; /* to identify reason when status is failure */ + uint8 inst_id; /* Publish or subscribe instance id */ + uint8 req_id; /* Requestor instance id */ + uint8 pad; + uint16 token; /* seq num to keep track of pkts sent by host */ +} wl_nan_disc_frame_status_v1_t; /* * BT log definitions */ @@ -18227,35 +19453,36 @@ typedef struct { uint8 bssid [6]; /* padding to get 32 bits alignment */ } rmc_candidate_info_v1_t; -#define WL_FILTER_IE_VERSION 1 +#define WL_FILTER_IE_VERSION 1 /* deprecated */ enum wl_filter_ie_options { - WL_FILTER_IE_CLEAR = 0, /* allow element id in packet.For suboption */ - WL_FILTER_IE_SET = 1, /* filter element id in packet.For suboption */ - WL_FILTER_IE_LIST = 2, /* list element ID's.Set as option */ - WL_FILTER_IE_CLEAR_ALL = 3, /* clear 
all the element.Set as option */ - WL_FILTER_IE_CHECK_SUB_OPTION = 4 /* check for suboptions.Set only as option */ + WL_FILTER_IE_CLEAR = 0, /* allow element id in packet.For suboption */ + WL_FILTER_IE_SET = 1, /* filter element id in packet.For suboption */ + WL_FILTER_IE_LIST = 2, /* list element ID's.Set as option */ + WL_FILTER_IE_CLEAR_ALL = 3, /* clear all the element.Set as option */ + WL_FILTER_IE_CHECK_SUB_OPTION = 4 /* check for suboptions.Set only as option */ }; typedef struct wl_filter_ie_tlv { - uint16 id; - uint16 len; /* sub option length + pattern length */ - uint8 data[]; /* sub option + pattern matching(OUI,type,sub-type) */ + uint16 id; /* elelment id [ + ext id ] */ + uint16 len; /* sub option length + pattern length */ + uint8 data[]; /* sub option + pattern matching(OUI,type,sub-type) */ } wl_filter_ie_tlv_t; -typedef struct wl_filter_ie_iov { - uint16 version; /* Structure version */ - uint16 len; /* Total length of the structure */ - uint16 fixed_length; /* Total length of fixed fields */ - uint8 option; /* Filter action - check for suboption */ - uint8 pad[1]; /* Align to 4 bytes */ - uint32 pktflag; /* frame type */ - uint8 tlvs[]; /* variable data (zero in for list ,clearall) */ +#define WL_FILTER_IE_VERSION_1 1 /* the latest version */ +typedef struct wl_filter_ie_iov_v1 { + uint16 version; /* Structure version */ + uint16 len; /* Total length of the structure */ + uint16 fixed_length; /* Total length of fixed fields */ + uint8 option; /* Filter action - check for suboption */ + uint8 pad[1]; /* Align to 4 bytes */ + uint32 pktflag; /* frame type - FC_XXXX */ + uint8 tlvs[]; /* variable data (zero in for list ,clearall) */ } wl_filter_ie_iov_v1_t; /* Event aggregation config */ #define EVENT_AGGR_CFG_VERSION 1 -#define EVENT_AGGR_DISABLED 0x0 -#define EVENT_AGGR_ENABLED 0x1 +#define EVENT_AGGR_DISABLED 0x0 +#define EVENT_AGGR_ENABLED 0x1 #define EVENT_AGGR_BUFSIZE_MAX 1512 #define EVENT_AGGR_BUFSIZE_MIN 512 @@ -18322,15 +19549,15 @@ 
typedef struct nan_slot_event_data { uint32 band; /* current band (2G/5G) for which the event is received */ } nan_slot_event_data_t; +#ifndef BCMUTILS_ERR_CODES + /* SAE (Simultaneous Authentication of Equals) error codes. * These error codes are local. */ -#define WL_SAE_E_BASE -3072 - /* SAE status codes are reserved from -3072 to -4095 (1K) */ -enum WL_SAE_E_STATUS_CODES { +enum wl_sae_status { WL_SAE_E_AUTH_FAILURE = -3072, /* Discard silently */ WL_SAE_E_AUTH_DISCARD = -3073, @@ -18383,6 +19610,8 @@ typedef enum wlc_pmk_psk_hash_status { WL_PMK_E_PSK_NOMEM = -5124 } wlc_pmk_psk_hash_status_t; +#endif /* BCMUTILS_ERR_CODES */ + /* Block Channel */ #define WL_BLOCK_CHANNEL_VER_1 1u @@ -18546,6 +19775,7 @@ typedef struct { typedef enum sdc_trigger_types { SDC_TYPE_STA_ONBOARD_DEBUG = 1, + SDC_TYPE_SCAN_DEBUG = 2, #ifdef SDC_TEST /* * This is for test purpose only. Don't assign specific value. @@ -18572,7 +19802,8 @@ enum wl_slice_hist_stats_xtlv_id { #define WL_HIST_COMPACT_TOSS_STATS_TX_VER_1 (1u) #define WL_HIST_COMPACT_TOSS_STATS_RX_VER_1 (1u) -/* [see HIST_TOSS_xxxx macros] +/* Format of running toss reasons with seq + * [see HIST_TOSS_xxxx macros] * bits [7..0] : 8 bits : toss sts. 
* [11..8] : cfgidx * [15..12]: ac @@ -18587,9 +19818,14 @@ enum wl_slice_hist_stats_xtlv_id { #define HIST_TOSS_SEQ_POS (16u) #define HIST_TOSS_SEQ_MASK (0xffff0000u) -#define WLC_SDC_COMPACT_TOSS_REASON(sts, ifidx, ac, seq) \ - ((sts) | ((ifidx) << HIST_TOSS_CFGIDX_POS) | ((ac) << HIST_TOSS_AC_POS) | \ - (seq << HIST_TOSS_SEQ_POS)) +/* Format of toss reasons with count + * bits [15..0] : 16 bits : toss reason + * bits [31..16]: 16 bits : count + */ +#define HIST_TOSS_RC_REASON_POS (0u) +#define HIST_TOSS_RC_REASON_MASK (0xffffu) +#define HIST_TOSS_RC_COUNT_POS (16u) +#define HIST_TOSS_RC_COUNT_MASK (0xffff0000u) typedef struct { uint16 version; @@ -18602,6 +19838,25 @@ typedef struct { uint32 hist_toss_counts[WLC_HIST_TOSS_LEN]; /* toss counts corr to reasons */ } wl_hist_compact_toss_stats_v1_t; +#define WL_HIST_COMPACT_TOSS_STATS_TX_VER_2 (2u) +#define WL_HIST_COMPACT_TOSS_STATS_RX_VER_2 (2u) + +typedef struct { + uint16 version; + uint8 htr_type; /* from wl_slice_hist_XX_stats_xtlv_id */ + uint8 htr_num; /* number of elements in htr_running or htr_rc */ + uint16 htr_rnidx; /* htr_running[rnidx-1] has latest data */ + uint16 htr_rcidx; /* htr_rc[rcidx-1] has latest data */ + uint32 htr_running[WLC_HIST_TOSS_LEN]; /* last 8 reasons along with seq, etc as + * per WLC_SDC_COMPACT_TOSS_REASON() format + */ + uint32 htr_rn_ts[WLC_HIST_TOSS_LEN]; /* time stamps corr to htr_running data */ + uint32 htr_rc[WLC_HIST_TOSS_LEN]; /* last 8 toss reasons and counts in + * WLC_SDC_COMPACT_TOSS_RC() format + */ + uint32 htr_rc_ts[WLC_HIST_TOSS_LEN]; /* time stamps corr to htr_rc */ +} wl_hist_compact_toss_stats_v2_t; + /* ***END of SDC_TYPE_STA_ONBOARD_DEBUG specific ******* */ #endif /* BCM_SDC */ @@ -18630,7 +19885,860 @@ typedef struct wl_avs_info_v1 { enum wl_sc_cmd { WL_SC_CMD_DBG = 0, WL_SC_CMD_CNX = 1, + WL_SC_CMD_CAP = 2, + WL_SC_CMD_CONFIG = 3, WL_SC_CMD_LAST }; +/* WBUS sub-command IDs for unit test */ +#define WL_WBUS_INA_SLOT_START 0x01u /**< Inactive slot start sub 
command ID. */ +#define WL_WBUS_INA_SLOT_STOP 0x02u /**< Inactive slot stop sub command ID. */ + +/* WBUS (WiFi BT uniform scheduler) command IDs */ +enum wl_wbus_cmd { + WL_WBUS_CMD_VER = 0, + WL_WBUS_CMD_STATS = 1, + WL_WBUS_CMD_UNIT_TEST = 2, + WL_WBUS_CMD_BT_TEST = 3, + WL_WBUS_CMD_CAP = 4, + WL_WBUS_CMD_LAST +}; + +#define WBUS_BT_SCHED_TEST_PARAMS_VER_1 1 + +typedef struct wbus_bt_sched_test_params_v1 { + uint16 version; + uint16 pad; + uint32 flags; + uint32 action; + uint32 duration; + uint32 interval; +} wbus_bt_sched_test_params_v1_t; + +#define WBUS_BT_SCHED_ADD 0u +#define WBUS_BT_SCHED_REMOVE 1u +#define WBUS_BT_SCHED_INVALID 0xFFu + +#define KEY_UPDATE_INFO_VER_V1 1 +typedef struct key_update_info_v1 +{ + uint16 ver; + uint8 pad; + uint8 flags; + uint32 timestamp; + uint32 algo; + uint32 key_flags; + struct ether_addr ea; + struct ether_addr sa; +} key_update_info_v1_t; + +/* Key update flag bit field */ +#define KEY_UPD_FLAG_ADD_KEY 0x1 /* 0 - Removal, 1 - Add key */ + +#ifdef WLLLW +/* LLW Session */ +#define LLW_VERSION 1 +#define LLW_STATS_VERSION 1 + +/* LLW roles */ +#define LLW_ROLE_SCHEDULER 0 +#define LLW_ROLE_CLIENT 1 + +/* LLW modes */ +#define LLW_MODE_GAPS 0 +#define LLW_MODE_BACK_TO_BACK 1 + +/* LLW session max values */ +#define LLW_MAX_SESSION_ID 10 +#define LLW_MAX_FLOW_ID 40 +#define LLW_MAX_CLIENT_NUM 15 +#define LLW_MAX_GAPS_PERIOD 20 +#define LLW_MAX_GAPS_VAR 3 +#define LLW_MAX_RETX_CNT 10 +#define LLW_MAX_AIFSN EDCF_AIFSN_MAX +#define LLW_MAX_CWMIN EDCF_ECW_MAX +#define LLW_MAX_CWMAX EDCF_ECW_MAX +#define LLW_MAX_PER_NUMERATOR 100 +#define LLW_MAX_PER_DENOM 10000 +#define LLW_MAX_CLIENT_ID 15 +#define LLW_MAX_PKT_SIZE 1500 +#define LLW_MAX_PKT_NUM 10 +#define LLW_MAX_MCS 9 +#define LLW_MAX_NUM_STREAMS 8 +#define LLW_MAX_IBS 32 + +/* Per LLW session config */ +/* WL_LLW_CMD_SESSION_CREATE, WL_LLW_CMD_SESSION_UPDATE */ +typedef struct wl_llw_session_cfg { + uint8 session_id; + uint8 role; + uint8 mode; + uint8 client_id; + uint8 
gaps_period; + uint8 gaps_var; + uint8 aifsn; + uint8 ecwmin; /* exponent value for minimum contention window */ + uint8 ecwmax; /* exponent value for maximum contention window */ + uint8 mcs; + uint8 num_streams; + uint8 ibs; /* interblock spacing in usecs, for spacing between Transaction Blocks */ + uint16 ul_pkt_size; + uint16 dl_pkt_size; + uint16 per_denom; /* denominator for target PER */ + uint8 per_numerator; /* this value divided by per_denom gives the target PER */ + uint8 dl_pkt_num; + uint8 client_num; + uint8 retx_cnt; + uint8 pwr_save; + uint8 auto_ba; /* automatic RX/TX BA session setup (no negotiation needed) */ + uint8 if_index; + uint8 padding[3]; + struct ether_addr multicast_addr; + struct ether_addr scheduler_addr; +} wl_llw_session_cfg_t; + +/* WL_LLW_CMD_SESSION_DELETE, WL_LLW_CMD_SESSION_ENABLE, WL_LLW_CMD_SESSION_DISABLE, */ +/* WL_LLW_CMD_SESSION_GET */ +typedef struct wl_llw_session_cmd { + uint8 session_id; + uint8 padding[3]; +} wl_llw_session_cmd_t; + +/* LLW client config */ +/* WL_LLW_CMD_CLIENT_ADD, WL_LLW_CMD_CLIENT_DELETE, WL_LLW_CMD_CLIENT_GET */ +typedef struct wl_llw_client_cfg { + uint8 session_id; + uint8 client_id; + struct ether_addr mac; +} wl_llw_client_cfg_t; + +/* Get list of session IDs from FW */ +/* WL_LLW_CMD_SESSION_ID */ +typedef struct llw_session_id_list { + uint8 id_count; /* Number of session IDs */ + uint8 list[]; /* list of session IDs */ +} llw_session_id_list_t; + +/* LLW XTLV structures */ +typedef struct wl_llw_iov_cmd { + uint16 version; + uint8 cmd_cnt; + uint8 pad; + uint8 cmds[]; +} wl_llw_iov_cmd_t; + +typedef struct wl_llw_iov_sub_cmd { + uint16 type; + uint16 len; + union { + int32 status; /* Processed status - Set by FW */ + uint32 options; /* Command Process Options - Set by Host */ + } u; + uint8 data[]; +} wl_llw_iov_sub_cmd_t; + +/* to be used in type field of wl_llw_iov_sub_cmd_t structure while issuing LLW commands */ +typedef enum wl_llw_sub_cmd_xtlv_id { + WL_LLW_CMD_SESSION_ID, + 
WL_LLW_CMD_SESSION_CREATE, + WL_LLW_CMD_SESSION_DELETE, + WL_LLW_CMD_SESSION_UPDATE, + WL_LLW_CMD_SESSION_ENABLE, + WL_LLW_CMD_SESSION_DISABLE, + WL_LLW_CMD_SESSION_GET, + WL_LLW_CMD_CLIENT_ADD, + WL_LLW_CMD_CLIENT_DELETE, + WL_LLW_CMD_CLIENT_GET, + WL_LLW_CMD_FLOW_ADD, + WL_LLW_CMD_FLOW_DELETE, + WL_LLW_CMD_FLOW_GET, + WL_LLW_CMD_STATS +} wl_llw_sub_cmd_xtlv_id_t; + +/* LLW stats */ +typedef enum wl_llw_xtlv { + WL_LLW_XTLV_STATS +} wl_llw_xtlv_t; + +typedef struct wl_llw_stats { + uint32 txpackets; + uint32 txbytes; + uint32 txrts; + uint32 txnocts; + uint32 txnoack; + uint32 txfail; + uint32 txretry; + uint32 txdropped; + uint32 tx_avg_q_time; + uint32 tx_min_q_time; + uint32 tx_max_q_time; + uint32 tx_avg_rem_lifetime; + uint32 tx_min_rem_lifetime; + uint32 tx_max_rem_lifetime; + uint32 rxpackets; + uint32 rxbytes; + uint32 rxfail; + uint32 rxretry; + uint32 txschedfrm; + uint32 retxschedfrm; +} wl_llw_stats_t; + +typedef struct wl_llw_stats_hdr { + uint16 version; + uint16 stats_cnt; + uint32 tot_len; + uint8 stat_xtlvs[]; +} wl_llw_stats_hdr_t; + +/* WL_LLW_XTLV_STATS */ +typedef struct wl_llw_stats_xtlv { + uint16 type; + uint16 len; + uint8 stats[]; +} wl_llw_stats_xtlv_t; + +/* WL_LLW_CMD_STATS */ +typedef struct wl_llw_stats_cmd { + uint8 session_id; + uint8 client_id; + uint16 padding; +} wl_llw_stats_cmd_t; + +/* LLW flow ring ID config */ +/* WL_LLW_CMD_FLOW_ADD, WL_LLW_CMD_FLOW_DELETE, WL_LLW_CMD_FLOW_GET */ +typedef struct wl_llw_flow_cfg { + uint8 session_id; + uint8 flow_id; + uint16 padding; +} wl_llw_flow_cfg_t; +#endif /* End of LLW Session */ + +#define WL_OMI_CONFIG_VERSION_1 1u + +/* values for valid_bm */ +#define OMI_CONFIG_VALID_BMP_RXNSS 0x0001u +#define OMI_CONFIG_VALID_BMP_BW 0x0002u +#define OMI_CONFIG_VALID_BMP_ULMU_DISABLE 0x0004u +#define OMI_CONFIG_VALID_BMP_TXNSTS 0x0008u +#define OMI_CONFIG_VALID_BMP_ERSU_DISABLE 0x0010u +#define OMI_CONFIG_VALID_BMP_DLMU_RSD_RCM 0x0020u +#define OMI_CONFIG_VALID_BMP_ULMU_DATA_DISABLE 0x0040u 
+#define OMI_CONFIG_VALID_BMP_ALL 0x0FFFu + +#define OMI_CONFIG_BW_MAX 3u + +typedef struct wl_omi_config { + uint16 valid_bm; /* validity bitmask for each config */ + uint8 rxnss; + uint8 bw; + uint8 ulmu_disable; + uint8 txnsts; + uint8 ersu_disable; + uint8 dlmu_resound_rec; + uint8 ulmu_data_disable; + uint8 pad[3]; +} wl_omi_config_t; + +typedef struct wl_omi_req { + uint16 version; + uint16 len; + wl_omi_config_t config; +} wl_omi_req_v1_t; + +/* Bits for ULMU disable reason */ +#define OMI_ULMU_DISABLED_HOST 0x01u /* Host has disabled through he omi */ +#define OMI_ULMU_DISABLED_NAN 0x04u /* Disabled due to NAN enabled */ +#define OMI_ULMU_DISABLED_BTCOEX 0x08u /* Disabled while in BT Coex activity */ +#define OMI_ULMU_DISABLED_LTECOEX 0x10u /* Disabled due to LTE Coex activity */ +#define OMI_ULMU_DISABLED_NON11AX_CONN 0x20u /* Disabled due to not associated to 11ax AP */ +#define OMI_ULMU_DISABLED_THROTTLE_ENABLE 0x40u /* Disabled due to throttle timer running */ +#define OMI_ULMU_DISABLED_TXCHAIN_DOWNGRADE 0x80u /* Disabled due to Txchain downgrade */ +#define OMI_ULMU_DISABLED_TX_DUTY_CYCLE 0x100u /* Disabled due to tx duty cycle */ + +/* Bits for DLMU Resound Recommendation reason */ +#define OMI_DLMU_RSD_RCM_HOST (0x1u << 0u) /* Host directly set the bit */ +#define OMI_DLMU_RSD_RCM_MPF (0x1u << 1u) /* Set on MPF state change */ + +#define WL_OMI_STATUS_VERSION_1 1u +typedef struct wl_omi_status { + uint16 version; + uint16 len; + wl_omi_config_t omi_pending; /* OMI requests pending */ + uint16 omi_data; /* current OM Control field for completed OMI requests */ + uint16 ulmu_disable_reason; /* Bits representing UL OFDMA disable reasons */ + uint32 ulmu_disable_duration; /* Duration (ms) for which UL OFDMA is disabled */ +} wl_omi_status_v1_t; + +#define WL_OMI_STATUS_VERSION_2 2u +typedef struct wl_omi_status_v2 { + uint16 version; + uint16 len; + wl_omi_config_t omi_pending; /* OMI requests pending */ + uint16 omi_data; /* Current OM Control field for 
completed OMI requests */ + uint16 ulmu_disable_reason; /* Bits representing UL OFDMA disable reasons */ + uint32 ulmu_disable_duration; /* Duration (ms) for which UL OFDMA is disabled */ + uint32 dlmu_rsd_rcm_duration; /* Dur (ms) for which ResoundRecommentation is set */ + uint16 dlmu_rsd_rcm_mpf_state; /* The MPF state value */ + uint16 dlmu_rsd_rcm_reason; /* DL MU-MIMO recommendation reasons bitmap */ +} wl_omi_status_v2_t; + +#define WL_ULMU_DISABLE_STATS_VERSION_1 1u +typedef struct wl_ulmu_disable_stats { + uint16 version; + uint16 len; + uint32 ulmu_disable_ts; /* UL OFDMA disabled timestamp (ms) */ + uint16 ulmu_disable_reason; /* Bits representing UL OFDMA disable reasons */ + uint16 ulmu_disable_count; /* UL MU disable count during current infra association */ + uint32 last_trig_rx_ts; /* Last trigger frame received timestamp (ms) */ + uint16 trig_rx_count; /* No of trigger frames received after last UL OFDMA disable */ + uint16 max_latency; /* Max latency by AP to re-act for UL OFDMA disable request (ms) */ + uint16 min_latency; /* Min latency by AP to re-act for UL OFDMA disable request (ms) */ + uint16 avg_latency; /* Avg latency by AP to re-act for UL OFDMA disable request (ms) */ +} wl_ulmu_disable_stats_v1_t; + +/* sub-xtlv IDs within WL_STATS_XTLV_WL_SLICE_TX_HISTOGRAMS */ +enum wl_tx_histogram_id { + WL_TX_HIST_TXQ_ID = 1, + WL_TX_HIST_LOW_TXQ_ID = 2, + WL_TX_HIST_SCBQ_ID = 3, + WL_TX_HIST_EXCUR_TXQ_ID = 4, + WL_TX_HIST_EXCUR_LOW_TXQ_ID = 5 +}; + +/* common tx histogram structure */ +typedef struct wl_tx_hist { + uint16 hist_bmap; /* bit N indicates histogram follows for priority or fifo N */ + uint16 hist_count; /* count of histograms in var len array */ + uint32 hist[1]; /* var len array of histograms each prefix by hist length */ +} wl_tx_hist_t; + +#define WL_TX_HIST_FIXED_LEN (OFFSETOF(wl_tx_hist_t, hist)) +#define WL_TX_HIST_FULL_LEN(num_hist, max_hist_size) \ + (WL_TX_HIST_FIXED_LEN + (num_hist) * \ + (max_hist_size + 1) * sizeof(uint32)) 
+ +/* structure for WL_TX_HIST_TXQ, WL_TX_HIST_EXCUR_TXQ_ID */ +typedef struct wl_tx_hist_txq { + uint32 bsscfg_bmap; /* bitmap of bsscfg indexes associated with this queue */ + wl_tx_hist_t tx_hist; /* tx histograms */ +} wl_tx_hist_txq_t; + +#define WL_TX_HIST_TXQ_FIXED_LEN \ + (OFFSETOF(wl_tx_hist_txq_t, tx_hist) + WL_TX_HIST_FIXED_LEN) +#define WL_TX_HIST_TXQ_FULL_LEN(num_hist, max_hist_size) \ + (OFFSETOF(wl_tx_hist_txq_t, tx_hist) + \ + WL_TX_HIST_FULL_LEN(num_hist, max_hist_size)) + +/* sub-xtlv IDs within WL_STATS_XTLV_WL_SLICE_TX_HISTOGRAMS */ +enum wl_txq_stop_histogram_id { + WL_TXQ_STOP_HIST_SW = 1, + WL_TXQ_STOP_HIST_HW = 2, + WL_TXQ_STOP_HIST_PKTS_SW = 3, + WL_TXQ_STOP_HIST_PKTS_HW = 4, + WL_TXQ_STOP_HIST_MAX = WL_TXQ_STOP_HIST_PKTS_HW +}; + +/* common tx histogram structure */ +typedef struct wl_txq_stop_hist { + wl_tx_hist_t tx_hist; /* tx histograms */ +} wl_txq_stop_hist_t; + +#define WL_TXQ_STOP_HIST_FIXED_LEN \ + (OFFSETOF(wl_txq_stop_hist_t, tx_hist) + WL_TX_HIST_FIXED_LEN) +#define WL_TXQ_STOP_HIST_FULL_LEN(num_hist, max_hist_size) \ + (OFFSETOF(wl_txq_stop_hist_t, tx_hist) + \ + WL_TX_HIST_FULL_LEN(num_hist, max_hist_size)) + +/* structure for WL_TX_HIST_LOW_TXQ, WL_TX_HIST_EXCUR_LOW_TXQ_ID */ +typedef struct wl_tx_hist_low_txq { + wl_tx_hist_t tx_hist; /* tx histograms */ +} wl_tx_hist_low_txq_t; + +#define WL_TX_HIST_LOW_TXQ_FIXED_LEN \ + (OFFSETOF(wl_tx_hist_low_txq_t, tx_hist) + WL_TX_HIST_FIXED_LEN) +#define WL_TX_HIST_LOW_TXQ_FULL_LEN(num_hist, max_hist_size) \ + (OFFSETOF(wl_tx_hist_low_txq_t, tx_hist) + \ + WL_TX_HIST_FULL_LEN(num_hist, max_hist_size)) + +/* structure for WL_TX_HIST_SCBQ */ +typedef struct wl_tx_hist_scbq { + struct ether_addr ea; /* ether addr of peer */ + uint16 bsscfg_idx; /* bsscfg index */ + wl_tx_hist_t tx_hist; /* tx histograms */ +} wl_tx_hist_scbq_t; + +#define WL_TX_HIST_SCBQ_FIXED_LEN \ + (OFFSETOF(wl_tx_hist_scbq_t, tx_hist) + WL_TX_HIST_FIXED_LEN) +#define WL_TX_HIST_SCBQ_FULL_LEN(num_hist, max_hist_size) 
\ + (OFFSETOF(wl_tx_hist_scbq_t, tx_hist) + \ + WL_TX_HIST_FULL_LEN(num_hist, max_hist_size)) + +/* sub-xtlv IDs within WL_STATS_XTLV_WL_SLICE_TX_QUEUE_DEPTH */ +enum wl_tx_queue_depth_id { + WL_TX_QUEUE_DEPTH_TXQ_ID = 1, + WL_TX_QUEUE_DEPTH_LOW_TXQ_ID = 2, + WL_TX_QUEUE_DEPTH_SCBQ_ID = 3, + WL_TX_QUEUE_DEPTH_EXCUR_TXQ_ID = 4, + WL_TX_QUEUE_DEPTH_EXCUR_LOW_TXQ_ID = 5 +}; + +/* common tx queue depth structure */ +typedef struct wl_tx_queue_depth { + uint16 queue_depth_bmap; /* bitmap of queue depth in var len array */ + uint16 queue_depth_count; /* count of queue depth in var len array */ + uint16 queue_depth[1]; /* var len array of queue depth */ +} wl_tx_queue_depth_t; + +#define WL_TX_QUEUE_DEPTH_FIXED_LEN (OFFSETOF(wl_tx_queue_depth_t, queue_depth)) +#define WL_TX_QUEUE_DEPTH_FULL_LEN(num_queue_depth) \ + (WL_TX_QUEUE_DEPTH_FIXED_LEN + (num_queue_depth) * \ + sizeof(uint16)) + +/* structure for WL_TX_QUEUE_DEPTH_TXQ_ID, WL_TX_QUEUE_DEPTH_EXCUR_TXQ_ID */ +typedef struct wl_tx_queue_depth_txq { + uint32 bsscfg_map; /* bitmap of bsscfg indexes associated with this queue */ + wl_tx_queue_depth_t tx_queue_depth; /* queue depth */ +} wl_tx_queue_depth_txq_t; + +#define WL_TX_QUEUE_DEPTH_TXQ_FIXED_LEN \ + (OFFSETOF(wl_tx_queue_depth_txq_t, tx_queue_depth) + WL_TX_QUEUE_DEPTH_FIXED_LEN) +#define WL_TX_QUEUE_DEPTH_TXQ_FULL_LEN(num_queue_depth) \ + (OFFSETOF(wl_tx_queue_depth_txq_t, tx_queue_depth) + \ + WL_TX_QUEUE_DEPTH_FULL_LEN(num_queue_depth)) + +/* structure for WL_TX_QUEUE_DEPTH_LOW_TXQ_ID, WL_TX_QUEUE_DEPTH_EXCUR_LOW_TXQ_ID */ +typedef struct wl_tx_queue_depth_low_txq { + wl_tx_queue_depth_t tx_queue_depth; /* queue depth */ +} wl_tx_queue_depth_low_txq_t; + +#define WL_TX_QUEUE_DEPTH_LOW_TXQ_FIXED_LEN \ + (OFFSETOF(wl_tx_queue_depth_low_txq_t, tx_queue_depth) + WL_TX_QUEUE_DEPTH_FIXED_LEN) +#define WL_TX_QUEUE_DEPTH_LOW_TXQ_FULL_LEN(num_queue_depth) \ + (OFFSETOF(wl_tx_queue_depth_low_txq_t, tx_queue_depth) + \ + WL_TX_QUEUE_DEPTH_FULL_LEN(num_queue_depth)) + +/* 
structure for WL_TX_QUEUE_DEPTH_SCBQ_ID */ +typedef struct wl_tx_queue_depth_scbq { + struct ether_addr ea; /* ether addr of peer */ + uint16 bsscfg_idx; /* bsscfg index */ + wl_tx_queue_depth_t tx_queue_depth; /* queue depth */ +} wl_tx_queue_depth_scbq_t; + +#define WL_TX_QUEUE_DEPTH_SCBQ_FIXED_LEN \ + (OFFSETOF(wl_tx_queue_depth_scbq_t, tx_queue_depth) + WL_TX_QUEUE_DEPTH_FIXED_LEN) +#define WL_TX_QUEUE_DEPTH_SCBQ_FULL_LEN(num_queue_depth) \ + (OFFSETOF(wl_tx_queue_depth_scbq_t, tx_queue_depth) + \ + WL_TX_QUEUE_DEPTH_FULL_LEN(num_queue_depth)) + +/* sub-xtlv IDs within WL_STATS_XTLV_BUS_PCIE_TX_HISTOGRAMS */ +enum wl_pcie_tx_histogram_id { + WL_PCIE_TX_HIST_ID = 1 +}; + +/* structure for PCIE_TX_HIST_ID */ +typedef struct wl_pcie_tx_hist { + uint16 ring_id; /* PCIe ring id */ + uint16 pad; /* 4-byte alignment */ + wl_tx_hist_t tx_hist; /* hist_bmap: + * 0x1=tx histogram + * 0x2=tx status pending histogram + */ +} wl_pcie_tx_hist_t; + +#define WL_PCIE_TX_HIST_FIXED_LEN \ + (OFFSETOF(wl_pcie_tx_hist_t, tx_hist) + WL_TX_HIST_FIXED_LEN) +#define WL_PCIE_TX_HIST_FULL_LEN(num_hist, max_hist_size) \ + (OFFSETOF(wl_pcie_tx_hist_t, tx_hist) + \ + WL_TX_HIST_FULL_LEN(num_hist, max_hist_size)) + +/* sub-xtlv IDs within WL_STATS_XTLV_BUS_PCIE_TX_QUEUE_DEPTH */ +enum wl_pcie_tx_queue_depth_id { + WL_PCIE_TX_QUEUE_DEPTH_ID = 1 +}; + +/* structure for WL_PCIE_TX_QUEUE_DEPTH_ID */ +typedef struct wl_pcie_tx_queue_depth { + uint16 ring_id; /* PCIe ring id */ + uint16 queue_depth; /* queue depth of ring id */ + uint16 tx_status_pend; /* tx status pending of ring id */ + uint16 pad; /* 4-byte alignment */ +} wl_pcie_tx_queue_depth_t; + +#define WL_PCIE_TX_QUEUE_DEPTH_FIXED_LEN sizeof(wl_pcie_tx_queue_depth_t) + +#define WL_WSEC_DEL_PMK_VER_V1 1u +/* tlv ids for del pmk */ +#define WL_DEL_PMK_TLV_ID 1u +#define WL_DEL_PMKID_TLV_ID 2u +#define WL_DEL_PEER_ADDR_TLV_ID 3u +typedef struct wl_wsec_del_pmk { + uint16 version; + uint16 length; + uint8 xtlvs[]; +} wl_wsec_del_pmk_t; 
+#define WL_WSEC_DEL_PMK_FIXED_LEN_V1 OFFSETOF(wl_wsec_del_pmk_t, xtlvs) + +#define WLC_RC_ROAM_VER_1 1 + +typedef struct wlc_rcroam { + uint16 ver; + uint16 len; + uint8 data[]; +} wlc_rcroam_t; + +typedef struct wlc_rcroam_info_v1 { + uint16 inactivity_period; /* inactivty monitor period */ + uint16 roam_scan_timeout; + uint16 periodic_roam_scan_timeout; + uint8 roam_trig_step; /* roaming trigger step value */ +} wlc_rcroam_info_v1_t; + +#define WLC_RC_ROAM_CUR_VER WLC_RC_ROAM_VER_1 +#define RCROAM_HDRLEN 4u +#define MAX_RCSCAN_TIMER 300u + +#define WLC_SILENT_ROAM_VER_1 1 +/* silent roam information struct */ +typedef struct wlc_sroam_info_v1 { + /* Silent roam Set/Get value */ + uint8 sroam_on; /* sroam on/off */ + int8 sroam_min_rssi; /* minimum rssi threshold to activate the feature */ + uint8 sroam_rssi_range; /* rssi tolerance to determine stationary status */ + uint8 sroam_score_delta; /* roam score delta value to prune candidate ap */ + uint8 sroam_period_time; /* required monitoring period to trigger roaming scan */ + uint8 sroam_band; /* band setting of roaming scan (all, 5g, 2g) */ + uint8 sroam_inact_cnt; /* tx/rx frame count threshold for checking inactivity */ + /* Silent roam monitor value */ + int8 sroam_ref_rssi; /* reference rssi which is picked when monitoring is + * started. 
it is updated to current rssi when it's + * out from rssi range + */ + uint8 sroam_time_since; /* elapsed time since start monitoring */ + uint8 pad[3]; + uint32 sroam_txfrm_prev; /* save current tx frame counts */ + uint32 sroam_rxfrm_prev; /* save current rx frame counts */ +} wlc_sroam_info_v1_t; + +typedef struct wlc_sroam { + uint16 ver; + uint16 len; + uint8 data[]; +} wlc_sroam_t; + +#define WLC_SILENT_ROAM_CUR_VER WLC_SILENT_ROAM_VER_1 +#define SROAM_HDRLEN 4u + +#define DEF_SROAM_OFF 0 +#define DEF_SROAM_MIN_RSSI -65 +#define DEF_SROAM_RSSI_RANGE 3u +#define DEF_SROAM_SCORE_DELTA 1u +#define DEF_SROAM_PERIOD_TIME 10u +#define DEF_SROAM_INACT_CNT 5u +#define MAX_SROAM_RSSI -70 +#define MAX_SROAM_RSSI_RANGE 5u +#define MAX_SROAM_SCORE_DELTA 10u +#define MAX_SROAM_PERIOD_TIME 250u +#define SROAM_BAND_AUTO 3u + +/* MACSMPL IOVAR parameters */ +typedef enum wl_macdbg_macsmpl_iovar_id { + WL_MACSMPL_START = 0, + WL_MACSMPL_STOP = 1, + WL_MACSMPL_DUMP = 2, + WL_MACSMPL_STATUS = 3, + WL_MACSMPL_SIZE = 4 +} wl_macdbg_macsmpl_iovar_id_t; + +/* WL_MACSMPL_STATUS values */ +typedef enum wl_macdbg_macsmpl_status { + WL_MACSMPL_STATUS_IDLE = 0, + WL_MACSMPL_STATUS_ACTIVE = 1, + WL_MACSMPL_STATUS_WAIT_FOR_TRIG = 2, + WL_MACSMPL_STATUS_TRIGGERED = 3 +} wl_macdbg_macsmpl_status_t; + +/* WL_MACSMPL_START_PARAM subcommand data */ +typedef struct wl_macsmpl_start_param { + uint32 trig_condition; /* trigger condition */ + uint16 gpio_mux; /* MACControl1 GPIOSel field */ + uint8 pad[2]; /* 4-byte struct alignment */ +} wl_macsmpl_param_start_t; + +/* MAC SC fragment request data */ +typedef struct wl_macsmpl_frag_req_param { + uint32 offset; /* requested MAC SC fragment offset */ + uint32 size; /* requested MAC SC fragment size, bytes */ +} wl_macsmpl_frag_req_param_t; + +/* MAC SC fragment response data */ +typedef struct wl_macsmpl_frag_resp_param { + uint32 offset; /* MAC SC response fragment offset */ + uint32 size; /* MAC SC reponse fragment size, bytes */ + uint8 data[]; 
/* MAC SC response fragment data, flexible array */ +} wl_macsmpl_frag_resp_param_t; + +/* MAC SC status data */ +typedef struct wl_macsmpl_status { + uint32 maccontrol1; /* MACControl1 register value */ + uint32 macsc_flags; /* M_MACSC_FLAGS SHM register value */ + uint16 sc_play_ctrl; /* TXE SampleCollectPlayCtrl register value */ + uint16 sc_cur_ptr; /* TXE SampleCollectCurPtr register value */ + uint16 sc_start_ptr; /* TXE SampleCollectStartPtr register value */ + uint16 sc_stop_ptr; /* TXE SampleCollectStopPtr register value */ +} wl_macsmpl_status_t; + +/* WL_MACSMPL parameters data */ +typedef struct wl_macsmpl_param { + wl_macdbg_macsmpl_iovar_id_t subcmd_id; + union { + wl_macsmpl_param_start_t start; + wl_macsmpl_frag_req_param_t frag_req; + } u; +} wl_macsmpl_param_t; + +/* High priority P2P */ +#define WL_HP2P_COUNTERS_VER 2u +typedef struct hp2p_counters { + uint16 frames_queued; + uint16 frames_processed; + uint16 frames_exp; + uint16 frames_preempt; + uint16 frames_retried; + uint16 reserved; /* reserved, rsvd2 and rsvd3 are experimental counters */ + uint16 rsvd2; + uint16 rsvd3; +} hp2p_counters_t; + +typedef struct hp2p_counters_v2 { + uint32 frames_queued; /* Number of AMPDUs processed */ + uint16 frames_exp; /* Number of Lifetime expiries */ + uint16 edt_retry; /* Exceed due to - retry */ + uint16 mpif_reconf; /* MPIF Reconfigure */ + uint16 exceed_delay; /* Exceed delay threshold */ + uint16 edt_nav_thresh; /* Exceed due to - NAV threshold */ + uint16 edt_dc_def; /* Exceed due to - DC based deferral */ + uint16 edt_tx_fifo_full; /* Exceed due to - Tx FIFO full */ + uint16 edt_cts_thresh; /* Exceed due to - CTS threshold */ + uint16 dbg1; /* dbgX are for internal debugging */ + uint16 dbg2; + uint16 dbg3; + uint16 dbg4; + uint16 dbg5; + uint16 dbg6; + uint16 dbg7; + uint16 dbg8; + uint16 dbg9; + uint16 dbg10; +} hp2p_counters_v2_t; + +typedef struct hp2p_counters_hdr { + uint16 version; /* version of hp2p_counters_t structure */ + uint16 len; + 
uint16 slice_idx; + uint16 pad; + uint8 counters[]; +} hp2p_counters_hdr_t; + +/* TX enable flags */ +#define WL_HP2P_TX_AMPDU 0x0001u +#define WL_HP2P_TX_AMSDU 0x0002u +#define WL_HP2P_TX_RDG 0x0004u + +/* RX enable flags */ +#define WL_HP2P_RX_AMPDU 0x0001u +#define WL_HP2P_RX_AMSDU 0x0002u +#define WL_HP2P_RX_RDG 0x0004u +#define WL_HP2P_RX_AMPDU_REORDER 0x0008u + +/* Max/min values for configuration parameters to check validity */ +#define WL_HP2P_MAX_RETRY_MAX 14u +#define WL_HP2P_MAX_RETRY_MIN 6u +#define WL_HP2P_LATENCY_TARGET_MAX 30u +#define WL_HP2P_BURST_INTERVAL_MAX 64u +#define WL_HP2P_MAX_FIFO 5u +#define WL_HP2P_MAX_UCODE_LATENCY_THR 500u +#define WL_HP2P_MAX_UCODE_RECOV_TO 500u +#define WL_HP2P_MAX_UCODE_NAV_THR 50000u + +#define WL_HP2P_VERSION 1u +typedef struct hp2p_tx_config { + struct ether_addr peer_addr; + uint16 max_burst; + uint16 txop; /* stored in network order (ls octet first) */ + uint16 flags; /* flags to enable/disable AMPDU, AMSDU, RDG */ + uint8 aci; + uint8 ecw; + uint8 fifo; + uint8 tid; + uint8 burst_interval; + uint8 latency_target; + uint8 max_retry; + uint8 pad; +} hp2p_tx_config_t; + +typedef struct hp2p_rx_config { + struct ether_addr peer_addr; + uint16 flags; /* flags to enable/disable AMPDU, AMSDU, RDG, AMPDU Reorder */ + uint8 tid; + uint8 pad[3]; +} hp2p_rx_config_t; + +typedef struct hp2p_udbg_config { + uint16 recovery_timeout; /* multiples of 256 usecs */ + uint16 latency_thresh; /* multiples of 256 usecs */ + uint16 enable_trap; /* trap if ucode delay exceeds latency_thresh */ + uint16 nav_thresh; /* in usec */ +} hp2p_udbg_config_t; + +typedef struct hp2p_cmd { + uint16 type; + uint16 len; + uint8 data[]; +} hp2p_cmd_t; + +typedef struct hp2p_cmd_hdr { + uint16 version; + uint16 slice_idx; + uint8 cmd[]; +} hp2p_cmd_hdr_t; + +/* to be used in type field of hp2p_cmd_t structure while issuing HP2P commands */ +typedef enum hp2p_cmd_id { + WL_HP2P_CMD_ENABLE = 0, + WL_HP2P_CMD_TX_CONFIG = 1, + WL_HP2P_CMD_RX_CONFIG = 
2, + WL_HP2P_CMD_COUNTERS = 3, + WL_HP2P_CMD_UDBG_CONFIG = 4 +} hp2p_cmd_id_t; + +typedef enum wl_rffe_cmd_type { + WL_RFFE_CMD_DEBUG_MODE = 0, + WL_RFFE_CMD_ELNABYP_MODE = 1, + WL_RFFE_CMD_REG = 2, + WL_RFFE_CMD_LAST +} wl_rffe_cmd_type_t; + +/** RFFE struct passed through ioctl */ +typedef struct { + uint32 regaddr; /**< rFEM_RegAddr */ + uint32 antnum; /**< rFEM AntNum */ + uint32 slaveid; /**< rFEM SlaveID */ + uint32 value; /**< read/write value */ +} rffe_reg_t; + +#ifndef BCMUTILS_ERR_CODES + +/* + * SOE (Security Offload Engine) status codes. + */ + +/* SOE status codes are reserved from -6144 to -7167 (1K) */ + +enum wl_soe_status { + /* Invalid operational context */ + WL_SOE_E_BAD_OP_CONTEXT = -6144, + + /* Invalid operational type */ + WL_SOE_E_BAD_OP_TYPE = -6145, + + /* Failure to get NAF3 encoded scalar */ + WL_SOE_E_BN_GET_NAF3_ERROR = -6146, + + /* Failure to get NAF3 params */ + WL_SOE_E_ECG_GET_NAF3_PARAMS_ERROR = -6147, + + /* FAILURE to get Montgomery params */ + WL_SOE_E_MONT_PARAMS_GET_ERROR = -6148, + + /* Invalid OSL handle */ + WL_SOE_E_BAD_SI_OSH = -6149, + + /* Invalid ECG group */ + WL_SOE_E_BAD_ECG_GROUP = -6150, + + /* Invalid BN context */ + WL_SOE_E_BAD_BN_CTX = -6151, + + /* Invalid SOE core register base address */ + WL_SOE_E_BAD_SOE_REGBASE = -6152, + + /* Invalid SOE context */ + WL_SOE_E_BAD_SOE_CONTXT = -6153, + + /* Number of words are too short (i.e., not enough + * room to encode the PKA sequence) + */ + WL_SOE_E_PKA_SEQUENCE_WORDS_TOO_SHORT = -6154, + + /* Generic bn_get error */ + WL_SOE_E_PKA_BN_GET_ERROR = -6155, + + /* Sequence buf too short for BN */ + WL_SOE_E_PKA_BN_BUF_TOO_SHORT_BN = -6156, + + /* Sequence buf too short for ECG prime */ + WL_SOE_E_PKA_BN_BUF_TOO_SHORT_ECG_PRIME = -6157, + + /* Sequence buf too short for Montgomery N' */ + WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_MONT_PRIME = -6158, + + /* Sequence buf too short for Accumulator registers */ + WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_ACCM_REG = -6159, + + /* Sequence 
buf too short for the point P */ + WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_P = -6160, + + /* Sequence buf too short for -P */ + WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_PN = -6161, + + /* Sequence buf too short for 3P */ + WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_3P = -6162, + + /* Sequence buf too short for -3P */ + WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_3PN = -6163, + + /* Sequence buf too short for NAF3 scalar */ + WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_NAF3_SCALAR = -6164, + + /* Sequence buf too short for load shift count */ + WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_PRE_JMP = -6165, + + /* SOE engine(SHA/PKA) failed to complete the operation */ + WL_SOE_E_ENGINE_UNABLE_TO_COMPLETE = -6166, + + /* Wrong LIR (Long Integer Register) type */ + WL_SOE_E_PKA_BAD_LIR_TYPE = -6167, + + /* Reference count has reached maximum */ + WL_SOE_E_MAX_REF_COUNT_REACHED = -6168, + + /* Failed to get the SOE context reference */ + WL_SOE_E_GET_REF_FAILED = -6169, + + /* Incoming digest length is invalid */ + WL_SOE_E_SHA_WRONG_DIGEST_LEN = -6170 +}; + +#endif /* BCMUTILS_ERR_CODES */ + +#define NR5GCX_STATUS_VER_1 1 +/* NR coex status structures */ +typedef struct wlc_nr5gcx_status_v1 { + uint16 version; /* version info */ + uint16 len; /* status length */ + uint32 mode; /* NR coex status */ + uint32 nr_req_cnt; /* NR req number since last read */ + uint32 nr_dur; /* NR duration since last read, us */ + uint32 nr_duty_cycle; /* NR duty cycle since last read */ + uint32 nr_max_dur; /* NR max duration in a single request */ + uint32 wlan_crit_cnt; /* aggregated # of WLAN critical events */ + uint32 wlan_crit_dur; /* aggregated WLAN critical event duration, ms */ + uint32 wlan_crit_max_dur; /* Duration of the WLAN critical events whose dur is max */ + uint16 wlan_crit_evt_bitmap; /* WLAN critical event occurrence bitmap, + * 1 event per bit. 
+ */ + uint16 wlan_crit_max_evt_type; /* The event type of the WLAN critical + * event whose dur is max + */ +} wlc_nr5gcx_status_v1_t; #endif /* _wlioctl_h_ */ diff --git a/bcmdhd.100.10.315.x/include/wlioctl_defs.h b/bcmdhd.100.10.315.x/include/wlioctl_defs.h index 7d7e550..aa73924 100644 --- a/bcmdhd.100.10.315.x/include/wlioctl_defs.h +++ b/bcmdhd.100.10.315.x/include/wlioctl_defs.h @@ -4,7 +4,7 @@ * * Definitions subject to change without notice. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -27,7 +27,7 @@ * * <> * - * $Id: wlioctl_defs.h 766704 2018-06-09 05:44:18Z $ + * $Id: wlioctl_defs.h 826113 2019-06-18 21:04:03Z $ */ #ifndef wlioctl_defs_h @@ -38,11 +38,14 @@ #define D11AC_IOTYPES #ifndef USE_NEW_RSPEC_DEFS +/* Remove when no referencing branches exist. + * These macros will be used only in older branches (prior to K branch). + * Wl layer in newer branches and trunk use those defined in bcmwifi_rspec.h. + * Non-wl layer in newer branches and trunk may use these as well + * until they are removed. 
+ */ /* WL_RSPEC defines for rate information */ #define WL_RSPEC_RATE_MASK 0x000000FF /* rate or HT MCS value */ -#define WL_RSPEC_HE_MCS_MASK 0x0000000F /* HE MCS value */ -#define WL_RSPEC_HE_NSS_MASK 0x000000F0 /* HE Nss value */ -#define WL_RSPEC_HE_NSS_SHIFT 4 /* HE Nss value shift */ #define WL_RSPEC_VHT_MCS_MASK 0x0000000F /* VHT MCS value */ #define WL_RSPEC_VHT_NSS_MASK 0x000000F0 /* VHT Nss value */ #define WL_RSPEC_VHT_NSS_SHIFT 4 /* VHT Nss value shift */ @@ -62,7 +65,6 @@ #define WL_RSPEC_ENCODE_RATE 0x00000000 /* Legacy rate is stored in RSPEC_RATE_MASK */ #define WL_RSPEC_ENCODE_HT 0x01000000 /* HT MCS is stored in RSPEC_RATE_MASK */ #define WL_RSPEC_ENCODE_VHT 0x02000000 /* VHT MCS and Nss is stored in RSPEC_RATE_MASK */ -#define WL_RSPEC_ENCODE_HE 0x03000000 /* HE MCS and Nss is stored in RSPEC_RATE_MASK */ /* WL_RSPEC_BW field defs */ #define WL_RSPEC_BW_UNSPECIFIED 0 @@ -218,6 +220,12 @@ #define WL_SCANFLAGS_CLIENT_SHIFT 8 /* Bitmask for scan_type */ +/* Reserved flag precludes the use of 0xff for scan_type which is + * interpreted as default for backward compatibility. + * Low priority scan uses currently reserved bit, + * this should be changed as scan_type extended. + * So, reserved flag definition removed. 
+ */ /* Use lower 16 bit for scan flags, the upper 16 bits are for internal use */ #define WL_SCANFLAGS_PASSIVE 0x01 /* force passive scan */ #define WL_SCANFLAGS_LOW_PRIO 0x02 /* Low priority scan */ @@ -235,6 +243,15 @@ #define WL_SCANFLAGS_ROAMSCAN 0x200 /* Roam scan */ #define WL_SCANFLAGS_FWSCAN 0x400 /* Other FW scan */ #define WL_SCANFLAGS_HOSTSCAN 0x800 /* Host scan */ +#define WL_SCANFLAGS_LOW_POWER_SCAN 0x1000 /* LOW power scan, scheduled scan + * only on scancore + */ +#define WL_SCANFLAGS_HIGH_ACCURACY 0x2000 /* High accuracy scan, which needs + * reliable scan results + */ +#define WL_SCANFLAGS_LOW_SPAN 0x4000 /* LOW span scan, which expects + * scan to be completed ASAP + */ /* wl_iscan_results status values */ #define WL_SCAN_RESULTS_SUCCESS 0 @@ -243,6 +260,14 @@ #define WL_SCAN_RESULTS_ABORTED 3 #define WL_SCAN_RESULTS_NO_MEM 4 +/* Flags for parallel scan */ +/* Bitmap to enable/disable rsdb parallel scan, 5g-5g/2g-2g parallel scan + * SCAN_PARALLEL_PASSIVE_5G ==> 5g-5g parallel scan + * SCAN_PARALLEL_PASSIVE_2G ==> 2g-2g parallel scan + */ +#define SCAN_PARALLEL_PASSIVE_5G (0x40) +#define SCAN_PARALLEL_PASSIVE_2G (0x80) + #define SCANOL_ENABLED (1 << 0) #define SCANOL_BCAST_SSID (1 << 1) #define SCANOL_NOTIFY_BCAST_SSID (1 << 2) @@ -373,6 +398,8 @@ /* bit definitions for bcnflags in wl_bss_info */ #define WL_BSS_BCNFLAGS_INTERWORK_PRESENT 0x01 /* beacon had IE, accessnet valid */ #define WL_BSS_BCNFLAGS_INTERWORK_PRESENT_VALID 0x02 /* on indicates support for this API */ +#define WL_BSS_BCNFLAGS_MULTIPLE_BSSID_SET 0x4 /* this AP belongs to a multiple BSSID set */ +#define WL_BSS_BCNFLAGS_NONTRANSMITTED_BSSID 0x8 /* this AP is the transmitted BSSID */ /* bssinfo flag for nbss_cap */ #define VHT_BI_SGI_80MHZ 0x00000100 @@ -452,6 +479,8 @@ #define WL_KF_RES_5 (1 << 5) /* Reserved for backward compat */ #endif /* BCMCCX || BCMEXTCCX */ #define WL_IBSS_PEER_GROUP_KEY (1 << 6) /* Indicates a group key for a IBSS PEER */ +#define WL_LINK_KEY (1 << 7) /* 
For linking keys of both cores */ +#define WL_UNLINK_KEY (1 << 8) /* For unlinking keys of both cores */ /* wireless security bitvec */ #define WSEC_NONE 0x0 @@ -527,7 +556,7 @@ #define BRCM_AUTH_PSK 0x0100 /* BRCM specific PSK */ #define BRCM_AUTH_DPT 0x0200 /* DPT PSK without group keys */ #if defined(BCMWAPI_WAI) || defined(BCMWAPI_WPI) -#define WPA_AUTH_WAPI 0x0400 +#define WPA_AUTH_WAPI 0x0400 /* why it is same as WAPI_AUTH_UNSPECIFIED */ #define WAPI_AUTH_NONE WPA_AUTH_NONE /* none (IBSS) */ #define WAPI_AUTH_UNSPECIFIED 0x0400 /* over AS */ #define WAPI_AUTH_PSK 0x0800 /* Pre-shared key */ @@ -540,18 +569,17 @@ #define WPA2_AUTH_FILS_SHA384 0x20000 /* FILS with SHA384 key derivation */ #define WPA2_AUTH_IS_FILS(auth) ((auth) & (WPA2_AUTH_FILS_SHA256 | WPA2_AUTH_FILS_SHA384)) #define WPA3_AUTH_SAE_PSK 0x40000 /* SAE with 4-way handshake */ -#define WPA3_AUTH_SAE_FBT 0x80000 /* SAE with FT */ #define WPA3_AUTH_OWE 0x100000 /* OWE */ #define WPA3_AUTH_1X_SUITE_B_SHA256 0x200000 /* Suite B SHA256 */ #define WPA3_AUTH_1X_SUITE_B_SHA384 0x400000 /* Suite B-192 SHA384 */ #define WPA3_AUTH_PSK_SHA384 0x800000 /* PSK with SHA384 key derivation */ - +#define WPA3_AUTH_SAE_AP_ONLY 0x1000000 /* SAE restriction to connect to pure SAE APs */ /* WPA2_AUTH_SHA256 not used anymore. Just kept here to avoid build issue in DINGO */ #define WPA2_AUTH_SHA256 0x8000 #define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */ /* pmkid */ -#define MAXPMKID 16 +#define MAXPMKID 16 /* max # PMKID cache entries NDIS */ /* SROM12 changes */ #define WLC_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */ @@ -898,7 +926,16 @@ #define WLC_GET_RSSI_QDB 321 /* qdB portion of the RSSI */ #define WLC_DUMP_RATESET 322 #define WLC_ECHO 323 -#define WLC_LAST 324 +#define WLC_LAST 324 /* The last ioctl. 
Also push this + * number when adding new ioctls + */ +/* + * Alert: + * Duplicate a few definitions that irelay requires from epiioctl.h here + * so caller doesn't have to include this file and epiioctl.h . + * If this grows any more, it would be time to move these irelay-specific + * definitions out of the epiioctl.h and into a separate driver common file. + */ #define WLC_SPEC_FLAG 0x80000000 /* For some special IOCTL */ #ifndef EPICTRL_COOKIE #define EPICTRL_COOKIE 0xABADCEDE @@ -1181,6 +1218,7 @@ #define TRIGGER_BADFCS 0x08 #define TRIGGER_BADPLCP 0x10 #define TRIGGER_CRSGLITCH 0x20 +#define TRIGGER_ASYNC 0x40 #define WL_SAMPLEDATA_HEADER_TYPE 1 #define WL_SAMPLEDATA_HEADER_SIZE 80 /* sample collect header size (bytes) */ @@ -1255,6 +1293,9 @@ #define WL_TX_POWER_F_PROP11NRATES 0x80 #define WL_TX_POWER_F_UNIT_QDBM 0x100 #define WL_TX_POWER_F_TXCAP 0x200 +#define WL_TX_POWER_F_HE 0x400 +#define WL_TX_POWER_F_RU_RATE 0x800 + /* Message levels */ #define WL_ERROR_VAL 0x00000001 #define WL_TRACE_VAL 0x00000002 @@ -1268,7 +1309,7 @@ #define WL_PRUSR_VAL 0x00000200 #define WL_PS_VAL 0x00000400 #define WL_TXPWR_VAL 0x00000000 /* retired in TOT on 6/10/2009 */ -#define WL_MODE_SWITCH_VAL 0x00000800 /* Using retired TXPWR val */ +#define WL_MODE_SWITCH_VAL 0x00000800 /* Using retired TXPWR val */ #define WL_PORT_VAL 0x00001000 #define WL_DUAL_VAL 0x00002000 #define WL_WSEC_VAL 0x00004000 @@ -1277,9 +1318,9 @@ #define WL_NRSSI_VAL 0x00000000 /* retired in TOT on 6/10/2009 */ #define WL_BCNTRIM_VAL 0x00020000 /* Using retired NRSSI VAL */ #define WL_LOFT_VAL 0x00000000 /* retired in TOT on 6/10/2009 */ -#define WL_PFN_VAL 0x00040000 /* Using retired LOFT_VAL */ +#define WL_PFN_VAL 0x00040000 /* Using retired LOFT_VAL */ #define WL_REGULATORY_VAL 0x00080000 -#define WL_CSA_VAL 0x00080000 /* Reusing REGULATORY_VAL due to lackof bits */ +#define WL_CSA_VAL 0x00080000 /* Reusing REGULATORY_VAL due to lackof bits */ #define WL_TAF_VAL 0x00100000 #define WL_RADAR_VAL 
0x00000000 /* retired in TOT on 6/10/2009 */ #define WL_WDI_VAL 0x00200000 /* Using retired WL_RADAR_VAL VAL */ @@ -1319,7 +1360,6 @@ #define WL_TDLS_VAL 0x00001000 #define WL_MCNX_VAL 0x00002000 #define WL_PROT_VAL 0x00004000 -#define WL_PSTA_VAL 0x00008000 #define WL_TSO_VAL 0x00010000 #define WL_TRF_MGMT_VAL 0x00020000 #define WL_LPC_VAL 0x00040000 @@ -1328,7 +1368,7 @@ #define WL_P2PO_VAL 0x00200000 #define WL_TBTT_VAL 0x00400000 #define WL_FBT_VAL 0x00800000 -#define WL_RRM_VAL 0x00800000 /* reuse */ +#define WL_RRM_VAL 0x00800000 /* reuse */ #define WL_MQ_VAL 0x01000000 /* This level is currently used in Phoenix2 only */ #define WL_SRSCAN_VAL 0x02000000 @@ -1352,6 +1392,8 @@ */ #define WL_ASSOC_AP_VAL 0x00000001 #define WL_FILS_VAL 0x00000002 +#define WL_LATENCY_VAL 0x00000004 +#define WL_WBUS_VAL 0x00000008 /* max # of leds supported by GPIO (gpio pin# == led index#) */ #define WL_LED_NUMGPIO 32 /* gpio 0-31 */ @@ -1364,25 +1406,25 @@ #define WL_LED_ARADIO 4 /* 5 Ghz radio enabled */ #define WL_LED_BRADIO 5 /* 2.4Ghz radio enabled */ #define WL_LED_BGMODE 6 /* on if gmode, off if bmode */ -#define WL_LED_WI1 7 -#define WL_LED_WI2 8 -#define WL_LED_WI3 9 +#define WL_LED_WI1 7 /* wlan indicator 1 mode (legacy cust) */ +#define WL_LED_WI2 8 /* wlan indicator 2 mode (legacy cust) */ +#define WL_LED_WI3 9 /* wlan indicator 3 mode (legacy cust) */ #define WL_LED_ASSOC 10 /* associated state indicator */ #define WL_LED_INACTIVE 11 /* null behavior (clears default behavior) */ -#define WL_LED_ASSOCACT 12 /* on when associated; blink fast for activity */ -#define WL_LED_WI4 13 -#define WL_LED_WI5 14 +#define WL_LED_ASSOCACT 12 /* on associated; blink fast for activity */ +#define WL_LED_WI4 13 /* wlan indicator 4 mode (legacy cust 5G) */ +#define WL_LED_WI5 14 /* wlan indicator 5 mode (legacy cust 2.4) */ #define WL_LED_BLINKSLOW 15 /* blink slow */ #define WL_LED_BLINKMED 16 /* blink med */ #define WL_LED_BLINKFAST 17 /* blink fast */ #define WL_LED_BLINKCUSTOM 18 
/* blink custom */ -#define WL_LED_BLINKPERIODIC 19 /* blink periodic (custom 1000ms / off 400ms) */ +#define WL_LED_BLINKPERIODIC 19 /* blink period (custom 1000ms / off 400ms) */ #define WL_LED_ASSOC_WITH_SEC 20 /* when connected with security */ /* keep on for 300 sec */ #define WL_LED_START_OFF 21 /* off upon boot, could be turned on later */ -#define WL_LED_WI6 22 -#define WL_LED_WI7 23 -#define WL_LED_WI8 24 +#define WL_LED_WI6 22 /* wlan indicator 6 mode legacy rtr 43526 5 */ +#define WL_LED_WI7 23 /* wlan indicator 7 mode legacy rtr 43526 2.4 */ +#define WL_LED_WI8 24 /* wlan indicator 8 mode legacy rtr 43526 */ #define WL_LED_NUMBEHAVIOR 25 /* led behavior numeric value format */ @@ -1393,7 +1435,7 @@ /* number of bytes needed to define a proper bit mask for MAC event reporting */ #define BCMIO_ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) #define BCMIO_NBBY 8 -#define WL_EVENTING_MASK_LEN (16+4) +#define WL_EVENTING_MASK_LEN (16+4) /* Don't increase this without wl review */ #define WL_EVENTING_MASK_EXT_LEN \ MAX(WL_EVENTING_MASK_LEN, (ROUNDUP(WLC_E_LAST, NBBY)/NBBY)) @@ -1479,12 +1521,81 @@ /* maximum channels returned by the get valid channels iovar */ #define WL_NUMCHANNELS 64 +/* Channels break down for 2G BAND +* 2G 20MHz = 14 +* +* 2G 40MHz +* 9 * 2 = 18 +* +* 2G tot = 14 + 18 = 32 +* +* Channels Break down for 5G BAND +* 5G 20MHz +* 36-48 4 +* 52-64 4 +* 100-144 12 +* 149-161 4 +* 165 1 +* 5G 20 subtot = 25 +* +* 5G 40 12 * 2 = 24 +* 5G 80 6 * 4 = 24 +* 5G 160 2 * 8 = 16 +* +* 5G total = 25 + 24+ 24+ 16 = 89 +* +* TOTAL 2G and 5G +* 2G + 5G = (32 + 89) = 121 +* +* Channels Break down for 6G BAND +* 20MHz = 59 +* 40MHz 29 * 2 = 58 +* 80MHz 14 * 4 = 56 +* 160MHz 7 * 8 = 56 +* 6G total = 59 + 58 + 56 + 56 = 229 +* +* Toal WL_NUMCHANSPECS 2G/5G/6G +* total = 32 + 89 + 229 = 350 +* +* IF 5g 80+80 is defined +* 80MHz cf pairs are: +* 42 106 +* 42 122 +* 42 138 +* 42 155 +* 58 106 +* 58 122 +* 58 138 +* 58 155 +* 106 138 +* 106 155 +* 122 155 +* 138 155 
+* +* +* 12 pairs * 8 primary channels = 96 +* TOTAL 2G + 5G + 5G (80 + 80) +* 32 + 89 + 96 = 217 +* +*TOTAL 2G + 5G + 5G (80 + 80) +6G (excluding 80 + 80) +* 32 + 89 + 96 + 229 = 446 +* +*/ +#ifdef WL_BAND6G +/* max number of chanspecs (used by the iovar to calc. buf space) */ +#ifdef WL11AC_80P80 +#define WL_NUMCHANSPECS 446 +#else +#define WL_NUMCHANSPECS 350 +#endif // endif +#else /* max number of chanspecs (used by the iovar to calc. buf space) */ #ifdef WL11AC_80P80 #define WL_NUMCHANSPECS 206 #else #define WL_NUMCHANSPECS 110 #endif // endif +#endif /* WL_BAND6G */ /* WDS link local endpoint WPA role */ #define WL_WDS_WPA_ROLE_AUTH 0 /* authenticator */ @@ -1778,6 +1889,7 @@ #define WL_P2P_SCHED_TYPE_ABS 0 /* Scheduled Absence */ #define WL_P2P_SCHED_TYPE_REQ_ABS 1 /* Requested Absence */ +/* at some point we may need bitvec here (combination of actions) */ /* schedule action during absence periods (for WL_P2P_SCHED_ABS type) */ #define WL_P2P_SCHED_ACTION_NONE 0 /* no action */ #define WL_P2P_SCHED_ACTION_DOZE 1 /* doze */ @@ -1786,6 +1898,7 @@ /* schedule option - WL_P2P_SCHED_TYPE_XXX */ #define WL_P2P_SCHED_ACTION_RESET 255 /* reset */ +/* at some point we may need bitvec here (combination of options) */ /* schedule option - WL_P2P_SCHED_TYPE_ABS */ #define WL_P2P_SCHED_OPTION_NORMAL 0 /* normal start/interval/duration/count */ #define WL_P2P_SCHED_OPTION_BCNPCT 1 /* percentage of beacon interval */ @@ -1820,11 +1933,6 @@ #define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040 #define WLFEATURE_DISABLE_11N_GF 0x00000080 -/* Proxy STA modes */ -#define PSTA_MODE_DISABLED 0 -#define PSTA_MODE_PROXY 1 -#define PSTA_MODE_REPEATER 2 - /* op code in nat_cfg */ #define NAT_OP_ENABLE 1 /* enable NAT on given interface */ #define NAT_OP_DISABLE 2 /* disable NAT on given interface */ @@ -1963,7 +2071,7 @@ #define WL_SWFL_ABBFL 0x0001 /* Allow Afterburner on systems w/o hardware BFL */ #define WL_SWFL_ABENCORE 0x0002 /* Allow AB on non-4318E chips */ #endif /* 
WLAFTERBURNER */ -#define WL_SWFL_NOHWRADIO 0x0004 +#define WL_SWFL_NOHWRADIO 0x0004 /* Disable HW Radio monitor (e.g., Cust Spec) */ #define WL_SWFL_FLOWCONTROL 0x0008 /* Enable backpressure to OS stack */ #define WL_SWFL_WLBSSSORT 0x0010 /* Per-port supports sorting of BSS */ @@ -2083,10 +2191,11 @@ #define TOE_ERRTEST_RX_CSUM2 0x00000004 /* ARP Offload feature flags for arp_ol iovar */ -#define ARP_OL_AGENT 0x00000001 -#define ARP_OL_SNOOP 0x00000002 -#define ARP_OL_HOST_AUTO_REPLY 0x00000004 -#define ARP_OL_PEER_AUTO_REPLY 0x00000008 +#define ARP_OL_AGENT 0x00000001 +#define ARP_OL_SNOOP 0x00000002 +#define ARP_OL_HOST_AUTO_REPLY 0x00000004 +#define ARP_OL_PEER_AUTO_REPLY 0x00000008 +#define ARP_OL_UPDATE_HOST_CACHE 0x00000010 /* ARP Offload error injection */ #define ARP_ERRTEST_REPLY_PEER 0x1 @@ -2317,4 +2426,38 @@ #define ETD_DATA_JOIN_INFO 0 #define ETD_DATA_VERSION_V1 1 +/* CTMODE DBG */ +/* input param: [31:16] => MPDU_THRESHOLD + * [15:03] => RESERVED + * [02] => enable UFP + * [01] => enable UFC + * [00] => enalbe CTMODE + */ +#define CTMODE_DBG_CTMODE_EN (0x1u) +#define CTMODE_DBG_UFC_EN (0x2u) +#define CTMODE_DBG_UFP_EN (0x4u) +#define CTMODE_DBG_MPDU_THRESHOLD_SHIFT (7u) +#define CTMODE_DBG_MPDU_THRESHOLD_MASK ((0x1FFu) << CTMODE_DBG_MPDU_THRESHOLD_SHIFT) +#define CTMODE_DBG_BYTES_THRESHOLD_SHIFT (16u) +#define CTMODE_DBG_BYTES_THRESHOLD_MASK ((0xFFFu) << CTMODE_DBG_BYTES_THRESHOLD_SHIFT) + +/* ====== SC use case configs ========= */ +/* SC user/use case request */ +#define WL_SC_REQ_SCAN 0u /* user scan */ +#define WL_SC_REQ_CNX 1u /* associated idle */ +#define WL_SC_REQ_NAN 2u /* NAN synchronization and discovery offload */ + +/* === Per use case configuration === */ +/* scan cfgs */ +#define SC_SCAN_CFG_PASSIVE_MASK 0x01u /* Enable passive scan on sc */ +#define SC_SCAN_CFG_PASSIVE_SHIFT 0u +#define SC_SCAN_CFG_LP_SCAN_MASK 0x02u /* Enable low prio scan on sc */ +#define SC_SCAN_CFG_LP_SCAN_SHIFT 1u +#define SC_SCAN_CFG_REG_SCAN_MASK 0x04u /* 
Enable split scan using sc */ +#define SC_SCAN_CFG_REG_SCAN_SHIFT 2u +#define SC_SCAN_CFG_FULL_SCAN_MASK 0x08u /* Enable full scan on sc */ +#define SC_SCAN_CFG_FULL_SCAN_SHIFT 3u +/* Add get and set macros for each of the configs? */ + +/* === Place holder for cnx and nan cfgs === */ #endif /* wlioctl_defs_h */ diff --git a/bcmdhd.100.10.315.x/include/wlioctl_utils.h b/bcmdhd.100.10.315.x/include/wlioctl_utils.h index d553af5..e3926fd 100644 --- a/bcmdhd.100.10.315.x/include/wlioctl_utils.h +++ b/bcmdhd.100.10.315.x/include/wlioctl_utils.h @@ -1,7 +1,7 @@ /* * Custom OID/ioctl related helper functions. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/include/wpa.h b/bcmdhd.100.10.315.x/include/wpa.h index ba35495..9d8db99 100644 --- a/bcmdhd.100.10.315.x/include/wpa.h +++ b/bcmdhd.100.10.315.x/include/wpa.h @@ -1,7 +1,7 @@ /* * Fundamental types and constants relating to WPA * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: wpa.h 761317 2018-05-07 21:33:58Z $ + * $Id: wpa.h 822438 2019-05-29 17:13:44Z $ */ #ifndef _proto_wpa_h_ @@ -155,8 +155,14 @@ typedef BWL_PRE_PACKED_STRUCT struct (cipher) == WPA_CIPHER_AES_CCM || \ (cipher) == WPA_CIPHER_AES_GCM || \ (cipher) == WPA_CIPHER_AES_GCM256 || \ + (cipher) == WPA_CIPHER_CCMP_256 || \ (cipher) == WPA_CIPHER_TPK) +#define IS_WPA_BIP_CIPHER(cipher) ((cipher) == WPA_CIPHER_BIP || \ + (cipher) == WPA_CIPHER_BIP_GMAC_128 || \ + (cipher) == WPA_CIPHER_BIP_GMAC_256 || \ + (cipher) == WPA_CIPHER_BIP_CMAC_256) + #ifdef BCMWAPI_WAI #define IS_WAPI_CIPHER(cipher) ((cipher) == WAPI_CIPHER_NONE || \ (cipher) == WAPI_CSE_WPI_SMS4) @@ -191,6 +197,16 @@ typedef BWL_PRE_PACKED_STRUCT struct (cipher) == WPA_CIPHER_BIP_GMAC_128 || \ (cipher) == WPA_CIPHER_BIP_GMAC_256 || \ (cipher) == WPA_CIPHER_BIP_CMAC_256) + +#define WPA_IS_FT_AKM(akm) ((akm) == RSN_AKM_FBT_SHA256 || \ + (akm) == RSN_AKM_FBT_SHA384) + +#define WPA_IS_FILS_AKM(akm) ((akm) == RSN_AKM_FILS_SHA256 || \ + (akm) == RSN_AKM_FILS_SHA384) + +#define WPA_IS_FILS_FT_AKM(akm) ((akm) == RSN_AKM_FBT_SHA256_FILS || \ + (akm) == RSN_AKM_FBT_SHA384_FILS) + /* WPA TKIP countermeasures parameters */ #define WPA_TKIP_CM_DETECT 60 /* multiple MIC failure window (seconds) */ #define WPA_TKIP_CM_BLOCK 60 /* countermeasures active window (seconds) */ @@ -244,6 +260,8 @@ typedef uint32 rsn_ciphers_t; /* mask of rsn_cipher_t */ typedef uint8 rsn_akm_t; typedef uint8 auth_ie_type_mask_t; +/* Old location for this structure. 
Moved to bcmwpa.h */ +#ifndef RSN_IE_INFO_STRUCT_RELOCATED typedef struct rsn_ie_info { uint8 version; rsn_cipher_t g_cipher; @@ -268,7 +286,10 @@ typedef struct rsn_ie_info { uint8 kek_len; /* EAPOL KEK */ uint8 tk_len; /* EAPOL TK */ uint8 ptk_len; /* EAPOL PTK */ + uint8 kck2_len; /* EAPOL KCK2 */ + uint8 kek2_len; /* EAPOL KEK2 */ } rsn_ie_info_t; +#endif /* RSN_IE_INFO_STRUCT_RELOCATED */ #ifdef BCMWAPI_WAI #define WAPI_CAP_PREAUTH RSN_CAP_PREAUTH diff --git a/bcmdhd.100.10.315.x/include/wps.h b/bcmdhd.100.10.315.x/include/wps.h index 7684de0..bceedb1 100644 --- a/bcmdhd.100.10.315.x/include/wps.h +++ b/bcmdhd.100.10.315.x/include/wps.h @@ -1,7 +1,7 @@ /* * WPS IE definitions * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/linux_osl.c b/bcmdhd.100.10.315.x/linux_osl.c index f702c89..1e853cc 100644 --- a/bcmdhd.100.10.315.x/linux_osl.c +++ b/bcmdhd.100.10.315.x/linux_osl.c @@ -1,7 +1,7 @@ /* * Linux OS Independent Layer * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: linux_osl.c 767848 2018-06-15 09:33:44Z $ + * $Id: linux_osl.c 815919 2019-04-22 09:06:50Z $ */ #define LINUX_PORT @@ -45,6 +45,9 @@ #include #include #include +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 8, 0)) +#include +#endif #ifdef BCM_SECURE_DMA #include @@ -118,6 +121,7 @@ static int secdma_found = 0; static void osl_dma_lock(osl_t *osh); static void osl_dma_unlock(osl_t *osh); static void osl_dma_lock_init(osl_t *osh); + #define DMA_LOCK(osh) osl_dma_lock(osh) #define DMA_UNLOCK(osh) osl_dma_unlock(osh) #define DMA_LOCK_INIT(osh) osl_dma_lock_init(osh); @@ -212,26 +216,92 @@ uint lmtest = FALSE; #ifdef DHD_MAP_LOGGING #define DHD_MAP_LOG_SIZE 2048 +typedef struct dhd_map_item { + dmaaddr_t pa; /* DMA address (physical) */ + uint64 ts_nsec; /* timestamp: nsec */ + uint32 size; /* mapping size */ + uint8 rsvd[4]; /* reserved for future use */ +} dhd_map_item_t; + typedef struct dhd_map_record { - dma_addr_t addr; - uint64 time; + uint32 items; /* number of total items */ + uint32 idx; /* current index of metadata */ + dhd_map_item_t map[0]; /* metadata storage */ } dhd_map_log_t; -dhd_map_log_t *dhd_map_log = NULL, *dhd_unmap_log = NULL; -uint32 map_idx = 0, unmap_idx = 0; - void -osl_dma_map_dump(void) +osl_dma_map_dump(osl_t *osh) +{ + dhd_map_log_t *map_log, *unmap_log; + uint64 ts_sec, ts_usec; + + map_log = (dhd_map_log_t *)(osh->dhd_map_log); + unmap_log = (dhd_map_log_t *)(osh->dhd_unmap_log); + osl_get_localtime(&ts_sec, &ts_usec); + + if (map_log && unmap_log) { + printk("%s: map_idx=%d unmap_idx=%d " + "current time=[%5lu.%06lu]\n", __FUNCTION__, + map_log->idx, unmap_log->idx, (unsigned long)ts_sec, + (unsigned long)ts_usec); + printk("%s: dhd_map_log(pa)=0x%llx size=%d," + " dma_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__, + 
(uint64)__virt_to_phys((ulong)(map_log->map)), + (uint32)(sizeof(dhd_map_item_t) * map_log->items), + (uint64)__virt_to_phys((ulong)(unmap_log->map)), + (uint32)(sizeof(dhd_map_item_t) * unmap_log->items)); + } +} + +static void * +osl_dma_map_log_init(uint32 item_len) { - printk("%s: map_idx=%d unmap_idx=%d current time=%llu\n", - __FUNCTION__, map_idx, unmap_idx, OSL_SYSUPTIME_US()); - if (dhd_map_log && dhd_unmap_log) { - printk("%s: dhd_map_log(pa)=%llx size=%d, dma_unmap_log(pa)=%llx size=%d\n", - __FUNCTION__, (uint64)__virt_to_phys((ulong)dhd_map_log), - (uint32)(sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE), - (uint64)__virt_to_phys((ulong)dhd_unmap_log), - (uint32)(sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE)); + dhd_map_log_t *map_log; + gfp_t flags; + uint32 alloc_size = (uint32)(sizeof(dhd_map_log_t) + + (item_len * sizeof(dhd_map_item_t))); + + flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC; + map_log = (dhd_map_log_t *)kmalloc(alloc_size, flags); + if (map_log) { + memset(map_log, 0, alloc_size); + map_log->items = item_len; + map_log->idx = 0; } + + return (void *)map_log; +} + +static void +osl_dma_map_log_deinit(osl_t *osh) +{ + if (osh->dhd_map_log) { + kfree(osh->dhd_map_log); + osh->dhd_map_log = NULL; + } + + if (osh->dhd_unmap_log) { + kfree(osh->dhd_unmap_log); + osh->dhd_unmap_log = NULL; + } +} + +static void +osl_dma_map_logging(osl_t *osh, void *handle, dmaaddr_t pa, uint32 len) +{ + dhd_map_log_t *log = (dhd_map_log_t *)handle; + uint32 idx; + + if (log == NULL) { + printk("%s: log is NULL\n", __FUNCTION__); + return; + } + + idx = log->idx; + log->map[idx].ts_nsec = osl_localtime_ns(); + log->map[idx].pa = pa; + log->map[idx].size = len; + log->idx = (idx + 1) % log->items; } #endif /* DHD_MAP_LOGGING */ @@ -247,7 +317,6 @@ osl_error(int bcmerror) /* Array bounds covered by ASSERT in osl_attach */ return linuxbcmerrormap[-bcmerror]; } - osl_t * osl_attach(void *pdev, uint bustype, bool pkttag) { @@ -412,13 +481,14 @@ osl_attach(void *pdev, uint 
bustype, bool pkttag) DMA_LOCK_INIT(osh); #ifdef DHD_MAP_LOGGING - dhd_map_log = kmalloc(sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE, flags); - if (dhd_map_log) { - memset(dhd_map_log, 0, sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE); + osh->dhd_map_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE); + if (osh->dhd_map_log == NULL) { + printk("%s: Failed to alloc dhd_map_log\n", __FUNCTION__); } - dhd_unmap_log = kmalloc(sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE, flags); - if (dhd_unmap_log) { - memset(dhd_unmap_log, 0, sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE); + + osh->dhd_unmap_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE); + if (osh->dhd_unmap_log == NULL) { + printk("%s: Failed to alloc dhd_unmap_log\n", __FUNCTION__); } #endif /* DHD_MAP_LOGGING */ @@ -467,12 +537,8 @@ osl_detach(osl_t *osh) bcm_object_trace_deinit(); #ifdef DHD_MAP_LOGGING - if (dhd_map_log) { - kfree(dhd_map_log); - } - if (dhd_unmap_log) { - kfree(dhd_unmap_log); - } + osl_dma_map_log_deinit(osh->dhd_map_log); + osl_dma_map_log_deinit(osh->dhd_unmap_log); #endif /* DHD_MAP_LOGGING */ ASSERT(osh->magic == OS_HANDLE_MAGIC); @@ -601,7 +667,7 @@ osl_pci_bus(osl_t *osh) { ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); -#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35) +#if defined(__ARM_ARCH_7A__) return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus); #else return ((struct pci_dev *)osh->pdev)->bus->number; @@ -614,7 +680,7 @@ osl_pci_slot(osl_t *osh) { ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); -#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35) +#if defined(__ARM_ARCH_7A__) return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1; #else return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn); @@ -921,14 +987,12 @@ osl_virt_to_phys(void *va) return (void *)(uintptr)virt_to_phys(va); } -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) #include void BCMFASTPATH osl_dma_flush(osl_t *osh, void *va, uint size, int 
direction, void *p, hnddma_seg_map_t *dmah) { return; } -#endif /* LINUX_VERSION_CODE >= 2.6.36 */ dmaaddr_t BCMFASTPATH osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah) @@ -965,22 +1029,8 @@ osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_ map_addr = pci_map_single(osh->pdev, va, size, dir); #endif /* ! STB_SOC_WIFI */ -#ifdef DHD_MAP_LOGGING - if (dhd_map_log) { - dhd_map_log[map_idx].addr = map_addr; - dhd_map_log[map_idx].time = OSL_SYSUPTIME_US(); - map_idx++; - map_idx = map_idx % DHD_MAP_LOG_SIZE; - } -#endif /* DHD_MAP_LOGGING */ - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) ret = pci_dma_mapping_error(osh->pdev, map_addr); -#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 5)) - ret = pci_dma_mapping_error(map_addr); -#else - ret = 0; -#endif // endif + if (ret) { printk("%s: Failed to map memory\n", __FUNCTION__); PHYSADDRLOSET(ret_addr, 0); @@ -990,6 +1040,10 @@ osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_ PHYSADDRHISET(ret_addr, (map_addr >> 32) & 0xffffffff); } +#ifdef DHD_MAP_LOGGING + osl_dma_map_logging(osh, osh->dhd_map_log, ret_addr, size); +#endif /* DHD_MAP_LOGGING */ + DMA_UNLOCK(osh); return ret_addr; @@ -1009,17 +1063,12 @@ osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction) dir = (direction == DMA_TX)? 
PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE; -#ifdef BCMDMA64OSL - PHYSADDRTOULONG(pa, paddr); #ifdef DHD_MAP_LOGGING - if (dhd_unmap_log) { - dhd_unmap_log[unmap_idx].addr = paddr; - dhd_unmap_log[unmap_idx].time = OSL_SYSUPTIME_US(); - unmap_idx++; - unmap_idx = unmap_idx % DHD_MAP_LOG_SIZE; - } + osl_dma_map_logging(osh, osh->dhd_unmap_log, pa, size); #endif /* DHD_MAP_LOGGING */ +#ifdef BCMDMA64OSL + PHYSADDRTOULONG(pa, paddr); pci_unmap_single(osh->pdev, paddr, size, dir); #else /* BCMDMA64OSL */ @@ -1037,19 +1086,11 @@ osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction) dma_unmap_single(osh->pdev, (uintptr)pa, size, dir); #endif /* (__LINUX_ARM_ARCH__ == 8) */ #else /* STB_SOC_WIFI */ -#ifdef DHD_MAP_LOGGING - if (dhd_unmap_log) { - dhd_unmap_log[unmap_idx].addr = pa; - dhd_unmap_log[unmap_idx].time = OSL_SYSUPTIME_US(); - unmap_idx++; - unmap_idx = unmap_idx % DHD_MAP_LOG_SIZE; - } -#endif /* DHD_MAP_LOGGING */ - pci_unmap_single(osh->pdev, (uint32)pa, size, dir); #endif /* STB_SOC_WIFI */ #endif /* BCMDMA64OSL */ + DMA_UNLOCK(osh); } @@ -1124,26 +1165,61 @@ osl_delay(uint usec) void osl_sleep(uint ms) { -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) if (ms < 20) usleep_range(ms*1000, ms*1000 + 1000); else -#endif // endif - msleep(ms); + msleep(ms); } uint64 osl_sysuptime_us(void) { - struct timeval tv; + struct osl_timespec tv; uint64 usec; - do_gettimeofday(&tv); + osl_do_gettimeofday(&tv); /* tv_usec content is fraction of a second */ usec = (uint64)tv.tv_sec * 1000000ul + tv.tv_usec; return usec; } +uint64 +osl_localtime_ns(void) +{ + uint64 ts_nsec = 0; + + ts_nsec = local_clock(); + + return ts_nsec; +} + +void +osl_get_localtime(uint64 *sec, uint64 *usec) +{ + uint64 ts_nsec = 0; + unsigned long rem_nsec = 0; + + ts_nsec = local_clock(); + rem_nsec = do_div(ts_nsec, NSEC_PER_SEC); + *sec = (uint64)ts_nsec; + *usec = (uint64)(rem_nsec / MSEC_PER_SEC); +} + +uint64 +osl_systztime_us(void) +{ + struct timeval tv; + uint64 tzusec; + + 
do_gettimeofday(&tv); + /* apply timezone */ + tzusec = (uint64)((tv.tv_sec - (sys_tz.tz_minuteswest * 60)) * + USEC_PER_SEC); + tzusec += tv.tv_usec; + + return tzusec; +} + /* * OSLREGOPS specifies the use of osl_XXX routines to be used for register access */ @@ -1819,7 +1895,15 @@ osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa) /* timer apis */ /* Note: All timer api's are thread unsafe and should be protected with locks by caller */ -#ifdef REPORT_FATAL_TIMEOUTS +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) +void +timer_cb_compat(struct timer_list *tl) +{ + timer_list_compat_t *t = container_of(tl, timer_list_compat_t, timer); + t->callback((ulong)t->arg); +} +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) */ + osl_timer_t * osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg) { @@ -1836,11 +1920,9 @@ osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg) MFREE(NULL, t, sizeof(osl_timer_t)); return (NULL); } - t->timer->data = (ulong)arg; - t->timer->function = (linux_timer_fn)fn; t->set = TRUE; - init_timer(t->timer); + init_timer_compat(t->timer, (linux_timer_fn)fn, arg); return (t); } @@ -1858,7 +1940,7 @@ osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic) if (periodic) { printf("Periodic timers are not supported by Linux timer apis\n"); } - t->timer->expires = jiffies + ms*HZ/1000; + timer_expires(t->timer) = jiffies + ms*HZ/1000; add_timer(t->timer); @@ -1876,9 +1958,9 @@ osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic) printf("Periodic timers are not supported by Linux timer apis\n"); } t->set = TRUE; - t->timer->expires = jiffies + ms*HZ/1000; + timer_expires(t->timer) = jiffies + ms*HZ/1000; - mod_timer(t->timer, t->timer->expires); + mod_timer(t->timer, timer_expires(t->timer)); return; } @@ -1903,20 +1985,55 @@ osl_timer_del(osl_t *osh, osl_timer_t *t) } return (TRUE); } -#endif +#if (LINUX_VERSION_CODE >= 
KERNEL_VERSION(4, 14, 0)) +int +kernel_read_compat(struct file *file, loff_t offset, char *addr, unsigned long count) +{ + return (int)kernel_read(file, addr, (size_t)count, &offset); +} +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */ + +void * +osl_spin_lock_init(osl_t *osh) +{ + /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */ + /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */ + /* and this results in kernel asserts in internal builds */ + spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4); + if (lock) + spin_lock_init(lock); + return ((void *)lock); +} + +void +osl_spin_lock_deinit(osl_t *osh, void *lock) +{ + if (lock) + MFREE(osh, lock, sizeof(spinlock_t) + 4); +} + +unsigned long +osl_spin_lock(void *lock) +{ + unsigned long flags = 0; + + if (lock) + spin_lock_irqsave((spinlock_t *)lock, flags); + + return flags; +} + +void +osl_spin_unlock(void *lock, unsigned long flags) +{ + if (lock) + spin_unlock_irqrestore((spinlock_t *)lock, flags); +} #ifdef USE_DMA_LOCK static void osl_dma_lock(osl_t *osh) { - /* XXX: The conditional check is to avoid the scheduling bug. - * If the spin_lock_bh is used under the spin_lock_irqsave, - * Kernel triggered the warning message as the spin_lock_irqsave - * disables the interrupt and the spin_lock_bh doesn't use in case - * interrupt is disabled. - * Please refer to the __local_bh_enable_ip() function - * in kernel/softirq.c to understand the condtion. 
- */ if (likely(in_irq() || irqs_disabled())) { spin_lock(&osh->dma_lock); } else { @@ -1942,4 +2059,40 @@ osl_dma_lock_init(osl_t *osh) spin_lock_init(&osh->dma_lock); osh->dma_lock_bh = FALSE; } -#endif /* USE_DMA_LOCK */ \ No newline at end of file +#endif /* USE_DMA_LOCK */ + +void +osl_do_gettimeofday(struct osl_timespec *ts) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0) + struct timespec curtime; +#else + struct timeval curtime; +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0) + getnstimeofday(&curtime); + ts->tv_nsec = curtime.tv_nsec; + ts->tv_usec = curtime.tv_nsec / 1000; +#else + do_gettimeofday(&curtime); + ts->tv_usec = curtime.tv_usec; + ts->tv_nsec = curtime.tv_usec * 1000; +#endif + ts->tv_sec = curtime.tv_sec; +} + +void +osl_get_monotonic_boottime(struct osl_timespec *ts) +{ + struct timespec curtime; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) + curtime = ktime_to_timespec(ktime_get_boottime()); +#else + get_monotonic_boottime(&curtime); +#endif + ts->tv_sec = curtime.tv_sec; + ts->tv_nsec = curtime.tv_nsec; + ts->tv_usec = curtime.tv_nsec / 1000; +} \ No newline at end of file diff --git a/bcmdhd.100.10.315.x/linux_osl_priv.h b/bcmdhd.100.10.315.x/linux_osl_priv.h index 7ae18c2..7dfb96c 100644 --- a/bcmdhd.100.10.315.x/linux_osl_priv.h +++ b/bcmdhd.100.10.315.x/linux_osl_priv.h @@ -1,7 +1,7 @@ /* * Private header file for Linux OS Independent Layer * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: linux_osl_priv.h 737887 2017-12-23 12:15:26Z $ + * $Id: linux_osl_priv.h 794159 2018-12-12 07:41:14Z $ */ #ifndef _LINUX_OSL_PRIV_H_ @@ -175,6 +175,10 @@ struct osl_info { spinlock_t dma_lock; bool dma_lock_bh; #endif /* USE_DMA_LOCK */ +#ifdef DHD_MAP_LOGGING + void *dhd_map_log; + void *dhd_unmap_log; +#endif /* DHD_MAP_LOGGING */ }; #endif /* _LINUX_OSL_PRIV_H_ */ diff --git a/bcmdhd.100.10.315.x/linux_pkt.c b/bcmdhd.100.10.315.x/linux_pkt.c index 15891fd..fe65534 100644 --- a/bcmdhd.100.10.315.x/linux_pkt.c +++ b/bcmdhd.100.10.315.x/linux_pkt.c @@ -1,7 +1,7 @@ /* * Linux Packet (skb) interface * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -37,6 +37,8 @@ #include #include #include +#include +#include #include #include "linux_osl_priv.h" @@ -73,7 +75,7 @@ int osl_static_mem_init(osl_t *osh, void *adapter) #ifdef CONFIG_DHD_USE_STATIC_BUF if (!bcm_static_buf && adapter) { if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter, - 3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) { + DHD_PREALLOC_OSL_BUF, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) { printk("can not alloc static buf!\n"); bcm_static_skb = NULL; ASSERT(osh->magic == OS_HANDLE_MAGIC); @@ -92,7 +94,7 @@ int osl_static_mem_init(osl_t *osh, void *adapter) int i; void *skb_buff_ptr = 0; bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048); - skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0); + skb_buff_ptr = wifi_platform_prealloc(adapter, DHD_PREALLOC_SKB_BUF, 0); if (!skb_buff_ptr) { printk("cannot alloc static buf!\n"); bcm_static_buf = NULL; diff --git a/bcmdhd.100.10.315.x/otpdefs.h 
b/bcmdhd.100.10.315.x/otpdefs.h new file mode 100644 index 0000000..ed50b15 --- /dev/null +++ b/bcmdhd.100.10.315.x/otpdefs.h @@ -0,0 +1,121 @@ +/* + * otpdefs.h SROM/OTP definitions. + * + * Copyright (C) 1999-2019, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * <> + * + * $Id$ + */ + +#ifndef _OTPDEFS_H_ +#define _OTPDEFS_H_ + +/* SFLASH */ +#define SFLASH_ADDRESS_OFFSET_4368 0x1C000000u +#define SFLASH_SKU_OFFSET_4368 0xEu +#define SFLASH_MACADDR_OFFSET_4368 0x4u +/* + * In sflash based chips, first word in sflash says the length. + * So only default value is defined here. Actual length is read + * from sflash in dhdpcie_srom_sflash_health_chk + * 0x0521 * 2 .x2 since length says number of words. 
+ */ +#define SFLASH_LEN_4368 0xA42u + +#define SROM_ADDRESS_OFFSET_4355 0x0800u +#define SROM_ADDRESS_OFFSET_4364 0xA000u +#define SROM_ADDRESS_OFFSET_4377 0x0800u +#define SROM_ADDRESS(sih, offset) (SI_ENUM_BASE(sih) + (offset)) +#define SROM_MACADDR_OFFSET_4355 0x84u +#define SROM_MACADDR_OFFSET_4364 0x82u +#define SROM_MACADDR_OFFSET_4377 0xE2u +#define SROM_SKU_OFFSET_4355 0x8Au +#define SROM_SKU_OFFSET_4364 0x8Cu +#define SROM_SKU_OFFSET_4377 0xECu +#define SROM_CAL_SIG1_OFFSET_4355 0xB8u +#define SROM_CAL_SIG2_OFFSET_4355 0xBAu +#define SROM_CAL_SIG1_OFFSET_4364 0xA0u +#define SROM_CAL_SIG2_OFFSET_4364 0xA2u +#define SROM_CAL_SIG1 0x4c42u +#define SROM_CAL_SIG2 0x424fu +#define SROM_LEN_4355 512u +#define SROM_LEN_4364 2048u +#define SROM_LEN_4377 2048u + +#define OTP_USER_AREA_OFFSET_4355 0xC0u +#define OTP_USER_AREA_OFFSET_4364 0xC0u +#define OTP_USER_AREA_OFFSET_4368 0x120u +#define OTP_USER_AREA_OFFSET_4377 0x120u +#define OTP_OFFSET_4368 0x5000u +#define OTP_OFFSET_4377 0x11000u +#define OTP_CTRL1_VAL 0xFA0000 +#define OTP_ADDRESS(sih, offset) (SI_ENUM_BASE(sih) + (offset)) +#define OTP_VERSION_TUPLE_ID 0x15 +#define OTP_VENDOR_TUPLE_ID 0x80 +#define OTP_CIS_REGION_END_TUPLE_ID 0XFF + +#define PCIE_CTRL_REG_ADDR(sih) (SI_ENUM_BASE(sih) + 0x3000) +#define SPROM_CTRL_REG_ADDR(sih) (SI_ENUM_BASE(sih) + CC_SROM_CTRL) +#define SPROM_CTRL_OPCODE_READ_MASK 0x9FFFFFFF +#define SPROM_CTRL_START_BUSY_MASK 0x80000000 +#define SPROM_ADDR(sih) (SI_ENUM_BASE(sih) + CC_SROM_ADDRESS) +#define SPROM_DATA(sih) (SI_ENUM_BASE(sih) + CC_SROM_DATA) +#define OTP_CTRL1_REG_ADDR(sih) (SI_ENUM_BASE(sih) + 0xF4) +#define PMU_MINRESMASK_REG_ADDR(sih) (SI_ENUM_BASE(sih) + MINRESMASKREG) +#define CHIP_COMMON_STATUS_REG_ADDR(sih) (SI_ENUM_BASE(sih) + CC_CHIPST) +#define CHIP_COMMON_CLKDIV2_ADDR(sih) (SI_ENUM_BASE(sih) + CC_CLKDIV2) + +#define CC_CLKDIV2_SPROMDIV_MASK 0x7u +#define CC_CLKDIV2_SPROMDIV_VAL 0X4u +#define CC_CHIPSTATUS_STRAP_BTUART_MASK 0x40u +#define PMU_OTP_PWR_ON_MASK 
0xC47 +#define PMU_PWRUP_DELAY 500 /* in us */ +#define DONGLE_TREFUP_PROGRAM_DELAY 5000 /* 5ms in us */ +#define SPROM_BUSY_POLL_DELAY 5 /* 5us */ + +typedef enum { + BCM4355_IDX = 0, + BCM4364_IDX, + BCM4368_IDX, + BCM4377_IDX, + BCMMAX_IDX +} chip_idx_t; + +typedef enum { + BCM4368_BTOP_IDX, + BCM4377_BTOP_IDX, + BCMMAX_BTOP_IDX +} chip_idx_btop_t; + +typedef enum { + BCM4368_SFLASH_IDX, + BCMMAX_SFLASH_IDX +} chip_idx_sflash_t; + +extern uint32 otp_addr_offsets[]; +extern uint32 otp_usrarea_offsets[]; +extern uint32 sku_offsets[]; +extern uint32 srf_addr_offsets[]; +extern uint32 supported_chips[]; + +char *dhd_get_plat_sku(void); +#endif /* _OTPDEFS_H */ diff --git a/bcmdhd.100.10.315.x/pcie_core.c b/bcmdhd.100.10.315.x/pcie_core.c index 08c21a3..39b0c6b 100644 --- a/bcmdhd.100.10.315.x/pcie_core.c +++ b/bcmdhd.100.10.315.x/pcie_core.c @@ -3,7 +3,7 @@ * Contains PCIe related functions that are shared between different driver models (e.g. firmware * builds, DHD builds, BMAC builds), in order to avoid code duplication. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/sbutils.c b/bcmdhd.100.10.315.x/sbutils.c index 1a3253a..68961ee 100644 --- a/bcmdhd.100.10.315.x/sbutils.c +++ b/bcmdhd.100.10.315.x/sbutils.c @@ -2,7 +2,7 @@ * Misc utility routines for accessing chip-specific features * of the SiliconBackplane-based Broadcom chips. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/bcmdhd.100.10.315.x/siutils.c b/bcmdhd.100.10.315.x/siutils.c index 02c7c7b..bfc245f 100644 --- a/bcmdhd.100.10.315.x/siutils.c +++ b/bcmdhd.100.10.315.x/siutils.c @@ -2,7 +2,7 @@ * Misc utility routines for accessing chip-specific features * of the SiliconBackplane-based Broadcom chips. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: siutils.c 769534 2018-06-26 21:19:11Z $ + * $Id: siutils.c 798061 2019-01-04 23:27:15Z $ */ #include @@ -1044,6 +1044,23 @@ si_setint(si_t *sih, int siflag) ASSERT(0); } +uint32 +si_oobr_baseaddr(si_t *sih, bool second) +{ + si_info_t *sii = SI_INFO(sih); + + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return 0; + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return (second ? 
sii->oob_router1 : sii->oob_router); + else { + ASSERT(0); + return 0; + } +} + uint si_coreid(si_t *sih) { @@ -1972,6 +1989,7 @@ si_chip_hostif(si_t *sih) else if (CST4369_CHIPMODE_PCIE(sih->chipst)) hosti = CHIP_HOSTIF_PCIEMODE; break; + case BCM4350_CHIP_ID: case BCM4354_CHIP_ID: case BCM43556_CHIP_ID: @@ -2866,6 +2884,7 @@ si_tcm_size(si_t *sih) bool wasup; uint32 corecap; uint memsize = 0; + uint banku_size = 0; uint32 nab = 0; uint32 nbb = 0; uint32 totb = 0; @@ -2902,7 +2921,12 @@ si_tcm_size(si_t *sih) W_REG(sii->osh, arm_bidx, idx); bxinfo = R_REG(sii->osh, arm_binfo); - memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT; + if (bxinfo & ARMCR4_BUNITSZ_MASK) { + banku_size = ARMCR4_BSZ_1K; + } else { + banku_size = ARMCR4_BSZ_8K; + } + memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * banku_size; } /* Return to previous state and core */ @@ -3527,11 +3551,7 @@ bool _bcmsrpwr = TRUE; bool _bcmsrpwr = FALSE; #endif // endif -#ifndef BCMSDIO -#define PWRREQ_OFFSET(sih) DAR_PCIE_PWR_CTRL((sih)->buscorerev) -#else #define PWRREQ_OFFSET(sih) OFFSETOF(chipcregs_t, powerctl) -#endif // endif static void si_corereg_pciefast_write(si_t *sih, uint regoff, uint val) @@ -3555,12 +3575,6 @@ si_corereg_pciefast_read(si_t *sih, uint regoff) ASSERT((BUSTYPE(sih->bustype) == PCI_BUS)); -#ifndef BCMSDIO - if (PCIECOREREV(sih->buscorerev) == 66) { - si_corereg_pciefast_write(sih, OFFSETOF(sbpcieregs_t, u1.dar_64.dar_ctrl), 0); - } -#endif // endif - r = (volatile uint32 *)((volatile char *)sii->curmap + PCI_16KB0_PCIREGS_OFFSET + regoff); @@ -3647,7 +3661,7 @@ si_srpwr_stat_spinwait(si_t *sih, uint32 mask, uint32 val) ASSERT(r == val); } - r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK; + r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK(sih); return r; } @@ -3665,7 +3679,7 @@ si_srpwr_stat(si_t *sih) r = si_corereg_pciefast_read(sih, offset); } - r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK; + r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK(sih); return 
r; } @@ -3683,11 +3697,26 @@ si_srpwr_domain(si_t *sih) r = si_corereg_pciefast_read(sih, offset); } - r = (r >> SRPWR_DMN_SHIFT) & SRPWR_DMN_ALL_MASK; + r = (r >> SRPWR_DMN_ID_SHIFT) & SRPWR_DMN_ID_MASK; return r; } +uint32 +si_srpwr_domain_all_mask(si_t *sih) +{ + uint32 mask = SRPWR_DMN0_PCIE_MASK | + SRPWR_DMN1_ARMBPSD_MASK | + SRPWR_DMN2_MACAUX_MASK | + SRPWR_DMN3_MACMAIN_MASK; + + if (si_scan_core_present(sih)) { + mask |= SRPWR_DMN4_MACSCAN_MASK; + } + + return mask; +} + /* Utility API to read/write the raw registers with absolute address. * This function can be invoked from either FW or host driver. */ @@ -3762,3 +3791,10 @@ si_lhl_ps_mode(si_t *sih) si_info_t *sii = SI_INFO(sih); return sii->lhl_ps_mode; } + +bool +BCMRAMFN(si_scan_core_present)(si_t *sih) +{ + return ((si_numcoreunits(sih, D11_CORE_ID) >= 2) && + (si_numcoreunits(sih, SR_CORE_ID) > 4)); +} diff --git a/bcmdhd.100.10.315.x/siutils_priv.h b/bcmdhd.100.10.315.x/siutils_priv.h index e17abe8..983619b 100644 --- a/bcmdhd.100.10.315.x/siutils_priv.h +++ b/bcmdhd.100.10.315.x/siutils_priv.h @@ -1,7 +1,7 @@ /* * Include file private to the SOC Interconnect support files. * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: siutils_priv.h 769534 2018-06-26 21:19:11Z $ + * $Id: siutils_priv.h 795345 2018-12-18 16:52:03Z $ */ #ifndef _siutils_priv_h_ @@ -33,7 +33,7 @@ #if defined(SI_ERROR_ENFORCE) #define SI_ERROR(args) printf args #else -#define SI_ERROR(args) printf args +#define SI_ERROR(args) #endif // endif #if defined(ENABLE_CORECAPTURE) @@ -157,6 +157,7 @@ typedef struct si_info { void *curwrap; /**< current wrapper va */ uint32 oob_router; /**< oob router registers for axi */ + uint32 oob_router1; /**< oob router registers for axi */ si_cores_info_t *cores_info; gci_gpio_item_t *gci_gpio_head; /**< gci gpio interrupts head */ diff --git a/bcmdhd.100.10.315.x/wl_android.c b/bcmdhd.100.10.315.x/wl_android.c index 4fb7ab9..f914498 100644 --- a/bcmdhd.100.10.315.x/wl_android.c +++ b/bcmdhd.100.10.315.x/wl_android.c @@ -1,7 +1,7 @@ /* * Linux cfg80211 driver - Android related functions * - * Copyright (C) 1999-2018, Broadcom. + * Copyright (C) 1999-2019, Broadcom. 
* * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: wl_android.c 771907 2018-07-12 11:19:34Z $ + * $Id: wl_android.c 825470 2019-06-14 09:08:11Z $ */ #include @@ -54,6 +54,7 @@ #endif // endif #ifdef WL_CFG80211 #include +#include #endif // endif #ifdef WL_NAN #include @@ -64,27 +65,47 @@ #include #include #include -#ifdef WL_MBO -#include -#endif /* WL_MBO */ #ifdef WL_BCNRECV #include +#include #endif /* WL_BCNRECV */ +#ifdef WL_MBO +#include +#endif /* WL_MBO */ +#ifdef RTT_SUPPORT +#include +#endif /* RTT_SUPPORT */ +#ifdef WL_ESCAN +#include +#endif #ifdef WL_STATIC_IF #define WL_BSSIDX_MAX 16 #endif /* WL_STATIC_IF */ -#ifndef WL_CFG80211 -#define htod32(i) i -#define htod16(i) i -#define dtoh32(i) i -#define dtoh16(i) i -#define htodchanspec(i) i -#define dtohchanspec(i) i -#endif - -uint android_msg_level = ANDROID_ERROR_LEVEL; +uint android_msg_level = ANDROID_ERROR_LEVEL | ANDROID_MSG_LEVEL; + +#define ANDROID_ERROR_MSG(x, args...) \ + do { \ + if (android_msg_level & ANDROID_ERROR_LEVEL) { \ + printk(KERN_ERR "[dhd] ANDROID-ERROR) " x, ## args); \ + } \ + } while (0) +#define ANDROID_TRACE_MSG(x, args...) \ + do { \ + if (android_msg_level & ANDROID_TRACE_LEVEL) { \ + printk(KERN_INFO "[dhd] ANDROID-TRACE) " x, ## args); \ + } \ + } while (0) +#define ANDROID_INFO_MSG(x, args...) 
\ + do { \ + if (android_msg_level & ANDROID_INFO_LEVEL) { \ + printk(KERN_INFO "[dhd] ANDROID-INFO) " x, ## args); \ + } \ + } while (0) +#define ANDROID_ERROR(x) ANDROID_ERROR_MSG x +#define ANDROID_TRACE(x) ANDROID_TRACE_MSG x +#define ANDROID_INFO(x) ANDROID_INFO_MSG x /* * Android private command strings, PLEASE define new private commands here @@ -106,7 +127,9 @@ uint android_msg_level = ANDROID_ERROR_LEVEL; #define CMD_BTCOEXMODE "BTCOEXMODE" #define CMD_SETSUSPENDOPT "SETSUSPENDOPT" #define CMD_SETSUSPENDMODE "SETSUSPENDMODE" +#define CMD_SETDTIM_IN_SUSPEND "SET_DTIM_IN_SUSPEND" #define CMD_MAXDTIM_IN_SUSPEND "MAX_DTIM_IN_SUSPEND" +#define CMD_DISDTIM_IN_SUSPEND "DISABLE_DTIM_IN_SUSPEND" #define CMD_P2P_DEV_ADDR "P2P_DEV_ADDR" #define CMD_SETFWPATH "SETFWPATH" #define CMD_SETBAND "SETBAND" @@ -142,16 +165,6 @@ uint android_msg_level = ANDROID_ERROR_LEVEL; #endif /* WL_SUPPORT_AUTO_CHANNEL */ #define CMD_KEEP_ALIVE "KEEPALIVE" -#ifdef BCMCCX -/* CCX Private Commands */ -#define CMD_GETCCKM_RN "get cckm_rn" -#define CMD_SETCCKM_KRK "set cckm_krk" -#define CMD_GET_ASSOC_RES_IES "get assoc_res_ies" - -#define CCKM_KRK_LEN 16 -#define CCKM_BTK_LEN 32 -#endif // endif - #ifdef PNO_SUPPORT #define CMD_PNOSSIDCLR_SET "PNOSSIDCLR" #define CMD_PNOSETUP_SET "PNOSETUP " @@ -194,6 +207,22 @@ uint android_msg_level = ANDROID_ERROR_LEVEL; #define CMD_WBTEXT_BTM_DELTA "WBTEXT_BTM_DELTA" #define CMD_WBTEXT_ESTM_ENABLE "WBTEXT_ESTM_ENABLE" +#define BUFSZ 8 +#define BUFSZN BUFSZ + 1 + +#define _S(x) #x +#define S(x) _S(x) + +#define MAXBANDS 2 /**< Maximum #of bands */ +#define BAND_2G_INDEX 0 +#define BAND_5G_INDEX 0 + +typedef union { + wl_roam_prof_band_v1_t v1; + wl_roam_prof_band_v2_t v2; + wl_roam_prof_band_v3_t v3; +} wl_roamprof_band_t; + #ifdef WLWFDS #define CMD_ADD_WFDS_HASH "ADD_WFDS_HASH" #define CMD_DEL_WFDS_HASH "DEL_WFDS_HASH" @@ -228,6 +257,15 @@ uint android_msg_level = ANDROID_ERROR_LEVEL; #define CMD_SET_AP_RPS_PARAMS "SET_AP_RPS_PARAMS" #endif /* 
SUPPORT_AP_RADIO_PWRSAVE */ +#ifdef SUPPORT_AP_SUSPEND +#define CMD_SET_AP_SUSPEND "SET_AP_SUSPEND" +#endif /* SUPPORT_AP_SUSPEND */ + +#ifdef SUPPORT_AP_BWCTRL +#define CMD_SET_AP_BW "SET_AP_BW" +#define CMD_GET_AP_BW "GET_AP_BW" +#endif /* SUPPORT_AP_BWCTRL */ + /* miracast related definition */ #define MIRACAST_MODE_OFF 0 #define MIRACAST_MODE_SOURCE 1 @@ -261,7 +299,7 @@ struct connection_stats { static LIST_HEAD(miracast_resume_list); #ifdef WL_CFG80211 static u8 miracast_cur_mode; -#endif +#endif /* WL_CFG80211 */ #ifdef DHD_LOG_DUMP #define CMD_NEW_DEBUG_PRINT_DUMP "DEBUG_DUMP" @@ -270,10 +308,21 @@ static u8 miracast_cur_mode; void dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd); #endif /* DHD_LOG_DUMP */ +#ifdef DHD_STATUS_LOGGING +#define CMD_DUMP_STATUS_LOG "DUMP_STAT_LOG" +#define CMD_QUERY_STATUS_LOG "QUERY_STAT_LOG" +#endif /* DHD_STATUS_LOGGING */ + #ifdef DHD_DEBUG_UART extern bool dhd_debug_uart_is_running(struct net_device *dev); #endif /* DHD_DEBUG_UART */ +#ifdef RTT_GEOFENCE_INTERVAL +#if defined(RTT_SUPPORT) && defined(WL_NAN) +#define CMD_GEOFENCE_INTERVAL "GEOFENCE_INT" +#endif /* RTT_SUPPORT && WL_NAN */ +#endif /* RTT_GEOFENCE_INTERVAL */ + struct io_cfg { s8 *iovar; s32 param; @@ -283,6 +332,18 @@ struct io_cfg { struct list_head list; }; +typedef enum { + HEAD_SAR_BACKOFF_DISABLE = -1, + HEAD_SAR_BACKOFF_ENABLE = 0, + GRIP_SAR_BACKOFF_DISABLE, + GRIP_SAR_BACKOFF_ENABLE, + NR_mmWave_SAR_BACKOFF_DISABLE, + NR_mmWave_SAR_BACKOFF_ENABLE, + NR_Sub6_SAR_BACKOFF_DISABLE, + NR_Sub6_SAR_BACKOFF_ENABLE, + SAR_BACKOFF_DISABLE_ALL +} sar_modes; + #if defined(BCMFW_ROAM_ENABLE) #define CMD_SET_ROAMPREF "SET_ROAMPREF" @@ -366,6 +427,21 @@ static const wl_natoe_sub_cmd_t natoe_cmd_list[] = { #ifdef WL_BCNRECV #define CMD_BEACON_RECV "BEACON_RECV" #endif /* WL_BCNRECV */ +#ifdef WL_CAC_TS +#define CMD_CAC_TSPEC "CAC_TSPEC" +#endif /* WL_CAC_TS */ +#ifdef WL_CHAN_UTIL +#define CMD_GET_CHAN_UTIL "GET_CU" +#endif /* WL_CHAN_UTIL */ + +#ifdef 
SUPPORT_SOFTAP_ELNA_BYPASS +#define CMD_SET_SOFTAP_ELNA_BYPASS "SET_SOFTAP_ELNA_BYPASS" +#define CMD_GET_SOFTAP_ELNA_BYPASS "GET_SOFTAP_ELNA_BYPASS" +#endif /* SUPPORT_SOFTAP_ELNA_BYPASS */ + +#ifdef WL_NAN +#define CMD_GET_NAN_STATUS "GET_NAN_STATUS" +#endif /* WL_NAN */ /* drv command info structure */ typedef struct wl_drv_cmd_info { @@ -481,6 +557,11 @@ static struct genl_multicast_group wl_genl_mcast = { #define LQCM_RX_INDEX_SHIFT 16 /* LQCM rx index shift */ #endif /* SUPPORT_LQCM */ +#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS +#define NUMBER_SEQUENTIAL_PRIVCMD_ERRORS 7 +static int priv_cmd_errors = 0; +#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */ + /** * Extern function declarations (TODO: move them to dhd_linux.h) */ @@ -519,6 +600,74 @@ extern char iface_name[IFNAMSIZ]; extern bool g_pm_control; #endif /* DHD_PM_CONTROL_FROM_FILE */ +/* private command support for restoring roam/scan parameters */ +#ifdef SUPPORT_RESTORE_SCAN_PARAMS +#define CMD_RESTORE_SCAN_PARAMS "RESTORE_SCAN_PARAMS" + +typedef int (*PRIV_CMD_HANDLER) (struct net_device *dev, char *command); +typedef int (*PRIV_CMD_HANDLER_WITH_LEN) (struct net_device *dev, char *command, int total_len); + +enum { + RESTORE_TYPE_UNSPECIFIED = 0, + RESTORE_TYPE_PRIV_CMD = 1, + RESTORE_TYPE_PRIV_CMD_WITH_LEN = 2 +}; + +typedef struct android_restore_scan_params { + char command[64]; + int parameter; + int cmd_type; + union { + PRIV_CMD_HANDLER cmd_handler; + PRIV_CMD_HANDLER_WITH_LEN cmd_handler_w_len; + }; +} android_restore_scan_params_t; + +/* function prototypes of private command handler */ +static int wl_android_set_roam_trigger(struct net_device *dev, char* command); +int wl_android_set_roam_delta(struct net_device *dev, char* command); +int wl_android_set_roam_scan_period(struct net_device *dev, char* command); +int wl_android_set_full_roam_scan_period(struct net_device *dev, char* command, int total_len); +int wl_android_set_roam_scan_control(struct net_device *dev, char *command); +int 
wl_android_set_scan_channel_time(struct net_device *dev, char *command); +int wl_android_set_scan_home_time(struct net_device *dev, char *command); +int wl_android_set_scan_home_away_time(struct net_device *dev, char *command); +int wl_android_set_scan_nprobes(struct net_device *dev, char *command); +static int wl_android_set_band(struct net_device *dev, char *command); +int wl_android_set_scan_dfs_channel_mode(struct net_device *dev, char *command); +int wl_android_set_wes_mode(struct net_device *dev, char *command); +int wl_android_set_okc_mode(struct net_device *dev, char *command); + +/* default values */ +#ifdef ROAM_API +#define DEFAULT_ROAM_TIRGGER -75 +#define DEFAULT_ROAM_DELTA 10 +#define DEFAULT_ROAMSCANPERIOD 10 +#define DEFAULT_FULLROAMSCANPERIOD_SET 120 +#endif /* ROAM_API */ +#define DEFAULT_BAND 0 + +/* restoring parameter list, please don't change order */ +static android_restore_scan_params_t restore_params[] = +{ +/* wbtext need to be disabled while updating roam/scan parameters */ +#ifdef ROAM_API + { CMD_ROAMTRIGGER_SET, DEFAULT_ROAM_TIRGGER, + RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_roam_trigger}, + { CMD_ROAMDELTA_SET, DEFAULT_ROAM_DELTA, + RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_roam_delta}, + { CMD_ROAMSCANPERIOD_SET, DEFAULT_ROAMSCANPERIOD, + RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_roam_scan_period}, + { CMD_FULLROAMSCANPERIOD_SET, DEFAULT_FULLROAMSCANPERIOD_SET, + RESTORE_TYPE_PRIV_CMD_WITH_LEN, + .cmd_handler_w_len = wl_android_set_full_roam_scan_period}, +#endif /* ROAM_API */ + { CMD_SETBAND, DEFAULT_BAND, + RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_band}, + { "\0", 0, RESTORE_TYPE_UNSPECIFIED, .cmd_handler = NULL} +}; +#endif /* SUPPORT_RESTORE_SCAN_PARAMS */ + /** * Local (static) functions and variables */ @@ -544,8 +693,8 @@ static int wl_android_set_wfds_hash( smbuf = (char *)MALLOC(cfg->osh, WLC_IOCTL_MAXLEN); if (smbuf == NULL) { - ANDROID_ERROR(("%s: failed to allocated memory 
%d bytes\n", - __FUNCTION__, WLC_IOCTL_MAXLEN)); + ANDROID_ERROR(("wl_android_set_wfds_hash: failed to allocated memory %d bytes\n", + WLC_IOCTL_MAXLEN)); return -ENOMEM; } @@ -561,7 +710,7 @@ static int wl_android_set_wfds_hash( } if (error) { - ANDROID_ERROR(("%s: failed to %s, error=%d\n", __FUNCTION__, command, error)); + ANDROID_ERROR(("wl_android_set_wfds_hash: failed to %s, error=%d\n", command, error)); } if (smbuf) { @@ -586,7 +735,7 @@ static int wl_android_get_link_speed(struct net_device *net, char *command, int /* Convert Kbps to Android Mbps */ link_speed = link_speed / 1000; bytes_written = snprintf(command, total_len, "LinkSpeed %d", link_speed); - ANDROID_INFO(("%s: command result is %s\n", __FUNCTION__, command)); + ANDROID_INFO(("wl_android_get_link_speed: command result is %s\n", command)); return bytes_written; } @@ -613,15 +762,15 @@ static int wl_android_get_rssi(struct net_device *net, char *command, int total_ /* Ap/GO mode * driver rssi */ - ANDROID_TRACE(("%s: cmd:%s\n", __FUNCTION__, delim)); + ANDROID_TRACE(("wl_android_get_rssi: cmd:%s\n", delim)); /* skip space from delim after finding char */ delim++; if (!(bcm_ether_atoe((delim), &scbval.ea))) { - ANDROID_ERROR(("%s:address err\n", __FUNCTION__)); + ANDROID_ERROR(("wl_android_get_rssi: address err\n")); return -1; } scbval.val = htod32(0); - ANDROID_TRACE(("%s: address:"MACDBG, __FUNCTION__, MAC2STRDBG(scbval.ea.octet))); + ANDROID_TRACE(("wl_android_get_rssi: address:"MACDBG, MAC2STRDBG(scbval.ea.octet))); #ifdef WL_VIRTUAL_APSTA /* RSDB AP may have another virtual interface * In this case, format of private command is as following, @@ -645,7 +794,7 @@ static int wl_android_get_rssi(struct net_device *net, char *command, int total_ } else { /* STA/GC mode */ - memset(&scbval, 0, sizeof(scb_val_t)); + bzero(&scbval, sizeof(scb_val_t)); } error = wldev_get_rssi(target_ndev, &scbval); @@ -659,7 +808,7 @@ static int wl_android_get_rssi(struct net_device *net, char *command, int total_ 
if (error) return -1; if ((ssid.SSID_len == 0) || (ssid.SSID_len > DOT11_MAX_SSID_LEN)) { - ANDROID_ERROR(("%s: wldev_get_ssid failed\n", __FUNCTION__)); + ANDROID_ERROR(("wl_android_get_rssi: wldev_get_ssid failed\n")); } else if (total_len <= ssid.SSID_len) { return -ENOMEM; } else { @@ -673,7 +822,7 @@ static int wl_android_get_rssi(struct net_device *net, char *command, int total_ " rssi %d", scbval.val); command[bytes_written] = '\0'; - ANDROID_TRACE(("%s: command result is %s (%d)\n", __FUNCTION__, command, bytes_written)); + ANDROID_TRACE(("wl_android_get_rssi: command result is %s (%d)\n", command, bytes_written)); return bytes_written; } @@ -692,10 +841,10 @@ static int wl_android_set_suspendopt(struct net_device *dev, char *command) if (ret_now != suspend_flag) { if (!(ret = net_os_set_suspend(dev, ret_now, 1))) { - ANDROID_INFO(("%s: Suspend Flag %d -> %d\n", - __FUNCTION__, ret_now, suspend_flag)); + ANDROID_INFO(("wl_android_set_suspendopt: Suspend Flag %d -> %d\n", + ret_now, suspend_flag)); } else { - ANDROID_ERROR(("%s: failed %d\n", __FUNCTION__, ret)); + ANDROID_ERROR(("wl_android_set_suspendopt: failed %d\n", ret)); } } @@ -714,9 +863,9 @@ static int wl_android_set_suspendmode(struct net_device *dev, char *command) suspend_flag = 1; if (!(ret = net_os_set_suspend(dev, suspend_flag, 0))) - ANDROID_INFO(("%s: Suspend Mode %d\n", __FUNCTION__, suspend_flag)); + ANDROID_INFO(("wl_android_set_suspendmode: Suspend Mode %d\n", suspend_flag)); else - ANDROID_ERROR(("%s: failed %d\n", __FUNCTION__, ret)); + ANDROID_ERROR(("wl_android_set_suspendmode: failed %d\n", ret)); #endif // endif return ret; @@ -733,9 +882,9 @@ int wl_android_get_80211_mode(struct net_device *dev, char *command, int total_l if (error) return -1; - ANDROID_INFO(("%s: mode:%s\n", __FUNCTION__, mode)); + ANDROID_INFO(("wl_android_get_80211_mode: mode:%s\n", mode)); bytes_written = snprintf(command, total_len, "%s %s", CMD_80211_MODE, mode); - ANDROID_INFO(("%s: command:%s EXIT\n", 
__FUNCTION__, command)); + ANDROID_INFO(("wl_android_get_80211_mode: command:%s EXIT\n", command)); return bytes_written; } @@ -761,13 +910,14 @@ int wl_android_get_chanspec(struct net_device *dev, char *command, int total_len return -1; chanspec = wl_chspec_driver_to_host(chsp); - ANDROID_INFO(("%s:return value of chanspec:%x\n", __FUNCTION__, chanspec)); + ANDROID_INFO(("wl_android_get_80211_mode: return value of chanspec:%x\n", chanspec)); channel = chanspec & WL_CHANSPEC_CHAN_MASK; band = chanspec & WL_CHANSPEC_BAND_MASK; bw = chanspec & WL_CHANSPEC_BW_MASK; - ANDROID_INFO(("%s:channel:%d band:%d bandwidth:%d\n", __FUNCTION__, channel, band, bw)); + ANDROID_INFO(("wl_android_get_80211_mode: channel:%d band:%d bandwidth:%d\n", + channel, band, bw)); if (bw == WL_CHANSPEC_BW_80) bw = WL_CH_BANDWIDTH_80MHZ; @@ -801,11 +951,11 @@ int wl_android_get_chanspec(struct net_device *dev, char *command, int total_len bytes_written = snprintf(command, total_len, "%s channel %d band %s bw %d", CMD_CHANSPEC, channel, band == WL_CHANSPEC_BAND_5G ? 
"5G":"2G", bw); - ANDROID_INFO(("%s: command:%s EXIT\n", __FUNCTION__, command)); + ANDROID_INFO(("wl_android_get_chanspec: command:%s EXIT\n", command)); return bytes_written; } -#endif +#endif /* WL_CFG80211 */ /* returns current datarate datarate returned from firmware are in 500kbps */ int wl_android_get_datarate(struct net_device *dev, char *command, int total_len) @@ -818,7 +968,7 @@ int wl_android_get_datarate(struct net_device *dev, char *command, int total_len if (error) return -1; - ANDROID_INFO(("%s:datarate:%d\n", __FUNCTION__, datarate)); + ANDROID_INFO(("wl_android_get_datarate: datarate:%d\n", datarate)); bytes_written = snprintf(command, total_len, "%s %d", CMD_DATARATE, (datarate/2)); return bytes_written; @@ -833,7 +983,7 @@ int wl_android_get_assoclist(struct net_device *dev, char *command, int total_le sizeof(struct ether_addr) + sizeof(uint)] = {0}; struct maclist *assoc_maclist = (struct maclist *)mac_buf; - ANDROID_TRACE(("%s: ENTER\n", __FUNCTION__)); + ANDROID_TRACE(("wl_android_get_assoclist: ENTER\n")); assoc_maclist->count = htod32(MAX_NUM_OF_ASSOCLIST); @@ -854,8 +1004,9 @@ int wl_android_get_assoclist(struct net_device *dev, char *command, int total_le if ((len > 0) && (len < (total_len - bytes_written))) { bytes_written += len; } else { - ANDROID_ERROR(("%s: Insufficient buffer %d, bytes_written %d\n", - __FUNCTION__, total_len, bytes_written)); + ANDROID_ERROR(("wl_android_get_assoclist: Insufficient buffer %d," + " bytes_written %d\n", + total_len, bytes_written)); bytes_written = -1; break; } @@ -874,12 +1025,12 @@ static int wl_android_set_csa(struct net_device *dev, char *command) u32 chnsp = 0; int err = 0; - ANDROID_INFO(("%s: command:%s\n", __FUNCTION__, command)); + ANDROID_INFO(("wl_android_set_csa: command:%s\n", command)); command = (command + strlen(CMD_SET_CSA)); /* Order is mode, count channel */ if (!*++command) { - ANDROID_ERROR(("%s:error missing arguments\n", __FUNCTION__)); + 
ANDROID_ERROR(("wl_android_set_csa:error missing arguments\n")); return -1; } csa_arg.mode = bcm_atoi(command); @@ -890,7 +1041,7 @@ static int wl_android_set_csa(struct net_device *dev, char *command) } if (!*++command) { - ANDROID_ERROR(("%s:error missing count\n", __FUNCTION__)); + ANDROID_ERROR(("wl_android_set_csa: error missing count\n")); return -1; } command++; @@ -900,13 +1051,13 @@ static int wl_android_set_csa(struct net_device *dev, char *command) csa_arg.chspec = 0; command += 2; if (!*command) { - ANDROID_ERROR(("%s:error missing channel\n", __FUNCTION__)); + ANDROID_ERROR(("wl_android_set_csa: error missing channel\n")); return -1; } chnsp = wf_chspec_aton(command); if (chnsp == 0) { - ANDROID_ERROR(("%s:chsp is not correct\n", __FUNCTION__)); + ANDROID_ERROR(("wl_android_set_csa:chsp is not correct\n")); return -1; } chnsp = wl_chspec_host_to_driver(chnsp); @@ -933,12 +1084,36 @@ static int wl_android_set_csa(struct net_device *dev, char *command) error = wldev_iovar_setbuf(dev, "csa", &csa_arg, sizeof(csa_arg), smbuf, sizeof(smbuf), NULL); if (error) { - ANDROID_ERROR(("%s:set csa failed:%d\n", __FUNCTION__, error)); + ANDROID_ERROR(("wl_android_set_csa:set csa failed:%d\n", error)); return -1; } return 0; } -#endif +#endif /* WL_CFG80211 */ + +static int +wl_android_set_bcn_li_dtim(struct net_device *dev, char *command) +{ + int ret = 0; + int dtim; + + dtim = *(command + strlen(CMD_SETDTIM_IN_SUSPEND) + 1) - '0'; + + if (dtim > (MAX_DTIM_ALLOWED_INTERVAL / MAX_DTIM_SKIP_BEACON_INTERVAL)) { + ANDROID_ERROR(("%s: failed, invalid dtim %d\n", + __FUNCTION__, dtim)); + return BCME_ERROR; + } + + if (!(ret = net_os_set_suspend_bcn_li_dtim(dev, dtim))) { + ANDROID_TRACE(("%s: SET bcn_li_dtim in suspend %d\n", + __FUNCTION__, dtim)); + } else { + ANDROID_ERROR(("%s: failed %d\n", __FUNCTION__, ret)); + } + + return ret; +} static int wl_android_set_max_dtim(struct net_device *dev, char *command) @@ -949,14 +1124,35 @@ wl_android_set_max_dtim(struct 
net_device *dev, char *command) dtim_flag = *(command + strlen(CMD_MAXDTIM_IN_SUSPEND) + 1) - '0'; if (!(ret = net_os_set_max_dtim_enable(dev, dtim_flag))) { - ANDROID_TRACE(("%s: use Max bcn_li_dtim in suspend %s\n", - __FUNCTION__, (dtim_flag ? "Enable" : "Disable"))); + ANDROID_TRACE(("wl_android_set_max_dtim: use Max bcn_li_dtim in suspend %s\n", + (dtim_flag ? "Enable" : "Disable"))); } else { - ANDROID_ERROR(("%s: failed %d\n", __FUNCTION__, ret)); + ANDROID_ERROR(("wl_android_set_max_dtim: failed %d\n", ret)); + } + + return ret; +} + +#ifdef DISABLE_DTIM_IN_SUSPEND +static int +wl_android_set_disable_dtim_in_suspend(struct net_device *dev, char *command) +{ + int ret = 0; + int dtim_flag; + + dtim_flag = *(command + strlen(CMD_DISDTIM_IN_SUSPEND) + 1) - '0'; + + if (!(ret = net_os_set_disable_dtim_in_suspend(dev, dtim_flag))) { + ANDROID_TRACE(("wl_android_set_disable_dtim_in_suspend: " + "use Disable bcn_li_dtim in suspend %s\n", + (dtim_flag ? "Enable" : "Disable"))); + } else { + ANDROID_ERROR(("wl_android_set_disable_dtim_in_suspend: failed %d\n", ret)); } return ret; } +#endif /* DISABLE_DTIM_IN_SUSPEND */ static int wl_android_get_band(struct net_device *dev, char *command, int total_len) { @@ -971,6 +1167,37 @@ static int wl_android_get_band(struct net_device *dev, char *command, int total_ return bytes_written; } +#ifdef WL_CFG80211 +static int +wl_android_set_band(struct net_device *dev, char *command) +{ + int error = 0; + uint band = *(command + strlen(CMD_SETBAND) + 1) - '0'; +#ifdef WL_HOST_BAND_MGMT + int ret = 0; + if ((ret = wl_cfg80211_set_band(dev, band)) < 0) { + if (ret == BCME_UNSUPPORTED) { + /* If roam_var is unsupported, fallback to the original method */ + ANDROID_ERROR(("WL_HOST_BAND_MGMT defined, " + "but roam_band iovar unsupported in the firmware\n")); + } else { + error = -1; + } + } + if (((ret == 0) && (band == WLC_BAND_AUTO)) || (ret == BCME_UNSUPPORTED)) { + /* Apply if roam_band iovar is not supported or band setting is 
AUTO */ + error = wldev_set_band(dev, band); + } +#else + error = wl_cfg80211_set_if_band(dev, band); +#endif /* WL_HOST_BAND_MGMT */ +#ifdef ROAM_CHANNEL_CACHE + wl_update_roamscan_cache_by_band(dev, band); +#endif /* ROAM_CHANNEL_CACHE */ + return error; +} +#endif /* WL_CFG80211 */ + #ifdef PNO_SUPPORT #define PNO_PARAM_SIZE 50 #define VALUE_SIZE 50 @@ -985,29 +1212,29 @@ wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len) char param[PNO_PARAM_SIZE+1], value[VALUE_SIZE+1]; struct dhd_pno_batch_params batch_params; - ANDROID_INFO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len)); + ANDROID_INFO(("wls_parse_batching_cmd: command=%s, len=%d\n", command, total_len)); len_remain = total_len; if (len_remain > (strlen(CMD_WLS_BATCHING) + 1)) { pos = command + strlen(CMD_WLS_BATCHING) + 1; len_remain -= strlen(CMD_WLS_BATCHING) + 1; } else { - ANDROID_ERROR(("%s: No arguments, total_len %d\n", __FUNCTION__, total_len)); + ANDROID_ERROR(("wls_parse_batching_cmd: No arguments, total_len %d\n", total_len)); err = BCME_ERROR; goto exit; } - memset(&batch_params, 0, sizeof(struct dhd_pno_batch_params)); + bzero(&batch_params, sizeof(struct dhd_pno_batch_params)); if (!strncmp(pos, PNO_BATCHING_SET, strlen(PNO_BATCHING_SET))) { if (len_remain > (strlen(PNO_BATCHING_SET) + 1)) { pos += strlen(PNO_BATCHING_SET) + 1; } else { - ANDROID_ERROR(("%s: %s missing arguments, total_len %d\n", - __FUNCTION__, PNO_BATCHING_SET, total_len)); + ANDROID_ERROR(("wls_parse_batching_cmd: %s missing arguments, total_len %d\n", + PNO_BATCHING_SET, total_len)); err = BCME_ERROR; goto exit; } while ((token = strsep(&pos, PNO_PARAMS_DELIMETER)) != NULL) { - memset(param, 0, sizeof(param)); - memset(value, 0, sizeof(value)); + bzero(param, sizeof(param)); + bzero(value, sizeof(value)); if (token == NULL || !*token) break; if (*token == '\0') @@ -1032,8 +1259,9 @@ wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len) tokens = sscanf(value, 
"<%s>", value); if (tokens != 1) { err = BCME_ERROR; - ANDROID_ERROR(("%s : invalid format for channel" - " <> params\n", __FUNCTION__)); + ANDROID_ERROR(("wls_parse_batching_cmd: invalid format" + " for channel" + " <> params\n")); goto exit; } while ((token2 = strsep(&pos2, @@ -1066,7 +1294,7 @@ wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len) batch_params.rtt = simple_strtol(value, NULL, 0); ANDROID_INFO(("rtt : %d\n", batch_params.rtt)); } else { - ANDROID_ERROR(("%s : unknown param: %s\n", __FUNCTION__, param)); + ANDROID_ERROR(("wls_parse_batching_cmd : unknown param: %s\n", param)); err = BCME_ERROR; goto exit; } @@ -1075,7 +1303,7 @@ wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len) if (err < 0) { ANDROID_ERROR(("failed to configure batch scan\n")); } else { - memset(command, 0, total_len); + bzero(command, total_len); err = snprintf(command, total_len, "%d", err); } } else if (!strncmp(pos, PNO_BATCHING_GET, strlen(PNO_BATCHING_GET))) { @@ -1090,11 +1318,11 @@ wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len) if (err < 0) { ANDROID_ERROR(("failed to stop batching scan\n")); } else { - memset(command, 0, total_len); + bzero(command, total_len); err = snprintf(command, total_len, "OK"); } } else { - ANDROID_ERROR(("%s : unknown command\n", __FUNCTION__)); + ANDROID_ERROR(("wls_parse_batching_cmd : unknown command\n")); err = BCME_ERROR; goto exit; } @@ -1135,10 +1363,10 @@ static int wl_android_set_pno_setup(struct net_device *dev, char *command, int t 0x00 }; #endif /* PNO_SET_DEBUG */ - ANDROID_INFO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len)); + ANDROID_INFO(("wl_android_set_pno_setup: command=%s, len=%d\n", command, total_len)); if (total_len < (strlen(CMD_PNOSETUP_SET) + sizeof(cmd_tlv_t))) { - ANDROID_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len)); + ANDROID_ERROR(("wl_android_set_pno_setup: argument=%d less min size\n", total_len)); 
goto exit_proc; } #ifdef PNO_SET_DEBUG @@ -1149,7 +1377,7 @@ static int wl_android_set_pno_setup(struct net_device *dev, char *command, int t tlv_size_left = total_len - strlen(CMD_PNOSETUP_SET); cmd_tlv_temp = (cmd_tlv_t *)str_ptr; - memset(ssids_local, 0, sizeof(ssids_local)); + bzero(ssids_local, sizeof(ssids_local)); if ((cmd_tlv_temp->prefix == PNO_TLV_PREFIX) && (cmd_tlv_temp->version == PNO_TLV_VERSION) && @@ -1164,36 +1392,38 @@ static int wl_android_set_pno_setup(struct net_device *dev, char *command, int t goto exit_proc; } else { if ((str_ptr[0] != PNO_TLV_TYPE_TIME) || (tlv_size_left <= 1)) { - ANDROID_ERROR(("%s scan duration corrupted field size %d\n", - __FUNCTION__, tlv_size_left)); + ANDROID_ERROR(("wl_android_set_pno_setup: scan duration corrupted" + " field size %d\n", + tlv_size_left)); goto exit_proc; } str_ptr++; pno_time = simple_strtoul(str_ptr, &str_ptr, 16); - ANDROID_INFO(("%s: pno_time=%d\n", __FUNCTION__, pno_time)); + ANDROID_INFO(("wl_android_set_pno_setup: pno_time=%d\n", pno_time)); if (str_ptr[0] != 0) { if ((str_ptr[0] != PNO_TLV_FREQ_REPEAT)) { - ANDROID_ERROR(("%s pno repeat : corrupted field\n", - __FUNCTION__)); + ANDROID_ERROR(("wl_android_set_pno_setup: pno repeat:" + " corrupted field\n")); goto exit_proc; } str_ptr++; pno_repeat = simple_strtoul(str_ptr, &str_ptr, 16); - ANDROID_INFO(("%s :got pno_repeat=%d\n", __FUNCTION__, pno_repeat)); + ANDROID_INFO(("wl_android_set_pno_setup: got pno_repeat=%d\n", + pno_repeat)); if (str_ptr[0] != PNO_TLV_FREQ_EXPO_MAX) { - ANDROID_ERROR(("%s FREQ_EXPO_MAX corrupted field size\n", - __FUNCTION__)); + ANDROID_ERROR(("wl_android_set_pno_setup: FREQ_EXPO_MAX" + " corrupted field size\n")); goto exit_proc; } str_ptr++; pno_freq_expo_max = simple_strtoul(str_ptr, &str_ptr, 16); - ANDROID_INFO(("%s: pno_freq_expo_max=%d\n", - __FUNCTION__, pno_freq_expo_max)); + ANDROID_INFO(("wl_android_set_pno_setup: pno_freq_expo_max=%d\n", + pno_freq_expo_max)); } } } else { - ANDROID_ERROR(("%s get 
wrong TLV command\n", __FUNCTION__)); + ANDROID_ERROR(("wl_android_set_pno_setup: get wrong TLV command\n")); goto exit_proc; } @@ -1212,125 +1442,19 @@ static int wl_android_get_p2p_dev_addr(struct net_device *ndev, char *command, i #define MAC_ADDR_STR_LEN 18 if (total_len < MAC_ADDR_STR_LEN) { - ANDROID_ERROR(("%s: buflen %d is less than p2p dev addr\n", - __FUNCTION__, total_len)); + ANDROID_ERROR(("wl_android_get_p2p_dev_addr: buflen %d is less than p2p dev addr\n", + total_len)); return -1; } ret = wl_cfg80211_get_p2p_dev_addr(ndev, &p2pdev_addr); if (ret) { - ANDROID_ERROR(("%s Failed to get p2p dev addr\n", __FUNCTION__)); + ANDROID_ERROR(("wl_android_get_p2p_dev_addr: Failed to get p2p dev addr\n")); return -1; } return (snprintf(command, total_len, MACF, ETHERP_TO_MACF(&p2pdev_addr))); } -#ifdef BCMCCX -static int wl_android_get_cckm_rn(struct net_device *dev, char *command) -{ - int error, rn; - - WL_TRACE(("%s:wl_android_get_cckm_rn\n", dev->name)); - - error = wldev_iovar_getint(dev, "cckm_rn", &rn); - if (unlikely(error)) { - ANDROID_ERROR(("wl_android_get_cckm_rn error (%d)\n", error)); - return -1; - } - memcpy(command, &rn, sizeof(int)); - - return sizeof(int); -} - -static int -wl_android_set_cckm_krk(struct net_device *dev, char *command, int total_len) -{ - int error, key_len, skip_len; - unsigned char key[CCKM_KRK_LEN + CCKM_BTK_LEN]; - char iovar_buf[WLC_IOCTL_SMLEN]; - - WL_TRACE(("%s: wl_iw_set_cckm_krk\n", dev->name)); - - skip_len = strlen("set cckm_krk")+1; - - if (total_len < (skip_len + CCKM_KRK_LEN)) { - return BCME_BADLEN; - } - - if (total_len >= skip_len + CCKM_KRK_LEN + CCKM_BTK_LEN) { - key_len = CCKM_KRK_LEN + CCKM_BTK_LEN; - } else { - key_len = CCKM_KRK_LEN; - } - - memset(iovar_buf, 0, sizeof(iovar_buf)); - memcpy(key, command+skip_len, key_len); - - ANDROID_INFO(("CCKM KRK-BTK (%d/%d) :\n", key_len, total_len)); - if (wl_dbg_level & WL_DBG_DBG) { - prhex(NULL, key, key_len); - } - - error = wldev_iovar_setbuf(dev, "cckm_krk", 
key, key_len, - iovar_buf, WLC_IOCTL_SMLEN, NULL); - if (unlikely(error)) { - ANDROID_ERROR((" cckm_krk set error (%d)\n", error)); - return -1; - } - return 0; -} - -static int wl_android_get_assoc_res_ies(struct net_device *dev, char *command, int total_len) -{ - int error; - u8 buf[WL_ASSOC_INFO_MAX]; - wl_assoc_info_t assoc_info; - u32 resp_ies_len = 0; - int bytes_written = 0; - - WL_TRACE(("%s: wl_iw_get_assoc_res_ies\n", dev->name)); - - error = wldev_iovar_getbuf(dev, "assoc_info", NULL, 0, buf, WL_ASSOC_INFO_MAX, NULL); - if (unlikely(error)) { - ANDROID_ERROR(("could not get assoc info (%d)\n", error)); - return -1; - } - - memcpy(&assoc_info, buf, sizeof(wl_assoc_info_t)); - assoc_info.req_len = htod32(assoc_info.req_len); - assoc_info.resp_len = htod32(assoc_info.resp_len); - assoc_info.flags = htod32(assoc_info.flags); - - if (assoc_info.resp_len) { - resp_ies_len = assoc_info.resp_len - sizeof(struct dot11_assoc_resp); - } - - if (total_len < (sizeof(u32) + resp_ies_len)) { - ANDROID_ERROR(("%s: Insufficient memory, %d bytes\n", - __FUNCTION__, total_len)); - return -1; - } - /* first 4 bytes are ie len */ - memcpy(command, &resp_ies_len, sizeof(u32)); - bytes_written = sizeof(u32); - - /* get the association resp IE's if there are any */ - if (resp_ies_len) { - error = wldev_iovar_getbuf(dev, "assoc_resp_ies", NULL, 0, - buf, WL_ASSOC_INFO_MAX, NULL); - if (unlikely(error)) { - ANDROID_ERROR(("could not get assoc resp_ies (%d)\n", error)); - return -1; - } - - memcpy(command+sizeof(u32), buf, resp_ies_len); - bytes_written += resp_ies_len; - } - return bytes_written; -} - -#endif /* BCMCCX */ - int wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist) { @@ -1342,21 +1466,22 @@ wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist * /* set filtering mode */ if ((ret = wldev_ioctl_set(dev, WLC_SET_MACMODE, &macmode, sizeof(macmode)) != 0)) { - ANDROID_ERROR(("%s : WLC_SET_MACMODE error=%d\n", 
__FUNCTION__, ret)); + ANDROID_ERROR(("wl_android_set_ap_mac_list : WLC_SET_MACMODE error=%d\n", ret)); return ret; } if (macmode != MACLIST_MODE_DISABLED) { /* set the MAC filter list */ if ((ret = wldev_ioctl_set(dev, WLC_SET_MACLIST, maclist, sizeof(int) + sizeof(struct ether_addr) * maclist->count)) != 0) { - ANDROID_ERROR(("%s : WLC_SET_MACLIST error=%d\n", __FUNCTION__, ret)); + ANDROID_ERROR(("wl_android_set_ap_mac_list : WLC_SET_MACLIST error=%d\n", ret)); return ret; } /* get the current list of associated STAs */ assoc_maclist->count = MAX_NUM_OF_ASSOCLIST; if ((ret = wldev_ioctl_get(dev, WLC_GET_ASSOCLIST, assoc_maclist, sizeof(mac_buf))) != 0) { - ANDROID_ERROR(("%s : WLC_GET_ASSOCLIST error=%d\n", __FUNCTION__, ret)); + ANDROID_ERROR(("wl_android_set_ap_mac_list: WLC_GET_ASSOCLIST error=%d\n", + ret)); return ret; } /* do we have any STA associated? */ @@ -1366,8 +1491,9 @@ wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist * match = 0; /* compare with each entry */ for (j = 0; j < maclist->count; j++) { - ANDROID_INFO(("%s : associated="MACDBG " list="MACDBG "\n", - __FUNCTION__, MAC2STRDBG(assoc_maclist->ea[i].octet), + ANDROID_INFO(("wl_android_set_ap_mac_list: associated="MACDBG + "list = "MACDBG "\n", + MAC2STRDBG(assoc_maclist->ea[i].octet), MAC2STRDBG(maclist->ea[j].octet))); if (memcmp(assoc_maclist->ea[i].octet, maclist->ea[j].octet, ETHER_ADDR_LEN) == 0) { @@ -1387,8 +1513,10 @@ wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist * if ((ret = wldev_ioctl_set(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scbval, sizeof(scb_val_t))) != 0) - ANDROID_ERROR(("%s WLC_SCB_DEAUTHENTICATE error=%d\n", - __FUNCTION__, ret)); + ANDROID_ERROR(("wl_android_set_ap_mac_list:" + " WLC_SCB_DEAUTHENTICATE" + " error=%d\n", + ret)); } } } @@ -1423,7 +1551,7 @@ wl_android_set_mac_address_filter(struct net_device *dev, char* str) macmode = bcm_atoi(token); if (macmode < MACLIST_MODE_DISABLED || macmode > 
MACLIST_MODE_ALLOW) { - ANDROID_ERROR(("%s : invalid macmode %d\n", __FUNCTION__, macmode)); + ANDROID_ERROR(("wl_android_set_mac_address_filter: invalid macmode %d\n", macmode)); return -1; } @@ -1433,15 +1561,16 @@ wl_android_set_mac_address_filter(struct net_device *dev, char* str) } macnum = bcm_atoi(token); if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) { - ANDROID_ERROR(("%s : invalid number of MAC address entries %d\n", - __FUNCTION__, macnum)); + ANDROID_ERROR(("wl_android_set_mac_address_filter: invalid number of MAC" + " address entries %d\n", + macnum)); return -1; } /* allocate memory for the MAC list */ list = (struct maclist*) MALLOCZ(dhd->osh, sizeof(int) + sizeof(struct ether_addr) * macnum); if (!list) { - ANDROID_ERROR(("%s : failed to allocate memory\n", __FUNCTION__)); + ANDROID_ERROR(("wl_android_set_mac_address_filter : failed to allocate memory\n")); return -1; } /* prepare the MAC list */ @@ -1450,25 +1579,28 @@ wl_android_set_mac_address_filter(struct net_device *dev, char* str) for (i = 0; i < list->count; i++) { token = strsep((char**)&str, " "); if (token == NULL) { - ANDROID_ERROR(("%s : No mac address present\n", __FUNCTION__)); + ANDROID_ERROR(("wl_android_set_mac_address_filter : No mac address present\n")); ret = -EINVAL; goto exit; } - strncpy(eabuf, token, ETHER_ADDR_STR_LEN - 1); + strlcpy(eabuf, token, sizeof(eabuf)); if (!(ret = bcm_ether_atoe(eabuf, &list->ea[i]))) { - ANDROID_ERROR(("%s : mac parsing err index=%d, addr=%s\n", - __FUNCTION__, i, eabuf)); + ANDROID_ERROR(("wl_android_set_mac_address_filter : mac parsing err index=%d," + " addr=%s\n", + i, eabuf)); list->count = i; break; } - ANDROID_INFO(("%s : %d/%d MACADDR=%s", __FUNCTION__, i, list->count, eabuf)); + ANDROID_INFO(("wl_android_set_mac_address_filter : %d/%d MACADDR=%s", + i, list->count, eabuf)); } if (i == 0) goto exit; /* set the list */ if ((ret = wl_android_set_ap_mac_list(dev, macmode, list)) != 0) - ANDROID_ERROR(("%s : Setting MAC list failed 
error=%d\n", __FUNCTION__, ret)); + ANDROID_ERROR(("wl_android_set_mac_address_filter: Setting MAC list failed error=%d\n", + ret)); exit: MFREE(dhd->osh, list, sizeof(int) + sizeof(struct ether_addr) * macnum); @@ -1486,13 +1618,12 @@ int wl_android_wifi_on(struct net_device *dev) int retry = POWERUP_MAX_RETRY; if (!dev) { - ANDROID_ERROR(("%s: dev is null\n", __FUNCTION__)); + ANDROID_ERROR(("wl_android_wifi_on: dev is null\n")); return -EINVAL; } - printf("%s in 1\n", __FUNCTION__); dhd_net_if_lock(dev); - printf("%s in 2: g_wifi_on=%d\n", __FUNCTION__, g_wifi_on); + WL_MSG(dev->name, "in g_wifi_on=%d\n", g_wifi_on); if (!g_wifi_on) { do { dhd_net_wifi_platform_set_power(dev, TRUE, WIFI_TURNON_DELAY); @@ -1514,6 +1645,9 @@ int wl_android_wifi_on(struct net_device *dev) } while (retry-- > 0); if (ret != 0) { ANDROID_ERROR(("\nfailed to power up wifi chip, max retry reached **\n\n")); +#ifdef BCM_DETECT_TURN_ON_FAILURE + BUG_ON(1); +#endif /* BCM_DETECT_TURN_ON_FAILURE */ goto exit; } #if defined(BCMSDIO) || defined(BCMDBUS) @@ -1536,7 +1670,7 @@ int wl_android_wifi_on(struct net_device *dev) } exit: - printf("%s: Success\n", __FUNCTION__); + WL_MSG(dev->name, "Success\n"); dhd_net_if_unlock(dev); return ret; @@ -1547,7 +1681,7 @@ err: dhd_net_bus_suspend(dev); #endif /* BCMSDIO */ dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY); - printf("%s: Failed\n", __FUNCTION__); + WL_MSG(dev->name, "Failed\n"); dhd_net_if_unlock(dev); return ret; #endif /* BCMSDIO || BCMDBUS */ @@ -1562,27 +1696,26 @@ int wl_android_wifi_off(struct net_device *dev, bool on_failure) return -EINVAL; } - printf("%s in 1\n", __FUNCTION__); #if defined(BCMPCIE) && defined(DHD_DEBUG_UART) ret = dhd_debug_uart_is_running(dev); if (ret) { - ANDROID_ERROR(("%s - Debug UART App is running\n", __FUNCTION__)); + ANDROID_ERROR(("wl_android_wifi_off: - Debug UART App is running\n")); return -EBUSY; } #endif /* BCMPCIE && DHD_DEBUG_UART */ dhd_net_if_lock(dev); - printf("%s in 2: 
g_wifi_on=%d, on_failure=%d\n", __FUNCTION__, g_wifi_on, on_failure); + WL_MSG(dev->name, "in g_wifi_on=%d, on_failure=%d\n", g_wifi_on, on_failure); if (g_wifi_on || on_failure) { #if defined(BCMSDIO) || defined(BCMPCIE) || defined(BCMDBUS) ret = dhd_net_bus_devreset(dev, TRUE); -#if defined(BCMSDIO) +#ifdef BCMSDIO dhd_net_bus_suspend(dev); #endif /* BCMSDIO */ #endif /* BCMSDIO || BCMPCIE || BCMDBUS */ dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY); g_wifi_on = FALSE; } - printf("%s out\n", __FUNCTION__); + WL_MSG(dev->name, "out\n"); dhd_net_if_unlock(dev); return ret; @@ -1606,7 +1739,7 @@ wl_chanim_stats(struct net_device *dev, u8 *chan_idle) u8 result[WLC_IOCTL_SMLEN]; chanim_stats_t *stats; - memset(¶m, 0, sizeof(param)); + bzero(¶m, sizeof(param)); param.buflen = htod32(sizeof(wl_chanim_stats_t)); param.count = htod32(WL_CHANIM_COUNT_ONE); @@ -1658,6 +1791,7 @@ wl_android_get_connection_stats(struct net_device *dev, char *command, int total #ifndef DISABLE_IF_COUNTERS wl_if_stats_t* if_stats = NULL; struct bcm_cfg80211 *cfg = wl_get_cfg(dev); + dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev); #endif /* DISABLE_IF_COUNTERS */ int link_speed = 0; @@ -1666,17 +1800,18 @@ wl_android_get_connection_stats(struct net_device *dev, char *command, int total int bytes_written = -1; int ret = 0; - ANDROID_INFO(("%s: enter Get Connection Stats\n", __FUNCTION__)); + ANDROID_INFO(("wl_android_get_connection_stats: enter Get Connection Stats\n")); if (total_len <= 0) { - ANDROID_ERROR(("%s: invalid buffer size %d\n", __FUNCTION__, total_len)); + ANDROID_ERROR(("wl_android_get_connection_stats: invalid buffer size %d\n", total_len)); goto error; } bufsize = total_len; if (bufsize < sizeof(struct connection_stats)) { - ANDROID_ERROR(("%s: not enough buffer size, provided=%u, requires=%zu\n", - __FUNCTION__, bufsize, + ANDROID_ERROR(("wl_android_get_connection_stats: not enough buffer size, provided=%u," + " requires=%zu\n", + bufsize, sizeof(struct 
connection_stats))); goto error; } @@ -1686,21 +1821,24 @@ wl_android_get_connection_stats(struct net_device *dev, char *command, int total #ifndef DISABLE_IF_COUNTERS if_stats = (wl_if_stats_t *)MALLOCZ(cfg->osh, sizeof(*if_stats)); if (if_stats == NULL) { - ANDROID_ERROR(("%s(%d): MALLOCZ failed\n", __FUNCTION__, __LINE__)); + ANDROID_ERROR(("wl_android_get_connection_stats: MALLOCZ failed\n")); goto error; } - memset(if_stats, 0, sizeof(*if_stats)); + bzero(if_stats, sizeof(*if_stats)); if (FW_SUPPORTED(dhdp, ifst)) { ret = wl_cfg80211_ifstats_counters(dev, if_stats); - } else { + } else + { ret = wldev_iovar_getbuf(dev, "if_counters", NULL, 0, (char *)if_stats, sizeof(*if_stats), NULL); } + ret = wldev_iovar_getbuf(dev, "if_counters", NULL, 0, + (char *)if_stats, sizeof(*if_stats), NULL); if (ret) { - ANDROID_ERROR(("%s: if_counters not supported ret=%d\n", - __FUNCTION__, ret)); + ANDROID_ERROR(("wl_android_get_connection_stats: if_counters not supported ret=%d\n", + ret)); /* In case if_stats IOVAR is not supported, get information from counters. */ #endif /* DISABLE_IF_COUNTERS */ @@ -1712,13 +1850,14 @@ wl_android_get_connection_stats(struct net_device *dev, char *command, int total } ret = wl_cntbuf_to_xtlv_format(NULL, iovar_buf, WL_CNTBUF_MAX_SIZE, 0); if (ret != BCME_OK) { - ANDROID_ERROR(("%s wl_cntbuf_to_xtlv_format ERR %d\n", - __FUNCTION__, ret)); + ANDROID_ERROR(("wl_android_get_connection_stats:" + " wl_cntbuf_to_xtlv_format ERR %d\n", + ret)); goto error; } if (!(wlc_cnt = GET_WLCCNT_FROM_CNTBUF(iovar_buf))) { - ANDROID_ERROR(("%s wlc_cnt NULL!\n", __FUNCTION__)); + ANDROID_ERROR(("wl_android_get_connection_stats: wlc_cnt NULL!\n")); goto error; } @@ -1737,8 +1876,10 @@ wl_android_get_connection_stats(struct net_device *dev, char *command, int total } else { /* Populate from if_stats. 
*/ if (dtoh16(if_stats->version) > WL_IF_STATS_T_VERSION) { - ANDROID_ERROR(("%s: incorrect version of wl_if_stats_t, expected=%u got=%u\n", - __FUNCTION__, WL_IF_STATS_T_VERSION, if_stats->version)); + ANDROID_ERROR(("wl_android_get_connection_stats: incorrect version of" + " wl_if_stats_t," + " expected=%u got=%u\n", + WL_IF_STATS_T_VERSION, if_stats->version)); goto error; } @@ -1765,8 +1906,9 @@ wl_android_get_connection_stats(struct net_device *dev, char *command, int total /* link_speed is in kbps */ ret = wldev_get_link_speed(dev, &link_speed); if (ret || link_speed < 0) { - ANDROID_ERROR(("%s: wldev_get_link_speed() failed, ret=%d, speed=%d\n", - __FUNCTION__, ret, link_speed)); + ANDROID_ERROR(("wl_android_get_connection_stats: wldev_get_link_speed()" + " failed, ret=%d, speed=%d\n", + ret, link_speed)); goto error; } @@ -1805,7 +1947,7 @@ wl_android_process_natoe_cmd(struct net_device *dev, char *command, int total_le /* If natoe subcmd name is not provided, return error */ if (*pcmd == '\0') { - ANDROID_ERROR(("natoe subcmd not provided %s\n", __FUNCTION__)); + ANDROID_ERROR(("natoe subcmd not provided wl_android_process_natoe_cmd\n")); ret = -EINVAL; return ret; } @@ -1941,7 +2083,7 @@ wl_natoe_get_ioctl(struct net_device *dev, wl_natoe_ioc_t *natoe_ioc, } else { - ANDROID_ERROR(("%s: get command failed code %d\n", __FUNCTION__, res)); + ANDROID_ERROR(("wl_natoe_get_ioctl: get command failed code %d\n", res)); res = BCME_ERROR; } @@ -1986,7 +2128,7 @@ wl_android_natoe_subcmd_enable(struct net_device *dev, const wl_natoe_sub_cmd_t ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MEDLEN, cmd_info); if (ret != BCME_OK) { - ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__)); + ANDROID_ERROR(("Fail to get iovar wl_android_natoe_subcmd_enable\n")); ret = -EINVAL; } } else { /* set */ @@ -2064,7 +2206,7 @@ wl_android_natoe_subcmd_config_ips(struct net_device *dev, ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf, 
WLC_IOCTL_MEDLEN, cmd_info); if (ret != BCME_OK) { - ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__)); + ANDROID_ERROR(("Fail to get iovar wl_android_natoe_subcmd_config_ips\n")); ret = -EINVAL; } } else { /* set */ @@ -2072,7 +2214,7 @@ wl_android_natoe_subcmd_config_ips(struct net_device *dev, /* save buflen at start */ uint16 buflen_at_start = buflen; - memset(&config_ips, 0, sizeof(config_ips)); + bzero(&config_ips, sizeof(config_ips)); str = bcmstrtok(&pcmd, " ", NULL); if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.sta_ip)) { @@ -2184,7 +2326,7 @@ wl_android_natoe_subcmd_config_ports(struct net_device *dev, ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MEDLEN, cmd_info); if (ret != BCME_OK) { - ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__)); + ANDROID_ERROR(("Fail to get iovar wl_android_natoe_subcmd_config_ports\n")); ret = -EINVAL; } } else { /* set */ @@ -2192,7 +2334,7 @@ wl_android_natoe_subcmd_config_ports(struct net_device *dev, /* save buflen at start */ uint16 buflen_at_start = buflen; - memset(&ports_config, 0, sizeof(ports_config)); + bzero(&ports_config, sizeof(ports_config)); str = bcmstrtok(&pcmd, " ", NULL); if (!str) { @@ -2283,7 +2425,7 @@ wl_android_natoe_subcmd_dbg_stats(struct net_device *dev, const wl_natoe_sub_cmd ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MAXLEN, cmd_info); if (ret != BCME_OK) { - ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__)); + ANDROID_ERROR(("Fail to get iovar wl_android_natoe_subcmd_dbg_stats\n")); ret = -EINVAL; } } else { /* set */ @@ -2360,7 +2502,7 @@ wl_android_natoe_subcmd_tbl_cnt(struct net_device *dev, const wl_natoe_sub_cmd_t ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MEDLEN, cmd_info); if (ret != BCME_OK) { - ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__)); + ANDROID_ERROR(("Fail to get iovar wl_android_natoe_subcmd_tbl_cnt\n")); ret = -EINVAL; } } else { /* set */ @@ -2590,6 +2732,9 
@@ wl_android_mbo_subcmd_cell_data_cap(struct net_device *dev, const wl_drv_sub_cmd ret = -EINVAL; goto exit; } + /* Skip for CUSTOMER_HW4 - WNM notification + * for cellular data capability is handled by host + */ /* send a WNM notification request to associated AP */ if (wl_get_drv_status(cfg, CONNECTED, dev)) { ANDROID_INFO(("Sending WNM Notif\n")); @@ -2830,6 +2975,13 @@ exit: } #endif /* WL_MBO */ +#if defined(CONFIG_WLAN_BEYONDX) || defined(CONFIG_SEC_5GMODEL) +extern int wl_cfg80211_send_msg_to_ril(void); +extern void wl_cfg80211_register_dev_ril_bridge_event_notifier(void); +extern void wl_cfg80211_unregister_dev_ril_bridge_event_notifier(void); +extern int g_mhs_chan_for_cpcoex; +#endif /* CONFIG_WLAN_BEYONDX || defined(CONFIG_SEC_5GMODEL) */ + #if defined(WL_SUPPORT_AUTO_CHANNEL) /* SoftAP feature */ #define APCS_BAND_2G_LEGACY1 20 @@ -2841,6 +2993,74 @@ exit: #define APCS_MAX_RETRY 10 #define APCS_DEFAULT_2G_CH 1 #define APCS_DEFAULT_5G_CH 149 + +#ifdef WL_ESCAN +static int +wl_android_escan_autochannel(struct net_device *dev, uint32 band) +{ + struct dhd_pub *dhd = dhd_get_pub(dev); + wlc_ssid_t ssid; + struct wl_escan_info *escan = NULL; + int ret = 0, retry = 0, retry_max, retry_interval = 250, channel = 0, up = 1; +#ifdef WL_CFG80211 + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); +#endif + + if (dhd) { + escan = dhd->escan; + retry_max = WL_ESCAN_TIMER_INTERVAL_MS/retry_interval; + memset(&ssid, 0, sizeof(ssid)); + ret = wldev_ioctl_get(dev, WLC_GET_UP, &up, sizeof(s32)); + if (ret < 0 || up == 0) { + ret = wldev_ioctl_set(dev, WLC_UP, &up, sizeof(s32)); + } + retry = retry_max; + while (retry--) { +#ifdef WL_CFG80211 + if (wl_get_drv_status_all(cfg, SCANNING) || + escan->escan_state == ESCAN_STATE_SCANING) +#else + if (escan->escan_state == ESCAN_STATE_SCANING) +#endif + { + ANDROID_INFO(("Scanning %d tried, ret = %d\n", + (retry_max - retry), ret)); + } else { + escan->autochannel = 1; + ret = wl_escan_set_scan(dev, dhd, &ssid, TRUE); + if (!ret) + 
break; + } + OSL_SLEEP(retry_interval); + } + if ((retry == 0) || (ret < 0)) + goto done; + retry = retry_max; + while (retry--) { + if (escan->escan_state == ESCAN_STATE_IDLE) { + if (band == WLC_BAND_5G) + channel = escan->best_5g_ch; + else + channel = escan->best_2g_ch; + WL_MSG(dev->name, "selected channel = %d\n", channel); + goto done; + } + ANDROID_INFO(("escan_state=%d, %d tried, ret = %d\n", + escan->escan_state, (retry_max - retry), ret)); + OSL_SLEEP(retry_interval); + } + if ((retry == 0) || (ret < 0)) + goto done; + } + +done: + if (escan) + escan->autochannel = 0; + + return channel; +} +#endif /* WL_ESCAN */ + static int wl_android_set_auto_channel(struct net_device *dev, const char* cmd_str, char* command, int total_len) @@ -2851,19 +3071,17 @@ wl_android_set_auto_channel(struct net_device *dev, const char* cmd_str, int ret = 0; int spect = 0; u8 *reqbuf = NULL; - uint32 band = WLC_BAND_2G; + uint32 band = WLC_BAND_2G, sta_band = WLC_BAND_2G; uint32 buf_size; struct bcm_cfg80211 *cfg = wl_get_cfg(dev); - char *pos = command; - int band_new, band_cur; if (cmd_str) { ANDROID_INFO(("Command: %s len:%d \n", cmd_str, (int)strlen(cmd_str))); - if (strncmp(cmd_str, APCS_BAND_AUTO, strlen(APCS_BAND_AUTO)) == 0) { + if (strnicmp(cmd_str, APCS_BAND_AUTO, strlen(APCS_BAND_AUTO)) == 0) { band = WLC_BAND_AUTO; - } else if (strncmp(cmd_str, APCS_BAND_5G, strlen(APCS_BAND_5G)) == 0) { + } else if (strnicmp(cmd_str, APCS_BAND_5G, strlen(APCS_BAND_5G)) == 0) { band = WLC_BAND_5G; - } else if (strncmp(cmd_str, APCS_BAND_2G, strlen(APCS_BAND_2G)) == 0) { + } else if (strnicmp(cmd_str, APCS_BAND_2G, strlen(APCS_BAND_2G)) == 0) { band = WLC_BAND_2G; } else { /* @@ -2875,58 +3093,91 @@ wl_android_set_auto_channel(struct net_device *dev, const char* cmd_str, (channel == APCS_BAND_2G_LEGACY2)) { band = WLC_BAND_2G; } else { - ANDROID_ERROR(("%s: Invalid argument\n", __FUNCTION__)); + ANDROID_ERROR(("Invalid argument\n")); return -EINVAL; } } } else { /* If no argument is 
provided, default to 2G */ - ANDROID_ERROR(("%s: No argument given default to 2.4G scan\n", __FUNCTION__)); + ANDROID_ERROR(("No argument given default to 2.4G scan\n")); band = WLC_BAND_2G; } - ANDROID_INFO(("%s : HAPD_AUTO_CHANNEL = %d, band=%d \n", __FUNCTION__, channel, band)); + ANDROID_INFO(("HAPD_AUTO_CHANNEL = %d, band=%d \n", channel, band)); + +#if defined(CONFIG_WLAN_BEYONDX) || defined(CONFIG_SEC_5GMODEL) + wl_cfg80211_register_dev_ril_bridge_event_notifier(); + if (band == WLC_BAND_2G) { + wl_cfg80211_send_msg_to_ril(); - ret = wldev_ioctl_set(dev, WLC_GET_BAND, &band_cur, sizeof(band_cur)); + if (g_mhs_chan_for_cpcoex) { + channel = g_mhs_chan_for_cpcoex; + g_mhs_chan_for_cpcoex = 0; + goto done2; + } + } + wl_cfg80211_unregister_dev_ril_bridge_event_notifier(); +#endif /* CONFIG_WLAN_BEYONDX || defined(CONFIG_SEC_5GMODEL) */ /* If STA is connected, return is STA channel, else ACS can be issued, * set spect to 0 and proceed with ACS */ channel = wl_cfg80211_get_sta_channel(cfg); if (channel) { - channel = (channel <= CH_MAX_2G_CHANNEL) ? 
- channel : APCS_DEFAULT_2G_CH; + sta_band = WL_GET_BAND(channel); + switch (sta_band) { + case (WL_CHANSPEC_BAND_5G): { + if (band == WLC_BAND_2G || band == WLC_BAND_AUTO) { + channel = APCS_DEFAULT_2G_CH; + } + break; + } + case (WL_CHANSPEC_BAND_2G): { + if (band == WLC_BAND_5G) { + channel = APCS_DEFAULT_5G_CH; + } + break; + } + default: + /* Intentional fall through to use same sta channel for softap */ + break; + } + WL_MSG(dev->name, "band=%d, sta_band=%d, channel=%d\n", band, sta_band, channel); goto done2; } +#ifdef WL_ESCAN + channel = wl_android_escan_autochannel(dev, band); + if (channel) + goto done2; + else + goto done; +#endif /* WL_ESCAN */ + ret = wldev_ioctl_get(dev, WLC_GET_SPECT_MANAGMENT, &spect, sizeof(spect)); if (ret) { - ANDROID_ERROR(("%s: ACS: error getting the spect, ret=%d\n", __FUNCTION__, ret)); + ANDROID_ERROR(("ACS: error getting the spect, ret=%d\n", ret)); goto done; } if (spect > 0) { ret = wl_cfg80211_set_spect(dev, 0); if (ret < 0) { - ANDROID_ERROR(("%s: ACS: error while setting spect, ret=%d\n", __FUNCTION__, ret)); + ANDROID_ERROR(("ACS: error while setting spect, ret=%d\n", ret)); goto done; } } reqbuf = (u8 *)MALLOCZ(cfg->osh, CHANSPEC_BUF_SIZE); if (reqbuf == NULL) { - ANDROID_ERROR(("%s: failed to allocate chanspec buffer\n", __FUNCTION__)); + ANDROID_ERROR(("failed to allocate chanspec buffer\n")); return -ENOMEM; } if (band == WLC_BAND_AUTO) { - ANDROID_INFO(("%s: ACS full channel scan \n", __FUNCTION__)); + ANDROID_INFO(("ACS full channel scan \n")); reqbuf[0] = htod32(0); } else if (band == WLC_BAND_5G) { - band_new = band_cur==WLC_BAND_2G ? 
band_cur : WLC_BAND_5G; - ret = wldev_ioctl_set(dev, WLC_SET_BAND, &band_new, sizeof(band_new)); - if (ret < 0) - WL_ERR(("WLC_SET_BAND error %d\n", ret)); - ANDROID_INFO(("%s: ACS 5G band scan \n", __FUNCTION__)); + ANDROID_INFO(("ACS 5G band scan \n")); if ((ret = wl_cfg80211_get_chanspecs_5g(dev, reqbuf, CHANSPEC_BUF_SIZE)) < 0) { ANDROID_ERROR(("ACS 5g chanspec retreival failed! \n")); goto done; @@ -2936,7 +3187,7 @@ wl_android_set_auto_channel(struct net_device *dev, const char* cmd_str, * If channel argument is not provided/ argument 20 is provided, * Restrict channel to 2GHz, 20MHz BW, No SB */ - ANDROID_INFO(("%s: ACS 2G band scan \n", __FUNCTION__)); + ANDROID_INFO(("ACS 2G band scan \n")); if ((ret = wl_cfg80211_get_chanspecs_2g(dev, reqbuf, CHANSPEC_BUF_SIZE)) < 0) { ANDROID_ERROR(("ACS 2g chanspec retreival failed! \n")); goto done; @@ -2946,12 +3197,11 @@ wl_android_set_auto_channel(struct net_device *dev, const char* cmd_str, goto done2; } - buf_size = CHANSPEC_BUF_SIZE; + buf_size = (band == WLC_BAND_AUTO) ? 
sizeof(int) : CHANSPEC_BUF_SIZE; ret = wldev_ioctl_set(dev, WLC_START_CHANNEL_SEL, (void *)reqbuf, buf_size); if (ret < 0) { - ANDROID_ERROR(("%s: can't start auto channel scan, err = %d\n", - __FUNCTION__, ret)); + ANDROID_ERROR(("can't start auto channel scan, err = %d\n", ret)); channel = 0; goto done; } @@ -2977,18 +3227,6 @@ wl_android_set_auto_channel(struct net_device *dev, const char* cmd_str, chosen = dtoh32(chosen); } - if ((ret == 0) && (dtoh32(chosen) != 0)) { - uint chip; - chip = dhd_conf_get_chip(dhd_get_pub(dev)); - if (chip != BCM43143_CHIP_ID) { - u32 chanspec = 0; - chanspec = wl_chspec_driver_to_host(chosen); - ANDROID_INFO(("%s: selected chanspec = 0x%x\n", __FUNCTION__, chanspec)); - chosen = wf_chspec_ctlchan(chanspec); - ANDROID_INFO(("%s: selected chosen = 0x%x\n", __FUNCTION__, chosen)); - } - } - if (chosen) { int chosen_band; int apcs_band; @@ -3003,15 +3241,12 @@ wl_android_set_auto_channel(struct net_device *dev, const char* cmd_str, #endif /* D11AC_IOTYPES */ apcs_band = (band == WLC_BAND_AUTO) ? WLC_BAND_2G : band; chosen_band = (channel <= CH_MAX_2G_CHANNEL) ? WLC_BAND_2G : WLC_BAND_5G; - if (band == WLC_BAND_AUTO) { - printf("%s: selected channel = %d\n", __FUNCTION__, channel); - break; - } else if (apcs_band == chosen_band) { - printf("%s: selected channel = %d\n", __FUNCTION__, channel); + if (apcs_band == chosen_band) { + WL_MSG(dev->name, "selected channel = %d\n", channel); break; } } - ANDROID_INFO(("%s: %d tried, ret = %d, chosen = 0x%x\n", __FUNCTION__, + ANDROID_INFO(("%d tried, ret = %d, chosen = 0x%x\n", (APCS_MAX_RETRY - retry), ret, chosen)); OSL_SLEEP(250); } @@ -3024,16 +3259,12 @@ done: } else { channel = APCS_DEFAULT_2G_CH; } - ANDROID_ERROR(("%s: ACS failed." - " Fall back to default channel (%d) \n", __FUNCTION__, channel)); + ANDROID_ERROR(("ACS failed. 
Fall back to default channel (%d) \n", channel)); } done2: - ret = wldev_ioctl_set(dev, WLC_SET_BAND, &band_cur, sizeof(band_cur)); - if (ret < 0) - WL_ERR(("WLC_SET_BAND error %d\n", ret)); if (spect > 0) { if ((ret = wl_cfg80211_set_spect(dev, spect) < 0)) { - ANDROID_ERROR(("%s: ACS: error while setting spect\n", __FUNCTION__)); + ANDROID_ERROR(("ACS: error while setting spect\n")); } } @@ -3042,16 +3273,11 @@ done2: } if (channel) { - if (channel < 15) - pos += snprintf(pos, total_len, "2g="); - else - pos += snprintf(pos, total_len, "5g="); - pos += snprintf(pos, total_len, "%d", channel); - ANDROID_INFO(("%s: command result is %s \n", __FUNCTION__, command)); - return strlen(command); - } else { - return ret; + ret = snprintf(command, total_len, "%d", channel); + ANDROID_INFO(("command result is %s \n", command)); } + + return ret; } #endif /* WL_SUPPORT_AUTO_CHANNEL */ @@ -3133,8 +3359,7 @@ int wl_android_set_ibss_beacon_ouidata(struct net_device *dev, char *command, in return -ENOMEM; } /* Copy the vndr_ie SET command ("add"/"del") to the buffer */ - strncpy(vndr_ie->cmd, "add", VNDR_IE_CMD_LEN - 1); - vndr_ie->cmd[VNDR_IE_CMD_LEN - 1] = '\0'; + strlcpy(vndr_ie->cmd, "add", sizeof(vndr_ie->cmd)); /* Set the IE count - the buffer contains only 1 IE */ iecount = htod32(1); @@ -3163,7 +3388,7 @@ int wl_android_set_ibss_beacon_ouidata(struct net_device *dev, char *command, in } return -ENOMEM; } - memset(ioctl_buf, 0, WLC_IOCTL_MEDLEN); /* init the buffer */ + bzero(ioctl_buf, WLC_IOCTL_MEDLEN); /* init the buffer */ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { ANDROID_ERROR(("Find index failed\n")); err = BCME_ERROR; @@ -3189,7 +3414,7 @@ end: return err; } -#endif +#endif /* WL_CFG80211 */ #if defined(BCMFW_ROAM_ENABLE) static int @@ -3227,9 +3452,9 @@ wl_android_set_roampref(struct net_device *dev, char *command, int total_len) if (total_len_left < (num_akm_suites * WIDTH_AKM_SUITE)) return -1; - memset(buf, 0, sizeof(buf)); - 
memset(akm_suites, 0, sizeof(akm_suites)); - memset(ucipher_suites, 0, sizeof(ucipher_suites)); + bzero(buf, sizeof(buf)); + bzero(akm_suites, sizeof(akm_suites)); + bzero(ucipher_suites, sizeof(ucipher_suites)); /* Save the AKM suites passed in the command */ for (i = 0; i < num_akm_suites; i++) { @@ -3311,7 +3536,7 @@ wl_android_set_roampref(struct net_device *dev, char *command, int total_len) memcpy(pref, (uint8 *)&ucipher_suites[i], WPA_SUITE_LEN); pref += WPA_SUITE_LEN; /* Set to 0 to match any available multicast cipher */ - memset(pref, 0, WPA_SUITE_LEN); + bzero(pref, WPA_SUITE_LEN); pref += WPA_SUITE_LEN; } } @@ -3393,16 +3618,10 @@ wl_android_iolist_resume(struct net_device *dev, struct list_head *head) struct list_head *cur, *q; s32 ret = 0; struct bcm_cfg80211 *cfg = wl_get_cfg(dev); - -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#endif // endif + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); list_for_each_safe(cur, q, head) { config = list_entry(cur, struct io_cfg, list); -#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) -#pragma GCC diagnostic pop -#endif // endif + GCC_DIAGNOSTIC_POP(); if (config->iovar) { if (!ret) ret = wldev_iovar_setint(dev, config->iovar, @@ -3440,7 +3659,8 @@ wl_android_set_miracast(struct net_device *dev, char *command) wl_android_iolist_resume(dev, &miracast_resume_list); miracast_cur_mode = MIRACAST_MODE_OFF; - memset((void *)&config, 0, sizeof(config)); + + bzero((void *)&config, sizeof(config)); switch (mode) { case MIRACAST_MODE_SOURCE: #ifdef MIRACAST_MCHAN_ALGO @@ -3527,8 +3747,9 @@ resume: wl_android_iolist_resume(dev, &miracast_resume_list); return ret; } -#endif +#endif /* WL_CFG80211 */ +#ifdef WL_RELMCAST #define NETLINK_OXYGEN 30 #define AIBSS_BEACON_TIMEOUT 10 @@ -3614,6 +3835,7 @@ wl_netlink_send_msg(int pid, int type, int seq, const void *data, size_t size) nlmsg_failure: return ret; } +#endif /* WL_RELMCAST */ int 
wl_keep_alive_set(struct net_device *dev, char* extra) { @@ -3633,7 +3855,7 @@ int wl_keep_alive_set(struct net_device *dev, char* extra) } ANDROID_ERROR(("%s: period_msec is %d\n", __FUNCTION__, period_msec)); - memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t)); + bzero(&mkeep_alive_pkt, sizeof(wl_mkeep_alive_pkt_t)); mkeep_alive_pkt.period_msec = period_msec; mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION); @@ -3700,7 +3922,7 @@ wl_tbow_teardown(struct net_device *dev) int err = BCME_OK; char buf[WLC_IOCTL_SMLEN]; tbow_setup_netinfo_t netinfo; - memset(&netinfo, 0, sizeof(netinfo)); + bzero(&netinfo, sizeof(netinfo)); netinfo.opmode = TBOW_HO_MODE_TEARDOWN; err = wldev_iovar_setbuf_bsscfg(dev, "tbow_doho", &netinfo, @@ -3747,9 +3969,14 @@ static int wl_android_get_link_status(struct net_device *dev, char *command, uint encode, txexp; wl_bss_info_t *bi; int datalen = sizeof(uint32) + sizeof(wl_bss_info_t); - char buf[datalen]; + char buf[WLC_IOCTL_SMLEN]; + + if (datalen > WLC_IOCTL_SMLEN) { + ANDROID_ERROR(("data too big\n")); + return -1; + } - memset(buf, 0, datalen); + bzero(buf, datalen); /* get BSS information */ *(u32 *) buf = htod32(datalen); error = wldev_ioctl_get(dev, WLC_GET_BSS_INFO, (void *)buf, datalen); @@ -3872,12 +4099,14 @@ wl_cfg80211_p2plo_deinit(struct bcm_cfg80211 *cfg) s32 bssidx; int ret = 0; int p2plo_pause = 0; - dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + dhd_pub_t *dhd = NULL; if (!cfg || !cfg->p2p) { ANDROID_ERROR(("Wl %p or cfg->p2p %p is null\n", cfg, cfg ? 
cfg->p2p : 0)); return 0; } + + dhd = (dhd_pub_t *)(cfg->pub); if (!dhd->up) { ANDROID_ERROR(("bus is already down\n")); return ret; @@ -4006,6 +4235,35 @@ wl_cfg80211_p2plo_offload(struct net_device *dev, char *cmd, char* buf, int len) } return ret; } +void +wl_cfg80211_cancel_p2plo(struct bcm_cfg80211 *cfg) +{ + struct wireless_dev *wdev; + if (!cfg) { + return; + } + + wdev = bcmcfg_to_p2p_wdev(cfg); + + if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) { + WL_INFORM_MEM(("P2P_FIND: Discovery offload is already in progress." + "it aborted\n")); + wl_clr_p2p_status(cfg, DISC_IN_PROGRESS); + if (wdev != NULL) { +#if defined(WL_CFG80211_P2P_DEV_IF) + cfg80211_remain_on_channel_expired(wdev, + cfg->last_roc_id, + &cfg->remain_on_chan, GFP_KERNEL); +#else + cfg80211_remain_on_channel_expired(wdev, + cfg->last_roc_id, + &cfg->remain_on_chan, + cfg->remain_on_chan_type, GFP_KERNEL); +#endif /* WL_CFG80211_P2P_DEV_IF */ + } + wl_cfg80211_p2plo_deinit(cfg); + } +} #endif /* P2P_LISTEN_OFFLOADING */ #ifdef WL_MURX @@ -4031,7 +4289,7 @@ wl_android_murx_bfe_cap(struct net_device *dev, int val) } /* If successful intiate a reassoc */ - memset(&bssid, 0, ETHER_ADDR_LEN); + bzero(&bssid, ETHER_ADDR_LEN); if ((err = wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN)) < 0) { ANDROID_ERROR(("Failed to get bssid, error=%d\n", err)); return err; @@ -4064,7 +4322,7 @@ wl_android_get_rssi_per_ant(struct net_device *dev, char *command, int total_len int bytes_written = 0; bool mimo_rssi = FALSE; - memset(&rssi_ant_mimo, 0, sizeof(wl_rssi_ant_mimo_t)); + bzero(&rssi_ant_mimo, sizeof(wl_rssi_ant_mimo_t)); /* * STA I/F: DRIVER GET_RSSI_PER_ANT * AP/GO I/F: DRIVER GET_RSSI_PER_ANT @@ -4132,7 +4390,7 @@ wl_android_set_rssi_logging(struct net_device *dev, char *command, int total_len char *pos, *token; int err = BCME_OK; - memset(&set_param, 0, sizeof(rssilog_set_param_t)); + bzero(&set_param, sizeof(rssilog_set_param_t)); /* * DRIVER SET_RSSI_LOGGING